12 changes: 6 additions & 6 deletions cmd/cryptogen/main.go
@@ -344,9 +344,9 @@ func extendPeerOrg(orgSpec OrgSpec) {
publicKeyAlg := getPublicKeyAlg(orgSpec.Users.PublicKeyAlgorithm)
// TODO: add ability to specify usernames
users := []NodeSpec{}
- for j := 1; j <= orgSpec.Users.Count; j++ {
+ for j := range orgSpec.Users.Count {
user := NodeSpec{
- CommonName: fmt.Sprintf("%s%d@%s", userBaseName, j, orgName),
+ CommonName: fmt.Sprintf("%s%d@%s", userBaseName, j+1, orgName),
PublicKeyAlgorithm: publicKeyAlg,
}

@@ -485,7 +485,7 @@ func renderNodeSpec(domain string, spec *NodeSpec) error {
func renderOrgSpec(orgSpec *OrgSpec, prefix string) error {
publickKeyAlg := getPublicKeyAlg(orgSpec.Template.PublicKeyAlgorithm)
// First process all of our templated nodes
- for i := 0; i < orgSpec.Template.Count; i++ {
+ for i := range orgSpec.Template.Count {
data := HostnameData{
Prefix: prefix,
Index: i + orgSpec.Template.Start,
@@ -563,9 +563,9 @@ func generatePeerOrg(baseDir string, orgSpec OrgSpec) {
publicKeyAlg := getPublicKeyAlg(orgSpec.Users.PublicKeyAlgorithm)
// TODO: add ability to specify usernames
users := []NodeSpec{}
- for j := 1; j <= orgSpec.Users.Count; j++ {
+ for j := range orgSpec.Users.Count {
user := NodeSpec{
- CommonName: fmt.Sprintf("%s%d@%s", userBaseName, j, orgName),
+ CommonName: fmt.Sprintf("%s%d@%s", userBaseName, j+1, orgName),
PublicKeyAlgorithm: publicKeyAlg,
}

@@ -638,7 +638,7 @@ func generateNodes(baseDir string, nodes []NodeSpec, signCA *ca.CA, tlsCA *ca.CA
if node.isAdmin && nodeOUs {
currentNodeType = msp.ADMIN
}
- err := msp.GenerateLocalMSP(nodeDir, node.CommonName, node.SANS, signCA, tlsCA, currentNodeType, nodeOUs, node.PublicKeyAlgorithm)
+ err = msp.GenerateLocalMSP(nodeDir, node.CommonName, node.SANS, signCA, tlsCA, currentNodeType, nodeOUs, node.PublicKeyAlgorithm)
if err != nil {
fmt.Printf("Error generating local MSP for %v:\n%v\n", node, err)
os.Exit(1)
2 changes: 1 addition & 1 deletion common/channelconfig/standardvalues.go
@@ -80,7 +80,7 @@ func (sv *StandardValues) initializeProtosStruct(objValue reflect.Value) error {
}

numFields := objValue.Elem().NumField()
- for i := 0; i < numFields; i++ {
+ for i := range numFields {
structField := objType.Elem().Field(i)
logger.Debugf("Processing field: %s\n", structField.Name)
switch structField.Type.Kind() {
2 changes: 1 addition & 1 deletion common/channelconfig/util_test.go
@@ -351,7 +351,7 @@ func TestMarshalEtcdRaftMetadata(t *testing.T) {
inputCerts[i], _ = os.ReadFile(fmt.Sprintf("testdata/tls-client-%d.pem", i+1))
}

- for i := 0; i < len(inputCerts)-1; i++ {
+ for i := range len(inputCerts) - 1 {
require.NotEqual(t, outputCerts[i+1], outputCerts[i], "expected extracted certs to differ from each other")
}
}
2 changes: 1 addition & 1 deletion common/deliver/acl_test.go
@@ -70,7 +70,7 @@ var _ = Describe("SessionAccessControl", func() {
sac, err := deliver.NewSessionAC(fakeChain, envelope, fakePolicyChecker, "chain-id", expiresAt)
Expect(err).NotTo(HaveOccurred())

- for i := 0; i < 5; i++ {
+ for range 5 {
err = sac.Evaluate()
Expect(err).NotTo(HaveOccurred())
}
12 changes: 6 additions & 6 deletions common/deliver/deliver_test.go
@@ -369,7 +369,7 @@ var _ = Describe("Deliver", func() {
Expect(err).NotTo(HaveOccurred())

Expect(fakeResponseSender.SendBlockResponseCallCount()).To(Equal(5))
- for i := 0; i < 5; i++ {
+ for i := range 5 {
b, _, _, _ := fakeResponseSender.SendBlockResponseArgsForCall(i)
Expect(b).To(Equal(&cb.Block{
Header: &cb.BlockHeader{Number: 995 + uint64(i)},
@@ -393,7 +393,7 @@

Expect(fakeBlocksSent.AddCallCount()).To(Equal(5))
Expect(fakeBlocksSent.WithCallCount()).To(Equal(5))
- for i := 0; i < 5; i++ {
+ for i := range 5 {
Expect(fakeBlocksSent.AddArgsForCall(i)).To(BeNumerically("~", 1.0))
labelValues := fakeBlocksSent.WithArgsForCall(i)
Expect(labelValues).To(Equal([]string{
@@ -461,7 +461,7 @@

Expect(fakeBlockIterator.NextCallCount()).To(Equal(2))
Expect(fakeResponseSender.SendBlockResponseCallCount()).To(Equal(2))
- for i := 0; i < fakeResponseSender.SendBlockResponseCallCount(); i++ {
+ for i := range fakeResponseSender.SendBlockResponseCallCount() {
b, _, _, _ := fakeResponseSender.SendBlockResponseArgsForCall(i)
Expect(b).To(ProtoEqual(&cb.Block{
Header: &cb.BlockHeader{Number: uint64(i + 1)},
@@ -492,7 +492,7 @@
Expect(fakeBlockReader.IteratorCallCount()).To(Equal(1))
Expect(fakeBlockIterator.NextCallCount()).To(Equal(1))
Expect(fakeResponseSender.SendBlockResponseCallCount()).To(Equal(1))
- for i := 0; i < fakeResponseSender.SendBlockResponseCallCount(); i++ {
+ for i := range fakeResponseSender.SendBlockResponseCallCount() {
b, _, _, _ := fakeResponseSender.SendBlockResponseArgsForCall(i)
Expect(b).To(ProtoEqual(&cb.Block{
Header: &cb.BlockHeader{Number: uint64(i)},
@@ -532,7 +532,7 @@

Expect(fakeBlockIterator.NextCallCount()).To(Equal(2))
Expect(fakeResponseSender.SendBlockResponseCallCount()).To(Equal(2))
- for i := 0; i < fakeResponseSender.SendBlockResponseCallCount(); i++ {
+ for i := range fakeResponseSender.SendBlockResponseCallCount() {
b, _, _, _ := fakeResponseSender.SendBlockResponseArgsForCall(i)
Expect(b).To(ProtoEqual(&cb.Block{
Header: &cb.BlockHeader{Number: uint64(i + 1)},
@@ -587,7 +587,7 @@
Expect(start).To(ProtoEqual(&ab.SeekPosition{}))
Expect(fakeBlockIterator.NextCallCount()).To(Equal(3))
Expect(fakeResponseSender.SendBlockResponseCallCount()).To(Equal(3))
- for i := 0; i < fakeResponseSender.SendBlockResponseCallCount(); i++ {
+ for i := range fakeResponseSender.SendBlockResponseCallCount() {
b, _, _, _ := fakeResponseSender.SendBlockResponseArgsForCall(i)
if i+1 == 1 || i+1 == 3 {
Expect(b).To(ProtoEqual(&cb.Block{
2 changes: 1 addition & 1 deletion common/graph/perm_test.go
@@ -66,7 +66,7 @@ func TestPermute(t *testing.T) {
func TestPermuteTooManyCombinations(t *testing.T) {
root := NewTreeVertex("r", nil)
root.Threshold = 500
- for i := 0; i < 1000; i++ {
+ for i := range 1000 {
root.AddDescendant(NewTreeVertex(fmt.Sprintf("%d", i), nil))
}
permutations := root.ToTree().Permute(501)
4 changes: 2 additions & 2 deletions common/grpcmetrics/interceptor_test.go
@@ -250,7 +250,7 @@ var _ = Describe("Interceptor", func() {
}))

Expect(fakeMessagesSent.AddCallCount()).To(Equal(2))
- for i := 0; i < fakeMessagesSent.AddCallCount(); i++ {
+ for range fakeMessagesSent.AddCallCount() {
Expect(fakeMessagesSent.AddArgsForCall(0)).To(BeNumerically("~", 1.0))
}
})
@@ -268,7 +268,7 @@
}))

Expect(fakeMessagesReceived.AddCallCount()).To(Equal(2))
- for i := 0; i < fakeMessagesReceived.AddCallCount(); i++ {
+ for range fakeMessagesReceived.AddCallCount() {
Expect(fakeMessagesReceived.AddArgsForCall(0)).To(BeNumerically("~", 1.0))
}
})
4 changes: 2 additions & 2 deletions common/ledger/blkstorage/block_serialization.go
@@ -139,7 +139,7 @@ func extractData(buf *buffer) (*common.BlockData, []*txindexInfo, error) {
if numItems, err = buf.DecodeVarint(); err != nil {
return nil, nil, errors.Wrap(err, "error decoding the length of block data")
}
- for i := uint64(0); i < numItems; i++ {
+ for range numItems {
var txEnvBytes []byte
var txid string
txOffset := buf.GetBytesConsumed()
@@ -165,7 +165,7 @@ func extractMetadata(buf *buffer) (*common.BlockMetadata, error) {
if numItems, err = buf.DecodeVarint(); err != nil {
return nil, errors.Wrap(err, "error decoding the length of block metadata")
}
- for i := uint64(0); i < numItems; i++ {
+ for range numItems {
if metadataEntry, err = buf.DecodeRawBytes(false); err != nil {
return nil, errors.Wrap(err, "error decoding the block metadata")
}
4 changes: 2 additions & 2 deletions common/ledger/blkstorage/block_stream_test.go
@@ -75,7 +75,7 @@ func testBlockFileStreamUnexpectedEOF(t *testing.T, numBlocks int, partialBlockB
defer s.close()
require.NoError(t, err, "Error in constructing blockfile stream")

- for i := 0; i < numBlocks; i++ {
+ for range numBlocks {
blockBytes, err := s.nextBlockBytes()
require.NotNil(t, blockBytes)
require.NoError(t, err, "Error in getting next block")
@@ -102,7 +102,7 @@ func testBlockStream(t *testing.T, numFiles int) {
numBlocksInEachFile := 10
bg, gb := testutil.NewBlockGenerator(t, ledgerID, false)
w.addBlocks([]*common.Block{gb})
- for i := 0; i < numFiles; i++ {
+ for i := range numFiles {
numBlocks := numBlocksInEachFile
if i == 0 {
// genesis block already added so adding one less block
10 changes: 5 additions & 5 deletions common/ledger/blkstorage/blockfile_helper_test.go
@@ -104,10 +104,10 @@ func TestBinarySearchBlockFileNum(t *testing.T) {
require.NoError(t, err)
require.Len(t, files, 11)

- for i := uint64(0); i < 100; i++ {
- fileNum, err := binarySearchFileNumForBlock(ledgerDir, i)
+ for i := range 100 {
+ fileNum, err := binarySearchFileNumForBlock(ledgerDir, uint64(i))
require.NoError(t, err)
- locFromIndex, err := blkfileMgr.index.getBlockLocByBlockNum(i)
+ locFromIndex, err := blkfileMgr.index.getBlockLocByBlockNum(uint64(i))
require.NoError(t, err)
expectedFileNum := locFromIndex.fileSuffixNum
require.Equal(t, expectedFileNum, fileNum)
@@ -145,7 +145,7 @@ func TestGetLedgersBootstrappedFromSnapshot(t *testing.T) {
testDir := t.TempDir()

// create chains directories for ledgers without bootstrappingSnapshotInfoFile
- for i := 0; i < 5; i++ {
+ for i := range 5 {
require.NoError(t, os.MkdirAll(filepath.Join(testDir, ChainsDir, fmt.Sprintf("ledger_%d", i)), 0o755))
}

@@ -159,7 +159,7 @@

// create chains directories for ledgers
// also create bootstrappingSnapshotInfoFile for ledger_0 and ledger_1
- for i := 0; i < 5; i++ {
+ for i := range 5 {
ledgerChainDir := filepath.Join(testDir, ChainsDir, fmt.Sprintf("ledger_%d", i))
require.NoError(t, os.MkdirAll(ledgerChainDir, 0o755))
if i < 2 {
2 changes: 1 addition & 1 deletion common/ledger/blkstorage/blockfile_mgr_test.go
@@ -455,7 +455,7 @@ func testBlockfileMgrSimulateCrashAtFirstBlockInFile(t *testing.T, deleteBlkfile
blkfileMgrWrapper := newTestBlockfileWrapper(env, "testLedger")
blockfileMgr := blkfileMgrWrapper.blockfileMgr
blocks := testutil.ConstructTestBlocks(t, 10)
- for i := 0; i < 10; i++ {
+ for i := range 10 {
fmt.Printf("blocks[i].Header.Number = %d\n", blocks[i].Header.Number)
}
blkfileMgrWrapper.addBlocks(blocks[:5])
2 changes: 1 addition & 1 deletion common/ledger/blkstorage/blockindex.go
@@ -376,7 +376,7 @@ func importTxIDsFromSnapshot(
}

batch := db.NewUpdateBatch()
- for i := uint64(0); i < numTxIDs; i++ {
+ for i := range numTxIDs {
txID, err := txIDsData.DecodeString()
if err != nil {
return err
18 changes: 9 additions & 9 deletions common/ledger/blkstorage/blockindex_test.go
@@ -57,16 +57,16 @@ func testBlockIndexSync(t *testing.T, numBlocks int, numBlocksToIndex int, syncB
// Plug-in back the original index store
blkfileMgr.index.db = originalIndexStore
// Verify that the first set of blocks are indexed in the original index
- for i := 0; i < numBlocksToIndex; i++ {
+ for i := range numBlocksToIndex {
block, err := blkfileMgr.retrieveBlockByNumber(uint64(i))
require.NoError(t, err, "block [%d] should have been present in the index", i)
require.Equal(t, blocks[i], block)
}

// Before, we test for index sync-up, verify that the last set of blocks not indexed in the original index
- for i := numBlocksToIndex + 1; i <= numBlocks; i++ {
- _, err := blkfileMgr.retrieveBlockByNumber(uint64(i))
- require.EqualError(t, err, fmt.Sprintf("no such block number [%d] in index", i))
+ for i := range numBlocks - numBlocksToIndex {
+ _, err := blkfileMgr.retrieveBlockByNumber(uint64(i + numBlocksToIndex + 1))
+ require.EqualError(t, err, fmt.Sprintf("no such block number [%d] in index", i+numBlocksToIndex+1))
}

// perform index sync
@@ -80,10 +80,10 @@
}

// Now, last set of blocks should also be indexed in the original index
- for i := numBlocksToIndex; i < numBlocks; i++ {
- block, err := blkfileMgr.retrieveBlockByNumber(uint64(i))
- require.NoError(t, err, "block [%d] should have been present in the index", i)
- require.Equal(t, blocks[i], block)
+ for i := range numBlocks - numBlocksToIndex {
+ block, err := blkfileMgr.retrieveBlockByNumber(uint64(i + numBlocksToIndex))
+ require.NoError(t, err, "block [%d] should have been present in the index", i+numBlocksToIndex)
+ require.Equal(t, blocks[i+numBlocksToIndex], block)
}
})
}
@@ -420,7 +420,7 @@ func verifyExportedTxIDs(t *testing.T, dir string, fileHashes map[string][]byte,
numTxIDs, err := metadataReader.DecodeUVarInt()
require.NoError(t, err)
retrievedTxIDs := []string{}
- for i := uint64(0); i < numTxIDs; i++ {
+ for range numTxIDs {
txID, err := dataReader.DecodeString()
require.NoError(t, err)
retrievedTxIDs = append(retrievedTxIDs, txID)
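Note: the blockindex_test.go hunks are the only rewrites that are not purely mechanical. Loops that started at an offset become zero-based ranges over the element count, with the offset re-applied inside the body. A minimal standalone sketch, using hypothetical values rather than anything from this ledger code, showing that both forms visit the same block numbers:

package main

import "fmt"

func main() {
	// Hypothetical counts standing in for the test's numBlocks / numBlocksToIndex.
	numBlocks, numBlocksToIndex := 10, 4

	// Old form: iterate the un-indexed block numbers directly.
	before := []int{}
	for i := numBlocksToIndex + 1; i <= numBlocks; i++ {
		before = append(before, i)
	}

	// New form: range over the count, re-apply the offset in the body.
	after := []int{}
	for i := range numBlocks - numBlocksToIndex {
		after = append(after, i+numBlocksToIndex+1)
	}

	fmt.Println(before) // [5 6 7 8 9 10]
	fmt.Println(after)  // [5 6 7 8 9 10]
}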
4 changes: 2 additions & 2 deletions common/ledger/blkstorage/blocks_itr_test.go
@@ -72,7 +72,7 @@ func TestRaceToDeadlock(t *testing.T) {
blocks := testutil.ConstructTestBlocks(t, 5)
blkfileMgrWrapper.addBlocks(blocks)

- for i := 0; i < 1000; i++ {
+ for range 1000 {
itr, err := blkfileMgr.retrieveBlocks(5)
if err != nil {
panic(err)
@@ -83,7 +83,7 @@
itr.Close()
}

- for i := 0; i < 1000; i++ {
+ for range 1000 {
itr, err := blkfileMgr.retrieveBlocks(5)
if err != nil {
panic(err)
10 changes: 5 additions & 5 deletions common/ledger/blkstorage/blockstore_provider_test.go
@@ -99,12 +99,12 @@ func checkBlocks(t *testing.T, expectedBlocks []*common.Block, store *BlockStore
require.Equal(t, protoutil.BlockHeaderHash(expectedBlocks[len(expectedBlocks)-1].GetHeader()), bcInfo.CurrentBlockHash)

itr, _ := store.RetrieveBlocks(0)
- for i := 0; i < len(expectedBlocks); i++ {
+ for i := range len(expectedBlocks) {
block, _ := itr.Next()
require.Equal(t, expectedBlocks[i], block)
}

- for blockNum := 0; blockNum < len(expectedBlocks); blockNum++ {
+ for blockNum := range len(expectedBlocks) {
block := expectedBlocks[blockNum]
flags := txflags.ValidationFlags(block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER])
retrievedBlock, _ := store.RetrieveBlockByNumber(uint64(blockNum))
@@ -113,7 +113,7 @@ func checkBlocks(t *testing.T, expectedBlocks []*common.Block, store *BlockStore
retrievedBlock, _ = store.RetrieveBlockByHash(protoutil.BlockHeaderHash(block.Header))
require.Equal(t, block, retrievedBlock)

- for txNum := 0; txNum < len(block.Data.Data); txNum++ {
+ for txNum := range len(block.Data.Data) {
txEnvBytes := block.Data.Data[txNum]
txEnv, _ := protoutil.GetEnvelopeFromBlock(txEnvBytes)
txid, err := protoutil.GetOrComputeTxIDFromEnvelope(txEnvBytes)
@@ -170,7 +170,7 @@ func TestBlockStoreProvider(t *testing.T) {

var stores []*BlockStore
numStores := 10
- for i := 0; i < numStores; i++ {
+ for i := range numStores {
store, _ := provider.Open(constructLedgerid(i))
defer store.Shutdown()
stores = append(stores, store)
@@ -181,7 +181,7 @@
require.NoError(t, err)
require.Equal(t, numStores, len(storeNames))

- for i := 0; i < numStores; i++ {
+ for i := range numStores {
exists, err := provider.Exists(constructLedgerid(i))
require.NoError(t, err)
require.Equal(t, true, exists)
4 changes: 2 additions & 2 deletions common/ledger/blkstorage/blockstore_test.go
@@ -23,7 +23,7 @@ func TestWrongBlockNumber(t *testing.T) {
defer store.Shutdown()

blocks := testutil.ConstructTestBlocks(t, 5)
- for i := 0; i < 3; i++ {
+ for i := range 3 {
err := store.AddBlock(blocks[i])
require.NoError(t, err)
}
@@ -39,7 +39,7 @@ func TestTxIDIndexErrorPropagations(t *testing.T) {
store, _ := provider.Open("testLedger")
defer store.Shutdown()
blocks := testutil.ConstructTestBlocks(t, 3)
- for i := 0; i < 3; i++ {
+ for i := range 3 {
err := store.AddBlock(blocks[i])
require.NoError(t, err)
}
4 changes: 2 additions & 2 deletions common/ledger/blkstorage/metrics_test.go
@@ -86,7 +86,7 @@ func TestStatsBlockCommit(t *testing.T) {
require.NoError(t, err)

// add 3 more blocks
- for i := 1; i <= 3; i++ {
+ for range 3 {
b := blockGenerator.NextBlock([][]byte{})
err = store.AddBlock(b)
require.NoError(t, err)
@@ -99,7 +99,7 @@
require.Equal(t, expectedCallCount, fakeBlockstorageCommitTimeHist.ObserveCallCount())

// verify the value of channel in each call (0, 1, 2, 3)
- for i := 0; i < expectedCallCount; i++ {
+ for i := range expectedCallCount {
require.Equal(t, []string{"channel", ledgerid}, fakeBlockstorageCommitTimeHist.WithArgsForCall(i))
}

4 changes: 2 additions & 2 deletions common/ledger/blkstorage/pkg_test.go
@@ -86,7 +86,7 @@ func (w *testBlockfileMgrWrapper) testGetBlockByHash(blocks []*common.Block) {
}

func (w *testBlockfileMgrWrapper) testGetBlockByNumber(blocks []*common.Block) {
- for i := 0; i < len(blocks); i++ {
+ for i := range len(blocks) {
b, err := w.blockfileMgr.retrieveBlockByNumber(blocks[0].Header.Number + uint64(i))
require.NoError(w.t, err, "Error while retrieving [%d]th block from blockfileMgr", i)
require.Equal(w.t, blocks[i], b)
@@ -150,7 +150,7 @@ func (w *testBlockfileMgrWrapper) testGetMultipleDataByTxID(
require.NoError(err)
defer itr.Release()

- fetchedData := []*expectedBlkTxValidationCode{}
+ var fetchedData []*expectedBlkTxValidationCode
for itr.Next() {
v := &TxIDIndexValue{}
require.NoError(proto.Unmarshal(itr.Value(), v))
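Taken together, the diff applies one pattern throughout: classic counted loops are replaced with Go 1.22+ range-over-integer loops, where for i := range n yields i = 0..n-1 and the index can be dropped when only the iteration count matters. A minimal sketch of the pattern, assuming Go 1.22 or newer; the names and counts below are illustrative and not taken from the PR:

package main

import "fmt"

func main() {
	// Classic counter loop over 0..4.
	for i := 0; i < 5; i++ {
		fmt.Println("classic:", i)
	}

	// Go 1.22+ equivalent: ranging over an int yields 0..4.
	for i := range 5 {
		fmt.Println("range:", i)
	}

	// One-based loops (for j := 1; j <= n; j++) become zero-based,
	// so the body shifts the index by one, as in the cryptogen user names.
	userCount := 3
	for j := range userCount {
		fmt.Printf("User%d@example.org\n", j+1)
	}

	// When the index is unused, it is dropped entirely.
	for range 2 {
		fmt.Println("tick")
	}
}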