diff --git a/cmd/osnadmin/main_test.go b/cmd/osnadmin/main_test.go
index 9ff307acd33..1436763aed2 100644
--- a/cmd/osnadmin/main_test.go
+++ b/cmd/osnadmin/main_test.go
@@ -677,7 +677,7 @@ var _ = Describe("osnadmin", func() {
 			ordererCACert = filepath.Join(tempDir, "server-ca+intermediate-ca.pem")
 		})
 
-		It("uses the channel participation API to list all application and and the system channel (when it exists)", func() {
+		It("uses the channel participation API to list all application and the system channel (when it exists)", func() {
 			args := []string{
 				"channel",
 				"list",
diff --git a/common/channelconfig/api.go b/common/channelconfig/api.go
index 7de7fb74b3f..a074ff1cddc 100644
--- a/common/channelconfig/api.go
+++ b/common/channelconfig/api.go
@@ -67,7 +67,7 @@ type Channel interface {
 	// Merkle tree to compute the BlockData hash
 	BlockDataHashingStructureWidth() uint32
 
-	// OrdererAddresses returns the list of valid orderer addresses to connect to to invoke Broadcast/Deliver
+	// OrdererAddresses returns the list of valid orderer addresses to connect to invoke Broadcast/Deliver
 	OrdererAddresses() []string
 
 	// Capabilities defines the capabilities for a channel
diff --git a/common/channelconfig/channel.go b/common/channelconfig/channel.go
index aa700e693a5..f3b571318a3 100644
--- a/common/channelconfig/channel.go
+++ b/common/channelconfig/channel.go
@@ -52,7 +52,7 @@ type ChannelValues interface {
 	// Merkle tree to compute the BlockData hash
 	BlockDataHashingStructureWidth() uint32
 
-	// OrdererAddresses returns the list of valid orderer addresses to connect to to invoke Broadcast/Deliver
+	// OrdererAddresses returns the list of valid orderer addresses to connect to invoke Broadcast/Deliver
 	OrdererAddresses() []string
 }
 
@@ -151,7 +151,7 @@ func (cc *ChannelConfig) BlockDataHashingStructureWidth() uint32 {
 	return cc.protos.BlockDataHashingStructure.Width
 }
 
-// OrdererAddresses returns the list of valid orderer addresses to connect to to invoke Broadcast/Deliver
+// OrdererAddresses returns the list of valid orderer addresses to connect to invoke Broadcast/Deliver
 func (cc *ChannelConfig) OrdererAddresses() []string {
 	return cc.protos.OrdererAddresses.Addresses
 }
diff --git a/common/channelconfig/standardvalues.go b/common/channelconfig/standardvalues.go
index 85cf606cd7a..1472feb0ac1 100644
--- a/common/channelconfig/standardvalues.go
+++ b/common/channelconfig/standardvalues.go
@@ -55,7 +55,7 @@ func NewStandardValues(protosStructs ...interface{}) (*StandardValues, error) {
 
 // Deserialize looks up the backing Values proto of the given name, unmarshals the given bytes
 // to populate the backing message structure, and returns a referenced to the retained deserialized
-// message (or an error, either because the key did not exist, or there was an an error unmarshalling
+// message (or an error, either because the key did not exist, or there was an error unmarshalling
 func (sv *StandardValues) Deserialize(key string, value []byte) (proto.Message, error) {
 	msg, ok := sv.lookup[key]
 	if !ok {
diff --git a/common/channelconfig/util.go b/common/channelconfig/util.go
index d38f14fd95a..6803a21ce18 100644
--- a/common/channelconfig/util.go
+++ b/common/channelconfig/util.go
@@ -162,7 +162,7 @@ func MSPValue(mspDef *mspprotos.MSPConfig) *StandardConfigValue {
 	}
 }
 
-// CapabilitiesValue returns the config definition for a a set of capabilities.
+// CapabilitiesValue returns the config definition for a set of capabilities.
 // It is a value for the /Channel/Orderer, Channel/Application/, and /Channel groups.
 func CapabilitiesValue(capabilities map[string]bool) *StandardConfigValue {
 	c := &cb.Capabilities{
diff --git a/common/metrics/cmd/gendoc/main.go b/common/metrics/cmd/gendoc/main.go
index 801f5fdc57d..b6f65e9c704 100644
--- a/common/metrics/cmd/gendoc/main.go
+++ b/common/metrics/cmd/gendoc/main.go
@@ -17,7 +17,7 @@ import (
 	"golang.org/x/tools/go/packages"
 )
 
-// Gendoc can be used used to discover the metrics options declared at the
+// Gendoc can be used to discover the metrics options declared at the
 // package level in the fabric tree and output a table that can be used in the
 // documentation.
 
diff --git a/core/chaincode/handler_test.go b/core/chaincode/handler_test.go
index 290fdd45843..af7a94fa52c 100644
--- a/core/chaincode/handler_test.go
+++ b/core/chaincode/handler_test.go
@@ -1027,7 +1027,7 @@ var _ = Describe("Handler", func() {
 				// ensure that the access cache is used
 			})
 
-			It("returns the the response message from GetPrivateData", func() {
+			It("returns the response message from GetPrivateData", func() {
 				fakeCollectionStore.RetrieveReadWritePermissionReturns(true, false, nil) // to
 				resp, err := handler.HandleGetState(incomingMessage, txContext)
 				Expect(err).NotTo(HaveOccurred())
diff --git a/core/chaincode/lifecycle/cache.go b/core/chaincode/lifecycle/cache.go
index c94f6c7eb0b..390d7c7daf8 100644
--- a/core/chaincode/lifecycle/cache.go
+++ b/core/chaincode/lifecycle/cache.go
@@ -136,7 +136,7 @@ func NewCache(resources *Resources, myOrgMSPID string, metadataManager MetadataH
 }
 
 // InitializeLocalChaincodes should be called once after cache creation (timing doesn't matter,
-// though already installed chaincodes will not be invokable until it it completes). Ideally,
+// though already installed chaincodes will not be invokable until it completes). Ideally,
 // this would be part of the constructor, but, we cannot rely on the chaincode store being created
 // before the cache is created.
 func (c *Cache) InitializeLocalChaincodes() error {
diff --git a/core/chaincode/transaction_context_test.go b/core/chaincode/transaction_context_test.go
index 6d8b4c55b7e..95fe9256b59 100644
--- a/core/chaincode/transaction_context_test.go
+++ b/core/chaincode/transaction_context_test.go
@@ -119,7 +119,7 @@ var _ = Describe("TransactionContext", func() {
 	})
 
 	Describe("CleanupQueryContext", func() {
-		It("removes references to the the iterator and results", func() {
+		It("removes references to the iterator and results", func() {
 			transactionContext.InitializeQueryContext("query-id", resultsIterator)
 			transactionContext.CleanupQueryContext("query-id")
 
diff --git a/core/endorser/msgvalidation.go b/core/endorser/msgvalidation.go
index ebac1a5f03e..0959c18a853 100644
--- a/core/endorser/msgvalidation.go
+++ b/core/endorser/msgvalidation.go
@@ -37,7 +37,7 @@ func (up *UnpackedProposal) TxID() string {
 	return up.ChannelHeader.TxId
 }
 
-// UnpackProposal creates an an *UnpackedProposal which is guaranteed to have
+// UnpackProposal creates an *UnpackedProposal which is guaranteed to have
 // no zero-ed fields or it returns an error.
 func UnpackProposal(signedProp *peer.SignedProposal) (*UnpackedProposal, error) {
 	prop, err := protoutil.UnmarshalProposal(signedProp.ProposalBytes)
diff --git a/core/handlers/validation/builtin/v12/validation_logic_test.go b/core/handlers/validation/builtin/v12/validation_logic_test.go
index 3c44c6978f7..20b11727aa3 100644
--- a/core/handlers/validation/builtin/v12/validation_logic_test.go
+++ b/core/handlers/validation/builtin/v12/validation_logic_test.go
@@ -1358,7 +1358,7 @@ func validateUpgradeWithCollection(t *testing.T, V1_2Validation bool) {
 
 		ccver = "3"
 
-		// Test 4: valid collection config config and peer in V1_2Validation mode --> success
+		// Test 4: valid collection config and peer in V1_2Validation mode --> success
 		ccp = &peer.CollectionConfigPackage{Config: []*peer.CollectionConfig{coll1, coll2, coll3}}
 		ccpBytes, err = proto.Marshal(ccp)
 		require.NoError(t, err)
diff --git a/core/handlers/validation/builtin/v13/validation_logic_test.go b/core/handlers/validation/builtin/v13/validation_logic_test.go
index 3d2e2712ce8..0cc3505521a 100644
--- a/core/handlers/validation/builtin/v13/validation_logic_test.go
+++ b/core/handlers/validation/builtin/v13/validation_logic_test.go
@@ -1404,7 +1404,7 @@ func validateUpgradeWithCollection(t *testing.T, V1_2Validation bool) {
 
 		ccver = "3"
 
-		// Test 4: valid collection config config and peer in V1_2Validation mode --> success
+		// Test 4: valid collection config and peer in V1_2Validation mode --> success
 		ccp = &peer.CollectionConfigPackage{Config: []*peer.CollectionConfig{coll1, coll2, coll3}}
 		ccpBytes, err = proto.Marshal(ccp)
 		require.NoError(t, err)
diff --git a/core/ledger/kvledger/txmgmt/pvtstatepurgemgmt/expiry_schedule_builder.go b/core/ledger/kvledger/txmgmt/pvtstatepurgemgmt/expiry_schedule_builder.go
index cf9f27213bd..03df21fca14 100644
--- a/core/ledger/kvledger/txmgmt/pvtstatepurgemgmt/expiry_schedule_builder.go
+++ b/core/ledger/kvledger/txmgmt/pvtstatepurgemgmt/expiry_schedule_builder.go
@@ -65,7 +65,7 @@ func buildExpirySchedule(
 	// i.e., when these private data key and it's hashed-keys are going to be expired
 	// Note that the 'hashedUpdateKeys' may be superset of the pvtUpdates. This is because,
 	// the peer may not receive all the private data either because the peer is not eligible for certain private data
-	// or because we allow proceeding with the missing private data data
+	// or because we allow proceeding with the missing private data
 	for pvtUpdateKey, vv := range pvtUpdates.ToCompositeKeyMap() {
 		keyHash := util.ComputeStringHash(pvtUpdateKey.Key)
 		hashedCompisiteKey := privacyenabledstate.HashedCompositeKey{
diff --git a/core/ledger/kvledger/txmgmt/statedb/statecouchdb/couchdb.go b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/couchdb.go
index 99493b81ea4..6aaa1fcc5d2 100644
--- a/core/ledger/kvledger/txmgmt/statedb/statecouchdb/couchdb.go
+++ b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/couchdb.go
@@ -831,7 +831,7 @@ func (dbclient *couchDatabase) readDoc(id string) (*couchDoc, string, error) {
 // readDocRange method provides function to a range of documents based on the start and end keys
 // startKey and endKey can also be empty strings. If startKey and endKey are empty, all documents are returned
 // This function provides a limit option to specify the max number of entries and is supplied by config.
-// Skip is reserved for possible future future use.
+// Skip is reserved for possible future use.
 func (dbclient *couchDatabase) readDocRange(startKey, endKey string, limit int32) ([]*queryResult, string, error) {
 	dbName := dbclient.dbName
 	couchdbLogger.Debugf("[%s] Entering ReadDocRange() startKey=%s, endKey=%s", dbName, startKey, endKey)
diff --git a/core/ledger/kvledger/txmgmt/txmgr/lockbased_txmgr.go b/core/ledger/kvledger/txmgmt/txmgr/lockbased_txmgr.go
index e08be6102d1..e152bfdbec3 100644
--- a/core/ledger/kvledger/txmgmt/txmgr/lockbased_txmgr.go
+++ b/core/ledger/kvledger/txmgmt/txmgr/lockbased_txmgr.go
@@ -200,7 +200,7 @@ func (txmgr *LockBasedTxMgr) ValidateAndPrepare(blockAndPvtdata *ledger.BlockAnd
 // (1) constructs the unique pvt data from the passed reconciledPvtdata
 // (2) acquire a lock on oldBlockCommit
 // (3) checks for stale pvtData by comparing [version, valueHash] and removes stale data
-// (4) creates update batch from the the non-stale pvtData
+// (4) creates update batch from the non-stale pvtData
 // (5) update the BTL bookkeeping managed by the purge manager and update expiring keys.
 // (6) commit the non-stale pvt data to the stateDB
 // This function assumes that the passed input contains only transactions that had been
diff --git a/core/ledger/util/util.go b/core/ledger/util/util.go
index 8feee4a86d7..1d9ca7a706a 100644
--- a/core/ledger/util/util.go
+++ b/core/ledger/util/util.go
@@ -36,7 +36,7 @@ func GetSortedKeys(m interface{}) []string {
 }
 
 // GetValuesBySortedKeys returns the values of the map (mapPtr) in the list (listPtr) in the sorted order of key of the map
-// This function assumes that the mapPtr is a pointer to a map and listPtr is is a pointer to a list. Further type of keys of the
+// This function assumes that the mapPtr is a pointer to a map and listPtr is a pointer to a list. Further type of keys of the
 // map are assumed to be string and the types of the values of the maps and the list are same
 func GetValuesBySortedKeys(mapPtr interface{}, listPtr interface{}) {
 	mapVal := reflect.ValueOf(mapPtr).Elem()
diff --git a/core/scc/cscc/configure_test.go b/core/scc/cscc/configure_test.go
index 93ced627578..481c230d44e 100644
--- a/core/scc/cscc/configure_test.go
+++ b/core/scc/cscc/configure_test.go
@@ -131,7 +131,7 @@ func TestConfigerInvokeInvalidParameters(t *testing.T) {
 		t,
 		int32(shim.OK),
 		res.Status,
-		"invoke invoke expected wrong function name provided",
+		"invoke expected wrong function name provided",
 	)
 	require.Equal(t, "Requested function fooFunction not found.", res.Message)
 
diff --git a/core/scc/lscc/lscc.go b/core/scc/lscc/lscc.go
index c736d9aaf0e..0c81c3e17ea 100644
--- a/core/scc/lscc/lscc.go
+++ b/core/scc/lscc/lscc.go
@@ -542,7 +542,7 @@ func (lscc *SCC) getCCCode(ccname string, cdbytes []byte) (*pb.ChaincodeDeployme
 
 	// this is the big test and the reason every launch should go through
 	// getChaincode call. We validate the chaincode entry against the
-	// the chaincode in FS
+	// chaincode in FS
 	if err = ccpack.ValidateCC(cd); err != nil {
 		return nil, nil, InvalidCCOnFSError(err.Error())
 	}
diff --git a/discovery/endorsement/endorsement_test.go b/discovery/endorsement/endorsement_test.go
index e28d64e0904..d756a92e6b5 100644
--- a/discovery/endorsement/endorsement_test.go
+++ b/discovery/endorsement/endorsement_test.go
@@ -453,7 +453,7 @@ func TestPeersForEndorsement(t *testing.T) {
 		// 2 principal combinations: p0 and p6, or p12 alone.
 		// The collection has p0, p6, and p12 in it.
 		// The chaincode EP is (p0 and p6) or p12.
-		// However, the the chaincode has a collection level EP that requires p6 and p12.
+		// However, the chaincode has a collection level EP that requires p6 and p12.
 		// Thus, the only combination that can satisfy would be p6 and p12.
 		collectionOrgs := []*msp.MSPPrincipal{
 			peerRole("p0"),
diff --git a/gossip/comm/crypto_test.go b/gossip/comm/crypto_test.go
index 57bbfab0964..e2f7a1dd6a9 100644
--- a/gossip/comm/crypto_test.go
+++ b/gossip/comm/crypto_test.go
@@ -114,7 +114,7 @@ func TestCertificateExtraction(t *testing.T) {
 	require.Equal(t, clientCertHash, srv.remoteCertHash, "Server side and client hash aren't equal")
 }
 
-// GenerateCertificatesOrPanic generates a a random pair of public and private keys
+// GenerateCertificatesOrPanic generates a random pair of public and private keys
 // and return TLS certificate.
 func GenerateCertificatesOrPanic() tls.Certificate {
 	privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
diff --git a/gossip/discovery/discovery_test.go b/gossip/discovery/discovery_test.go
index 1db4432f5dc..b7f5d849360 100644
--- a/gossip/discovery/discovery_test.go
+++ b/gossip/discovery/discovery_test.go
@@ -1469,7 +1469,7 @@ func TestMsgStoreExpirationWithMembershipMessages(t *testing.T) {
 		defer instances[index].discoveryImpl().lock.RUnlock()
 		require.Empty(t, instances[index].discoveryImpl().aliveLastTS, fmt.Sprint(step, " Data from alive msg still exists in aliveLastTS of discovery inst ", index))
 		require.Empty(t, instances[index].discoveryImpl().deadLastTS, fmt.Sprint(step, " Data from alive msg still exists in deadLastTS of discovery inst ", index))
-		require.Empty(t, instances[index].discoveryImpl().id2Member, fmt.Sprint(step, " id2Member mapping still still contains data related to Alive msg: discovery inst ", index))
+		require.Empty(t, instances[index].discoveryImpl().id2Member, fmt.Sprint(step, " id2Member mapping still contains data related to Alive msg: discovery inst ", index))
 		require.Empty(t, instances[index].discoveryImpl().msgStore.Get(), fmt.Sprint(step, " Expired Alive msg still stored in store of discovery inst ", index))
 		require.Zero(t, instances[index].discoveryImpl().aliveMembership.Size(), fmt.Sprint(step, " Alive membership list is not empty, discovery instance", index))
 		require.Zero(t, instances[index].discoveryImpl().deadMembership.Size(), fmt.Sprint(step, " Dead membership list is not empty, discovery instance", index))
diff --git a/gossip/gossip/gossip_impl.go b/gossip/gossip/gossip_impl.go
index 1c908229395..9f198937649 100644
--- a/gossip/gossip/gossip_impl.go
+++ b/gossip/gossip/gossip_impl.go
@@ -434,7 +434,7 @@ func (g *Node) sendGossipBatch(a []interface{}) {
 // For efficiency, we first isolate all the messages that have the same routing policy
 // and send them together, and only after that move to the next group of messages.
 // i.e: we send all blocks of channel C to the same group of peers,
-// and send all StateInfo messages to the same group of peers, etc. etc.
+// and send all StateInfo messages to the same group of peers, etc.
 // When we send blocks, we send only to peers that advertised themselves in the channel.
 // When we send StateInfo messages, we send to peers in the channel.
 // When we send messages that are marked to be sent only within the org, we send all of these messages
diff --git a/integration/chaincode/kvexecutor/chaincode.go b/integration/chaincode/kvexecutor/chaincode.go
index bfc4011177d..7447af9377f 100644
--- a/integration/chaincode/kvexecutor/chaincode.go
+++ b/integration/chaincode/kvexecutor/chaincode.go
@@ -17,7 +17,7 @@ import (
 )
 
 // KVExecutor is a chaincode implementation that takes a KVData array as read parameter
-// and a a KVData array as write parameter, and then calls GetXXX/PutXXX methods to read and write
+// and a KVData array as write parameter, and then calls GetXXX/PutXXX methods to read and write
 // state/collection data. Both input params should be marshalled json data and then base64 encoded.
 type KVExecutor struct{}
 
diff --git a/internal/pkg/peer/orderers/connection_test.go b/internal/pkg/peer/orderers/connection_test.go
index 4ab95602f96..086b73e578c 100644
--- a/internal/pkg/peer/orderers/connection_test.go
+++ b/internal/pkg/peer/orderers/connection_test.go
@@ -318,7 +318,7 @@ var _ = Describe("Connection", func() {
 			})
 		})
 
-		When("an update modifies the global endpoints but does does not affect the org endpoints", func() {
+		When("an update modifies the global endpoints but does not affect the org endpoints", func() {
 			BeforeEach(func() {
 				cs.Update(nil, map[string]orderers.OrdererOrg{
 					"org1": org1,
diff --git a/msp/nodeous_test.go b/msp/nodeous_test.go
index d21648e1a17..1126de3d8fc 100644
--- a/msp/nodeous_test.go
+++ b/msp/nodeous_test.go
@@ -316,9 +316,9 @@ func TestLoad142MSPWithInvalidAdminConfiguration(t *testing.T) {
 
 func TestAdminInAdmincertsWith143MSP(t *testing.T) {
 	// testdata/nodeouadminclient enables NodeOU classification and contains in the admincerts folder
-	// a certificate classified as client. This test checks that that identity is considered an admin anyway.
+	// a certificate classified as client. This test checks that identity is considered an admin anyway.
 	// testdata/nodeouadminclient2 enables NodeOU classification and contains in the admincerts folder
-	// a certificate classified as client. This test checks that that identity is considered an admin anyway.
+	// a certificate classified as client. This test checks that identity is considered an admin anyway.
 	// Notice that the configuration used is one that is usually expected for MSP version < 1.4.3 which
 	// only define peer and client OU.
 	testFolders := []string{"testdata/nodeouadminclient", "testdata/nodeouadminclient2"}
diff --git a/orderer/common/bootstrap/bootstrap.go b/orderer/common/bootstrap/bootstrap.go
index b18b08ef7ab..d8a734edd39 100644
--- a/orderer/common/bootstrap/bootstrap.go
+++ b/orderer/common/bootstrap/bootstrap.go
@@ -14,7 +14,7 @@ type Helper interface {
 	GenesisBlock() *ab.Block
 }
 
-// Replacer provides the ability to to replace the current genesis block used
+// Replacer provides the ability to replace the current genesis block used
 // for bootstrapping with the supplied block. It is used during consensus-type
 // migration in order to replace the original genesis block used for
 // bootstrapping with the latest config block of the system channel, which
diff --git a/orderer/common/cluster/replication_test.go b/orderer/common/cluster/replication_test.go
index 5beaa9da4cd..bcbda0075f9 100644
--- a/orderer/common/cluster/replication_test.go
+++ b/orderer/common/cluster/replication_test.go
@@ -935,7 +935,7 @@ func testBlockPullerFromConfig(t *testing.T, blockVerifiers []cluster.BlockVerif
 	validBlock := &common.Block{}
 	require.NoError(t, proto.Unmarshal(blockBytes, validBlock))
 
-	// And inject into it a 127.0.0.1 orderer endpoint endpoint and a new TLS CA certificate.
+	// And inject into it a 127.0.0.1 orderer endpoint and a new TLS CA certificate.
 	injectTLSCACert(t, validBlock, caCert)
 	injectGlobalOrdererEndpoint(t, validBlock, osn.srv.Address())
 	validBlock.Header.DataHash = protoutil.BlockDataHash(validBlock.Data)
diff --git a/orderer/common/msgprocessor/maintenancefilter.go b/orderer/common/msgprocessor/maintenancefilter.go
index 0b0eb7b4255..fa068dd703f 100644
--- a/orderer/common/msgprocessor/maintenancefilter.go
+++ b/orderer/common/msgprocessor/maintenancefilter.go
@@ -154,7 +154,7 @@ func (mf *MaintenanceFilter) inspect(configEnvelope *cb.ConfigEnvelope, ordererC
 	return nil
 }
 
-// ensureConsensusTypeChangeOnly checks that the only change is the the Channel/Orderer group, and within that,
+// ensureConsensusTypeChangeOnly checks that the only change is the Channel/Orderer group, and within that,
 // only to the ConsensusType value.
 func (mf *MaintenanceFilter) ensureConsensusTypeChangeOnly(configEnvelope *cb.ConfigEnvelope) error {
 	configUpdateEnv, err := protoutil.EnvelopeToConfigUpdate(configEnvelope.LastUpdate)
diff --git a/orderer/common/server/etcdraft_test.go b/orderer/common/server/etcdraft_test.go
index 4f85c2fb6b7..46a52a1c59a 100644
--- a/orderer/common/server/etcdraft_test.go
+++ b/orderer/common/server/etcdraft_test.go
@@ -202,7 +202,7 @@ func testEtcdRaftOSNSuccess(gt *GomegaWithT, configPath, configtxgen, orderer, c
 	// Consensus.EvictionSuspicion is not specified in orderer.yaml, so let's ensure
 	// it is really configured autonomously via the etcdraft chain itself.
 	gt.Eventually(ordererProcess.Err, time.Minute).Should(gbytes.Say("EvictionSuspicion not set, defaulting to 10m"))
-	// Wait until the the node starts up and elects itself as a single leader in a single node cluster.
+	// Wait until the node starts up and elects itself as a single leader in a single node cluster.
 	gt.Eventually(ordererProcess.Err, time.Minute).Should(gbytes.Say("Beginning to serve requests"))
 	gt.Eventually(ordererProcess.Err, time.Minute).Should(gbytes.Say("becomeLeader"))
 }
diff --git a/pkg/tx/interfaces.go b/pkg/tx/interfaces.go
index f6baf21a9c9..5dc423acbcc 100644
--- a/pkg/tx/interfaces.go
+++ b/pkg/tx/interfaces.go
@@ -45,7 +45,7 @@ type ProcessorCreator interface {
 //
 // The intent is to support different transaction types via interface Processor such as pure endorser transactions,
 // pure post-order transactions, and a mixed transaction - e.g., a transaction that combines an endorser transaction and
-// and a post-order transaction (say, a token transaction).
+// a post-order transaction (say, a token transaction).
 //
 // Below is the detail description of the semantics of the function `Process`
 // In order to process a transaction on a committing peer, we first evaluate the simulated readwrite set of the transaction
@@ -119,7 +119,7 @@ type ReadHinter interface {
 }
 
 // Reprocessor is an optional interface that a `Processor` is encouraged to implement if a
-// a significant large number of transactions of the corresponding type are expected to be present and
+// significant large number of transactions of the corresponding type are expected to be present and
 // validation of the transaction is significantly resource consuming (e.g., signature matching/crypto operations)
 // as compare to manipulating the state.
 // The main context in which the function in this interface is to be invoked is to rebuild the ledger constructs such as