github.com/leonlxy/hyperledger@v1.0.0-alpha.0.20170427033203-34922035d248/bddtests/regression/go/ote/ote.go

     1  /*
     2  Copyright IBM Corp. 2017 All Rights Reserved.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package main
    18  
    19  // Orderer Traffic Engine
    20  // ======================
    21  //
    22  // This file ote.go contains main(), for executing from the command line
    23  // using environment variables to override settings in sampleconfig/orderer.yaml
    24  // or to set OTE test configuration parameters.
    25  //
    26  // Function ote() is called by main after reading environment variables,
    27  // and is also called via "go test" from tests in ote_test.go. Those
    28  // tests can be executed from automated Continuous Integration processes,
    29  // which can use https://github.com/jstemmer/go-junit-report to convert the
    30  // logs to produce junit output for CI reports.
    31  //   go get github.com/jstemmer/go-junit-report
    32  //   go test -v | go-junit-report > report.xml
    33  //
    34  // ote() invokes tool driver.sh (including network.json and json2yml.js) -
    35  //   which is only slightly modified from the original version at
    36  //   https://github.com/dongmingh/v1FabricGenOption -
    37  //   to launch an orderer service network per the specified parameters
    38  //   (including kafka brokers or other necessary support processes).
    39  //   Function ote() performs several actions:
    40  // + create Producer clients to connect via grpc to all the channels on
    41  //   all the orderers to send/broadcast transaction messages
    42  // + create Consumer clients to connect via grpc to ListenAddress:ListenPort
    43  //   on all channels on all orderers and call deliver() to receive messages
    44  //   containing batches of transactions
    45  // + use parameters for specifying test configuration such as:
    46  //   number of transactions, number of channels, number of orderers ...
    47  // + load orderer/orderer.yaml to retrieve environment variables used for
    48  //   overriding orderer configuration such as batchsize, batchtimeout ...
    49  // + generate unique transactions, dividing up the requested OTE_TXS count
    50  //   among all the Producers
    51  // + Consumers confirm the same number of blocks and TXs are delivered
    52  //   by all the orderers on all the channels
    53  // + print logs for any errors, and print final tallied results
    54  // + return a pass/fail result and a result summary string
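        //
        // Example (illustrative only; the test name and values are arbitrary):
        // a test in ote_test.go might call
        //   passed, summary := ote("Test_11tx_1ch_1ord_solo", 11, 1, 1, "solo", 0, false, 1)
        // to send 11 TXs on 1 channel to 1 solo orderer, with no kafka brokers,
        // no masterSpy, and 1 producer per channel.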
    55  
    56  import (
    57          "fmt"
    58          "log"
    59          "math"
    60          "os"
    61          "os/exec"
    62          "strconv"
    63          "strings"
    64          "sync"
    65          "time"

    66          "github.com/golang/protobuf/proto"
    67          genesisconfig "github.com/hyperledger/fabric/common/configtx/tool/localconfig" // config for genesis.yaml
    68          genesisconfigProvisional "github.com/hyperledger/fabric/common/configtx/tool/provisional"
    69          "github.com/hyperledger/fabric/orderer/localconfig" // config, for the orderer.yaml
    70          cb "github.com/hyperledger/fabric/protos/common"
    71          ab "github.com/hyperledger/fabric/protos/orderer"
    72          "github.com/hyperledger/fabric/protos/utils"
    73          "golang.org/x/net/context"
    74          "google.golang.org/grpc"
    75  )
    76  
    77  var ordConf *config.TopLevel
    78  var genConf *genesisconfig.TopLevel
    79  var genesisConfigLocation = "CONFIGTX_ORDERER_"
    80  var ordererConfigLocation = "ORDERER_GENERAL_"
    81  var batchSizeParamStr = genesisConfigLocation+"BATCHSIZE_MAXMESSAGECOUNT"
    82  var batchTimeoutParamStr = genesisConfigLocation+"BATCHTIMEOUT"
    83  var ordererTypeParamStr = genesisConfigLocation+"ORDERERTYPE"
    84  
    85  var debugflagLaunch = false
    86  var debugflagAPI = true
    87  var debugflag1 = false
    88  var debugflag2 = false
    89  var debugflag3 = false // most detailed and voluminous
    90  
    91  var producersWG sync.WaitGroup
    92  var logFile *os.File
    93  var logEnabled = false
    94  var envvar string
    95  
    96  var numChannels = 1
    97  var numOrdsInNtwk  = 1
    98  var numOrdsToWatch = 1
    99  var ordererType = "solo"
   100  var numKBrokers int
   101  var producersPerCh = 1
   102  var numConsumers = 1
   103  var numProducers = 1
   104  
   105  // numTxToSend is the total number of transactions to send; a fraction is
   106  // sent by each producer for each channel for each orderer.
   107  
   108  var numTxToSend int64 = 1
   109  
   110  // One GO thread is created for each producer and each consumer client.
   111  // To optimize go thread usage and prevent running out of swap space
   112  // in the (laptop) test environment for tests using either numerous
   113  // channels or numerous producers per channel, set optimizeClientsMode
   114  // true to create only one MasterProducer go thread per orderer, which
   115  // broadcasts messages to all channels on that orderer. Note this option
   116  // works a little less efficiently on the consumer side, where we
   117  // share a single grpc connection but still need separate
   118  // GO threads per channel per orderer (instead of one per orderer).
   119  
   120  var optimizeClientsMode = false
   121  
   122  // ordStartPort (default port is 7050, but driver.sh uses 5005).
   123  
   124  var ordStartPort uint16 = 5005
   125  
   126  func initialize() {
   127          // When running multiple tests, e.g. from go test, reset to defaults
   128          // for the parameters that could change per test.
   129          // We do NOT reset things that would apply to every test, such as
   130          // settings for environment variables
   131          logEnabled = false
   132          envvar = ""
   133          numChannels = 1
   134          numOrdsInNtwk = 1
   135          numOrdsToWatch = 1
   136          ordererType = "solo"
   137          numKBrokers = 0
   138          numConsumers = 1
   139          numProducers = 1
   140          numTxToSend = 1
   141          producersPerCh = 1
   142          initLogger("ote")
   143  }
   144  
   145  func initLogger(fileName string) {
   146          if !logEnabled {
   147                  layout := "Jan_02_2006"
   148                  // Format Now with the layout const.
   149                  t := time.Now()
   150                  res := t.Format(layout)
   151                  var err error
   152                  logFile, err = os.OpenFile(fileName+"-"+res+".log", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
   153                  if err != nil {
   154                          panic(fmt.Sprintf("error opening file: %s", err))
   155                  }
   156                  logEnabled = true
   157                  log.SetOutput(logFile)
   158                  //log.SetFlags(log.LstdFlags | log.Lshortfile)
   159                  log.SetFlags(log.LstdFlags)
   160          }
   161  }
   162  
   163  func logger(printStmt string) {
   164          fmt.Println(printStmt)
   165          if !logEnabled {
   166                  return
   167          }
   168          log.Println(printStmt)
   169  }
   170  
   171  func closeLogger() {
   172          if logFile != nil {
   173                  logFile.Close()
   174          }
   175          logEnabled = false
   176  }
   177  
   178  type ordererdriveClient struct {
   179          client  ab.AtomicBroadcast_DeliverClient
   180          chainID string
   181  }
   182  type broadcastClient struct {
   183          client  ab.AtomicBroadcast_BroadcastClient
   184          chainID string
   185  }
   186  func newOrdererdriveClient(client ab.AtomicBroadcast_DeliverClient, chainID string) *ordererdriveClient {
   187          return &ordererdriveClient{client: client, chainID: chainID}
   188  }
   189  func newBroadcastClient(client ab.AtomicBroadcast_BroadcastClient, chainID string) *broadcastClient {
   190          return &broadcastClient{client: client, chainID: chainID}
   191  }
   192  
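        // seekHelper builds the Deliver request envelope for one channel.
        // Note the start parameter is currently unused: the SeekInfo always
        // requests from the Oldest block through math.MaxUint64, with
        // SeekInfo_BLOCK_UNTIL_READY behavior.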
   193  func seekHelper(chainID string, start *ab.SeekPosition) *cb.Envelope {
   194          return &cb.Envelope{
   195                  Payload: utils.MarshalOrPanic(&cb.Payload{
   196                          Header: &cb.Header{
   197                                  //ChainHeader: &cb.ChainHeader{
   198                                  //        ChainID: b.chainID,
   199                                  //},
   200                                  ChannelHeader: &cb.ChannelHeader{
   201                                          ChannelId: chainID,
   202                                  },
   203                                  SignatureHeader: &cb.SignatureHeader{},
   204                          },
   205  
   206                          Data: utils.MarshalOrPanic(&ab.SeekInfo{
   207                                  Start:    &ab.SeekPosition{Type: &ab.SeekPosition_Oldest{Oldest: &ab.SeekOldest{}}},
   208                                  Stop:     &ab.SeekPosition{Type: &ab.SeekPosition_Specified{Specified: &ab.SeekSpecified{Number: math.MaxUint64}}},
   209                                  Behavior: ab.SeekInfo_BLOCK_UNTIL_READY,
   210                          }),
   211                  }),
   212          }
   213  }
   214  
   215  func (r *ordererdriveClient) seekOldest() error {
   216          return r.client.Send(seekHelper(r.chainID, &ab.SeekPosition{Type: &ab.SeekPosition_Oldest{Oldest: &ab.SeekOldest{}}}))
   217  }
   218  
   219  func (r *ordererdriveClient) seekNewest() error {
   220          return r.client.Send(seekHelper(r.chainID, &ab.SeekPosition{Type: &ab.SeekPosition_Newest{Newest: &ab.SeekNewest{}}}))
   221  }
   222  
   223  func (r *ordererdriveClient) seek(blockNumber uint64) error {
   224          return r.client.Send(seekHelper(r.chainID, &ab.SeekPosition{Type: &ab.SeekPosition_Specified{Specified: &ab.SeekSpecified{Number: blockNumber}}}))
   225  }
   226  
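        // readUntilClose receives delivered blocks on one channel of one orderer
        // until the stream closes, adding each block's transaction count and one
        // block to the caller's counters.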
   227  func (r *ordererdriveClient) readUntilClose(ordererIndex int, channelIndex int, txRecvCntrP *int64, blockRecvCntrP *int64) {
   228          for {
   229                  msg, err := r.client.Recv()
   230                  if err != nil {
   231                          if !strings.Contains(err.Error(),"transport is closing") {
   232                                  // print if we do not see the msg indicating graceful closing of the connection
   233                                  logger(fmt.Sprintf("Consumer for orderer %d channel %d readUntilClose() Recv error: %v", ordererIndex, channelIndex, err))
   234                          }
   235                          return
   236                  }
   237                  switch t := msg.Type.(type) {
   238                  case *ab.DeliverResponse_Status:
   239                          logger(fmt.Sprintf("Got DeliverResponse_Status: %v", t))
   240                          return
   241                  case *ab.DeliverResponse_Block:
   242                          if t.Block.Header.Number > 0 {
   243                                  if debugflag2 { logger(fmt.Sprintf("Consumer recvd a block, o %d c %d blkNum %d numtrans %d", ordererIndex, channelIndex, t.Block.Header.Number, len(t.Block.Data.Data))) }
   244                                  if debugflag3 { logger(fmt.Sprintf("blk: %v", t.Block.Data.Data)) }
   245                          }
   246                          *txRecvCntrP += int64(len(t.Block.Data.Data))
   247                          //*blockRecvCntrP = int64(t.Block.Header.Number) // this assumes header number is the block number; instead let's just add one
   248                          (*blockRecvCntrP)++
   249                  }
   250          }
   251  }
   252  
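        // broadcast wraps one transaction payload in an Envelope for this
        // client's channel (with an empty SignatureHeader) and sends it on the
        // Broadcast stream.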
   253  func (b *broadcastClient) broadcast(transaction []byte) error {
   254          payload, err := proto.Marshal(&cb.Payload{
   255                  Header: &cb.Header{
   256                          //ChainHeader: &cb.ChainHeader{
   257                          //        ChainID: b.chainID,
   258                          //},
   259                          ChannelHeader: &cb.ChannelHeader{
   260                                  ChannelId: b.chainID,
   261                          },
   262                          SignatureHeader: &cb.SignatureHeader{},
   263                  },
   264                  Data: transaction,
   265          })
   266          if err != nil {
   267                  panic(err)
   268          }
   269          return b.client.Send(&cb.Envelope{Payload: payload})
   270  }
   271  
   272  func (b *broadcastClient) getAck() error {
   273          msg, err := b.client.Recv()
   274          if err != nil {
   275                  return err
   276          }
   277          if msg.Status != cb.Status_SUCCESS {
   278                  return fmt.Errorf("Got unexpected status: %v", msg.Status)
   279          }
   280          return nil
   281  }
   282  
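        // startConsumer opens a grpc connection and a Deliver stream for one
        // channel on one orderer, seeks to the oldest block, and then counts
        // delivered TXs and blocks in readUntilClose().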
   283  func startConsumer(serverAddr string, chainID string, ordererIndex int, channelIndex int, txRecvCntrP *int64, blockRecvCntrP *int64, consumerConnP **grpc.ClientConn) {
   284          conn, err := grpc.Dial(serverAddr, grpc.WithInsecure())
   285          if err != nil {
   286                  logger(fmt.Sprintf("Error on Consumer ord[%d] ch[%d] connecting (grpc) to %s, err: %v", ordererIndex, channelIndex, serverAddr, err))
   287                  return
   288          }
   289          (*consumerConnP) = conn
   290          client, err := ab.NewAtomicBroadcastClient(*consumerConnP).Deliver(context.TODO())
   291          if err != nil {
   292                  logger(fmt.Sprintf("Error on Consumer ord[%d] ch[%d] invoking Deliver() on grpc connection to %s, err: %v", ordererIndex, channelIndex, serverAddr, err))
   293                  return
   294          }
   295          s := newOrdererdriveClient(client, chainID)
   296          err = s.seekOldest()
   297          if err == nil {
   298                  if debugflag1 { logger(fmt.Sprintf("Started Consumer to recv delivered batches from ord[%d] ch[%d] srvr=%s chID=%s", ordererIndex, channelIndex, serverAddr, chainID)) }
   299          } else {
   300                  logger(fmt.Sprintf("ERROR starting Consumer client for ord[%d] ch[%d] for srvr=%s chID=%s; err: %v", ordererIndex, channelIndex, serverAddr, chainID, err))
   301          }
   302          s.readUntilClose(ordererIndex, channelIndex, txRecvCntrP, blockRecvCntrP)
   303  }
   304  
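        // startConsumerMaster is the optimizeClientsMode variant of startConsumer:
        // it shares one grpc connection per orderer, but still starts one Deliver
        // stream (and one go thread) per channel on that connection.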
   305  func startConsumerMaster(serverAddr string, chainIDsP *[]string, ordererIndex int, txRecvCntrsP *[]int64, blockRecvCntrsP *[]int64, consumerConnP **grpc.ClientConn) {
   306          // create one conn to the orderer and share it for communications to all channels
   307          conn, err := grpc.Dial(serverAddr, grpc.WithInsecure())
   308          if err != nil {
   309                  logger(fmt.Sprintf("Error on MasterConsumer ord[%d] connecting (grpc) to %s, err: %v", ordererIndex, serverAddr, err))
   310                  return
   311          }
   312          (*consumerConnP) = conn
   313  
   314          // create an orderer driver client for every channel on this orderer
   315          //[][]*ordererdriveClient  //  numChannels
   316          dc := make ([]*ordererdriveClient, numChannels)
   317          for c := 0; c < numChannels; c++ {
   318                  client, err := ab.NewAtomicBroadcastClient(*consumerConnP).Deliver(context.TODO())
   319                  if err != nil {
   320                          logger(fmt.Sprintf("Error on MasterConsumer ord[%d] invoking Deliver() on grpc connection to %s, err: %v", ordererIndex, serverAddr, err))
   321                          return
   322                  }
   323                  dc[c] = newOrdererdriveClient(client, (*chainIDsP)[c])
   324                  err = dc[c].seekOldest()
   325                  if err == nil {
   326                          if debugflag1 { logger(fmt.Sprintf("Started MasterConsumer to recv delivered batches from ord[%d] ch[%d] srvr=%s chID=%s", ordererIndex, c, serverAddr, (*chainIDsP)[c])) }
   327                  } else {
   328                          logger(fmt.Sprintf("ERROR starting MasterConsumer client for ord[%d] ch[%d] for srvr=%s chID=%s; err: %v", ordererIndex, c, serverAddr, (*chainIDsP)[c], err))
   329                  }
   330                  // We would prefer to skip these go threads, and just have one "readUntilClose" that looks for deliveries on all channels (see below);
   331                  // otherwise, what have we really saved?
   332                  go dc[c].readUntilClose(ordererIndex, c, &((*txRecvCntrsP)[c]), &((*blockRecvCntrsP)[c]))
   333          }
   334  }
   335  
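        // executeCmd runs a shell command and returns its stdout; any error is
        // logged and terminates the test via log.Fatal.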
   336  func executeCmd(cmd string) []byte {
   337          out, err := exec.Command("/bin/sh", "-c", cmd).Output()
   338          if err != nil {
   339                  logger(fmt.Sprintf("Unsuccessful exec command: "+cmd+"\nstdout="+string(out)+"\nstderr=%v", err))
   340                  log.Fatal(err)
   341          }
   342          return out
   343  }
   344  
   345  func executeCmdAndDisplay(cmd string) {
   346          out := executeCmd(cmd)
   347          logger("Results of exec command: "+cmd+"\nstdout="+string(out))
   348  }
   349  
   350  func connClose(consumerConnsPP **([][]*grpc.ClientConn)) {
   351          for i := 0; i < numOrdsToWatch; i++ {
   352                  for j := 0; j < numChannels; j++ {
   353                          if (**consumerConnsPP)[i][j] != nil {
   354                                  _ = (**consumerConnsPP)[i][j].Close()
   355                          }
   356                  }
   357          }
   358  }
   359  
   360  func cleanNetwork(consumerConnsP *([][]*grpc.ClientConn)) {
   361          if debugflag1 { logger("Removing the Network Consumers") }
   362          connClose(&consumerConnsP)
   363  
   364          // Docker is not perfect; we need to unpause any paused containers, before we can kill them.
   365          //_ = executeCmd("docker ps -aq -f status=paused | xargs docker unpause")
   366          if out := executeCmd("docker ps -aq -f status=paused"); out != nil && string(out) != "" {
   367                  logger("Removing paused docker containers: " + string(out))
   368                  _ = executeCmd("docker ps -aq -f status=paused | xargs docker unpause")
   369          }
   370  
   371          // kill any containers that are still running
   372          //_ = executeCmd("docker kill $(docker ps -q)")
   373  
   374          if debugflag1 { logger("Removing the Network orderers and associated docker containers") }
   375          _ = executeCmd("docker rm -f $(docker ps -aq)")
   376  }
   377  
   378  func launchNetwork(appendFlags string) {
   379          // Alternative way: hardcoded docker compose (not driver.sh tool)
   380          //  _ = executeCmd("docker-compose -f docker-compose-3orderers.yml up -d")
   381  
   382          cmd := fmt.Sprintf("./driver.sh -a create -p 1 %s", appendFlags)
   383          logger(fmt.Sprintf("Launching network:  %s", cmd))
   384          if debugflagLaunch {
   385                  executeCmdAndDisplay(cmd) // show stdout logs; debugging help
   386          } else {
   387                  executeCmd(cmd)
   388          }
   389  
   390          // display the network of docker containers with the orderers and such
   391          executeCmdAndDisplay("docker ps -a")
   392  }
   393  
   394  func countGenesis() int64 {
   395          return int64(numChannels)
   396  }
   397  func sendEqualRecv(numTxToSend int64, totalTxRecvP *[]int64, totalTxRecvMismatch bool, totalBlockRecvMismatch bool) bool {
   398          var matching = false
   399          if (*totalTxRecvP)[0] == numTxToSend {
   400                  // recv count on orderer 0 matches the send count
   401                  if !totalTxRecvMismatch && !totalBlockRecvMismatch {
   402                          // all orderers have same recv counters
   403                          matching = true
   404                  }
   405          }
   406          return matching
   407  }
   408  
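        // moreDeliveries recomputes all send/receive totals and reports whether
        // any watched orderer has delivered additional TXs since the previous
        // totals were captured.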
   409  func moreDeliveries(txSentP *[][]int64, totalNumTxSentP *int64, txSentFailuresP *[][]int64, totalNumTxSentFailuresP *int64, txRecvP *[][]int64, totalTxRecvP *[]int64, totalTxRecvMismatchP *bool, blockRecvP *[][]int64, totalBlockRecvP *[]int64, totalBlockRecvMismatchP *bool) (moreReceived bool) {
   410          moreReceived = false
   411          prevTotalTxRecv := make([]int64, numOrdsToWatch) // copy the values; do not share the slice backing array
                copy(prevTotalTxRecv, *totalTxRecvP)
   412          computeTotals(txSentP, totalNumTxSentP, txSentFailuresP, totalNumTxSentFailuresP, txRecvP, totalTxRecvP, totalTxRecvMismatchP, blockRecvP, totalBlockRecvP, totalBlockRecvMismatchP)
   413          for ordNum := 0; ordNum < numOrdsToWatch; ordNum++ {
   414                  if prevTotalTxRecv[ordNum] != (*totalTxRecvP)[ordNum] { moreReceived = true }
   415          }
   416          return moreReceived
   417  }
   418  
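        // startProducer broadcasts txReq transactions to one channel on one
        // orderer over its own grpc connection, counting ACKs and send failures,
        // and signals producersWG when finished.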
   419  func startProducer(serverAddr string, chainID string, ordererIndex int, channelIndex int, txReq int64, txSentCntrP *int64, txSentFailureCntrP *int64) {
   420          conn, err := grpc.Dial(serverAddr, grpc.WithInsecure())
   421          if err != nil {
   422                  logger(fmt.Sprintf("Error creating connection for Producer for ord[%d] ch[%d], err: %v", ordererIndex, channelIndex, err))
   423                  return
   424          }
   425          defer func() {
   426                  _ = conn.Close()
   427          }()
   428          client, err := ab.NewAtomicBroadcastClient(conn).Broadcast(context.TODO())
   429          if err != nil {
   430                  logger(fmt.Sprintf("Error creating Producer for ord[%d] ch[%d], err: %v", ordererIndex, channelIndex, err))
   431                  return
   432          }
   433          if debugflag1 { logger(fmt.Sprintf("Started Producer to send %d TXs to ord[%d] ch[%d] srvr=%s chID=%s, %v", txReq, ordererIndex, channelIndex, serverAddr, chainID, time.Now())) }
   434          b := newBroadcastClient(client, chainID)
   435  
   436          // Print a progress log after sending each multiple of this percentage of the requested TXs: 25,50,75%...
   437          // Do this only on one producer, and assume all producers are generating at the same rate.
   438          // e.g. when txReq = 50, to print a log every 10 TXs, set progressPercentage = 20.
   439          printProgressLogs := false
   440          var progressPercentage int64 = 25    // set this between 1 and 99
   441          printLogCnt := txReq * progressPercentage / 100
   442          if debugflag1 {
   443                  printProgressLogs = true     // to test logs for all producers
   444          } else {
   445                  if txReq > 10000 && printLogCnt > 0 && ordererIndex==0 && channelIndex==0 {
   446                          printProgressLogs = true
   447                  }
   448          }
   449          var mult int64 = 0
   450  
   451          firstErr := false
   452          for i := int64(0); i < txReq ; i++ {
   453                  b.broadcast([]byte(fmt.Sprintf("Testing %v", time.Now())))
   454                  err = b.getAck()
   455                  if err == nil {
   456                          (*txSentCntrP)++
   457                          if printProgressLogs && (*txSentCntrP)%printLogCnt == 0 {
   458                                  mult++
   459                                  if debugflag1 {
   460                                          logger(fmt.Sprintf("Producer ord[%d] ch[%d] sent %4d /%4d = %3d%%, %v", ordererIndex, channelIndex, (*txSentCntrP), txReq, progressPercentage*mult, time.Now()))
   461                                  } else {
   462                                          logger(fmt.Sprintf("Sent %3d%%, %v", progressPercentage*mult, time.Now()))
   463                                  }
   464                          }
   465                  } else {
   466                          (*txSentFailureCntrP)++
   467                          if !firstErr {
   468                                  firstErr = true
   469                                  logger(fmt.Sprintf("Broadcast error on TX %d (the first error for Producer ord[%d] ch[%d]); err: %v", i+1, ordererIndex, channelIndex, err))
   470                          }
   471                  }
   472          }
   473          if err != nil {
   474                  logger(fmt.Sprintf("Broadcast error on last TX %d of Producer ord[%d] ch[%d]: %v", txReq, ordererIndex, channelIndex, err))
   475          }
   476          if txReq == *txSentCntrP {
   477                  if debugflag1 { logger(fmt.Sprintf("Producer finished sending broadcast msgs to ord[%d] ch[%d]: ACKs  %9d  (100%%) , %v", ordererIndex, channelIndex, *txSentCntrP, time.Now())) }
   478          } else {
   479                  logger(fmt.Sprintf("Producer finished sending broadcast msgs to ord[%d] ch[%d]: ACKs  %9d  NACK %d  Other %d , %v", ordererIndex, channelIndex, *txSentCntrP, *txSentFailureCntrP, txReq - *txSentFailureCntrP - *txSentCntrP, time.Now()))
   480          }
   481          producersWG.Done()
   482  }
   483  
   484  func startProducerMaster(serverAddr string, chainIDs *[]string, ordererIndex int, txReqP *[]int64, txSentCntrP *[]int64, txSentFailureCntrP *[]int64) {
   485          // This function creates a grpc connection to one orderer,
   486          // creates multiple clients (one per numChannels) for that one orderer,
   487          // and sends a TX to all channels repeatedly until no more to send.
   488          var txReqTotal int64
   489          var txMax int64
   490          for c := 0; c < numChannels; c++ {
   491                  txReqTotal += (*txReqP)[c]
   492                  if txMax < (*txReqP)[c] { txMax = (*txReqP)[c] }
   493          }
   494          conn, err := grpc.Dial(serverAddr, grpc.WithInsecure())
   495          if err != nil {
   496                  logger(fmt.Sprintf("Error creating connection for MasterProducer for ord[%d], err: %v", ordererIndex, err))
   497                  return
   498          }
   499          defer func() {
   500                  _ = conn.Close()
   501          }()
   502          client, err := ab.NewAtomicBroadcastClient(conn).Broadcast(context.TODO())
   503          if err != nil {
   504                  logger(fmt.Sprintf("Error creating MasterProducer for ord[%d], err: %v", ordererIndex, err))
   505                  return
   506          }
   507          logger(fmt.Sprintf("Started MasterProducer to send %d TXs to ord[%d] srvr=%s distributed across all channels", txReqTotal, ordererIndex, serverAddr))
   508  
   509          // create the broadcast clients for every channel on this orderer
   510          bc := make ([]*broadcastClient, numChannels)
   511          for c := 0; c < numChannels; c++ {
   512                  bc[c] = newBroadcastClient(client, (*chainIDs)[c])
   513          }
   514  
   515          firstErr := false
   516          for i := int64(0); i < txMax; i++ {
   517                  // send one TX to every broadcast client (one TX on each chnl)
   518                  for c := 0; c < numChannels; c++ {
   519                          if i < (*txReqP)[c] {
   520                                  // more TXs to send on this channel
   521                                  bc[c].broadcast([]byte(fmt.Sprintf("Testing %v", time.Now())))
   522                                  err = bc[c].getAck()
   523                                  if err == nil {
   524                                          (*txSentCntrP)[c]++
   525                                  } else {
   526                                          (*txSentFailureCntrP)[c]++
   527                                          if !firstErr {
   528                                                  firstErr = true
   529                                                  logger(fmt.Sprintf("Broadcast error on TX %d (the first error for MasterProducer on ord[%d] ch[%d] channelID=%s); err: %v", i+1, ordererIndex, c, (*chainIDs)[c], err))
   530                                          }
   531                                  }
   532                          }
   533                  }
   534          }
   535          if err != nil {
   536                  logger(fmt.Sprintf("Broadcast error on last TX %d of MasterProducer on ord[%d] ch[%d]: %v", txReqTotal, ordererIndex, numChannels-1, err))
   537          }
   538          var txSentTotal int64
   539          var txSentFailTotal int64
   540          for c := 0; c < numChannels; c++ {
   541                  txSentTotal += (*txSentCntrP)[c]
   542                  txSentFailTotal += (*txSentFailureCntrP)[c]
   543          }
   544          if txReqTotal == txSentTotal {
   545                  logger(fmt.Sprintf("MasterProducer finished sending broadcast msgs to all channels on ord[%d]: ACKs  %9d  (100%%)", ordererIndex, txSentTotal))
   546          } else {
   547                  logger(fmt.Sprintf("MasterProducer finished sending broadcast msgs to all channels on ord[%d]: ACKs  %9d  NACK %d  Other %d", ordererIndex, txSentTotal, txSentFailTotal, txReqTotal - txSentTotal - txSentFailTotal))
   548          }
   549          producersWG.Done()
   550  }
   551  
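        // computeTotals tallies the producer send counters and the consumer
        // receive counters across all orderers and channels, and flags any
        // TX or block count mismatch between the watched orderers.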
   552  func computeTotals(txSent *[][]int64, totalNumTxSent *int64, txSentFailures *[][]int64, totalNumTxSentFailures *int64, txRecv *[][]int64, totalTxRecv *[]int64, totalTxRecvMismatch *bool, blockRecv *[][]int64, totalBlockRecv *[]int64, totalBlockRecvMismatch *bool) {
   553          // The counters for Producers are indexed by orderer (numOrdsInNtwk)
   554          // and channel (numChannels).
   555          // Total count includes all counters for all channels on ALL orderers.
   556          // e.g.    totalNumTxSent         = sum of txSent[*][*]
   557          // e.g.    totalNumTxSentFailures = sum of txSentFailures[*][*]
   558  
   559          *totalNumTxSent = 0
   560          *totalNumTxSentFailures = 0
   561          for i := 0; i < numOrdsInNtwk; i++ {
   562                  for j := 0; j < numChannels; j++ {
   563                          *totalNumTxSent += (*txSent)[i][j]
   564                          *totalNumTxSentFailures += (*txSentFailures)[i][j]
   565                  }
   566          }
   567  
   568          // Counters for consumers are indexed by orderer (numOrdsToWatch)
   569          // and channel (numChannels).
   570          // The total count includes all counters for all channels on
   571          // ONLY ONE orderer.
   572          // Tally up the totals for all the channels on each orderer, and
   573          // store them for comparison; they should all be the same.
   574          // e.g.    totalTxRecv[k]    = sum of txRecv[k][*]
   575          // e.g.    totalBlockRecv[k] = sum of blockRecv[k][*]
   576  
   577          *totalTxRecvMismatch = false
   578          *totalBlockRecvMismatch = false
   579          for k := 0; k < numOrdsToWatch; k++ {
   580                  // count only the requested TXs - not the genesis block TXs
   581                  (*totalTxRecv)[k] = -countGenesis()
   582                  (*totalBlockRecv)[k] = -countGenesis()
   583                  for l := 0; l < numChannels; l++ {
   584                          (*totalTxRecv)[k] += (*txRecv)[k][l]
   585                          (*totalBlockRecv)[k] += (*blockRecv)[k][l]
   586                          if debugflag3 { logger(fmt.Sprintf("in compute(): k %d l %d txRecv[k][l] %d blockRecv[k][l] %d", k , l , (*txRecv)[k][l] , (*blockRecv)[k][l] )) }
   587                  }
   588                  if (k>0) && (*totalTxRecv)[k] != (*totalTxRecv)[k-1] { *totalTxRecvMismatch = true }
   589                  if (k>0) && (*totalBlockRecv)[k] != (*totalBlockRecv)[k-1] { *totalBlockRecvMismatch = true }
   590          }
   591          if debugflag2 { logger(fmt.Sprintf("in compute(): totalTxRecv[]= %v, totalBlockRecv[]= %v", *totalTxRecv, *totalBlockRecv)) }
   592  }
   593  
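        // reportTotals prints the per-producer and per-consumer count tables,
        // verifies the delivered TX and block counts against what was sent and
        // against the configured batchSize, and returns the pass/fail verdict
        // plus a one-line result summary.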
   594  func reportTotals(testname string, numTxToSendTotal int64, countToSend [][]int64, txSent [][]int64, totalNumTxSent int64, txSentFailures [][]int64, totalNumTxSentFailures int64, batchSize int64, txRecv [][]int64, totalTxRecv []int64, totalTxRecvMismatch bool, blockRecv [][]int64, totalBlockRecv []int64, totalBlockRecvMismatch bool, masterSpy bool, channelIDs *[]string) (successResult bool, resultStr string) {
   595  
   596          // default to failed
   597          var passFailStr = "FAILED"
   598          successResult = false
   599          resultStr = "TEST " + testname + " "
   600  
   601          // For each Producer, print the ordererIndex and channelIndex, the
   602          // number of TX requested to be sent, the actual number of TX sent,
   603          // and the number we failed to send.
   604  
   605          if numOrdsInNtwk > 3 || numChannels > 3 {
   606                  logger(fmt.Sprintf("Print only the first 3 chans of only the first 3 ordererIdx; and any others ONLY IF they contain failures.\nTotals numOrdInNtwk=%d numChan=%d numPRODUCERs=%d", numOrdsInNtwk, numChannels, numOrdsInNtwk*numChannels))
   607          }
   608          logger("PRODUCERS   OrdererIdx  ChannelIdx ChannelID              TX Target         ACK        NACK")
   609          for i := 0; i < numOrdsInNtwk; i++ {
   610                  for j := 0; j < numChannels; j++ {
   611                          if (i < 3 && j < 3) || txSentFailures[i][j] > 0 || countToSend[i][j] != txSent[i][j] + txSentFailures[i][j] {
   612                                  logger(fmt.Sprintf("%22d%12d %-20s%12d%12d%12d",i,j,(*channelIDs)[j],countToSend[i][j],txSent[i][j],txSentFailures[i][j]))
   613                          } else if (i < 3 && j == 3) {
   614                                  logger(fmt.Sprintf("%34s","..."))
   615                          } else if (i == 3 && j == 0) {
   616                                  logger(fmt.Sprintf("%22s","..."))
   617                          }
   618                  }
   619          }
   620  
   621          // for each consumer print the ordererIndex & channel, the num blocks and the num transactions received/delivered
   622          if numOrdsToWatch > 3 || numChannels > 3 {
   623                  logger(fmt.Sprintf("Print only the first 3 chans of only the first 3 ordererIdx (and the last ordererIdx if masterSpy is present), plus any others that contain failures.\nTotals numOrdIdx=%d numChanIdx=%d numCONSUMERS=%d", numOrdsToWatch, numChannels, numOrdsToWatch*numChannels))
   624          }
   625          logger("CONSUMERS   OrdererIdx  ChannelIdx ChannelID                    TXs     Batches")
   626          for i := 0; i < numOrdsToWatch; i++ {
   627                  for j := 0; j < numChannels; j++ {
   628                          if (j < 3 && (i < 3 || (masterSpy && i==numOrdsInNtwk-1))) || (i>1 && (blockRecv[i][j] != blockRecv[1][j] || txRecv[i][j] != txRecv[1][j])) {
   629                                  // Subtract one from the received Block count and TX count, to ignore the genesis block
   630                                  // (we already ignore genesis blocks when we compute the totals in totalTxRecv[n] , totalBlockRecv[n])
   631                                  logger(fmt.Sprintf("%22d%12d %-20s%12d%12d",i,j,(*channelIDs)[j],txRecv[i][j]-1,blockRecv[i][j]-1))
   632                          } else if (i < 3 && j == 3) {
   633                                  logger(fmt.Sprintf("%34s","..."))
   634                          } else if (i == 3 && j == 0) {
   635                                  logger(fmt.Sprintf("%22s","..."))
   636                          }
   637                  }
   638          }
   639  
   640          // Check for differences on the deliveries from the orderers. These are
   641          // probably errors - unless the test stopped an orderer on purpose and
   642          // never restarted it, while the others continued to deliver TXs.
   643          // (If an orderer is restarted, then it would reprocess all the
   644          // back-ordered transactions to catch up with the others.)
   645  
   646          if totalTxRecvMismatch { logger("!!!!! Num TXs Delivered is not same on all orderers!!!!!") }
   647          if totalBlockRecvMismatch { logger("!!!!! Num Blocks Delivered is not same on all orderers!!!!!") }
   648  
   649          if totalTxRecvMismatch || totalBlockRecvMismatch {
   650                  resultStr += "Orderers were INCONSISTENT! "
   651          }
   652          if totalTxRecv[0] == numTxToSendTotal {
   653                  // recv count on orderer 0 matches the send count
   654                  if !totalTxRecvMismatch && !totalBlockRecvMismatch {
   655                          logger("Hooray! Every TX was successfully sent AND delivered by orderer service.")
   656                          successResult = true
   657                          passFailStr = "PASSED"
   658                  } else {
   659                          resultStr += "Every TX was successfully sent AND delivered by orderer0 but not all orderers"
   660                  }
   661          } else if totalTxRecv[0] == totalNumTxSent {
   662                  resultStr += "Every ACked TX was delivered, but failures occurred:"
   663          } else if totalTxRecv[0] < totalNumTxSent {
   664                  resultStr += "BAD! Some ACKed TX were LOST by orderer service!"
   665          } else {
   666                  resultStr += "BAD! Some EXTRA TX were delivered by orderer service!"
   667          }
   668  
   669          ////////////////////////////////////////////////////////////////////////
   670          //
   671          // Before we declare success, let's check some more things...
   672          //
   673          // At this point, we have decided if most of the numbers make sense by
   674          // setting successResult to true if the tests passed. Thus we assume
   675          // successResult=true and just set it to false if we find a problem.
   676  
   677          // Check the totals to verify if the number of blocks on each channel
   678          // is appropriate for the given batchSize and number of TXs sent.
   679  
   680          expectedBlocksOnChan := make([]int64, numChannels) // create a counter for all the channels on one orderer
   681          for c := 0; c < numChannels; c++ {
   682                  var chanSentTotal int64
   683                  for ord := 0; ord < numOrdsInNtwk; ord++ {
   684                          chanSentTotal += txSent[ord][c]
   685                  }
   686                  expectedBlocksOnChan[c] = chanSentTotal / batchSize
   687                  if chanSentTotal % batchSize > 0 { expectedBlocksOnChan[c]++ }
   688                  for ord := 0; ord < numOrdsToWatch; ord++ {
   689                          if expectedBlocksOnChan[c] != blockRecv[ord][c] - 1 { // ignore genesis block
   690                                  successResult = false
   691                                  passFailStr = "FAILED"
   692                                  logger(fmt.Sprintf("Error: Unexpected Block count %d (expected %d) on ordIndx=%d channelIDs[%d]=%s, chanSentTxTotal=%d BatchSize=%d", blockRecv[ord][c]-1, expectedBlocksOnChan[c], ord, c, (*channelIDs)[c], chanSentTotal, batchSize))
   693                          } else {
   694                                  if debugflag1 { logger(fmt.Sprintf("GOOD block count %d on ordIndx=%d channelIDs[%d]=%s chanSentTxTotal=%d BatchSize=%d", expectedBlocksOnChan[c], ord, c, (*channelIDs)[c], chanSentTotal, batchSize)) }
   695                          }
   696                  }
   697          }
   698  
   699  
   700          // TODO - Verify the contents of the last block of transactions.
   701          //        Since we do not know exactly what should be in the block,
   702          //        then at least we can do:
   703          //            for each channel, verify if the block delivered from
   704          //            each orderer is the same (i.e. contains the same
   705          //            Data bytes (transactions) in the last block)
   706  
   707  
   708          // print some counters totals
   709          logger(fmt.Sprintf("Not counting genesis blks (1 per chan)%9d", countGenesis()))
   710          logger(fmt.Sprintf("Total TX broadcasts Requested to Send %9d", numTxToSendTotal))
   711          logger(fmt.Sprintf("Total TX broadcasts send success ACK  %9d", totalNumTxSent))
   712          logger(fmt.Sprintf("Total TX broadcasts sendFailed - NACK %9d", totalNumTxSentFailures))
   713          logger(fmt.Sprintf("Total Send-LOST TX (Not Ack or Nack)  %9d", numTxToSendTotal - totalNumTxSent - totalNumTxSentFailures ))
   714          logger(fmt.Sprintf("Total Recv-LOST TX (Ack but not Recvd)%9d", totalNumTxSent - totalTxRecv[0] ))
   715          if successResult {
   716                  logger(fmt.Sprintf("Total deliveries received TX          %9d", totalTxRecv[0]))
   717                  logger(fmt.Sprintf("Total deliveries received Blocks      %9d", totalBlockRecv[0]))
   718          } else {
   719                  logger(fmt.Sprintf("Total deliveries received TX on each ordrr     %7d", totalTxRecv))
   720                  logger(fmt.Sprintf("Total deliveries received Blocks on each ordrr %7d", totalBlockRecv))
   721          }
   722  
   723          // print output result and counts : overall summary
   724          resultStr += fmt.Sprintf(" RESULT=%s: TX Req=%d BrdcstACK=%d NACK=%d DelivBlk=%d DelivTX=%d numChannels=%d batchSize=%d", passFailStr, numTxToSendTotal, totalNumTxSent, totalNumTxSentFailures, totalBlockRecv, totalTxRecv, numChannels, batchSize)
   725          logger(resultStr)
   726  
   727          return successResult, resultStr
   728  }
   729  
   730  // Function:    ote - the Orderer Test Engine
   731  // Outputs:     print report to stdout with lots of counters
   732  // Returns:     passed bool, resultSummary string
   733  func ote( testname string, txs int64, chans int, orderers int, ordType string, kbs int, masterSpy bool, pPerCh int ) (passed bool, resultSummary string) {
   734  
   735          initialize() // multiple go tests could be run; we must call initialize() each time
   736  
   737          passed = false
   738          resultSummary = testname + " test not completed: INPUT ERROR: "
   739          defer closeLogger()
   740  
   741          logger(fmt.Sprintf("========== OTE testname=%s TX=%d Channels=%d Orderers=%d ordererType=%s kafka-brokers=%d addMasterSpy=%t producersPerCh=%d", testname, txs, chans, orderers, ordType, kbs, masterSpy, pPerCh))
   742  
   743          // Establish the default configuration from yaml files - and this also
   744          // picks up any variables overridden on command line or in environment
   745          ordConf = config.Load()
   746          genConf = genesisconfig.Load()
   747          var launchAppendFlags string
   748  
   749          ////////////////////////////////////////////////////////////////////////
   750          // Check parameters and/or env vars to see if user wishes to override
   751          // default config parms.
   752          ////////////////////////////////////////////////////////////////////////
   753  
   754          //////////////////////////////////////////////////////////////////////
   755          // Arguments for OTE settings for test variations:
   756          //////////////////////////////////////////////////////////////////////
   757  
   758          if txs > 0        { numTxToSend = txs   }      else { return passed, resultSummary + "number of transactions must be > 0" }
   759          if chans > 0      { numChannels = chans }      else { return passed, resultSummary + "number of channels must be > 0" }
   760          if orderers > 0   {
   761                  numOrdsInNtwk = orderers
   762                  launchAppendFlags += fmt.Sprintf(" -o %d", orderers)
   763          } else { return passed, resultSummary + "number of orderers in network must be > 0" }
   764  
   765          if pPerCh > 1 {
   766                  producersPerCh = pPerCh
   767                  return passed, resultSummary + "Multiple producersPerChannel NOT SUPPORTED yet."
   768          }
   769  
   770          numOrdsToWatch = numOrdsInNtwk    // Watch every orderer to verify they are all delivering the same.
   771          if masterSpy { numOrdsToWatch++ } // We are not creating another orderer here, but we do need
   772                                            // another set of counters; the masterSpy will be created for
   773                                            // this test to watch every channel on an orderer - so that means
   774                                            // one orderer is being watched twice
   775  
   776          // this is not an argument, but user may set this tuning parameter before running test
   777          envvar = os.Getenv("OTE_CLIENTS_SHARE_CONNS")
   778          if envvar != "" {
   779                  if (strings.ToLower(envvar) == "true" || strings.ToLower(envvar) == "t") {
   780                          optimizeClientsMode = true
   781                  }
   782                  if debugflagAPI {
   783                          logger(fmt.Sprintf("%-50s %s=%t", "OTE_CLIENTS_SHARE_CONNS="+envvar, "optimizeClientsMode", optimizeClientsMode))
   784                          logger("Setting OTE_CLIENTS_SHARE_CONNS option to true does the following:\n1. All Consumers on an orderer (one GO thread per each channel) will share grpc connection.\n2. All Producers on an orderer will share a grpc conn AND share one GO-thread.\nAlthough this reduces concurrency and lengthens the test duration, it satisfies\nthe objective of reducing swap space requirements and should be selected when\nrunning tests with numerous channels or producers per channel.")
   785                  }
   786          }
   787          if optimizeClientsMode {
   788                  // use only one MasterProducer and one MasterConsumer on each orderer
   789                  numProducers = numOrdsInNtwk
   790                  numConsumers = numOrdsInNtwk
   791          } else {
   792                  // one Producer and one Consumer for EVERY channel on each orderer
   793                  numProducers = numOrdsInNtwk * numChannels
   794                  numConsumers = numOrdsInNtwk * numChannels
   795          }
   796  
   797          //////////////////////////////////////////////////////////////////////
   798          // Arguments to override configuration parameter values in yaml file:
   799          //////////////////////////////////////////////////////////////////////
   800  
   801          // ordererType is an argument of ote(), and is also in the genesisconfig
   802          ordererType = genConf.Orderer.OrdererType
   803          if ordType != "" {
   804                  ordererType = ordType
   805          } else {
   806                  logger(fmt.Sprintf("Null value provided for ordererType; using value from config file: %s", ordererType))
   807          }
   808          launchAppendFlags += fmt.Sprintf(" -t %s", ordererType)
   809          if "kafka" == strings.ToLower(ordererType) {
   810                  if kbs > 0 {
   811                          numKBrokers = kbs
   812                          launchAppendFlags += fmt.Sprintf(" -k %d", numKBrokers)
   813                  } else {
   814                          return passed, resultSummary + "When using kafka ordererType, number of kafka-brokers must be > 0"
   815                  }
   816          } else { numKBrokers = 0 }
   817  
   818          // batchSize is not an argument of ote(), but is in the genesisconfig
   819          // variable may be overridden on command line or by exporting it.
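                // e.g.  export CONFIGTX_ORDERER_BATCHSIZE_MAXMESSAGECOUNT=500   (the value here is just an example)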
   820          batchSize := int64(genConf.Orderer.BatchSize.MaxMessageCount) // retype the uint32
   821          envvar = os.Getenv(batchSizeParamStr)
   822          if envvar != "" { launchAppendFlags += fmt.Sprintf(" -b %d", batchSize) }
   823          if debugflagAPI { logger(fmt.Sprintf("%-50s %s=%d", batchSizeParamStr+"="+envvar, "batchSize", batchSize)) }
   824  
   825          // batchTimeout is not an argument of ote(), but is in the genesisconfig
   826          //logger(fmt.Sprintf("DEBUG=====BatchTimeout conf:%v Seconds-float():%v Seconds-int:%v", genConf.Orderer.BatchTimeout, (genConf.Orderer.BatchTimeout).Seconds(), int((genConf.Orderer.BatchTimeout).Seconds())))
   827          batchTimeout := int((genConf.Orderer.BatchTimeout).Seconds()) // Seconds() converts time.Duration to float64, which is then cast to int
   828          envvar = os.Getenv(batchTimeoutParamStr)
   829          if envvar != "" { launchAppendFlags += fmt.Sprintf(" -c %d", batchTimeout) }
   830          if debugflagAPI { logger(fmt.Sprintf("%-50s %s=%d", batchTimeoutParamStr+"="+envvar, "batchTimeout", batchTimeout)) }
   831  
   832          // CoreLoggingLevel
   833          envvar = strings.ToUpper(os.Getenv("CORE_LOGGING_LEVEL")) // (default = not set)|CRITICAL|ERROR|WARNING|NOTICE|INFO|DEBUG
   834          if envvar != "" {
   835                  launchAppendFlags += fmt.Sprintf(" -l %s", envvar)
   836          }
   837          if debugflagAPI { logger(fmt.Sprintf("CORE_LOGGING_LEVEL=%s", envvar)) }
   838  
   839          // CoreLedgerStateDB
   840          envvar = os.Getenv("CORE_LEDGER_STATE_STATEDATABASE")  // goleveldb | CouchDB
   841          if envvar != "" {
   842                  launchAppendFlags += fmt.Sprintf(" -d %s", envvar)
   843          }
   844          if debugflagAPI { logger(fmt.Sprintf("CORE_LEDGER_STATE_STATEDATABASE=%s", envvar)) }
   845  
   846          // CoreSecurityLevel
   847          envvar = os.Getenv("CORE_SECURITY_LEVEL")  // 256 | 384
   848          if envvar != "" {
   849                  launchAppendFlags += fmt.Sprintf(" -w %s", envvar)
   850          }
   851          if debugflagAPI { logger(fmt.Sprintf("CORE_SECURITY_LEVEL=%s", envvar)) }
   852  
   853          // CoreSecurityHashAlgorithm
   854          envvar = os.Getenv("CORE_SECURITY_HASHALGORITHM")  // SHA2 | SHA3
   855          if envvar != "" {
   856                  launchAppendFlags += fmt.Sprintf(" -x %s", envvar)
   857          }
   858          if debugflagAPI { logger(fmt.Sprintf("CORE_SECURITY_HASHALGORITHM=%s", envvar)) }
   859  
   860  
   861          //////////////////////////////////////////////////////////////////////////
   862          // Each producer sends TXs to one channel on one orderer, and increments
   863          // its own counters for the successfully sent Tx, and the send-failures
   864          // (rejected/timeout). These arrays are indexed by dimensions:
   865          // numOrdsInNtwk and numChannels
   866  
   867          var countToSend        [][]int64
   868          var txSent             [][]int64
   869          var txSentFailures     [][]int64
   870          var totalNumTxSent         int64
   871          var totalNumTxSentFailures int64
   872  
   873          // Each consumer receives blocks delivered on one channel from one
   874          // orderer, and must track its own counters for the received number of
   875          // blocks and received number of Tx.
   876          // We will create consumers for every channel on an orderer, and total
   877          // up the TXs received. And do that for all the orderers (indexed by
   878          // numOrdsToWatch). We will check to ensure all the orderers receive
   879          // all the same deliveries. These arrays are indexed by dimensions:
   880          // numOrdsToWatch and numChannels
   881  
   882          var txRecv       [][]int64
   883          var blockRecv    [][]int64
   884          var totalTxRecv    []int64 // total TXs rcvd by all consumers on an orderer, indexed by numOrdsToWatch
   885          var totalBlockRecv []int64 // total Blks recvd by all consumers on an orderer, indexed by numOrdsToWatch
   886          var totalTxRecvMismatch = false
   887          var totalBlockRecvMismatch = false
   888          var consumerConns [][]*grpc.ClientConn
   889  
   890          ////////////////////////////////////////////////////////////////////////
   891          // Create the 1D and 2D slices of counters for the producers and
   892          // consumers. All are initialized to zero.
   893  
   894          for i := 0; i < numOrdsInNtwk; i++ {                    // for all orderers
   895  
   896                  countToSendForOrd := make([]int64, numChannels) // create a counter for all the channels on one orderer
   897                  countToSend = append(countToSend, countToSendForOrd) // orderer-i gets a set
   898  
   899                  sendPassCntrs := make([]int64, numChannels)     // create a counter for all the channels on one orderer
   900                  txSent = append(txSent, sendPassCntrs)          // orderer-i gets a set
   901  
   902                  sendFailCntrs := make([]int64, numChannels)     // create a counter for all the channels on one orderer
   903                  txSentFailures = append(txSentFailures, sendFailCntrs) // orderer-i gets a set
   904          }
   905  
   906          for i := 0; i < numOrdsToWatch; i++ {  // for all orderers which we will watch/monitor for deliveries
   907  
   908                  blockRecvCntrs := make([]int64, numChannels)  // create a set of block counters for each channel
   909                  blockRecv = append(blockRecv, blockRecvCntrs) // orderer-i gets a set
   910  
   911                  txRecvCntrs := make([]int64, numChannels)     // create a set of tx counters for each channel
   912                  txRecv = append(txRecv, txRecvCntrs)          // orderer-i gets a set
   913  
   914                  consumerRow := make([]*grpc.ClientConn, numChannels)
   915                  consumerConns = append(consumerConns, consumerRow)
   916          }
   917  
   918          totalTxRecv    = make([]int64, numOrdsToWatch)  // create counter for each orderer, for total tx received (for all channels)
   919          totalBlockRecv = make([]int64, numOrdsToWatch)  // create counter for each orderer, for total blk received (for all channels)
   920  
   921  
   922          ////////////////////////////////////////////////////////////////////////
   923  
   924          launchNetwork(launchAppendFlags)
   925          time.Sleep(10 * time.Second)
   926  
   927          ////////////////////////////////////////////////////////////////////////
   928          // Create the 1D slice of channel IDs, and create names for them
   929          // which we will use when producing/broadcasting/sending msgs and
   930          // consuming/delivering/receiving msgs.
   931  
   932          var channelIDs []string
   933          channelIDs = make([]string, numChannels)
   934  
   935          // TODO (after FAB-2001 and FAB-2083 are fixed) - Remove the if-then clause.
   936          // Due to those bugs, we cannot pass many tests using multiple orderers and multiple channels.
   937          // TEMPORARY PARTIAL SOLUTION: To test multiple orderers with a single channel,
   938          // use hardcoded TestChainID and skip creating any channels.
   939          if numChannels == 1 {
   940                  channelIDs[0] = genesisconfigProvisional.TestChainID
   941                  logger(fmt.Sprintf("Using DEFAULT channelID = %s", channelIDs[0]))
   942          } else {
   943                  logger(fmt.Sprintf("Using %d new channelIDs, e.g. test-chan.00023", numChannels))
   944                  for c := 0; c < numChannels; c++ {
   945                          channelIDs[c] = fmt.Sprintf("test-chan.%05d", c)
   946                          cmd := fmt.Sprintf("cd $GOPATH/src/github.com/hyperledger/fabric && CORE_PEER_COMMITTER_LEDGER_ORDERER=127.0.0.1:%d peer channel create -c %s", ordStartPort, channelIDs[c])
   947                          _ = executeCmd(cmd)
   948                          //executeCmdAndDisplay(cmd)
   949                  }
   950          }
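                // For illustration: in the multi-channel branch above, the command
                // built for channel index 3 expands to roughly the following (with
                // the actual ordStartPort value substituted for %d):
                //   cd $GOPATH/src/github.com/hyperledger/fabric && \
                //     CORE_PEER_COMMITTER_LEDGER_ORDERER=127.0.0.1:<ordStartPort> peer channel create -c test-chan.00003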
   951  
   952          ////////////////////////////////////////////////////////////////////////
   953          // Start threads for each consumer to watch each channel on all (the
   954          // specified number of) orderers. This code assumes orderers in the
   955          // network will use increasing port numbers, which is the same logic
   956          // used by the driver.sh tool that starts the network for us: the first
   957          // orderer uses ordStartPort, the second uses ordStartPort+1, etc.
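                // For example, if three orderers are watched, the consumers dial
                // ListenAddress:ordStartPort, ListenAddress:ordStartPort+1, and
                // ListenAddress:ordStartPort+2.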
   958  
   959          for ord := 0; ord < numOrdsToWatch; ord++ {
   960                  serverAddr := fmt.Sprintf("%s:%d", ordConf.General.ListenAddress, ordStartPort + uint16(ord))
   961                  if masterSpy && ord == numOrdsToWatch-1 {
   962                          // Special case: this is the extra row of counters
   963                          // (numOrdsToWatch was incremented for it), used by
   964                          // the masterSpy to watch the first orderer for
   965                          // deliveries on all channels. It is a duplicate
   966                          // Consumer (the second one monitoring the first
   967                          // orderer), so reuse the first orderer's port.
   968                          serverAddr = fmt.Sprintf("%s:%d", ordConf.General.ListenAddress, ordStartPort)
   969                          go startConsumerMaster(serverAddr, &channelIDs, ord, &(txRecv[ord]), &(blockRecv[ord]), &(consumerConns[ord][0]))
   970                  } else if optimizeClientsMode {
   972                          // Create just one Consumer to receive all deliveries
   973                          // (on all channels) on an orderer.
   974                          go startConsumerMaster(serverAddr, &channelIDs, ord, &(txRecv[ord]), &(blockRecv[ord]), &(consumerConns[ord][0]))
   975                  } else {
   976                          // Normal mode: create a unique consumer client
   977                          // go-thread for each channel on each orderer.
   978                          for c := 0 ; c < numChannels ; c++ {
   979                                  go startConsumer(serverAddr, channelIDs[c], ord, c, &(txRecv[ord][c]), &(blockRecv[ord][c]), &(consumerConns[ord][c]))
   980                          }
   981                  }
   982  
   983          }
   984  
   985          logger("Finished creating all CONSUMER clients")
   986          time.Sleep(5 * time.Second)
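                // Deferring cleanNetwork here ensures the consumer grpc connections
                // are closed and the network launched above is cleaned up when ote()
                // returns, whether the test passes or fails.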
   987          defer cleanNetwork(&consumerConns)
   988  
   989          ////////////////////////////////////////////////////////////////////////
   990          // Now that the orderer service network is running, and the consumers
   991          // are watching for deliveries, we can start clients which will
   992          // broadcast the specified number of TXs to their associated orderers.
   993  
   994          if optimizeClientsMode {
   995                  producersWG.Add(numOrdsInNtwk)
   996          } else {
   997                  producersWG.Add(numProducers)
   998          }
   999          sendStart := time.Now().Unix()
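                // Divide numTxToSend evenly among all producers: each (orderer, channel)
                // pair gets numTxToSend / (numOrdsInNtwk * numChannels) TXs, and any
                // remainder is added to the first producer. For example, 55 TXs across
                // 2 orderers and 3 channels gives 9 TXs to each of the 6 producers,
                // with the leftover 1 TX going to countToSend[0][0] (10 total there).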
  1000          for ord := 0; ord < numOrdsInNtwk; ord++ {
  1001                  serverAddr := fmt.Sprintf("%s:%d", ordConf.General.ListenAddress, ordStartPort + uint16(ord))
  1002                  for c := 0 ; c < numChannels ; c++ {
  1003                          countToSend[ord][c] = numTxToSend / int64(numOrdsInNtwk * numChannels)
  1004                          if c==0 && ord==0 { countToSend[ord][c] += numTxToSend % int64(numOrdsInNtwk * numChannels) }
  1005                  }
  1006                  if optimizeClientsMode {
  1007                          // create one Producer for all channels on this orderer
  1008                          go startProducerMaster(serverAddr, &channelIDs, ord, &(countToSend[ord]), &(txSent[ord]), &(txSentFailures[ord]))
  1009                  } else {
  1010                          // Normal mode: create a unique producer client
  1011                          // go-thread for each channel on each orderer.
  1012                          for c := 0 ; c < numChannels ; c++ {
  1013                                  go startProducer(serverAddr, channelIDs[c], ord, c, countToSend[ord][c], &(txSent[ord][c]), &(txSentFailures[ord][c]))
  1014                          }
  1015                  }
  1016          }
  1017  
  1018          if optimizeClientsMode {
  1019                  logger(fmt.Sprintf("Finished creating all %d MASTER-PRODUCERs", numOrdsInNtwk))
  1020          } else {
  1021                  logger(fmt.Sprintf("Finished creating all %d PRODUCERs", numOrdsInNtwk * numChannels))
  1022          }
  1023          producersWG.Wait()
  1024          logger(fmt.Sprintf("Send Duration (seconds): %4d", time.Now().Unix() - sendStart))
  1025          recoverStart := time.Now().Unix()
  1026  
  1027          ////////////////////////////////////////////////////////////////////////
  1028          // All producer threads are finished sending broadcast transactions.
  1029          // Let's determine if the deliveries have all been received by the
  1030          // consumer threads. We will check if the receive counts match the send
  1031          // counts on all consumers, or if all consumers are no longer receiving
  1032          // blocks. Wait and continue rechecking as necessary, as long as the
  1033          // delivery (recv) counters are climbing closer to the broadcast (send)
  1034          // counter. If the counts do not match, wait for up to batchTimeout
  1035          // seconds, to ensure that we received the last (non-full) batch.
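                // In short, the polling loop below exits as soon as every watched
                // orderer has delivered all numTxToSend TXs with no mismatches, or
                // once the counters stop climbing and batchTimeout seconds have passed.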
  1036  
  1037          computeTotals(&txSent, &totalNumTxSent, &txSentFailures, &totalNumTxSentFailures, &txRecv, &totalTxRecv, &totalTxRecvMismatch, &blockRecv, &totalBlockRecv, &totalBlockRecvMismatch)
  1038  
  1039          waitSecs := 0
  1040          for !sendEqualRecv(numTxToSend, &totalTxRecv, totalTxRecvMismatch, totalBlockRecvMismatch) &&
                                (moreDeliveries(&txSent, &totalNumTxSent, &txSentFailures, &totalNumTxSentFailures, &txRecv, &totalTxRecv, &totalTxRecvMismatch, &blockRecv, &totalBlockRecv, &totalBlockRecvMismatch) || waitSecs < batchTimeout) {
                        time.Sleep(1 * time.Second)
                        waitSecs++
                }
  1041  
  1042          // Recovery Duration = time spent waiting for orderer service to finish delivering transactions,
  1043          // after all producers finished sending them.
  1044          // waitSecs = some possibly idle time spent waiting for the last batch to be generated (waiting for batchTimeout)
  1045          logger(fmt.Sprintf("Recovery Duration (secs):%4d", time.Now().Unix() - recoverStart))
  1046          logger(fmt.Sprintf("waitSecs for last batch: %4d", waitSecs))
  1047          passed, resultSummary = reportTotals(testname, numTxToSend, countToSend, txSent, totalNumTxSent, txSentFailures, totalNumTxSentFailures, batchSize, txRecv, totalTxRecv, totalTxRecvMismatch, blockRecv, totalBlockRecv, totalBlockRecvMismatch, masterSpy, &channelIDs)
  1048  
  1049          return passed, resultSummary
  1050  }
  1051  
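        // Example invocation from the command line, overriding a few of the OTE_*
        // parameters read below (values are illustrative, not defaults):
        //
        //   OTE_TXS=1000 OTE_CHANNELS=4 OTE_ORDERERS=3 OTE_KAFKABROKERS=3 go run ote.go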
  1052  func main() {
  1053  
  1054          initialize()
  1055  
  1056          // Set reasonable defaults in case any env vars are unset.
  1057          var txs int64 = 55
  1058          chans    := numChannels
  1059          orderers := numOrdsInNtwk
  1060          ordType  := ordererType
  1061          kbs      := numKBrokers
  1062  
  1063  
  1064          // Set addMasterSpy to true to create one additional consumer client
  1065          // that monitors all channels on one orderer with one grpc connection.
  1066          addMasterSpy := false
  1067  
  1068          pPerCh := producersPerCh
  1069          // TODO lPerCh := listenersPerCh
  1070  
  1071          // Read env vars
  1072          if debugflagAPI { logger("==========Environment variables provided for this test, and corresponding values actually used for the test:") }
  1073          testcmd := ""
  1074          envvar := os.Getenv("OTE_TXS")
  1075          if envvar != "" { txs, _ = strconv.ParseInt(envvar, 10, 64); testcmd += " OTE_TXS="+envvar }
  1076          if debugflagAPI { logger(fmt.Sprintf("%-50s %s=%d", "OTE_TXS="+envvar, "txs", txs)) }
  1077  
  1078          envvar = os.Getenv("OTE_CHANNELS")
  1079          if envvar != "" { chans, _ = strconv.Atoi(envvar); testcmd += " OTE_CHANNELS="+envvar }
  1080          if debugflagAPI { logger(fmt.Sprintf("%-50s %s=%d", "OTE_CHANNELS="+envvar, "chans", chans)) }
  1081  
  1082          envvar = os.Getenv("OTE_ORDERERS")
  1083          if envvar != "" { orderers, _ = strconv.Atoi(envvar); testcmd += " OTE_ORDERERS="+envvar }
  1084          if debugflagAPI { logger(fmt.Sprintf("%-50s %s=%d", "OTE_ORDERERS="+envvar, "orderers", orderers)) }
  1085  
  1086          envvar = os.Getenv(ordererTypeParamStr)
  1087          if envvar != "" { ordType = envvar; testcmd += " "+ordererTypeParamStr+"="+envvar }
  1088          if debugflagAPI { logger(fmt.Sprintf("%-50s %s=%s", ordererTypeParamStr+"="+envvar, "ordType", ordType)) }
  1089  
  1090          envvar = os.Getenv("OTE_KAFKABROKERS")
  1091          if envvar != "" { kbs, _ = strconv.Atoi(envvar); testcmd += " OTE_KAFKABROKERS="+envvar }
  1092          if debugflagAPI { logger(fmt.Sprintf("%-50s %s=%d", "OTE_KAFKABROKERS="+envvar, "kbs", kbs)) }
  1093  
  1094          envvar = os.Getenv("OTE_MASTERSPY")
  1095          if strings.ToLower(envvar) == "true" || strings.ToLower(envvar) == "t" { addMasterSpy = true; testcmd += " OTE_MASTERSPY="+envvar }
  1096          if debugflagAPI { logger(fmt.Sprintf("%-50s %s=%t", "OTE_MASTERSPY="+envvar, "masterSpy", addMasterSpy)) }
  1097  
  1098          envvar = os.Getenv("OTE_PRODUCERS_PER_CHANNEL")
  1099          if envvar != "" { pPerCh, _ = strconv.Atoi(envvar); testcmd += " OTE_PRODUCERS_PER_CHANNEL="+envvar }
  1100          if debugflagAPI { logger(fmt.Sprintf("%-50s %s=%d", "OTE_PRODUCERS_PER_CHANNEL="+envvar, "producersPerCh", pPerCh)) }
  1101  
  1102          _, _ = ote("<commandline>"+testcmd+" ote", txs, chans, orderers, ordType, kbs, addMasterSpy, pPerCh)
  1103  }