github.com/tenywen/fabric@v1.0.0-beta.0.20170620030522-a5b1ed380643/bddtests/regression/go/ote/ote.go

     1  /*
     2  Copyright IBM Corp. 2017 All Rights Reserved.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package main
    18  
    19  // Orderer Traffic Engine
    20  // ======================
    21  //
    22  // This file ote.go contains main(), for executing from command line
    23  // using environment variables to override those in sampleconfig/orderer.yaml
    24  // or to set OTE test configuration parameters.
    25  //
    26  // Function ote() is called by main after reading environment variables,
    27  // and is also called via "go test" from tests in ote_test.go. Those
    28  // tests can be executed from automated Continuous Integration processes,
    29  // which can use https://github.com/jstemmer/go-junit-report to convert the
    30  // logs to produce junit output for CI reports.
    31  //   go get github.com/jstemmer/go-junit-report
    32  //   go test -v | go-junit-report > report.xml
    33  //
    34  // ote() invokes tool driver.sh (including network.json and json2yml.js) -
    35  //   which is only slightly modified from the original version at
    36  //   https://github.com/dongmingh/v1FabricGenOption -
    37  //   to launch an orderer service network per the specified parameters
    38  //   (including kafka brokers or other necessary support processes).
    39  //   Function ote() performs several actions:
    40  // + create Producer clients to connect via grpc to all the channels on
    41  //   all the orderers to send/broadcast transaction messages
    42  // + create Consumer clients to connect via grpc to ListenAddress:ListenPort
    43  //   on all channels on all orderers and call deliver() to receive messages
    44  //   containing batches of transactions
    45  // + use parameters for specifying test configuration such as:
    46  //   number of transactions, number of channels, number of orderers ...
    47  // + load orderer.yaml to retrieve environment variables used for
    48  //   overriding orderer configuration such as batchsize, batchtimeout ...
    49  // + generate unique transactions, dividing up the requested OTE_TXS count
    50  //   among all the Producers
    51  // + Consumers confirm the same number of blocks and TXs are delivered
    52  //   by all the orderers on all the channels
    53  // + print logs for any errors, and print final tallied results
    54  // + return a pass/fail result and a result summary string
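        //
        // Example command-line invocation (a sketch with illustrative values; the
        // recognized environment variables are the ones read in main() below):
        //   OTE_TXS=1000 OTE_CHANNELS=2 OTE_ORDERERS=2 \
        //   CONFIGTX_ORDERER_ORDERERTYPE=kafka OTE_KAFKABROKERS=3 \
        //   OTE_MASTERSPY=false go run ote.go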
    55  
    56  import (
    57  	"fmt"
    58  	"log"
    59  	"math"
    60  	"os"
    61  	"os/exec"
    62  	"strconv"
    63  	"strings"
    64  	"sync"
    65  	"time"
    66  
    67  	"github.com/golang/protobuf/proto"
    68  	genesisconfig "github.com/hyperledger/fabric/common/configtx/tool/localconfig" // config for genesis.yaml
    69  	genesisconfigProvisional "github.com/hyperledger/fabric/common/configtx/tool/provisional"
    70  	"github.com/hyperledger/fabric/orderer/localconfig" // config, for the orderer.yaml
    71  	cb "github.com/hyperledger/fabric/protos/common"
    72  	ab "github.com/hyperledger/fabric/protos/orderer"
    73  	"github.com/hyperledger/fabric/protos/utils"
    74  	"golang.org/x/net/context"
    75  	"google.golang.org/grpc"
    76  )
    77  
    78  var ordConf *config.TopLevel
    79  var genConf *genesisconfig.TopLevel
    80  var genesisConfigLocation = "CONFIGTX_ORDERER_"
    81  var ordererConfigLocation = "ORDERER_GENERAL_"
    82  var batchSizeParamStr = genesisConfigLocation + "BATCHSIZE_MAXMESSAGECOUNT"
    83  var batchTimeoutParamStr = genesisConfigLocation + "BATCHTIMEOUT"
    84  var ordererTypeParamStr = genesisConfigLocation + "ORDERERTYPE"
    85  
    86  var debugflagLaunch = false
    87  var debugflagAPI = true
    88  var debugflag1 = false
    89  var debugflag2 = false
    90  var debugflag3 = false // most detailed and voluminous
    91  
    92  var producersWG sync.WaitGroup
    93  var logFile *os.File
    94  var logEnabled = false
    95  var envvar string
    96  
    97  var numChannels = 1
    98  var numOrdsInNtwk = 1
    99  var numOrdsToWatch = 1
   100  var ordererType = "solo"
   101  var numKBrokers int
   102  var producersPerCh = 1
   103  var numConsumers = 1
   104  var numProducers = 1
   105  
   106  // numTxToSend is the total number of Transactions to send; a fraction is
   107  // sent by each producer for each channel for each orderer.
   108  
   109  var numTxToSend int64 = 1
   110  
   111  // One GO thread is created for each producer and each consumer client.
   112  // To optimize go thread usage, and to prevent running out of swap space
   113  // in the (laptop) test environment for tests using either numerous
   114  // channels or numerous producers per channel, set optimizeClientsMode to
   115  // true to create only one MasterProducer go thread per orderer, which
   116  // broadcasts messages to all channels on that orderer. Note this option
   117  // works a little less efficiently on the consumer side, where we
   118  // share a single grpc connection but still need to use separate
   119  // GO threads per channel per orderer (instead of one per orderer).
   120  
   121  var optimizeClientsMode = false
   122  
   123  // ordStartPort (default port is 7050, but driver.sh uses 5005).
   124  
   125  var ordStartPort uint16 = 5005
   126  
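        // initialize resets the per-test tunable globals to their defaults and
        // (re)opens the log file; ote() calls it at the start of every run so
        // repeated invocations (e.g. from go test) start from a clean state.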
   127  func initialize() {
   128  	// When running multiple tests, e.g. from go test, reset to defaults
   129  	// for the parameters that could change per test.
   130  	// We do NOT reset things that would apply to every test, such as
   131  	// settings for environment variables
   132  	logEnabled = false
   133  	envvar = ""
   134  	numChannels = 1
   135  	numOrdsInNtwk = 1
   136  	numOrdsToWatch = 1
   137  	ordererType = "solo"
   138  	numKBrokers = 0
   139  	numConsumers = 1
   140  	numProducers = 1
   141  	numTxToSend = 1
   142  	producersPerCh = 1
   143  	initLogger("ote")
   144  }
   145  
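        // initLogger opens (or creates and appends to) a date-stamped log file named
        // "<fileName>-<Mon_DD_YYYY>.log" and redirects the standard log package output
        // to it; it does nothing if logging is already enabled.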
   146  func initLogger(fileName string) {
   147  	if !logEnabled {
   148  		layout := "Jan_02_2006"
   149  		// Format Now with the layout const.
   150  		t := time.Now()
   151  		res := t.Format(layout)
   152  		var err error
   153  		logFile, err = os.OpenFile(fileName+"-"+res+".log", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
   154  		if err != nil {
   155  			panic(fmt.Sprintf("error opening file: %s", err))
   156  		}
   157  		logEnabled = true
   158  		log.SetOutput(logFile)
   159  		//log.SetFlags(log.LstdFlags | log.Lshortfile)
   160  		log.SetFlags(log.LstdFlags)
   161  	}
   162  }
   163  
   164  func logger(printStmt string) {
   165  	fmt.Println(printStmt)
   166  	if !logEnabled {
   167  		return
   168  	}
   169  	log.Println(printStmt)
   170  }
   171  
   172  func closeLogger() {
   173  	if logFile != nil {
   174  		logFile.Close()
   175  	}
   176  	logEnabled = false
   177  }
   178  
   179  type ordererdriveClient struct {
   180  	client  ab.AtomicBroadcast_DeliverClient
   181  	chainID string
   182  }
   183  type broadcastClient struct {
   184  	client  ab.AtomicBroadcast_BroadcastClient
   185  	chainID string
   186  }
   187  
   188  func newOrdererdriveClient(client ab.AtomicBroadcast_DeliverClient, chainID string) *ordererdriveClient {
   189  	return &ordererdriveClient{client: client, chainID: chainID}
   190  }
   191  func newBroadcastClient(client ab.AtomicBroadcast_BroadcastClient, chainID string) *broadcastClient {
   192  	return &broadcastClient{client: client, chainID: chainID}
   193  }
   194  
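        // seekHelper builds the Deliver request Envelope for the given channel: a
        // SeekInfo asking for blocks from the given start position through
        // math.MaxUint64, with BLOCK_UNTIL_READY behavior so the stream stays open
        // and delivers new blocks as they are cut.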
   195  func seekHelper(chainID string, start *ab.SeekPosition) *cb.Envelope {
   196  	return &cb.Envelope{
   197  		Payload: utils.MarshalOrPanic(&cb.Payload{
   198  			Header: &cb.Header{
   199  				//ChainHeader: &cb.ChainHeader{
   200  				//        ChainID: b.chainID,
   201  				//},
   202  				ChannelHeader: utils.MarshalOrPanic(&cb.ChannelHeader{
   203  					ChannelId: chainID,
   204  				}),
   205  				SignatureHeader: utils.MarshalOrPanic(&cb.SignatureHeader{}),
   206  			},
   207  
   208  			Data: utils.MarshalOrPanic(&ab.SeekInfo{
   209  				Start:    start,
   210  				Stop:     &ab.SeekPosition{Type: &ab.SeekPosition_Specified{Specified: &ab.SeekSpecified{Number: math.MaxUint64}}},
   211  				Behavior: ab.SeekInfo_BLOCK_UNTIL_READY,
   212  			}),
   213  		}),
   214  	}
   215  }
   216  
   217  func (r *ordererdriveClient) seekOldest() error {
   218  	return r.client.Send(seekHelper(r.chainID, &ab.SeekPosition{Type: &ab.SeekPosition_Oldest{Oldest: &ab.SeekOldest{}}}))
   219  }
   220  
   221  func (r *ordererdriveClient) seekNewest() error {
   222  	return r.client.Send(seekHelper(r.chainID, &ab.SeekPosition{Type: &ab.SeekPosition_Newest{Newest: &ab.SeekNewest{}}}))
   223  }
   224  
   225  func (r *ordererdriveClient) seek(blockNumber uint64) error {
   226  	return r.client.Send(seekHelper(r.chainID, &ab.SeekPosition{Type: &ab.SeekPosition_Specified{Specified: &ab.SeekSpecified{Number: blockNumber}}}))
   227  }
   228  
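        // readUntilClose loops on Recv() on the Deliver stream, adding the number of
        // transactions and blocks received to the supplied counters, until the stream
        // returns a status message or an error (e.g. the connection is closed).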
   229  func (r *ordererdriveClient) readUntilClose(ordererIndex int, channelIndex int, txRecvCntrP *int64, blockRecvCntrP *int64) {
   230  	for {
   231  		msg, err := r.client.Recv()
   232  		if err != nil {
   233  			if !strings.Contains(err.Error(), "transport is closing") {
   234  				// print if we do not see the msg indicating graceful closing of the connection
   235  				logger(fmt.Sprintf("Consumer for orderer %d channel %d readUntilClose() Recv error: %v", ordererIndex, channelIndex, err))
   236  			}
   237  			return
   238  		}
   239  		switch t := msg.Type.(type) {
   240  		case *ab.DeliverResponse_Status:
   241  			logger(fmt.Sprintf("Got DeliverResponse_Status: %v", t))
   242  			return
   243  		case *ab.DeliverResponse_Block:
   244  			if t.Block.Header.Number > 0 {
   245  				if debugflag2 {
   246  					logger(fmt.Sprintf("Consumer recvd a block, o %d c %d blkNum %d numtrans %d", ordererIndex, channelIndex, t.Block.Header.Number, len(t.Block.Data.Data)))
   247  				}
   248  				if debugflag3 {
   249  					logger(fmt.Sprintf("blk: %v", t.Block.Data.Data))
   250  				}
   251  			}
   252  			*txRecvCntrP += int64(len(t.Block.Data.Data))
   253  			//*blockRecvCntrP = int64(t.Block.Header.Number) // this assumes header number is the block number; instead let's just add one
   254  			(*blockRecvCntrP)++
   255  		}
   256  	}
   257  }
   258  
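        // broadcast wraps the transaction bytes in a Payload/Envelope addressed to the
        // client's channel and sends it on the Broadcast stream; use getAck() to read
        // the orderer's SUCCESS or failure status for each send.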
   259  func (b *broadcastClient) broadcast(transaction []byte) error {
   260  	payload, err := proto.Marshal(&cb.Payload{
   261  		Header: &cb.Header{
   262  			//ChainHeader: &cb.ChainHeader{
   263  			//        ChainID: b.chainID,
   264  			//},
   265  			ChannelHeader: utils.MarshalOrPanic(&cb.ChannelHeader{
   266  				ChannelId: b.chainID,
   267  			}),
   268  			SignatureHeader: utils.MarshalOrPanic(&cb.SignatureHeader{}),
   269  		},
   270  		Data: transaction,
   271  	})
   272  	if err != nil {
   273  		panic(err)
   274  	}
   275  	return b.client.Send(&cb.Envelope{Payload: payload})
   276  }
   277  
   278  func (b *broadcastClient) getAck() error {
   279  	msg, err := b.client.Recv()
   280  	if err != nil {
   281  		return err
   282  	}
   283  	if msg.Status != cb.Status_SUCCESS {
   284  		return fmt.Errorf("Got unexpected status: %v", msg.Status)
   285  	}
   286  	return nil
   287  }
   288  
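        // startConsumer dials one orderer, opens a Deliver stream for one channel,
        // seeks to the oldest block, and then blocks in readUntilClose() tallying the
        // delivered TXs and blocks; the grpc connection is saved via consumerConnP so
        // cleanNetwork() can close it later.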
   289  func startConsumer(serverAddr string, chainID string, ordererIndex int, channelIndex int, txRecvCntrP *int64, blockRecvCntrP *int64, consumerConnP **grpc.ClientConn) {
   290  	conn, err := grpc.Dial(serverAddr, grpc.WithInsecure())
   291  	if err != nil {
   292  		logger(fmt.Sprintf("Error on Consumer ord[%d] ch[%d] connecting (grpc) to %s, err: %v", ordererIndex, channelIndex, serverAddr, err))
   293  		return
   294  	}
   295  	(*consumerConnP) = conn
   296  	client, err := ab.NewAtomicBroadcastClient(*consumerConnP).Deliver(context.TODO())
   297  	if err != nil {
   298  		logger(fmt.Sprintf("Error on Consumer ord[%d] ch[%d] invoking Deliver() on grpc connection to %s, err: %v", ordererIndex, channelIndex, serverAddr, err))
   299  		return
   300  	}
   301  	s := newOrdererdriveClient(client, chainID)
   302  	err = s.seekOldest()
   303  	if err == nil {
   304  		if debugflag1 {
   305  			logger(fmt.Sprintf("Started Consumer to recv delivered batches from ord[%d] ch[%d] srvr=%s chID=%s", ordererIndex, channelIndex, serverAddr, chainID))
   306  		}
   307  	} else {
   308  		logger(fmt.Sprintf("ERROR starting Consumer client for ord[%d] ch[%d] for srvr=%s chID=%s; err: %v", ordererIndex, channelIndex, serverAddr, chainID, err))
   309  	}
   310  	s.readUntilClose(ordererIndex, channelIndex, txRecvCntrP, blockRecvCntrP)
   311  }
   312  
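        // startConsumerMaster is the optimizeClientsMode variant of startConsumer: it
        // shares a single grpc connection to one orderer across Deliver clients for
        // every channel, but still starts one readUntilClose() goroutine per channel.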
   313  func startConsumerMaster(serverAddr string, chainIDsP *[]string, ordererIndex int, txRecvCntrsP *[]int64, blockRecvCntrsP *[]int64, consumerConnP **grpc.ClientConn) {
   314  	// create one conn to the orderer and share it for communications to all channels
   315  	conn, err := grpc.Dial(serverAddr, grpc.WithInsecure())
   316  	if err != nil {
   317  		logger(fmt.Sprintf("Error on MasterConsumer ord[%d] connecting (grpc) to %s, err: %v", ordererIndex, serverAddr, err))
   318  		return
   319  	}
   320  	(*consumerConnP) = conn
   321  
   322  	// create an orderer driver client for every channel on this orderer
   323  	//[][]*ordererdriveClient  //  numChannels
   324  	dc := make([]*ordererdriveClient, numChannels)
   325  	for c := 0; c < numChannels; c++ {
   326  		client, err := ab.NewAtomicBroadcastClient(*consumerConnP).Deliver(context.TODO())
   327  		if err != nil {
   328  			logger(fmt.Sprintf("Error on MasterConsumer ord[%d] invoking Deliver() on grpc connection to %s, err: %v", ordererIndex, serverAddr, err))
   329  			return
   330  		}
   331  		dc[c] = newOrdererdriveClient(client, (*chainIDsP)[c])
   332  		err = dc[c].seekOldest()
   333  		if err == nil {
   334  			if debugflag1 {
   335  				logger(fmt.Sprintf("Started MasterConsumer to recv delivered batches from ord[%d] ch[%d] srvr=%s chID=%s", ordererIndex, c, serverAddr, (*chainIDsP)[c]))
   336  			}
   337  		} else {
   338  			logger(fmt.Sprintf("ERROR starting MasterConsumer client for ord[%d] ch[%d] for srvr=%s chID=%s; err: %v", ordererIndex, c, serverAddr, (*chainIDsP)[c], err))
   339  		}
   340  		// we would prefer to skip these go threads, and just have one "readUntilClose" that looks for deliveries on all channels!!! (see below.)
   341  		// otherwise, what have we really saved?
   342  		go dc[c].readUntilClose(ordererIndex, c, &((*txRecvCntrsP)[c]), &((*blockRecvCntrsP)[c]))
   343  	}
   344  }
   345  
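        // executeCmd runs the given command string via "/bin/sh -c" and returns its
        // stdout; on failure it logs the output and terminates the test via log.Fatal.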
   346  func executeCmd(cmd string) []byte {
   347  	out, err := exec.Command("/bin/sh", "-c", cmd).Output()
   348  	if err != nil {
   349  		logger(fmt.Sprintf("Unsuccessful exec command: %s\nstdout=%s\nstderr=%v", cmd, string(out), err))
   350  		log.Fatal(err)
   351  	}
   352  	return out
   353  }
   354  
   355  func executeCmdAndDisplay(cmd string) {
   356  	out := executeCmd(cmd)
   357  	logger("Results of exec command: " + cmd + "\nstdout=" + string(out))
   358  }
   359  
   360  func connClose(consumerConnsPP **([][]*grpc.ClientConn)) {
   361  	for i := 0; i < numOrdsToWatch; i++ {
   362  		for j := 0; j < numChannels; j++ {
   363  			if (**consumerConnsPP)[i][j] != nil {
   364  				_ = (**consumerConnsPP)[i][j].Close()
   365  			}
   366  		}
   367  	}
   368  }
   369  
   370  func cleanNetwork(consumerConnsP *([][]*grpc.ClientConn)) {
   371  	if debugflag1 {
   372  		logger("Removing the Network Consumers")
   373  	}
   374  	connClose(&consumerConnsP)
   375  
   376  	// Docker is not perfect; we need to unpause any paused containers, before we can kill them.
   377  	//_ = executeCmd("docker ps -aq -f status=paused | xargs docker unpause")
   378  	if out := executeCmd("docker ps -aq -f status=paused"); out != nil && string(out) != "" {
   379  		logger("Removing paused docker containers: " + string(out))
   380  		_ = executeCmd("docker ps -aq -f status=paused | xargs docker unpause")
   381  	}
   382  
   383  	// kill any containers that are still running
   384  	//_ = executeCmd("docker kill $(docker ps -q)")
   385  
   386  	if debugflag1 {
   387  		logger("Removing the Network orderers and associated docker containers")
   388  	}
   389  	_ = executeCmd("docker rm -f $(docker ps -aq)")
   390  }
   391  
   392  func launchNetwork(appendFlags string) {
   393  	// Alternative way: hardcoded docker compose (not driver.sh tool)
   394  	//  _ = executeCmd("docker-compose -f docker-compose-3orderers.yml up -d")
   395  
   396  	cmd := fmt.Sprintf("./driver.sh -a create -p 1 %s", appendFlags)
   397  	logger(fmt.Sprintf("Launching network:  %s", cmd))
   398  	if debugflagLaunch {
   399  		executeCmdAndDisplay(cmd) // show stdout logs; debugging help
   400  	} else {
   401  		executeCmd(cmd)
   402  	}
   403  
   404  	// display the network of docker containers with the orderers and such
   405  	executeCmdAndDisplay("docker ps -a")
   406  }
   407  
   408  func countGenesis() int64 {
   409  	return int64(numChannels)
   410  }
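        // sendEqualRecv returns true when the TX count received on orderer 0 equals
        // the number of TXs requested to be sent, and all watched orderers report
        // matching TX and block totals.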
   411  func sendEqualRecv(numTxToSend int64, totalTxRecvP *[]int64, totalTxRecvMismatch bool, totalBlockRecvMismatch bool) bool {
   412  	var matching = false
   413  	if (*totalTxRecvP)[0] == numTxToSend {
   414  		// recv count on orderer 0 matches the send count
   415  		if !totalTxRecvMismatch && !totalBlockRecvMismatch {
   416  			// all orderers have same recv counters
   417  			matching = true
   418  		}
   419  	}
   420  	return matching
   421  }
   422  
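        // moreDeliveries recomputes all send/receive totals and reports whether any
        // watched orderer's received-TX total increased since the previous check.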
   423  func moreDeliveries(txSentP *[][]int64, totalNumTxSentP *int64, txSentFailuresP *[][]int64, totalNumTxSentFailuresP *int64, txRecvP *[][]int64, totalTxRecvP *[]int64, totalTxRecvMismatchP *bool, blockRecvP *[][]int64, totalBlockRecvP *[]int64, totalBlockRecvMismatchP *bool) (moreReceived bool) {
   424  	moreReceived = false
   425  	prevTotalTxRecv := *totalTxRecvP
   426  	computeTotals(txSentP, totalNumTxSentP, txSentFailuresP, totalNumTxSentFailuresP, txRecvP, totalTxRecvP, totalTxRecvMismatchP, blockRecvP, totalBlockRecvP, totalBlockRecvMismatchP)
   427  	for ordNum := 0; ordNum < numOrdsToWatch; ordNum++ {
   428  		if prevTotalTxRecv[ordNum] != (*totalTxRecvP)[ordNum] {
   429  			moreReceived = true
   430  		}
   431  	}
   432  	return moreReceived
   433  }
   434  
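        // startProducer dials one orderer, opens a Broadcast stream for one channel,
        // and sends txReq transactions one at a time, waiting for an ACK after each
        // send and tallying ACKs and NACKs; it marks producersWG done when finished.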
   435  func startProducer(serverAddr string, chainID string, ordererIndex int, channelIndex int, txReq int64, txSentCntrP *int64, txSentFailureCntrP *int64) {
   436  	conn, err := grpc.Dial(serverAddr, grpc.WithInsecure())
   437  	if err != nil {
   438  		logger(fmt.Sprintf("Error creating connection for Producer for ord[%d] ch[%d], err: %v", ordererIndex, channelIndex, err))
   439  		return
   440  	}
   441  	defer func() {
   442  		_ = conn.Close()
   443  	}()
   444  	client, err := ab.NewAtomicBroadcastClient(conn).Broadcast(context.TODO())
   445  	if err != nil {
   446  		logger(fmt.Sprintf("Error creating Producer for ord[%d] ch[%d], err: %v", ordererIndex, channelIndex, err))
   447  		return
   448  	}
   449  	if debugflag1 {
   450  		logger(fmt.Sprintf("Started Producer to send %d TXs to ord[%d] ch[%d] srvr=%s chID=%s, %v", txReq, ordererIndex, channelIndex, serverAddr, chainID, time.Now()))
   451  	}
   452  	b := newBroadcastClient(client, chainID)
   453  
   454  	// print a log after sending multiples of this percentage of requested TX: 25,50,75%...
   455  	// only on one producer, and assume all producers are generating at same rate.
   456  	// e.g. when txReq = 50, to print a log after every 10 TXs, set progressPercentage = 20
   457  	printProgressLogs := false
   458  	var progressPercentage int64 = 25 // set this between 1 and 99
   459  	printLogCnt := txReq * progressPercentage / 100
   460  	if debugflag1 {
   461  		printProgressLogs = true // to test logs for all producers
   462  	} else {
   463  		if txReq > 10000 && printLogCnt > 0 && ordererIndex == 0 && channelIndex == 0 {
   464  			printProgressLogs = true
   465  		}
   466  	}
   467  	var mult int64 = 0
   468  
   469  	firstErr := false
   470  	for i := int64(0); i < txReq; i++ {
   471  		b.broadcast([]byte(fmt.Sprintf("Testing %v", time.Now())))
   472  		err = b.getAck()
   473  		if err == nil {
   474  			(*txSentCntrP)++
   475  			if printProgressLogs && printLogCnt > 0 && (*txSentCntrP)%printLogCnt == 0 {
   476  				mult++
   477  				if debugflag1 {
   478  					logger(fmt.Sprintf("Producer ord[%d] ch[%d] sent %4d /%4d = %3d%%, %v", ordererIndex, channelIndex, (*txSentCntrP), txReq, progressPercentage*mult, time.Now()))
   479  				} else {
   480  					logger(fmt.Sprintf("Sent %3d%%, %v", progressPercentage*mult, time.Now()))
   481  				}
   482  			}
   483  		} else {
   484  			(*txSentFailureCntrP)++
   485  			if !firstErr {
   486  				firstErr = true
   487  				logger(fmt.Sprintf("Broadcast error on TX %d (the first error for Producer ord[%d] ch[%d]); err: %v", i+1, ordererIndex, channelIndex, err))
   488  			}
   489  		}
   490  	}
   491  	if err != nil {
   492  		logger(fmt.Sprintf("Broadcast error on last TX %d of Producer ord[%d] ch[%d]: %v", txReq, ordererIndex, channelIndex, err))
   493  	}
   494  	if txReq == *txSentCntrP {
   495  		if debugflag1 {
   496  			logger(fmt.Sprintf("Producer finished sending broadcast msgs to ord[%d] ch[%d]: ACKs  %9d  (100%%) , %v", ordererIndex, channelIndex, *txSentCntrP, time.Now()))
   497  		}
   498  	} else {
   499  		logger(fmt.Sprintf("Producer finished sending broadcast msgs to ord[%d] ch[%d]: ACKs  %9d  NACK %d  Other %d , %v", ordererIndex, channelIndex, *txSentCntrP, *txSentFailureCntrP, txReq-*txSentFailureCntrP-*txSentCntrP, time.Now()))
   500  	}
   501  	producersWG.Done()
   502  }
   503  
   504  func startProducerMaster(serverAddr string, chainIDs *[]string, ordererIndex int, txReqP *[]int64, txSentCntrP *[]int64, txSentFailureCntrP *[]int64) {
   505  	// This function creates a grpc connection to one orderer,
   506  	// creates multiple clients (one per numChannels) for that one orderer,
   507  	// and sends a TX to all channels repeatedly until no more to send.
   508  	var txReqTotal int64
   509  	var txMax int64
   510  	for c := 0; c < numChannels; c++ {
   511  		txReqTotal += (*txReqP)[c]
   512  		if txMax < (*txReqP)[c] {
   513  			txMax = (*txReqP)[c]
   514  		}
   515  	}
   516  	conn, err := grpc.Dial(serverAddr, grpc.WithInsecure())
   517  	if err != nil {
   518  		logger(fmt.Sprintf("Error creating connection for MasterProducer for ord[%d], err: %v", ordererIndex, err))
   519  		return
   520  	}
   521  	defer func() {
   522  		_ = conn.Close()
   523  	}()
   524  	client, err := ab.NewAtomicBroadcastClient(conn).Broadcast(context.TODO())
   525  	if err != nil {
   526  		logger(fmt.Sprintf("Error creating MasterProducer for ord[%d], err: %v", ordererIndex, err))
   527  		return
   528  	}
   529  	logger(fmt.Sprintf("Started MasterProducer to send %d TXs to ord[%d] srvr=%s distributed across all channels", txReqTotal, ordererIndex, serverAddr))
   530  
   531  	// create the broadcast clients for every channel on this orderer
   532  	bc := make([]*broadcastClient, numChannels)
   533  	for c := 0; c < numChannels; c++ {
   534  		bc[c] = newBroadcastClient(client, (*chainIDs)[c])
   535  	}
   536  
   537  	firstErr := false
   538  	for i := int64(0); i < txMax; i++ {
   539  		// send one TX to every broadcast client (one TX on each chnl)
   540  		for c := 0; c < numChannels; c++ {
   541  			if i < (*txReqP)[c] {
   542  				// more TXs to send on this channel
   543  				bc[c].broadcast([]byte(fmt.Sprintf("Testing %v", time.Now())))
   544  				err = bc[c].getAck()
   545  				if err == nil {
   546  					(*txSentCntrP)[c]++
   547  				} else {
   548  					(*txSentFailureCntrP)[c]++
   549  					if !firstErr {
   550  						firstErr = true
   551  						logger(fmt.Sprintf("Broadcast error on TX %d (the first error for MasterProducer on ord[%d] ch[%d] channelID=%s); err: %v", i+1, ordererIndex, c, (*chainIDs)[c], err))
   552  					}
   553  				}
   554  			}
   555  		}
   556  	}
   557  	if err != nil {
   558  		logger(fmt.Sprintf("Broadcast error on last TX %d of MasterProducer on ord[%d] ch[%d]: %v", txReqTotal, ordererIndex, numChannels-1, err))
   559  	}
   560  	var txSentTotal int64
   561  	var txSentFailTotal int64
   562  	for c := 0; c < numChannels; c++ {
   563  		txSentTotal += (*txSentCntrP)[c]
   564  		txSentFailTotal += (*txSentFailureCntrP)[c]
   565  	}
   566  	if txReqTotal == txSentTotal {
   567  		logger(fmt.Sprintf("MasterProducer finished sending broadcast msgs to all channels on ord[%d]: ACKs  %9d  (100%%)", ordererIndex, txSentTotal))
   568  	} else {
   569  		logger(fmt.Sprintf("MasterProducer finished sending broadcast msgs to all channels on ord[%d]: ACKs  %9d  NACK %d  Other %d", ordererIndex, txSentTotal, txSentFailTotal, txReqTotal-txSentTotal-txSentFailTotal))
   570  	}
   571  	producersWG.Done()
   572  }
   573  
   574  func computeTotals(txSent *[][]int64, totalNumTxSent *int64, txSentFailures *[][]int64, totalNumTxSentFailures *int64, txRecv *[][]int64, totalTxRecv *[]int64, totalTxRecvMismatch *bool, blockRecv *[][]int64, totalBlockRecv *[]int64, totalBlockRecvMismatch *bool) {
   575  	// The counters for Producers are indexed by orderer (numOrdsInNtwk)
   576  	// and channel (numChannels).
   577  	// Total count includes all counters for all channels on ALL orderers.
   578  	// e.g.    totalNumTxSent         = sum of txSent[*][*]
   579  	// e.g.    totalNumTxSentFailures = sum of txSentFailures[*][*]
   580  
   581  	*totalNumTxSent = 0
   582  	*totalNumTxSentFailures = 0
   583  	for i := 0; i < numOrdsInNtwk; i++ {
   584  		for j := 0; j < numChannels; j++ {
   585  			*totalNumTxSent += (*txSent)[i][j]
   586  			*totalNumTxSentFailures += (*txSentFailures)[i][j]
   587  		}
   588  	}
   589  
   590  	// Counters for consumers are indexed by orderer (numOrdsToWatch)
   591  	// and channel (numChannels).
   592  	// The total count includes all counters for all channels on
   593  	// ONLY ONE orderer.
   594  	// Tally up the totals for all the channels on each orderer, and
   595  	// store them for comparison; they should all be the same.
   596  	// e.g.    totalTxRecv[k]    = sum of txRecv[k][*]
   597  	// e.g.    totalBlockRecv[k] = sum of blockRecv[k][*]
   598  
   599  	*totalTxRecvMismatch = false
   600  	*totalBlockRecvMismatch = false
   601  	for k := 0; k < numOrdsToWatch; k++ {
   602  		// count only the requested TXs - not the genesis block TXs
   603  		(*totalTxRecv)[k] = -countGenesis()
   604  		(*totalBlockRecv)[k] = -countGenesis()
   605  		for l := 0; l < numChannels; l++ {
   606  			(*totalTxRecv)[k] += (*txRecv)[k][l]
   607  			(*totalBlockRecv)[k] += (*blockRecv)[k][l]
   608  			if debugflag3 {
   609  				logger(fmt.Sprintf("in compute(): k %d l %d txRecv[k][l] %d blockRecv[k][l] %d", k, l, (*txRecv)[k][l], (*blockRecv)[k][l]))
   610  			}
   611  		}
   612  		if (k > 0) && (*totalTxRecv)[k] != (*totalTxRecv)[k-1] {
   613  			*totalTxRecvMismatch = true
   614  		}
   615  		if (k > 0) && (*totalBlockRecv)[k] != (*totalBlockRecv)[k-1] {
   616  			*totalBlockRecvMismatch = true
   617  		}
   618  	}
   619  	if debugflag2 {
   620  		logger(fmt.Sprintf("in compute(): totalTxRecv[]= %v, totalBlockRecv[]= %v", *totalTxRecv, *totalBlockRecv))
   621  	}
   622  }
   623  
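        // reportTotals prints the per-producer and per-consumer counters, flags any
        // delivery mismatches between orderers, verifies the per-channel block counts
        // against batchSize, and returns the overall pass/fail result plus a one-line
        // summary string.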
   624  func reportTotals(testname string, numTxToSendTotal int64, countToSend [][]int64, txSent [][]int64, totalNumTxSent int64, txSentFailures [][]int64, totalNumTxSentFailures int64, batchSize int64, txRecv [][]int64, totalTxRecv []int64, totalTxRecvMismatch bool, blockRecv [][]int64, totalBlockRecv []int64, totalBlockRecvMismatch bool, masterSpy bool, channelIDs *[]string) (successResult bool, resultStr string) {
   625  
   626  	// default to failed
   627  	var passFailStr = "FAILED"
   628  	successResult = false
   629  	resultStr = "TEST " + testname + " "
   630  
   631  	// For each Producer, print the ordererIndex and channelIndex, the
   632  	// number of TX requested to be sent, the actual number of TX sent,
   633  	// and the number we failed to send.
   634  
   635  	if numOrdsInNtwk > 3 || numChannels > 3 {
   636  		logger(fmt.Sprintf("Print only the first 3 chans of only the first 3 ordererIdx; and any others ONLY IF they contain failures.\nTotals numOrdInNtwk=%d numChan=%d numPRODUCERs=%d", numOrdsInNtwk, numChannels, numOrdsInNtwk*numChannels))
   637  	}
   638  	logger("PRODUCERS   OrdererIdx  ChannelIdx ChannelID              TX Target         ACK        NACK")
   639  	for i := 0; i < numOrdsInNtwk; i++ {
   640  		for j := 0; j < numChannels; j++ {
   641  			if (i < 3 && j < 3) || txSentFailures[i][j] > 0 || countToSend[i][j] != txSent[i][j]+txSentFailures[i][j] {
   642  				logger(fmt.Sprintf("%22d%12d %-20s%12d%12d%12d", i, j, (*channelIDs)[j], countToSend[i][j], txSent[i][j], txSentFailures[i][j]))
   643  			} else if i < 3 && j == 3 {
   644  				logger(fmt.Sprintf("%34s", "..."))
   645  			} else if i == 3 && j == 0 {
   646  				logger(fmt.Sprintf("%22s", "..."))
   647  			}
   648  		}
   649  	}
   650  
   651  	// for each consumer print the ordererIndex & channel, the num blocks and the num transactions received/delivered
   652  	if numOrdsToWatch > 3 || numChannels > 3 {
   653  		logger(fmt.Sprintf("Print only the first 3 chans of only the first 3 ordererIdx (and the last ordererIdx if masterSpy is present), plus any others that contain failures.\nTotals numOrdIdx=%d numChanIdx=%d numCONSUMERS=%d", numOrdsToWatch, numChannels, numOrdsToWatch*numChannels))
   654  	}
   655  	logger("CONSUMERS   OrdererIdx  ChannelIdx ChannelID                    TXs     Batches")
   656  	for i := 0; i < numOrdsToWatch; i++ {
   657  		for j := 0; j < numChannels; j++ {
   658  			if (j < 3 && (i < 3 || (masterSpy && i == numOrdsInNtwk-1))) || (i > 1 && (blockRecv[i][j] != blockRecv[1][j] || txRecv[i][j] != txRecv[1][j])) {
   659  				// Subtract one from the received Block count and TX count, to ignore the genesis block
   660  				// (we already ignore genesis blocks when we compute the totals in totalTxRecv[n] , totalBlockRecv[n])
   661  				logger(fmt.Sprintf("%22d%12d %-20s%12d%12d", i, j, (*channelIDs)[j], txRecv[i][j]-1, blockRecv[i][j]-1))
   662  			} else if i < 3 && j == 3 {
   663  				logger(fmt.Sprintf("%34s", "..."))
   664  			} else if i == 3 && j == 0 {
   665  				logger(fmt.Sprintf("%22s", "..."))
   666  			}
   667  		}
   668  	}
   669  
   670  	// Check for differences on the deliveries from the orderers. These are
   671  	// probably errors - unless the test stopped an orderer on purpose and
   672  	// never restarted it, while the others continued to deliver TXs.
   673  	// (If an orderer is restarted, then it would reprocess all the
   674  	// back-ordered transactions to catch up with the others.)
   675  
   676  	if totalTxRecvMismatch {
   677  		logger("!!!!! Num TXs Delivered is not same on all orderers!!!!!")
   678  	}
   679  	if totalBlockRecvMismatch {
   680  		logger("!!!!! Num Blocks Delivered is not same on all orderers!!!!!")
   681  	}
   682  
   683  	if totalTxRecvMismatch || totalBlockRecvMismatch {
   684  		resultStr += "Orderers were INCONSISTENT! "
   685  	}
   686  	if totalTxRecv[0] == numTxToSendTotal {
   687  		// recv count on orderer 0 matches the send count
   688  		if !totalTxRecvMismatch && !totalBlockRecvMismatch {
   689  			logger("Hooray! Every TX was successfully sent AND delivered by orderer service.")
   690  			successResult = true
   691  			passFailStr = "PASSED"
   692  		} else {
   693  			resultStr += "Every TX was successfully sent AND delivered by orderer0 but not all orderers"
   694  		}
   695  	} else if totalTxRecv[0] == totalNumTxSent {
   696  		resultStr += "Every ACked TX was delivered, but failures occurred:"
   697  	} else if totalTxRecv[0] < totalNumTxSent {
   698  		resultStr += "BAD! Some ACKed TX were LOST by orderer service!"
   699  	} else {
   700  		resultStr += "BAD! Some EXTRA TX were delivered by orderer service!"
   701  	}
   702  
   703  	////////////////////////////////////////////////////////////////////////
   704  	//
   705  	// Before we declare success, let's check some more things...
   706  	//
   707  	// At this point, we have decided if most of the numbers make sense by
   708  	// setting successResult to true if the tests passed. Thus we assume
   709  	// successResult=true and just set it to false if we find a problem.
   710  
   711  	// Check the totals to verify if the number of blocks on each channel
   712  	// is appropriate for the given batchSize and number of TXs sent.
   713  
   714  	expectedBlocksOnChan := make([]int64, numChannels) // create a counter for all the channels on one orderer
   715  	for c := 0; c < numChannels; c++ {
   716  		var chanSentTotal int64
   717  		for ord := 0; ord < numOrdsInNtwk; ord++ {
   718  			chanSentTotal += txSent[ord][c]
   719  		}
   720  		expectedBlocksOnChan[c] = chanSentTotal / batchSize
   721  		if chanSentTotal%batchSize > 0 {
   722  			expectedBlocksOnChan[c]++
   723  		}
   724  		for ord := 0; ord < numOrdsToWatch; ord++ {
   725  			if expectedBlocksOnChan[c] != blockRecv[ord][c]-1 { // ignore genesis block
   726  				successResult = false
   727  				passFailStr = "FAILED"
   728  				logger(fmt.Sprintf("Error: Unexpected Block count %d (expected %d) on ordIndx=%d channelIDs[%d]=%s, chanSentTxTotal=%d BatchSize=%d", blockRecv[ord][c]-1, expectedBlocksOnChan[c], ord, c, (*channelIDs)[c], chanSentTotal, batchSize))
   729  			} else {
   730  				if debugflag1 {
   731  					logger(fmt.Sprintf("GOOD block count %d on ordIndx=%d channelIDs[%d]=%s chanSentTxTotal=%d BatchSize=%d", expectedBlocksOnChan[c], ord, c, (*channelIDs)[c], chanSentTotal, batchSize))
   732  				}
   733  			}
   734  		}
   735  	}
   736  
   737  	// TODO - Verify the contents of the last block of transactions.
   738  	//        Since we do not know exactly what should be in the block,
   739  	//        then at least we can do:
   740  	//            for each channel, verify if the block delivered from
   741  	//            each orderer is the same (i.e. contains the same
   742  	//            Data bytes (transactions) in the last block)
   743  
   744  	// print some counters totals
   745  	logger(fmt.Sprintf("Not counting genesis blks (1 per chan)%9d", countGenesis()))
   746  	logger(fmt.Sprintf("Total TX broadcasts Requested to Send %9d", numTxToSendTotal))
   747  	logger(fmt.Sprintf("Total TX broadcasts send success ACK  %9d", totalNumTxSent))
   748  	logger(fmt.Sprintf("Total TX broadcasts sendFailed - NACK %9d", totalNumTxSentFailures))
   749  	logger(fmt.Sprintf("Total Send-LOST TX (Not Ack or Nack)  %9d", numTxToSendTotal-totalNumTxSent-totalNumTxSentFailures))
   750  	logger(fmt.Sprintf("Total Recv-LOST TX (Ack but not Recvd)%9d", totalNumTxSent-totalTxRecv[0]))
   751  	if successResult {
   752  		logger(fmt.Sprintf("Total deliveries received TX          %9d", totalTxRecv[0]))
   753  		logger(fmt.Sprintf("Total deliveries received Blocks      %9d", totalBlockRecv[0]))
   754  	} else {
   755  		logger(fmt.Sprintf("Total deliveries received TX on each ordrr     %7d", totalTxRecv))
   756  		logger(fmt.Sprintf("Total deliveries received Blocks on each ordrr %7d", totalBlockRecv))
   757  	}
   758  
   759  	// print output result and counts : overall summary
   760  	resultStr += fmt.Sprintf(" RESULT=%s: TX Req=%d BrdcstACK=%d NACK=%d DelivBlk=%d DelivTX=%d numChannels=%d batchSize=%d", passFailStr, numTxToSendTotal, totalNumTxSent, totalNumTxSentFailures, totalBlockRecv, totalTxRecv, numChannels, batchSize)
   761  	logger(resultStr)
   762  
   763  	return successResult, resultStr
   764  }
   765  
   766  // Function:    ote - the Orderer Test Engine
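        // Inputs:      testname, number of TXs, number of channels, number of
        //              orderers, ordererType, number of kafka brokers, masterSpy
        //              flag, and number of producers per channel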
   767  // Outputs:     print report to stdout with lots of counters
   768  // Returns:     passed bool, resultSummary string
   769  func ote(testname string, txs int64, chans int, orderers int, ordType string, kbs int, masterSpy bool, pPerCh int) (passed bool, resultSummary string) {
   770  
   771  	initialize() // multiple go tests could be run; we must call initialize() each time
   772  
   773  	passed = false
   774  	resultSummary = testname + " test not completed: INPUT ERROR: "
   775  	defer closeLogger()
   776  
   777  	logger(fmt.Sprintf("========== OTE testname=%s TX=%d Channels=%d Orderers=%d ordererType=%s kafka-brokers=%d addMasterSpy=%t producersPerCh=%d", testname, txs, chans, orderers, ordType, kbs, masterSpy, pPerCh))
   778  
   779  	// Establish the default configuration from yaml files - and this also
   780  	// picks up any variables overridden on command line or in environment
   781  	ordConf := config.Load()
   782  	genConf := genesisconfig.Load(genesisconfig.SampleInsecureProfile)
   783  	var launchAppendFlags string
   784  
   785  	////////////////////////////////////////////////////////////////////////
   786  	// Check parameters and/or env vars to see if user wishes to override
   787  	// default config parms.
   788  	////////////////////////////////////////////////////////////////////////
   789  
   790  	//////////////////////////////////////////////////////////////////////
   791  	// Arguments for OTE settings for test variations:
   792  	//////////////////////////////////////////////////////////////////////
   793  
   794  	if txs > 0 {
   795  		numTxToSend = txs
   796  	} else {
   797  		return passed, resultSummary + "number of transactions must be > 0"
   798  	}
   799  	if chans > 0 {
   800  		numChannels = chans
   801  	} else {
   802  		return passed, resultSummary + "number of channels must be > 0"
   803  	}
   804  	if orderers > 0 {
   805  		numOrdsInNtwk = orderers
   806  		launchAppendFlags += fmt.Sprintf(" -o %d", orderers)
   807  	} else {
   808  		return passed, resultSummary + "number of orderers in network must be > 0"
   809  	}
   810  
   811  	if pPerCh > 1 {
   812  		producersPerCh = pPerCh
   813  		return passed, resultSummary + "Multiple producersPerChannel NOT SUPPORTED yet."
   814  	}
   815  
   816  	numOrdsToWatch = numOrdsInNtwk // Watch every orderer to verify they are all delivering the same.
   817  	if masterSpy {
   818  		numOrdsToWatch++
   819  	} // We are not creating another orderer here, but we do need
   820  	// another set of counters; the masterSpy will be created for
   821  	// this test to watch every channel on an orderer - so that means
   822  	// one orderer is being watched twice
   823  
   824  	// this is not an argument, but user may set this tuning parameter before running test
   825  	envvar = os.Getenv("OTE_CLIENTS_SHARE_CONNS")
   826  	if envvar != "" {
   827  		if strings.ToLower(envvar) == "true" || strings.ToLower(envvar) == "t" {
   828  			optimizeClientsMode = true
   829  		}
   830  		if debugflagAPI {
   831  			logger(fmt.Sprintf("%-50s %s=%t", "OTE_CLIENTS_SHARE_CONNS="+envvar, "optimizeClientsMode", optimizeClientsMode))
   832  			logger("Setting OTE_CLIENTS_SHARE_CONNS option to true does the following:\n1. All Consumers on an orderer (one GO thread per each channel) will share grpc connection.\n2. All Producers on an orderer will share a grpc conn AND share one GO-thread.\nAlthough this reduces concurrency and lengthens the test duration, it satisfies\nthe objective of reducing swap space requirements and should be selected when\nrunning tests with numerous channels or producers per channel.")
   833  		}
   834  	}
   835  	if optimizeClientsMode {
   836  		// use only one MasterProducer and one MasterConsumer on each orderer
   837  		numProducers = numOrdsInNtwk
   838  		numConsumers = numOrdsInNtwk
   839  	} else {
   840  		// one Producer and one Consumer for EVERY channel on each orderer
   841  		numProducers = numOrdsInNtwk * numChannels
   842  		numConsumers = numOrdsInNtwk * numChannels
   843  	}
   844  
   845  	//////////////////////////////////////////////////////////////////////
   846  	// Arguments to override configuration parameter values in yaml file:
   847  	//////////////////////////////////////////////////////////////////////
   848  
   849  	// ordererType is an argument of ote(), and is also in the genesisconfig
   850  	ordererType = genConf.Orderer.OrdererType
   851  	if ordType != "" {
   852  		ordererType = ordType
   853  	} else {
   854  		logger(fmt.Sprintf("Null value provided for ordererType; using value from config file: %s", ordererType))
   855  	}
   856  	launchAppendFlags += fmt.Sprintf(" -t %s", ordererType)
   857  	if "kafka" == strings.ToLower(ordererType) {
   858  		if kbs > 0 {
   859  			numKBrokers = kbs
   860  			launchAppendFlags += fmt.Sprintf(" -k %d", numKBrokers)
   861  		} else {
   862  			return passed, resultSummary + "When using kafka ordererType, number of kafka-brokers must be > 0"
   863  		}
   864  	} else {
   865  		numKBrokers = 0
   866  	}
   867  
   868  	// batchSize is not an argument of ote(), but is in the genesisconfig;
   869  	// the variable may be overridden on the command line or by exporting it.
   870  	batchSize := int64(genConf.Orderer.BatchSize.MaxMessageCount) // retype the uint32
   871  	envvar = os.Getenv(batchSizeParamStr)
   872  	if envvar != "" {
   873  		launchAppendFlags += fmt.Sprintf(" -b %d", batchSize)
   874  	}
   875  	if debugflagAPI {
   876  		logger(fmt.Sprintf("%-50s %s=%d", batchSizeParamStr+"="+envvar, "batchSize", batchSize))
   877  	}
   878  
   879  	// batchTimeout is not an argument of ote(), but is in the genesisconfig
   880  	//logger(fmt.Sprintf("DEBUG=====BatchTimeout conf:%v Seconds-float():%v Seconds-int:%v", genConf.Orderer.BatchTimeout, (genConf.Orderer.BatchTimeout).Seconds(), int((genConf.Orderer.BatchTimeout).Seconds())))
   881  	batchTimeout := int((genConf.Orderer.BatchTimeout).Seconds()) // Seconds() converts time.Duration to float64, and then retypecast to int
   882  	envvar = os.Getenv(batchTimeoutParamStr)
   883  	if envvar != "" {
   884  		launchAppendFlags += fmt.Sprintf(" -c %d", batchTimeout)
   885  	}
   886  	if debugflagAPI {
   887  		logger(fmt.Sprintf("%-50s %s=%d", batchTimeoutParamStr+"="+envvar, "batchTimeout", batchTimeout))
   888  	}
   889  
   890  	// CoreLoggingLevel
   891  	envvar = strings.ToUpper(os.Getenv("CORE_LOGGING_LEVEL")) // (default = not set)|CRITICAL|ERROR|WARNING|NOTICE|INFO|DEBUG
   892  	if envvar != "" {
   893  		launchAppendFlags += fmt.Sprintf(" -l %s", envvar)
   894  	}
   895  	if debugflagAPI {
   896  		logger(fmt.Sprintf("CORE_LOGGING_LEVEL=%s", envvar))
   897  	}
   898  
   899  	// CoreLedgerStateDB
   900  	envvar = os.Getenv("CORE_LEDGER_STATE_STATEDATABASE") // goleveldb | CouchDB
   901  	if envvar != "" {
   902  		launchAppendFlags += fmt.Sprintf(" -d %s", envvar)
   903  	}
   904  	if debugflagAPI {
   905  		logger(fmt.Sprintf("CORE_LEDGER_STATE_STATEDATABASE=%s", envvar))
   906  	}
   907  
   908  	// CoreSecurityLevel
   909  	envvar = os.Getenv("CORE_SECURITY_LEVEL") // 256 | 384
   910  	if envvar != "" {
   911  		launchAppendFlags += fmt.Sprintf(" -w %s", envvar)
   912  	}
   913  	if debugflagAPI {
   914  		logger(fmt.Sprintf("CORE_SECURITY_LEVEL=%s", envvar))
   915  	}
   916  
   917  	// CoreSecurityHashAlgorithm
   918  	envvar = os.Getenv("CORE_SECURITY_HASHALGORITHM") // SHA2 | SHA3
   919  	if envvar != "" {
   920  		launchAppendFlags += fmt.Sprintf(" -x %s", envvar)
   921  	}
   922  	if debugflagAPI {
   923  		logger(fmt.Sprintf("CORE_SECURITY_HASHALGORITHM=%s", envvar))
   924  	}
   925  
   926  	//////////////////////////////////////////////////////////////////////////
   927  	// Each producer sends TXs to one channel on one orderer, and increments
   928  	// its own counters for the successfully sent Tx, and the send-failures
   929  	// (rejected/timeout). These arrays are indexed by dimensions:
   930  	// numOrdsInNtwk and numChannels
   931  
   932  	var countToSend [][]int64
   933  	var txSent [][]int64
   934  	var txSentFailures [][]int64
   935  	var totalNumTxSent int64
   936  	var totalNumTxSentFailures int64
   937  
   938  	// Each consumer receives blocks delivered on one channel from one
   939  	// orderer, and must track its own counters for the received number of
   940  	// blocks and received number of Tx.
   941  	// We will create consumers for every channel on an orderer, and total
   942  	// up the TXs received. And do that for all the orderers (indexed by
   943  	// numOrdsToWatch). We will check to ensure all the orderers receive
   944  	// all the same deliveries. These arrays are indexed by dimensions:
   945  	// numOrdsToWatch and numChannels
   946  
   947  	var txRecv [][]int64
   948  	var blockRecv [][]int64
   949  	var totalTxRecv []int64    // total TXs rcvd by all consumers on an orderer, indexed by numOrdsToWatch
   950  	var totalBlockRecv []int64 // total Blks recvd by all consumers on an orderer, indexed by numOrdsToWatch
   951  	var totalTxRecvMismatch = false
   952  	var totalBlockRecvMismatch = false
   953  	var consumerConns [][]*grpc.ClientConn
   954  
   955  	////////////////////////////////////////////////////////////////////////
   956  	// Create the 1D and 2D slices of counters for the producers and
   957  	// consumers. All are initialized to zero.
   958  
   959  	for i := 0; i < numOrdsInNtwk; i++ { // for all orderers
   960  
   961  		countToSendForOrd := make([]int64, numChannels)      // create a counter for all the channels on one orderer
   962  		countToSend = append(countToSend, countToSendForOrd) // orderer-i gets a set
   963  
   964  		sendPassCntrs := make([]int64, numChannels) // create a counter for all the channels on one orderer
   965  		txSent = append(txSent, sendPassCntrs)      // orderer-i gets a set
   966  
   967  		sendFailCntrs := make([]int64, numChannels)            // create a counter for all the channels on one orderer
   968  		txSentFailures = append(txSentFailures, sendFailCntrs) // orderer-i gets a set
   969  	}
   970  
   971  	for i := 0; i < numOrdsToWatch; i++ { // for all orderers which we will watch/monitor for deliveries
   972  
   973  		blockRecvCntrs := make([]int64, numChannels)  // create a set of block counters for each channel
   974  		blockRecv = append(blockRecv, blockRecvCntrs) // orderer-i gets a set
   975  
   976  		txRecvCntrs := make([]int64, numChannels) // create a set of tx counters for each channel
   977  		txRecv = append(txRecv, txRecvCntrs)      // orderer-i gets a set
   978  
   979  		consumerRow := make([]*grpc.ClientConn, numChannels)
   980  		consumerConns = append(consumerConns, consumerRow)
   981  	}
   982  
   983  	totalTxRecv = make([]int64, numOrdsToWatch)    // create counter for each orderer, for total tx received (for all channels)
   984  	totalBlockRecv = make([]int64, numOrdsToWatch) // create counter for each orderer, for total blk received (for all channels)
   985  
   986  	////////////////////////////////////////////////////////////////////////
   987  
   988  	launchNetwork(launchAppendFlags)
   989  	time.Sleep(10 * time.Second)
   990  
   991  	////////////////////////////////////////////////////////////////////////
   992  	// Create the 1D slice of channel IDs, and create names for them
   993  	// which we will use when producing/broadcasting/sending msgs and
   994  	// consuming/delivering/receiving msgs.
   995  
   996  	var channelIDs []string
   997  	channelIDs = make([]string, numChannels)
   998  
   999  	// TODO (after FAB-2001 and FAB-2083 are fixed) - Remove the if-then clause.
  1000  	// Due to those bugs, we cannot pass many tests using multiple orderers and multiple channels.
  1001  	// TEMPORARY PARTIAL SOLUTION: To test multiple orderers with a single channel,
  1002  	// use hardcoded TestChainID and skip creating any channels.
  1003  	if numChannels == 1 {
  1004  		channelIDs[0] = genesisconfigProvisional.TestChainID
  1005  		logger(fmt.Sprintf("Using DEFAULT channelID = %s", channelIDs[0]))
  1006  	} else {
  1007  		logger(fmt.Sprintf("Using %d new channelIDs, e.g. test-chan.00023", numChannels))
  1008  		for c := 0; c < numChannels; c++ {
  1009  			channelIDs[c] = fmt.Sprintf("test-chan.%05d", c)
  1010  			cmd := fmt.Sprintf("cd $GOPATH/src/github.com/hyperledger/fabric && CORE_PEER_COMMITTER_LEDGER_ORDERER=127.0.0.1:%d peer channel create -c %s", ordStartPort, channelIDs[c])
  1011  			_ = executeCmd(cmd)
  1012  			//executeCmdAndDisplay(cmd)
  1013  		}
  1014  	}
  1015  
  1016  	////////////////////////////////////////////////////////////////////////
  1017  	// Start threads for each consumer to watch each channel on all (the
  1018  	// specified number of) orderers. This code assumes orderers in the
  1019  	// network will use increasing port numbers, which is the same logic
  1020  	// used by the driver.sh tool that starts the network for us: the first
  1021  	// orderer uses ordStartPort, the second uses ordStartPort+1, etc.
  1022  
  1023  	for ord := 0; ord < numOrdsToWatch; ord++ {
  1024  		serverAddr := fmt.Sprintf("%s:%d", ordConf.General.ListenAddress, ordStartPort+uint16(ord))
  1025  		if masterSpy && ord == numOrdsToWatch-1 {
  1026  			// Special case: this is the last row of counters,
  1027  			// added (and incremented numOrdsToWatch) for the
  1028  			// masterSpy to use to watch the first orderer for
  1029  			// deliveries, on all channels. This will be a duplicate
  1030  			// Consumer (it is the second one monitoring the first
  1031  			// orderer), so we need to reuse the first port.
  1032  			serverAddr = fmt.Sprintf("%s:%d", ordConf.General.ListenAddress, ordStartPort)
  1033  			go startConsumerMaster(serverAddr, &channelIDs, ord, &(txRecv[ord]), &(blockRecv[ord]), &(consumerConns[ord][0]))
  1034  		} else if optimizeClientsMode {
  1035  			// Create just one Consumer to receive all deliveries
  1036  			// (on all channels) on an orderer.
  1037  			go startConsumerMaster(serverAddr, &channelIDs, ord, &(txRecv[ord]), &(blockRecv[ord]), &(consumerConns[ord][0]))
  1038  		} else {
  1039  			// Normal mode: create a unique consumer client
  1040  			// go-thread for each channel on each orderer.
  1041  			for c := 0; c < numChannels; c++ {
  1042  				go startConsumer(serverAddr, channelIDs[c], ord, c, &(txRecv[ord][c]), &(blockRecv[ord][c]), &(consumerConns[ord][c]))
  1043  			}
  1044  		}
  1045  
  1046  	}
  1047  
  1048  	logger("Finished creating all CONSUMERS clients")
  1049  	time.Sleep(5 * time.Second)
  1050  	defer cleanNetwork(&consumerConns)
  1051  
  1052  	////////////////////////////////////////////////////////////////////////
  1053  	// Now that the orderer service network is running, and the consumers
  1054  	// are watching for deliveries, we can start clients which will
  1055  	// broadcast the specified number of TXs to their associated orderers.
  1056  
  1057  	if optimizeClientsMode {
  1058  		producersWG.Add(numOrdsInNtwk)
  1059  	} else {
  1060  		producersWG.Add(numProducers)
  1061  	}
  1062  	sendStart := time.Now().Unix()
  1063  	for ord := 0; ord < numOrdsInNtwk; ord++ {
  1064  		serverAddr := fmt.Sprintf("%s:%d", ordConf.General.ListenAddress, ordStartPort+uint16(ord))
  1065  		for c := 0; c < numChannels; c++ {
  1066  			countToSend[ord][c] = numTxToSend / int64(numOrdsInNtwk*numChannels)
  1067  			if c == 0 && ord == 0 {
  1068  				countToSend[ord][c] += numTxToSend % int64(numOrdsInNtwk*numChannels)
  1069  			}
  1070  		}
  1071  		if optimizeClientsMode {
  1072  			// create one Producer for all channels on this orderer
  1073  			go startProducerMaster(serverAddr, &channelIDs, ord, &(countToSend[ord]), &(txSent[ord]), &(txSentFailures[ord]))
  1074  		} else {
  1075  			// Normal mode: create a unique producer client
  1076  			// go thread for each channel
  1077  			for c := 0; c < numChannels; c++ {
  1078  				go startProducer(serverAddr, channelIDs[c], ord, c, countToSend[ord][c], &(txSent[ord][c]), &(txSentFailures[ord][c]))
  1079  			}
  1080  		}
  1081  	}
  1082  
  1083  	if optimizeClientsMode {
  1084  		logger(fmt.Sprintf("Finished creating all %d MASTER-PRODUCERs", numOrdsInNtwk))
  1085  	} else {
  1086  		logger(fmt.Sprintf("Finished creating all %d PRODUCERs", numOrdsInNtwk*numChannels))
  1087  	}
  1088  	producersWG.Wait()
  1089  	logger(fmt.Sprintf("Send Duration (seconds): %4d", time.Now().Unix()-sendStart))
  1090  	recoverStart := time.Now().Unix()
  1091  
  1092  	////////////////////////////////////////////////////////////////////////
  1093  	// All producer threads are finished sending broadcast transactions.
  1094  	// Let's determine if the deliveries have all been received by the
  1095  	// consumer threads. We will check if the receive counts match the send
  1096  	// counts on all consumers, or if all consumers are no longer receiving
  1097  	// blocks. Wait and continue rechecking as necessary, as long as the
  1098  	// delivery (recv) counters are climbing closer to the broadcast (send)
  1099  	// counter. If the counts do not match, wait for up to batchTimeout
  1100  	// seconds, to ensure that we received the last (non-full) batch.
  1101  
  1102  	computeTotals(&txSent, &totalNumTxSent, &txSentFailures, &totalNumTxSentFailures, &txRecv, &totalTxRecv, &totalTxRecvMismatch, &blockRecv, &totalBlockRecv, &totalBlockRecvMismatch)
  1103  
  1104  	waitSecs := 0
  1105  	for !sendEqualRecv(numTxToSend, &totalTxRecv, totalTxRecvMismatch, totalBlockRecvMismatch) && (moreDeliveries(&txSent, &totalNumTxSent, &txSentFailures, &totalNumTxSentFailures, &txRecv, &totalTxRecv, &totalTxRecvMismatch, &blockRecv, &totalBlockRecv, &totalBlockRecvMismatch) || waitSecs < batchTimeout) {
  1106  		time.Sleep(1 * time.Second)
  1107  		waitSecs++
  1108  	}
  1109  
  1110  	// Recovery Duration = time spent waiting for orderer service to finish delivering transactions,
  1111  	// after all producers finished sending them.
  1112  	// waitSecs = some possibly idle time spent waiting for the last batch to be generated (waiting for batchTimeout)
  1113  	logger(fmt.Sprintf("Recovery Duration (secs):%4d", time.Now().Unix()-recoverStart))
  1114  	logger(fmt.Sprintf("waitSecs for last batch: %4d", waitSecs))
  1115  	passed, resultSummary = reportTotals(testname, numTxToSend, countToSend, txSent, totalNumTxSent, txSentFailures, totalNumTxSentFailures, batchSize, txRecv, totalTxRecv, totalTxRecvMismatch, blockRecv, totalBlockRecv, totalBlockRecvMismatch, masterSpy, &channelIDs)
  1116  
  1117  	return passed, resultSummary
  1118  }
  1119  
  1120  func main() {
  1121  
  1122  	initialize()
  1123  
  1124  	// Set reasonable defaults in case any env vars are unset.
  1125  	var txs int64 = 55
  1126  	chans := numChannels
  1127  	orderers := numOrdsInNtwk
  1128  	ordType := ordererType
  1129  	kbs := numKBrokers
  1130  
  1131  	// Set addMasterSpy to true to create one additional consumer client
  1132  	// that monitors all channels on one orderer with one grpc connection.
  1133  	addMasterSpy := false
  1134  
  1135  	pPerCh := producersPerCh
  1136  	// TODO lPerCh := listenersPerCh
  1137  
  1138  	// Read env vars
  1139  	if debugflagAPI {
  1140  		logger("==========Environment variables provided for this test, and corresponding values actually used for the test:")
  1141  	}
  1142  	testcmd := ""
  1143  	envvar := os.Getenv("OTE_TXS")
  1144  	if envvar != "" {
  1145  		txs, _ = strconv.ParseInt(envvar, 10, 64)
  1146  		testcmd += " OTE_TXS=" + envvar
  1147  	}
  1148  	if debugflagAPI {
  1149  		logger(fmt.Sprintf("%-50s %s=%d", "OTE_TXS="+envvar, "txs", txs))
  1150  	}
  1151  
  1152  	envvar = os.Getenv("OTE_CHANNELS")
  1153  	if envvar != "" {
  1154  		chans, _ = strconv.Atoi(envvar)
  1155  		testcmd += " OTE_CHANNELS=" + envvar
  1156  	}
  1157  	if debugflagAPI {
  1158  		logger(fmt.Sprintf("%-50s %s=%d", "OTE_CHANNELS="+envvar, "chans", chans))
  1159  	}
  1160  
  1161  	envvar = os.Getenv("OTE_ORDERERS")
  1162  	if envvar != "" {
  1163  		orderers, _ = strconv.Atoi(envvar)
  1164  		testcmd += " OTE_ORDERERS=" + envvar
  1165  	}
  1166  	if debugflagAPI {
  1167  		logger(fmt.Sprintf("%-50s %s=%d", "OTE_ORDERERS="+envvar, "orderers", orderers))
  1168  	}
  1169  
  1170  	envvar = os.Getenv(ordererTypeParamStr)
  1171  	if envvar != "" {
  1172  		ordType = envvar
  1173  		testcmd += " " + ordererTypeParamStr + "=" + envvar
  1174  	}
  1175  	if debugflagAPI {
  1176  		logger(fmt.Sprintf("%-50s %s=%s", ordererTypeParamStr+"="+envvar, "ordType", ordType))
  1177  	}
  1178  
  1179  	envvar = os.Getenv("OTE_KAFKABROKERS")
  1180  	if envvar != "" {
  1181  		kbs, _ = strconv.Atoi(envvar)
  1182  		testcmd += " OTE_KAFKABROKERS=" + envvar
  1183  	}
  1184  	if debugflagAPI {
  1185  		logger(fmt.Sprintf("%-50s %s=%d", "OTE_KAFKABROKERS="+envvar, "kbs", kbs))
  1186  	}
  1187  
  1188  	envvar = os.Getenv("OTE_MASTERSPY")
  1189  	if "true" == strings.ToLower(envvar) || "t" == strings.ToLower(envvar) {
  1190  		addMasterSpy = true
  1191  		testcmd += " OTE_MASTERSPY=" + envvar
  1192  	}
  1193  	if debugflagAPI {
  1194  		logger(fmt.Sprintf("%-50s %s=%t", "OTE_MASTERSPY="+envvar, "masterSpy", addMasterSpy))
  1195  	}
  1196  
  1197  	envvar = os.Getenv("OTE_PRODUCERS_PER_CHANNEL")
  1198  	if envvar != "" {
  1199  		pPerCh, _ = strconv.Atoi(envvar)
  1200  		testcmd += " OTE_PRODUCERS_PER_CHANNEL=" + envvar
  1201  	}
  1202  	if debugflagAPI {
  1203  		logger(fmt.Sprintf("%-50s %s=%d", "OTE_PRODUCERS_PER_CHANNEL="+envvar, "producersPerCh", pPerCh))
  1204  	}
  1205  
  1206  	_, _ = ote("<commandline>"+testcmd+" ote", txs, chans, orderers, ordType, kbs, addMasterSpy, pPerCh)
  1207  }