github.com/MagHErmit/tendermint@v0.282.1/test/e2e/runner/load.go

package main

import (
	"context"
	"crypto/rand"
	"errors"
	"fmt"
	"math"
	"time"

	"github.com/MagHErmit/tendermint/libs/log"
	rpchttp "github.com/MagHErmit/tendermint/rpc/client/http"
	e2e "github.com/MagHErmit/tendermint/test/e2e/pkg"
	"github.com/MagHErmit/tendermint/types"
)

// Load generates transactions against the network until the given context is
// canceled. A multiplier greater than one can be supplied if load needs to be
// generated beyond a minimum amount.
func Load(ctx context.Context, testnet *e2e.Testnet, multiplier int) error {
	// Since transactions are executed across all nodes in the network, we need
	// to reduce transaction load for larger networks to avoid using too much
	// CPU. This gives high-throughput small networks and low-throughput large ones.
	// This also limits the number of TCP connections, since each worker has
	// a connection to all nodes.
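	// For example, a 4-node testnet gets 64/4 = 16 workers, while a testnet
	// with more than 64 nodes falls back to a single worker.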
	concurrency := 64 / len(testnet.Nodes)
	if concurrency == 0 {
		concurrency = 1
	}
	initialTimeout := 1 * time.Minute
	stallTimeout := 30 * time.Second

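	// chTx carries generated transactions from the single generator to the
	// workers; chSuccess reports each successfully broadcast transaction back
	// to the monitoring loop below. Both channels are unbuffered, so generation
	// is naturally throttled by worker throughput.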
	chTx := make(chan types.Tx)
	chSuccess := make(chan types.Tx)
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Spawn job generator and processors.
	logger.Info("load", "msg", log.NewLazySprintf("Starting transaction load (%v workers)...", concurrency))
	started := time.Now()

	go loadGenerate(ctx, chTx, multiplier)

	for w := 0; w < concurrency; w++ {
		go loadProcess(ctx, testnet, chTx, chSuccess)
	}

	// Monitor successful transactions, and abort on stalls.
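	// The first success is allowed up to initialTimeout; after that, each
	// subsequent success must arrive within stallTimeout or the run is aborted.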
	success := 0
	timeout := initialTimeout
	for {
		select {
		case <-chSuccess:
			success++
			timeout = stallTimeout
		case <-time.After(timeout):
			return fmt.Errorf("unable to submit transactions for %v", timeout)
		case <-ctx.Done():
			if success == 0 {
				return errors.New("failed to submit any transactions")
			}
			logger.Info("load", "msg", log.NewLazySprintf("Ending transaction load after %v txs (%.1f tx/s)...",
				success, float64(success)/time.Since(started).Seconds()))
			return nil
		}
	}
}
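
// A minimal usage sketch (an assumption about the call site, not part of this
// file): the runner would typically invoke Load under a cancellable context,
// for example:
//
//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
//	defer cancel()
//	if err := Load(ctx, testnet, 1); err != nil {
//		logger.Error("transaction load failed", "err", err)
//	}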

// loadGenerate generates transaction jobs until the context is canceled, then
// closes chTx.
func loadGenerate(ctx context.Context, chTx chan<- types.Tx, multiplier int) {
	for i := 0; i < math.MaxInt64; i++ {
		// We keep generating the same 1000 keys over and over, with different values.
		// This gives a reasonable load without putting too much data in the app.
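		// Each transaction is a key/value pair of the form load-<hex id>=<hex value>,
		// where the value is 1024 random bytes (roughly 2 KiB once hex-encoded).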
		id := i % 1000

		bz := make([]byte, 1024) // 1 KiB of random bytes, hex-encoded into the tx below
		_, err := rand.Read(bz)
		if err != nil {
			panic(fmt.Sprintf("Failed to read random bytes: %v", err))
		}
		tx := types.Tx(fmt.Sprintf("load-%X=%x", id, bz))

		select {
		case chTx <- tx:
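			// Pace generation at roughly `multiplier` transactions per second;
			// the unbuffered send above adds back-pressure when workers fall behind.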
			time.Sleep(time.Second / time.Duration(multiplier))

		case <-ctx.Done():
			close(chTx)
			return
		}
	}
}

// loadProcess broadcasts transactions received on chTx, until the channel is closed.
func loadProcess(ctx context.Context, testnet *e2e.Testnet, chTx <-chan types.Tx, chSuccess chan<- types.Tx) {
	// Each worker gets its own client to each node, which allows for some
	// concurrency while still bounding it.
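	// Clients are created lazily and cached by node name the first time a
	// transaction is routed to that node, after a successful health check.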
	clients := map[string]*rpchttp.HTTP{}

	var err error
	for tx := range chTx {
		node := testnet.RandomNode()
		client, ok := clients[node.Name]
		if !ok {
			client, err = node.Client()
			if err != nil {
				continue
			}

			// check that the node is up
			_, err = client.Health(ctx)
			if err != nil {
				continue
			}

			clients[node.Name] = client
		}

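		// BroadcastTxSync returns once CheckTx has run on the target node; on
		// error the transaction is simply dropped and the worker moves on.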
		if _, err = client.BroadcastTxSync(ctx, tx); err != nil {
			continue
		}
		chSuccess <- tx
	}
}