github.com/ari-anchor/sei-tendermint@v0.0.0-20230519144642-dc826b7b56bb/test/e2e/runner/load.go

package main

import (
	"container/ring"
	"context"
	"fmt"
	"math/rand"
	"time"

	"github.com/ari-anchor/sei-tendermint/libs/log"
	tmrand "github.com/ari-anchor/sei-tendermint/libs/rand"
	rpchttp "github.com/ari-anchor/sei-tendermint/rpc/client/http"
	e2e "github.com/ari-anchor/sei-tendermint/test/e2e/pkg"
	"github.com/ari-anchor/sei-tendermint/types"
)

// Load generates transactions against the network until the given context is
// canceled.
func Load(ctx context.Context, logger log.Logger, r *rand.Rand, testnet *e2e.Testnet) error {
	// Since transactions are executed across all nodes in the network, we need
	// to reduce transaction load for larger networks to avoid using too much
	// CPU. This gives small networks high throughput and large networks low
	// throughput. It also limits the number of TCP connections, since each
	// worker has a connection to all nodes.
	concurrency := len(testnet.Nodes) * 2
	if concurrency > 32 {
		concurrency = 32
	}

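	// chTx carries generated transactions from the generator goroutine to the
	// workers; chSuccess carries each worker's broadcast success counts back.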
	chTx := make(chan types.Tx)
	chSuccess := make(chan int) // success counts per iteration
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Spawn job generator and processors.
	logger.Info("starting transaction load",
		"workers", concurrency,
		"nodes", len(testnet.Nodes),
		"tx", testnet.TxSize)

	started := time.Now()

	go loadGenerate(ctx, r, chTx, testnet.TxSize, len(testnet.Nodes))

	for w := 0; w < concurrency; w++ {
		go loadProcess(ctx, testnet, chTx, chSuccess)
	}

	// Monitor the transactions to ensure the load propagates to the network.
	//
	// This loop doesn't check for or time out on stalls, since timing out here
	// would just abort the load generator sooner and could obscure backpressure
	// from the test harness, and the framework has other checks for stalls.
	// Ideally we would monitor latency as a guide for when to give up, but we
	// don't have a good way to track that yet.
	success := 0
	for {
		select {
		case numSeen := <-chSuccess:
			success += numSeen
		case <-ctx.Done():
			if success == 0 {
				return fmt.Errorf("failed to submit transactions in %s by %d workers",
					time.Since(started), concurrency)
			}

			// TODO perhaps allow test networks to
			// declare required transaction rates, which
			// might allow us to avoid the special case
			// around 0 txs above.
			rate := float64(success) / time.Since(started).Seconds()

			logger.Info("ending transaction load",
				"dur_secs", time.Since(started).Seconds(),
				"txns", success,
				"workers", concurrency,
				"rate", rate)

			return nil
		}
	}
}

// loadGenerate generates jobs until the context is canceled.
//
// chTx has multiple consumers, so the load generation rate is limited
// primarily by backpressure from broadcasting the transactions, though there
// is still some timer-based limiting.
func loadGenerate(ctx context.Context, r *rand.Rand, chTx chan<- types.Tx, txSize int, networkSize int) {
	timer := time.NewTimer(0)
	defer timer.Stop()
	defer close(chTx)

	for {
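		// Wait for the timer (initially zero, reset after each send) or
		// for cancellation before generating the next transaction.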
		select {
		case <-ctx.Done():
			return
		case <-timer.C:
		}

		// Constrain the key space to avoid using too much space and to
		// keep the size of the data in the app down.
		id := r.Int63n(100)

		tx := types.Tx(fmt.Sprintf("load-%X=%s", id, tmrand.StrFromSource(r, txSize)))

		select {
		case <-ctx.Done():
			return
		case chTx <- tx:
			// sleep for a bit before sending the
			// next transaction.
			timer.Reset(loadGenerateWaitTime(r, networkSize))
		}

	}
}

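// loadGenerateWaitTime returns a randomized delay before the next transaction
// is generated: a base jitter between 250ms and 1s plus a jitter that scales
// with the network size, so larger networks are loaded more gently.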
func loadGenerateWaitTime(r *rand.Rand, size int) time.Duration {
	const (
		min = int64(250 * time.Millisecond)
		max = int64(time.Second)
	)

	var (
		baseJitter = r.Int63n(max-min+1) + min
		sizeFactor = int64(size) * min
		sizeJitter = r.Int63n(sizeFactor-min+1) + min
	)

	return time.Duration(baseJitter + sizeJitter)
}

// loadProcess broadcasts the transactions received on chTx to the testnet's
// usable nodes and reports success counts on chSuccess.
func loadProcess(ctx context.Context, testnet *e2e.Testnet, chTx <-chan types.Tx, chSuccess chan<- int) {
	// Each worker gets its own client to each usable node, which
	// allows for some concurrency while still bounding it.
	clients := make([]*rpchttp.HTTP, 0, len(testnet.Nodes))

	for idx := range testnet.Nodes {
		// Construct a list of usable nodes for creating load. Don't
		// send load through seed nodes because they do not provide the
		// RPC endpoints required to broadcast transactions.
		if testnet.Nodes[idx].Mode == e2e.ModeSeed {
			continue
		}

		client, err := testnet.Nodes[idx].Client()
		if err != nil {
			continue
		}

		clients = append(clients, client)
	}

	if len(clients) == 0 {
		panic("no clients to process load")
	}

	// Put the clients in a ring so they can be used in a
	// round-robin fashion.
	clientRing := ring.New(len(clients))
	for idx := range clients {
		clientRing.Value = clients[idx]
		clientRing = clientRing.Next()
	}

	successes := 0
	for {
		select {
		case <-ctx.Done():
			return
		case tx := <-chTx:
			clientRing = clientRing.Next()
			client := clientRing.Value.(*rpchttp.HTTP)

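			// Skip nodes that are unreachable or still catching up;
			// the transaction is dropped rather than retried.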
			if status, err := client.Status(ctx); err != nil {
				continue
			} else if status.SyncInfo.CatchingUp {
				continue
			}

			if _, err := client.BroadcastTxSync(ctx, tx); err != nil {
				continue
			}
			successes++

			select {
			case chSuccess <- successes:
				successes = 0 // reset counter for the next iteration
				continue
			case <-ctx.Done():
				return
			default:
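				// No receiver is ready for the success count; keep
				// accumulating and report after the next broadcast.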
			}

		}
	}
}