github.com/vipernet-xyz/tm@v0.34.24/test/loadtime/report/report.go (about)

     1  package report
     2  
     3  import (
     4  	"math"
     5  	"sync"
     6  	"time"
     7  
     8  	"github.com/gofrs/uuid"
     9  	"gonum.org/v1/gonum/stat"
    10  
    11  	"github.com/vipernet-xyz/tm/test/loadtime/payload"
    12  	"github.com/vipernet-xyz/tm/types"
    13  )
    14  
// BlockStore defines the set of methods needed by the report generator from
// Tendermint's store.Blockstore type. Using an interface allows for tests to
// more easily simulate the required behavior without having to use the more
// complex real API.
type BlockStore interface {
	// Height returns the height of the highest block available in the store.
	Height() int64
	// Base returns the height of the lowest block available in the store.
	Base() int64
	// LoadBlock returns the block at the given height. The report generator
	// assumes a non-nil result for every height in [Base(), Height()].
	LoadBlock(int64) *types.Block
}
    24  
// DataPoint contains the set of data collected for each transaction.
type DataPoint struct {
	// Duration is the latency of the transaction: the block timestamp minus
	// the timestamp embedded in the transaction payload. It can be negative
	// when the payload timestamp is later than the block time.
	Duration time.Duration
	// BlockTime is the timestamp of the block used to compute Duration.
	BlockTime time.Time
	// Hash is the transaction's hash.
	Hash []byte
}
    31  
// Report contains the data calculated from reading the timestamped transactions
// of each block found in the blockstore.
type Report struct {
	// ID is the experiment UUID carried in each transaction payload.
	ID uuid.UUID
	// Rate, Connections, and Size echo the load-generator configuration
	// recorded in the transaction payloads for this experiment.
	Rate, Connections, Size uint64
	// Max, Min, Avg, and StdDev summarize the latency distribution of All.
	Max, Min, Avg, StdDev time.Duration

	// NegativeCount is the number of negative durations encountered while
	// reading the transaction data. A negative duration means that
	// a transaction timestamp was greater than the timestamp of the block it
	// was included in and likely indicates an issue with the experimental
	// setup.
	NegativeCount int

	// All contains all data points gathered from all valid transactions.
	// The order of the contents of All is not guaranteed to be match the order of transactions
	// in the chain.
	All []DataPoint

	// used for calculating average during report creation.
	sum int64
}
    54  
// Reports accumulates one Report per experiment ID found in the chain, plus a
// count of unparseable transactions. It performs no locking and is not safe
// for concurrent use.
type Reports struct {
	// s indexes in-progress reports by experiment ID while data is collected.
	s map[uuid.UUID]Report
	// l is the finalized list built by calculateAll and returned by List.
	l []Report

	// errorCount is the number of parsing errors encountered while reading the
	// transaction data. Parsing errors may occur if a transaction not generated
	// by the payload package is submitted to the chain.
	errorCount int
}
    64  
// List returns the finalized reports, one per experiment ID, as computed by
// calculateAll. The returned slice is the internal list, not a copy; callers
// should treat it as read-only.
func (rs *Reports) List() []Report {
	return rs.l
}
    68  
// ErrorCount returns the number of transactions that could not be parsed as
// loadtime payloads while the reports were generated.
func (rs *Reports) ErrorCount() int {
	return rs.errorCount
}
    72  
    73  func (rs *Reports) addDataPoint(id uuid.UUID, l time.Duration, bt time.Time, hash []byte, conns, rate, size uint64) {
    74  	r, ok := rs.s[id]
    75  	if !ok {
    76  		r = Report{
    77  			Max:         0,
    78  			Min:         math.MaxInt64,
    79  			ID:          id,
    80  			Connections: conns,
    81  			Rate:        rate,
    82  			Size:        size,
    83  		}
    84  		rs.s[id] = r
    85  	}
    86  	r.All = append(r.All, DataPoint{Duration: l, BlockTime: bt, Hash: hash})
    87  	if l > r.Max {
    88  		r.Max = l
    89  	}
    90  	if l < r.Min {
    91  		r.Min = l
    92  	}
    93  	if int64(l) < 0 {
    94  		r.NegativeCount++
    95  	}
    96  	// Using an int64 here makes an assumption about the scale and quantity of the data we are processing.
    97  	// If all latencies were 2 seconds, we would need around 4 billion records to overflow this.
    98  	// We are therefore assuming that the data does not exceed these bounds.
    99  	r.sum += int64(l)
   100  	rs.s[id] = r
   101  }
   102  
   103  func (rs *Reports) calculateAll() {
   104  	rs.l = make([]Report, 0, len(rs.s))
   105  	for _, r := range rs.s {
   106  		if len(r.All) == 0 {
   107  			r.Min = 0
   108  			rs.l = append(rs.l, r)
   109  			continue
   110  		}
   111  		r.Avg = time.Duration(r.sum / int64(len(r.All)))
   112  		r.StdDev = time.Duration(int64(stat.StdDev(toFloat(r.All), nil)))
   113  		rs.l = append(rs.l, r)
   114  	}
   115  }
   116  
// addError increments the count of transactions that failed payload parsing.
func (rs *Reports) addError() {
	rs.errorCount++
}
   120  
   121  // GenerateFromBlockStore creates a Report using the data in the provided
   122  // BlockStore.
   123  func GenerateFromBlockStore(s BlockStore) (*Reports, error) {
   124  	type payloadData struct {
   125  		id                      uuid.UUID
   126  		l                       time.Duration
   127  		bt                      time.Time
   128  		hash                    []byte
   129  		connections, rate, size uint64
   130  		err                     error
   131  	}
   132  	type txData struct {
   133  		tx types.Tx
   134  		bt time.Time
   135  	}
   136  	reports := &Reports{
   137  		s: make(map[uuid.UUID]Report),
   138  	}
   139  
   140  	// Deserializing to proto can be slow but does not depend on other data
   141  	// and can therefore be done in parallel.
   142  	// Deserializing in parallel does mean that the resulting data is
   143  	// not guaranteed to be delivered in the same order it was given to the
   144  	// worker pool.
   145  	const poolSize = 16
   146  
   147  	txc := make(chan txData)
   148  	pdc := make(chan payloadData, poolSize)
   149  
   150  	wg := &sync.WaitGroup{}
   151  	wg.Add(poolSize)
   152  	for i := 0; i < poolSize; i++ {
   153  		go func() {
   154  			defer wg.Done()
   155  			for b := range txc {
   156  				p, err := payload.FromBytes(b.tx)
   157  				if err != nil {
   158  					pdc <- payloadData{err: err}
   159  					continue
   160  				}
   161  
   162  				l := b.bt.Sub(p.Time.AsTime())
   163  				idb := (*[16]byte)(p.Id)
   164  				pdc <- payloadData{
   165  					l:           l,
   166  					bt:          b.bt,
   167  					hash:        b.tx.Hash(),
   168  					id:          uuid.UUID(*idb),
   169  					connections: p.Connections,
   170  					rate:        p.Rate,
   171  					size:        p.Size,
   172  				}
   173  			}
   174  		}()
   175  	}
   176  	go func() {
   177  		wg.Wait()
   178  		close(pdc)
   179  	}()
   180  
   181  	go func() {
   182  		base, height := s.Base(), s.Height()
   183  		prev := s.LoadBlock(base)
   184  		for i := base + 1; i < height; i++ {
   185  			// Data from two adjacent block are used here simultaneously,
   186  			// blocks of height H and H+1. The transactions of the block of
   187  			// height H are used with the timestamp from the block of height
   188  			// H+1. This is done because the timestamp from H+1 is calculated
   189  			// by using the precommits submitted at height H. The timestamp in
   190  			// block H+1 represents the time at which block H was committed.
   191  			//
   192  			// In the (very unlikely) event that the very last block of the
   193  			// chain contains payload transactions, those transactions will not
   194  			// be used in the latency calculations because the last block whose
   195  			// transactions are used is the block one before the last.
   196  			cur := s.LoadBlock(i)
   197  			for _, tx := range prev.Data.Txs {
   198  				txc <- txData{tx: tx, bt: cur.Time}
   199  			}
   200  			prev = cur
   201  		}
   202  		close(txc)
   203  	}()
   204  	for pd := range pdc {
   205  		if pd.err != nil {
   206  			reports.addError()
   207  			continue
   208  		}
   209  		reports.addDataPoint(pd.id, pd.l, pd.bt, pd.hash, pd.connections, pd.rate, pd.size)
   210  	}
   211  	reports.calculateAll()
   212  	return reports, nil
   213  }
   214  
   215  func toFloat(in []DataPoint) []float64 {
   216  	r := make([]float64, len(in))
   217  	for i, v := range in {
   218  		r[i] = float64(int64(v.Duration))
   219  	}
   220  	return r
   221  }