github.com/palisadeinc/bor@v0.0.0-20230615125219-ab7196213d15/core/blockstm/executor_test.go

package blockstm

import (
	"context"
	"fmt"
	"math/big"
	"math/rand"
	"os"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
)

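// OpType distinguishes the kinds of operations a simulated transaction can
// perform: reads and writes against the MVHashMap, plus pure compute ("other").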
type OpType int

const (
	readType OpType = iota
	writeType
	otherType
)

const (
	greenTick    = "✅"
	redCross     = "❌"
	threeRockets = "🚀🚀🚀"
)

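// Op is one simulated operation: the state key it touches, how long it takes,
// what kind of operation it is, and the value it writes (or expects to read).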
type Op struct {
	key      Key
	duration time.Duration
	opType   OpType
	val      int
}

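// testExecTask is a synthetic ExecTask: instead of running EVM code it replays
// a scripted list of ops, recording its read and write sets as it goes.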
type testExecTask struct {
	txIdx        int
	ops          []Op
	readMap      map[Key]ReadDescriptor
	writeMap     map[Key]WriteDescriptor
	sender       common.Address
	nonce        int
	dependencies []int
}

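// Helper types used to parameterize the tests: how keys are generated, how one
// benchmark combination is run, and how op durations and senders are chosen.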
type PathGenerator func(addr common.Address, i int, j int, total int) Key

type TaskRunner func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration)

type TaskRunnerWithMetadata func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration, time.Duration)

type Timer func(txIdx int, opIdx int) time.Duration

type Sender func(int) common.Address

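// NewTestExecTask builds a testExecTask with empty read/write sets and no known dependencies.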
func NewTestExecTask(txIdx int, ops []Op, sender common.Address, nonce int) *testExecTask {
	return &testExecTask{
		txIdx:        txIdx,
		ops:          ops,
		readMap:      make(map[Key]ReadDescriptor),
		writeMap:     make(map[Key]WriteDescriptor),
		sender:       sender,
		nonce:        nonce,
		dependencies: []int{},
	}
}

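// sleep busy-waits for the given duration, so that simulated work keeps the
// executing goroutine occupied rather than yielding as time.Sleep would.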
func sleep(i time.Duration) {
	start := time.Now()
	for time.Since(start) < i {
	}
}

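// Execute replays the task's scripted ops against the MVHashMap. It aborts
// with ErrExecAbortError if the observed nonce does not match the expected one
// or if a read resolved to a dependency on an earlier, unfinished transaction.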
func (t *testExecTask) Execute(mvh *MVHashMap, incarnation int) error {
	// Sleep for 50 microseconds to simulate setup time
	sleep(time.Microsecond * 50)

	version := Version{TxnIndex: t.txIdx, Incarnation: incarnation}

	t.readMap = make(map[Key]ReadDescriptor)
	t.writeMap = make(map[Key]WriteDescriptor)

	deps := -1

	for i, op := range t.ops {
		k := op.key

		switch op.opType {
		case readType:
			if _, ok := t.writeMap[k]; ok {
				sleep(op.duration)
				continue
			}

			result := mvh.Read(k, t.txIdx)

			val := result.Value()

			// The first op always reads the sender's nonce; abort if it does not match
			if i == 0 && val != nil && (val.(int) != t.nonce) {
				return ErrExecAbortError{}
			}

			if result.Status() == MVReadResultDependency {
				if result.depIdx > deps {
					deps = result.depIdx
				}
			}

			var readKind int

			if result.Status() == MVReadResultDone {
				readKind = ReadKindMap
			} else if result.Status() == MVReadResultNone {
				readKind = ReadKindStorage
			}

			sleep(op.duration)

			t.readMap[k] = ReadDescriptor{k, readKind, Version{TxnIndex: result.depIdx, Incarnation: result.incarnation}}
		case writeType:
			t.writeMap[k] = WriteDescriptor{k, version, op.val}
		case otherType:
			sleep(op.duration)
		default:
			panic(fmt.Sprintf("Unknown op type: %d", op.opType))
		}
	}

	if deps != -1 {
		return ErrExecAbortError{deps, fmt.Errorf("dependency error")}
	}

	return nil
}

func (t *testExecTask) MVWriteList() []WriteDescriptor {
	return t.MVFullWriteList()
}

func (t *testExecTask) MVFullWriteList() []WriteDescriptor {
	writes := make([]WriteDescriptor, 0, len(t.writeMap))

	for _, v := range t.writeMap {
		writes = append(writes, v)
	}

	return writes
}

func (t *testExecTask) MVReadList() []ReadDescriptor {
	reads := make([]ReadDescriptor, 0, len(t.readMap))

	for _, v := range t.readMap {
		reads = append(reads, v)
	}

	return reads
}

func (t *testExecTask) Settle() {}

func (t *testExecTask) Sender() common.Address {
	return t.sender
}

func (t *testExecTask) Hash() common.Hash {
	return common.BytesToHash([]byte(fmt.Sprintf("%d", t.txIdx)))
}

func (t *testExecTask) Dependencies() []int {
	return t.dependencies
}

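// randTimeGenerator returns a Timer that draws op durations uniformly from [min, max).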
func randTimeGenerator(min time.Duration, max time.Duration) func(txIdx int, opIdx int) time.Duration {
	return func(txIdx int, opIdx int) time.Duration {
		return time.Duration(rand.Int63n(int64(max-min))) + min
	}
}

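// longTailTimeGenerator behaves like randTimeGenerator, except that op j of
// every i-th transaction takes 100x max, simulating an occasional very slow op.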
func longTailTimeGenerator(min time.Duration, max time.Duration, i int, j int) func(txIdx int, opIdx int) time.Duration {
	return func(txIdx int, opIdx int) time.Duration {
		if txIdx%i == 0 && opIdx == j {
			return max * 100
		}

		return time.Duration(rand.Int63n(int64(max-min))) + min
	}
}

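// randomPathGenerator spreads ops across ten contract addresses keyed by the
// transaction index, so cross-transaction conflicts are possible but not certain.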
var randomPathGenerator = func(sender common.Address, i int, j int, total int) Key {
	return NewStateKey(common.BigToAddress(big.NewInt(int64(i%10))), common.BigToHash(big.NewInt(int64(total))))
}

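// dexPathGenerator routes two designated ops of every transaction to one shared
// key (like a DEX pool's balance), chaining each transaction to its predecessor.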
var dexPathGenerator = func(sender common.Address, i int, j int, total int) Key {
	if j == total-1 || j == 2 {
		return NewSubpathKey(common.BigToAddress(big.NewInt(0)), 1)
	}

	return NewSubpathKey(common.BigToAddress(big.NewInt(int64(j))), 1)
}

var readTime = randTimeGenerator(4*time.Microsecond, 12*time.Microsecond)
var writeTime = randTimeGenerator(2*time.Microsecond, 6*time.Microsecond)
var nonIOTime = randTimeGenerator(1*time.Microsecond, 2*time.Microsecond)

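// taskFactory builds numTask scripted tasks. Each task starts by reading and
// then incrementing its sender's nonce, followed by a shuffled mix of reads,
// writes, and non-IO ops whose keys and durations come from the supplied
// generators. It also returns the summed duration of every op after the two
// nonce ops, which serves as the expected serial execution time.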
func taskFactory(numTask int, sender Sender, readsPerT int, writesPerT int, nonIOPerT int, pathGenerator PathGenerator, readTime Timer, writeTime Timer, nonIOTime Timer) ([]ExecTask, time.Duration) {
	exec := make([]ExecTask, 0, numTask)

	var serialDuration time.Duration

	senderNonces := make(map[common.Address]int)

	for i := 0; i < numTask; i++ {
		s := sender(i)

		// The first two ops always read and then write the sender's nonce
		ops := make([]Op, 0, readsPerT+writesPerT+nonIOPerT)

		ops = append(ops, Op{opType: readType, key: NewSubpathKey(s, 2), duration: readTime(i, 0), val: senderNonces[s]})

		senderNonces[s]++

		ops = append(ops, Op{opType: writeType, key: NewSubpathKey(s, 2), duration: writeTime(i, 1), val: senderNonces[s]})

		for j := 0; j < readsPerT-1; j++ {
			ops = append(ops, Op{opType: readType})
		}

		for j := 0; j < nonIOPerT; j++ {
			ops = append(ops, Op{opType: otherType})
		}

		for j := 0; j < writesPerT-1; j++ {
			ops = append(ops, Op{opType: writeType})
		}

		// Shuffle the ops, except for the first three (read nonce, write nonce, another read)
		// and the last write op. This lets the path generator produce deterministic paths
		// for these "special" ops.
		for j := 3; j < len(ops)-1; j++ {
			k := rand.Intn(len(ops)-j-1) + j
			ops[j], ops[k] = ops[k], ops[j]
		}

		// Generate a duration and key path for each op, except the first two,
		// which always read and write the nonce
		for j := 2; j < len(ops); j++ {
			if ops[j].opType == readType {
				ops[j].key = pathGenerator(s, i, j, len(ops))
				ops[j].duration = readTime(i, j)
			} else if ops[j].opType == writeType {
				ops[j].key = pathGenerator(s, i, j, len(ops))
				ops[j].duration = writeTime(i, j)
			} else {
				ops[j].duration = nonIOTime(i, j)
			}

			serialDuration += ops[j].duration
		}

		if ops[len(ops)-1].opType != writeType {
			panic("Last op must be a write")
		}

		t := NewTestExecTask(i, ops, s, senderNonces[s]-1)
		exec = append(exec, t)
	}

	return exec, serialDuration
}

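// testExecutorComb runs taskRunner for every combination of transaction count,
// reads, writes, and non-IO ops, then reports how often parallel execution beat
// the simulated serial time and by how much in aggregate.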
func testExecutorComb(t *testing.T, totalTxs []int, numReads []int, numWrites []int, numNonIOs []int, taskRunner TaskRunner) {
	t.Helper()
	log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StreamHandler(os.Stderr, log.TerminalFormat(false))))

	improved := 0
	total := 0

	totalExecDuration := time.Duration(0)
	totalSerialDuration := time.Duration(0)

	for _, numTx := range totalTxs {
		for _, numRead := range numReads {
			for _, numWrite := range numWrites {
				for _, numNonIO := range numNonIOs {
					log.Info("Executing block", "numTx", numTx, "numRead", numRead, "numWrite", numWrite, "numNonIO", numNonIO)
					execDuration, expectedSerialDuration := taskRunner(numTx, numRead, numWrite, numNonIO)

					if execDuration < expectedSerialDuration {
						improved++
					}
					total++

					performance := greenTick

					if execDuration >= expectedSerialDuration {
						performance = redCross
					}

					fmt.Printf("exec duration %v, serial duration %v, time reduced %v %.2f%%, %v\n", execDuration, expectedSerialDuration, expectedSerialDuration-execDuration, float64(expectedSerialDuration-execDuration)/float64(expectedSerialDuration)*100, performance)

					totalExecDuration += execDuration
					totalSerialDuration += expectedSerialDuration
				}
			}
		}
	}

	fmt.Println("Improved: ", improved, "Total: ", total, "success rate: ", float64(improved)/float64(total)*100)
	fmt.Printf("Total exec duration: %v, total serial duration: %v, time reduced: %v, time reduced percent: %.2f%%\n", totalExecDuration, totalSerialDuration, totalSerialDuration-totalExecDuration, float64(totalSerialDuration-totalExecDuration)/float64(totalSerialDuration)*100)
}

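// testExecutorCombWithMetadata is like testExecutorComb, but taskRunner also
// reports the duration of a metadata-assisted run, so the two parallel modes
// can be compared against each other as well as against serial execution.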
// nolint: gocognit
func testExecutorCombWithMetadata(t *testing.T, totalTxs []int, numReads []int, numWrites []int, numNonIOs []int, taskRunner TaskRunnerWithMetadata) {
	t.Helper()
	log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StreamHandler(os.Stderr, log.TerminalFormat(false))))

	improved := 0
	improvedMetadata := 0
	rocket := 0
	total := 0

	totalExecDuration := time.Duration(0)
	totalExecDurationMetadata := time.Duration(0)
	totalSerialDuration := time.Duration(0)

	for _, numTx := range totalTxs {
		for _, numRead := range numReads {
			for _, numWrite := range numWrites {
				for _, numNonIO := range numNonIOs {
					log.Info("Executing block", "numTx", numTx, "numRead", numRead, "numWrite", numWrite, "numNonIO", numNonIO)
					execDuration, execDurationMetadata, expectedSerialDuration := taskRunner(numTx, numRead, numWrite, numNonIO)

					if execDuration < expectedSerialDuration {
						improved++
					}
					total++

					performance := greenTick

					if execDuration >= expectedSerialDuration {
						performance = redCross

						if execDurationMetadata <= expectedSerialDuration {
							performance = threeRockets
							rocket++
						}
					}

					if execDuration >= execDurationMetadata {
						improvedMetadata++
					}

					fmt.Printf("WITHOUT METADATA: exec duration %v, serial duration             %v, time reduced %v %.2f%%, %v\n", execDuration, expectedSerialDuration, expectedSerialDuration-execDuration, float64(expectedSerialDuration-execDuration)/float64(expectedSerialDuration)*100, performance)
					fmt.Printf("WITH METADATA:    exec duration %v, exec duration with metadata %v, time reduced %v %.2f%%\n", execDuration, execDurationMetadata, execDuration-execDurationMetadata, float64(execDuration-execDurationMetadata)/float64(execDuration)*100)

					totalExecDuration += execDuration
					totalExecDurationMetadata += execDurationMetadata
					totalSerialDuration += expectedSerialDuration
				}
			}
		}
	}

	fmt.Println("\nImproved: ", improved, "Total: ", total, "success rate: ", float64(improved)/float64(total)*100)
	fmt.Println("Metadata Better: ", improvedMetadata, "out of: ", total, "success rate: ", float64(improvedMetadata)/float64(total)*100)
	fmt.Println("Rockets (exec with metadata <= serial <= exec without metadata): ", rocket)
	fmt.Printf("\nWithout metadata <> serial:        Total exec duration:          %v, total serial duration       : %v, time reduced: %v, time reduced percent: %.2f%%\n", totalExecDuration, totalSerialDuration, totalSerialDuration-totalExecDuration, float64(totalSerialDuration-totalExecDuration)/float64(totalSerialDuration)*100)
	fmt.Printf("With metadata    <> serial:        Total exec duration metadata: %v, total serial duration       : %v, time reduced: %v, time reduced percent: %.2f%%\n", totalExecDurationMetadata, totalSerialDuration, totalSerialDuration-totalExecDurationMetadata, float64(totalSerialDuration-totalExecDurationMetadata)/float64(totalSerialDuration)*100)
	fmt.Printf("Without metadata <> with metadata: Total exec duration:          %v, total exec duration metadata: %v, time reduced: %v, time reduced percent: %.2f%%\n", totalExecDuration, totalExecDurationMetadata, totalExecDuration-totalExecDurationMetadata, float64(totalExecDuration-totalExecDurationMetadata)/float64(totalExecDuration)*100)
}

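// composeValidations chains several PropertyChecks into one that fails on the
// first violated property.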
func composeValidations(checks []PropertyCheck) PropertyCheck {
	return func(pe *ParallelExecutor) error {
		for _, check := range checks {
			err := check(pe)
			if err != nil {
				return err
			}
		}

		return nil
	}
}

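// checkNoStatusOverlap asserts that no transaction index is tracked in more
// than one of the executor's complete, inProgress, and pending sets.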
func checkNoStatusOverlap(pe *ParallelExecutor) error {
	seen := make(map[int]string)

	for _, tx := range pe.execTasks.complete {
		seen[tx] = "complete"
	}

	for _, tx := range pe.execTasks.inProgress {
		if v, ok := seen[tx]; ok {
			return fmt.Errorf("tx %v is in both %v and inProgress", tx, v)
		}

		seen[tx] = "inProgress"
	}

	for _, tx := range pe.execTasks.pending {
		if v, ok := seen[tx]; ok {
			return fmt.Errorf("tx %v is in both %v and pending", tx, v)
		}

		seen[tx] = "pending"
	}

	return nil
}

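// checkNoDroppedTx asserts that every transaction is either tracked in some
// status or blocked on another transaction, i.e. none are silently dropped.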
func checkNoDroppedTx(pe *ParallelExecutor) error {
	for i := 0; i < len(pe.tasks); i++ {
		if !pe.execTasks.checkComplete(i) && !pe.execTasks.checkInProgress(i) && !pe.execTasks.checkPending(i) && !pe.execTasks.isBlocked(i) {
			return fmt.Errorf("tx %v is not in any status and is not blocked by any other tx", i)
		}
	}

	return nil
}

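// runParallel executes the tasks in parallel with the given property check,
// then replays the final write set (one sleep per distinct key) to account for
// the cost of applying results to storage. It returns the total wall-clock time.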
// nolint: unparam
func runParallel(t *testing.T, tasks []ExecTask, validation PropertyCheck, metadata bool) time.Duration {
	t.Helper()

	profile := false // set to true to print the dependency report after execution

	start := time.Now()
	result, err := executeParallelWithCheck(tasks, false, validation, metadata, nil)

	if result.Deps != nil && profile {
		result.Deps.Report(*result.Stats, func(str string) { fmt.Println(str) })
	}

	assert.NoError(t, err, "error occurred during parallel execution")

	// Apply the final write set to storage
	finalWriteSet := make(map[Key]time.Duration)

	for _, task := range tasks {
		task := task.(*testExecTask)
		for _, op := range task.ops {
			if op.opType == writeType {
				finalWriteSet[op.key] = op.duration
			}
		}
	}

	for _, v := range finalWriteSet {
		sleep(v)
	}

	duration := time.Since(start)

	return duration
}

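// runParallelGetMetadata runs the tasks once in dependency-profiling mode and
// returns, for each transaction index, the set of transactions it depended on.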
func runParallelGetMetadata(t *testing.T, tasks []ExecTask, validation PropertyCheck) map[int]map[int]bool {
	t.Helper()

	res, err := executeParallelWithCheck(tasks, true, validation, false, nil)

	assert.NoError(t, err, "error occurred during parallel execution")

	return res.AllDeps
}

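// TestLessConflicts spreads transactions over a rotating pool of 10 to 19
// senders, so conflicts between transactions are relatively rare.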
func TestLessConflicts(t *testing.T) {
	t.Parallel()
	rand.Seed(0)

	totalTxs := []int{10, 50, 100, 200, 300}
	numReads := []int{20, 100, 200}
	numWrites := []int{20, 100, 200}
	numNonIO := []int{100, 500}

	checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, checkNoDroppedTx})

	taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration) {
		sender := func(i int) common.Address {
			randomness := rand.Intn(10) + 10
			return common.BigToAddress(big.NewInt(int64(i % randomness)))
		}
		tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, randomPathGenerator, readTime, writeTime, nonIOTime)

		return runParallel(t, tasks, checks, false), serialDuration
	}

	testExecutorComb(t, totalTxs, numReads, numWrites, numNonIO, taskRunner)
}

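// TestLessConflictsWithMetadata runs the low-conflict workload twice: once to
// record the actual dependencies, then again with those dependencies attached
// to each task as scheduling metadata.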
func TestLessConflictsWithMetadata(t *testing.T) {
	t.Parallel()
	rand.Seed(0)

	totalTxs := []int{300}
	numReads := []int{100, 200}
	numWrites := []int{100, 200}
	numNonIOs := []int{100, 500}

	checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, checkNoDroppedTx})

	taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration, time.Duration) {
		sender := func(i int) common.Address {
			randomness := rand.Intn(10) + 10
			return common.BigToAddress(big.NewInt(int64(i % randomness)))
		}
		tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, randomPathGenerator, readTime, writeTime, nonIOTime)

		parallelDuration := runParallel(t, tasks, checks, false)

		allDeps := runParallelGetMetadata(t, tasks, checks)

		newTasks := make([]ExecTask, 0, len(tasks))

		for _, task := range tasks {
			temp := task.(*testExecTask)

			keys := make([]int, len(allDeps[temp.txIdx]))

			i := 0

			for k := range allDeps[temp.txIdx] {
				keys[i] = k
				i++
			}

			temp.dependencies = keys
			newTasks = append(newTasks, temp)
		}

		return parallelDuration, runParallel(t, newTasks, checks, true), serialDuration
	}

	testExecutorCombWithMetadata(t, totalTxs, numReads, numWrites, numNonIOs, taskRunner)
}

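// TestZeroTx checks that the executor handles an empty block gracefully.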
func TestZeroTx(t *testing.T) {
	t.Parallel()
	rand.Seed(0)

	totalTxs := []int{0}
	numReads := []int{20}
	numWrites := []int{20}
	numNonIO := []int{100}

	checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, checkNoDroppedTx})

	taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration) {
		sender := func(i int) common.Address { return common.BigToAddress(big.NewInt(1)) }
		tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, randomPathGenerator, readTime, writeTime, nonIOTime)

		return runParallel(t, tasks, checks, false), serialDuration
	}

	testExecutorComb(t, totalTxs, numReads, numWrites, numNonIO, taskRunner)
}

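// TestAlternatingTx alternates every transaction between two senders, so each
// transaction's nonce ops conflict with the previous one from the same sender.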
func TestAlternatingTx(t *testing.T) {
	t.Parallel()
	rand.Seed(0)

	totalTxs := []int{200}
	numReads := []int{20}
	numWrites := []int{20}
	numNonIO := []int{100}

	checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, checkNoDroppedTx})

	taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration) {
		sender := func(i int) common.Address { return common.BigToAddress(big.NewInt(int64(i % 2))) }
		tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, randomPathGenerator, readTime, writeTime, nonIOTime)

		return runParallel(t, tasks, checks, false), serialDuration
	}

	testExecutorComb(t, totalTxs, numReads, numWrites, numNonIO, taskRunner)
}

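// TestAlternatingTxWithMetadata is the metadata-assisted variant of TestAlternatingTx.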
func TestAlternatingTxWithMetadata(t *testing.T) {
	t.Parallel()
	rand.Seed(0)

	totalTxs := []int{200}
	numReads := []int{20}
	numWrites := []int{20}
	numNonIO := []int{100}

	checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, checkNoDroppedTx})

	taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration, time.Duration) {
		sender := func(i int) common.Address { return common.BigToAddress(big.NewInt(int64(i % 2))) }
		tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, randomPathGenerator, readTime, writeTime, nonIOTime)

		parallelDuration := runParallel(t, tasks, checks, false)

		allDeps := runParallelGetMetadata(t, tasks, checks)

		newTasks := make([]ExecTask, 0, len(tasks))

		for _, task := range tasks {
			temp := task.(*testExecTask)

			keys := make([]int, len(allDeps[temp.txIdx]))

			i := 0

			for k := range allDeps[temp.txIdx] {
				keys[i] = k
				i++
			}

			temp.dependencies = keys
			newTasks = append(newTasks, temp)
		}

		return parallelDuration, runParallel(t, newTasks, checks, true), serialDuration
	}

	testExecutorCombWithMetadata(t, totalTxs, numReads, numWrites, numNonIO, taskRunner)
}

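// TestMoreConflicts groups runs of 10 to 19 consecutive transactions under one
// sender (the index divided by the pool size), producing many nonce conflicts.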
func TestMoreConflicts(t *testing.T) {
	t.Parallel()
	rand.Seed(0)

	totalTxs := []int{10, 50, 100, 200, 300}
	numReads := []int{20, 100, 200}
	numWrites := []int{20, 100, 200}
	numNonIO := []int{100, 500}

	checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, checkNoDroppedTx})

	taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration) {
		sender := func(i int) common.Address {
			randomness := rand.Intn(10) + 10
			return common.BigToAddress(big.NewInt(int64(i / randomness)))
		}
		tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, randomPathGenerator, readTime, writeTime, nonIOTime)

		return runParallel(t, tasks, checks, false), serialDuration
	}

	testExecutorComb(t, totalTxs, numReads, numWrites, numNonIO, taskRunner)
}

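// TestMoreConflictsWithMetadata is the metadata-assisted variant of TestMoreConflicts.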
func TestMoreConflictsWithMetadata(t *testing.T) {
	t.Parallel()
	rand.Seed(0)

	totalTxs := []int{300}
	numReads := []int{100, 200}
	numWrites := []int{100, 200}
	numNonIO := []int{100, 500}

	checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, checkNoDroppedTx})

	taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration, time.Duration) {
		sender := func(i int) common.Address {
			randomness := rand.Intn(10) + 10
			return common.BigToAddress(big.NewInt(int64(i / randomness)))
		}
		tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, randomPathGenerator, readTime, writeTime, nonIOTime)

		parallelDuration := runParallel(t, tasks, checks, false)

		allDeps := runParallelGetMetadata(t, tasks, checks)

		newTasks := make([]ExecTask, 0, len(tasks))

		for _, task := range tasks {
			temp := task.(*testExecTask)

			keys := make([]int, len(allDeps[temp.txIdx]))

			i := 0

			for k := range allDeps[temp.txIdx] {
				keys[i] = k
				i++
			}

			temp.dependencies = keys
			newTasks = append(newTasks, temp)
		}

		return parallelDuration, runParallel(t, newTasks, checks, true), serialDuration
	}

	testExecutorCombWithMetadata(t, totalTxs, numReads, numWrites, numNonIO, taskRunner)
}

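// TestRandomTx assigns each transaction to one of ten senders at random.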
func TestRandomTx(t *testing.T) {
	t.Parallel()
	rand.Seed(0)

	totalTxs := []int{10, 50, 100, 200, 300}
	numReads := []int{20, 100, 200}
	numWrites := []int{20, 100, 200}
	numNonIO := []int{100, 500}

	checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, checkNoDroppedTx})

	taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration) {
		// Randomly assign this tx to one of 10 senders
		sender := func(i int) common.Address { return common.BigToAddress(big.NewInt(int64(rand.Intn(10)))) }
		tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, randomPathGenerator, readTime, writeTime, nonIOTime)

		return runParallel(t, tasks, checks, false), serialDuration
	}

	testExecutorComb(t, totalTxs, numReads, numWrites, numNonIO, taskRunner)
}

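// TestRandomTxWithMetadata is the metadata-assisted variant of TestRandomTx.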
func TestRandomTxWithMetadata(t *testing.T) {
	t.Parallel()
	rand.Seed(0)

	totalTxs := []int{300}
	numReads := []int{100, 200}
	numWrites := []int{100, 200}
	numNonIO := []int{100, 500}

	checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, checkNoDroppedTx})

	taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration, time.Duration) {
		// Randomly assign this tx to one of 10 senders
		sender := func(i int) common.Address { return common.BigToAddress(big.NewInt(int64(rand.Intn(10)))) }
		tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, randomPathGenerator, readTime, writeTime, nonIOTime)

		parallelDuration := runParallel(t, tasks, checks, false)

		allDeps := runParallelGetMetadata(t, tasks, checks)

		newTasks := make([]ExecTask, 0, len(tasks))

		for _, task := range tasks {
			temp := task.(*testExecTask)

			keys := make([]int, len(allDeps[temp.txIdx]))

			i := 0

			for k := range allDeps[temp.txIdx] {
				keys[i] = k
				i++
			}

			temp.dependencies = keys
			newTasks = append(newTasks, temp)
		}

		return parallelDuration, runParallel(t, newTasks, checks, true), serialDuration
	}

	testExecutorCombWithMetadata(t, totalTxs, numReads, numWrites, numNonIO, taskRunner)
}

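// TestTxWithLongTailRead uses a read timer under which op 10 of every 7th
// transaction is 100x slower, modeling occasional long-tail reads.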
func TestTxWithLongTailRead(t *testing.T) {
	t.Parallel()
	rand.Seed(0)

	totalTxs := []int{10, 50, 100, 200, 300}
	numReads := []int{20, 100, 200}
	numWrites := []int{20, 100, 200}
	numNonIO := []int{100, 500}

	checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, checkNoDroppedTx})

	taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration) {
		sender := func(i int) common.Address {
			randomness := rand.Intn(10) + 10
			return common.BigToAddress(big.NewInt(int64(i / randomness)))
		}

		longTailReadTimer := longTailTimeGenerator(4*time.Microsecond, 12*time.Microsecond, 7, 10)

		tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, randomPathGenerator, longTailReadTimer, writeTime, nonIOTime)

		return runParallel(t, tasks, checks, false), serialDuration
	}

	testExecutorComb(t, totalTxs, numReads, numWrites, numNonIO, taskRunner)
}

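// TestTxWithLongTailReadWithMetadata is the metadata-assisted variant of TestTxWithLongTailRead.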
func TestTxWithLongTailReadWithMetadata(t *testing.T) {
	t.Parallel()
	rand.Seed(0)

	totalTxs := []int{300}
	numReads := []int{100, 200}
	numWrites := []int{100, 200}
	numNonIO := []int{100, 500}

	checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, checkNoDroppedTx})

	taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration, time.Duration) {
		sender := func(i int) common.Address {
			randomness := rand.Intn(10) + 10
			return common.BigToAddress(big.NewInt(int64(i / randomness)))
		}

		longTailReadTimer := longTailTimeGenerator(4*time.Microsecond, 12*time.Microsecond, 7, 10)

		tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, randomPathGenerator, longTailReadTimer, writeTime, nonIOTime)

		parallelDuration := runParallel(t, tasks, checks, false)

		allDeps := runParallelGetMetadata(t, tasks, checks)

		newTasks := make([]ExecTask, 0, len(tasks))

		for _, task := range tasks {
			temp := task.(*testExecTask)

			keys := make([]int, len(allDeps[temp.txIdx]))

			i := 0

			for k := range allDeps[temp.txIdx] {
				keys[i] = k
				i++
			}

			temp.dependencies = keys
			newTasks = append(newTasks, temp)
		}

		return parallelDuration, runParallel(t, newTasks, checks, true), serialDuration
	}

	testExecutorCombWithMetadata(t, totalTxs, numReads, numWrites, numNonIO, taskRunner)
}

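// TestDexScenario funnels every transaction through one shared key via
// dexPathGenerator and verifies that, once all tasks settle, each transaction's
// recorded inputs depend on exactly the preceding transaction.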
func TestDexScenario(t *testing.T) {
	t.Parallel()
	rand.Seed(0)

	totalTxs := []int{10, 50, 100, 200, 300}
	numReads := []int{20, 100, 200}
	numWrites := []int{20, 100, 200}
	numNonIO := []int{100, 500}

	postValidation := func(pe *ParallelExecutor) error {
		if pe.lastSettled == len(pe.tasks) {
			for i, inputs := range pe.lastTxIO.inputs {
				for _, input := range inputs {
					if input.V.TxnIndex != i-1 {
						return fmt.Errorf("tx %d should depend on tx %d, but it actually depends on %d", i, i-1, input.V.TxnIndex)
					}
				}
			}
		}

		return nil
	}

	checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, postValidation, checkNoDroppedTx})

	taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration) {
		sender := func(i int) common.Address { return common.BigToAddress(big.NewInt(int64(i))) }
		tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, dexPathGenerator, readTime, writeTime, nonIOTime)

		return runParallel(t, tasks, checks, false), serialDuration
	}

	testExecutorComb(t, totalTxs, numReads, numWrites, numNonIO, taskRunner)
}

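// TestDexScenarioWithMetadata is the metadata-assisted variant of TestDexScenario.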
func TestDexScenarioWithMetadata(t *testing.T) {
	t.Parallel()
	rand.Seed(0)

	totalTxs := []int{300}
	numReads := []int{100, 200}
	numWrites := []int{100, 200}
	numNonIO := []int{100, 500}

	postValidation := func(pe *ParallelExecutor) error {
		if pe.lastSettled == len(pe.tasks) {
			for i, inputs := range pe.lastTxIO.inputs {
				for _, input := range inputs {
					if input.V.TxnIndex != i-1 {
						return fmt.Errorf("tx %d should depend on tx %d, but it actually depends on %d", i, i-1, input.V.TxnIndex)
					}
				}
			}
		}

		return nil
	}

	checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, postValidation, checkNoDroppedTx})

	taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration, time.Duration) {
		sender := func(i int) common.Address { return common.BigToAddress(big.NewInt(int64(i))) }
		tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, dexPathGenerator, readTime, writeTime, nonIOTime)

		parallelDuration := runParallel(t, tasks, checks, false)

		allDeps := runParallelGetMetadata(t, tasks, checks)

		newTasks := make([]ExecTask, 0, len(tasks))

		for _, task := range tasks {
			temp := task.(*testExecTask)

			keys := make([]int, len(allDeps[temp.txIdx]))

			i := 0

			for k := range allDeps[temp.txIdx] {
				keys[i] = k
				i++
			}

			temp.dependencies = keys
			newTasks = append(newTasks, temp)
		}

		return parallelDuration, runParallel(t, newTasks, checks, true), serialDuration
	}

	testExecutorCombWithMetadata(t, totalTxs, numReads, numWrites, numNonIO, taskRunner)
}

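// TestBreakFromCircularDependency wires five tasks into a full dependency
// cycle and cancels the context up front; ExecuteParallel must return an error
// instead of hanging.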
func TestBreakFromCircularDependency(t *testing.T) {
	t.Parallel()
	rand.Seed(0)

	tasks := make([]ExecTask, 5)

	for i := range tasks {
		// Each task depends on its predecessor, and task 0 depends on the
		// last one, closing the cycle
		tasks[i] = &testExecTask{
			txIdx: i,
			dependencies: []int{
				(i + len(tasks) - 1) % len(tasks),
			},
		}
	}

	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	// This should not hang
	_, err := ExecuteParallel(tasks, false, true, ctx)

	if err == nil {
		t.Error("Expected cancel error")
	}
}

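// TestBreakFromPartialCircularDependency does the same with only the first
// three tasks forming a cycle while the remaining tasks have no dependencies.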
func TestBreakFromPartialCircularDependency(t *testing.T) {
	t.Parallel()
	rand.Seed(0)

	tasks := make([]ExecTask, 5)

	for i := range tasks {
		if i < 3 {
			tasks[i] = &testExecTask{
				txIdx: i,
				dependencies: []int{
					(i + 2) % 3,
				},
			}
		} else {
			tasks[i] = &testExecTask{
				txIdx:        i,
				dependencies: []int{},
			}
		}
	}

	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	// This should not hang
	_, err := ExecuteParallel(tasks, false, true, ctx)

	if err == nil {
		t.Error("Expected cancel error")
	}
}