github.com/ledgerwatch/erigon-lib@v1.0.0/txpool/pool_fuzz_test.go

//go:build !nofuzz

package txpool

import (
	"bytes"
	"context"
	"encoding/binary"
	"testing"

	"github.com/holiman/uint256"
	"github.com/ledgerwatch/log/v3"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/ledgerwatch/erigon-lib/common"
	"github.com/ledgerwatch/erigon-lib/common/u256"
	"github.com/ledgerwatch/erigon-lib/gointerfaces"
	"github.com/ledgerwatch/erigon-lib/gointerfaces/remote"
	"github.com/ledgerwatch/erigon-lib/kv"
	"github.com/ledgerwatch/erigon-lib/kv/kvcache"
	"github.com/ledgerwatch/erigon-lib/kv/memdb"
	"github.com/ledgerwatch/erigon-lib/rlp"
	"github.com/ledgerwatch/erigon-lib/txpool/txpoolcfg"
	"github.com/ledgerwatch/erigon-lib/types"
)

// https://go.dev/doc/fuzz/
// golang.org/s/draft-fuzzing-design
// go doc testing
// go doc testing.F
// go doc testing.F.Add
// go doc testing.F.Fuzz

// go test -trimpath -v -fuzz=Fuzz -fuzztime=10s ./txpool
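//
// To replay only the seed corpus without fuzzing (standard go tooling; no
// project-specific flags assumed):
// go test -run=FuzzOnNewBlocks ./txpool
// A crasher saved under testdata/fuzz/FuzzOnNewBlocks can be replayed via its
// corpus entry name:
// go test -run=FuzzOnNewBlocks/<corpus-entry> ./txpool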

func init() {
	log.Root().SetHandler(log.LvlFilterHandler(log.LvlWarn, log.StderrHandler))
}

/*
	func FuzzTwoQueue(f *testing.F) {
		f.Add([]uint8{0b1000, 0b0101, 0b0111})
		f.Add([]uint8{0b0101, 0b1110, 0b1101, 0b0001})
		f.Fuzz(func(t *testing.T, in []uint8) {
			t.Parallel()
			assert := assert.New(t)
			{
				sub := NewPendingSubPool(PendingSubPool, 1024)
				for _, i := range in {
					sub.Add(&metaTx{subPool: SubPoolMarker(i & 0b1111), Tx: &TxSlot{nonce: 1, value: *uint256.NewInt(1)}})
				}
				sub.EnforceWorstInvariants()
				sub.EnforceBestInvariants()
				assert.Equal(len(in), sub.best.Len())
				assert.Equal(len(in), sub.worst.Len())
				assert.Equal(len(in), sub.Len())

				var prevBest *uint8
				for i := range sub.best.ms {
					current := uint8(sub.best.ms[i].subPool)
					if prevBest != nil {
						assert.LessOrEqual(current, *prevBest)
					}
					assert.Equal(i, sub.best.ms[i].bestIndex)
					prevBest = &current
				}
			}
			{
				sub := NewSubPool(BaseFeeSubPool, 1024)
				for _, i := range in {
					sub.Add(&metaTx{subPool: SubPoolMarker(i & 0b1111), Tx: &TxSlot{nonce: 1, value: *uint256.NewInt(1)}})
				}
				assert.Equal(len(in), sub.best.Len())
				assert.Equal(len(in), sub.worst.Len())
				assert.Equal(len(in), sub.Len())

				for i := range sub.best.ms {
					assert.Equal(i, (sub.best.ms)[i].bestIndex)
				}
				for i := range sub.worst.ms {
					assert.Equal(i, (sub.worst.ms)[i].worstIndex)
				}

				var prevBest *uint8
				i := sub.Len()
				for sub.Len() > 0 {
					best := uint8(sub.Best().subPool)
					assert.Equal(best, uint8(sub.PopBest().subPool))
					if prevBest != nil {
						assert.LessOrEqual(best, *prevBest)
					}
					prevBest = &best
					i--
				}
				assert.Zero(i)
				assert.Zero(sub.Len())
				assert.Zero(sub.best.Len())
				assert.Zero(sub.worst.Len())
			}

			{
				sub := NewSubPool(QueuedSubPool, 1024)
				for _, i := range in {
					sub.Add(&metaTx{subPool: SubPoolMarker(i & 0b1111), Tx: &TxSlot{nonce: 1, value: *uint256.NewInt(1)}})
				}
				var prev *uint8
				i := sub.Len()
				for sub.Len() > 0 {
					worst := uint8(sub.Worst().subPool)
					assert.Equal(worst, uint8(sub.PopWorst().subPool))
					if prev != nil {
						assert.GreaterOrEqual(worst, *prev)
					}
					prev = &worst
					i--
				}
				assert.Zero(i)
				assert.Zero(sub.Len())
				assert.Zero(sub.best.Len())
				assert.Zero(sub.worst.Len())
			}
		})
	}
*/
// u64Slice decodes `in` as a sequence of big-endian 8-byte words, dropping any
// partial tail.
func u64Slice(in []byte) ([]uint64, bool) {
	if len(in) < 8 {
		return nil, false
	}
	res := make([]uint64, len(in)/8)
	for i := 0; i < len(res); i++ {
		res[i] = binary.BigEndian.Uint64(in[i*8:])
	}
	return res, true
}

// u8Slice maps every input byte to a small uint64 in [0,32).
func u8Slice(in []byte) ([]uint64, bool) {
	if len(in) < 1 {
		return nil, false
	}
	res := make([]uint64, len(in))
	for i := 0; i < len(res); i++ {
		res[i] = uint64(in[i] % 32)
	}
	return res, true
}

// u256Slice maps every input byte to a small uint256 in [0,32).
func u256Slice(in []byte) ([]uint256.Int, bool) {
	if len(in) < 1 {
		return nil, false
	}
	res := make([]uint256.Int, len(in))
	for i := 0; i < len(res); i++ {
		res[i].SetUint64(uint64(in[i] % 32))
	}
	return res, true
}
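
// A minimal sanity sketch (hypothetical test, not in the original file) of the
// decoders above: u64Slice reads big-endian 8-byte words and ignores a partial
// tail, while u8Slice reduces every byte mod 32.
func TestFuzzByteDecodersSketch(t *testing.T) {
	in := []byte{0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0xff}
	words, ok := u64Slice(in)
	assert.True(t, ok)
	assert.Equal(t, []uint64{1, 2}, words) // the lone trailing 0xff is dropped

	small, ok := u8Slice([]byte{31, 32, 33})
	assert.True(t, ok)
	assert.Equal(t, []uint64{31, 0, 1}, small) // each byte taken mod 32
}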

// parseSenders consumes input bytes in pairs: the first byte of each pair
// becomes a non-zero nonce (mod 8), the second a balance. An odd trailing
// byte is ignored.
func parseSenders(in []byte) (nonces []uint64, balances []uint256.Int) {
	for i := 0; i+1 < len(in); i += 2 {
		nonce := uint64(in[i] % 8)
		if nonce == 0 {
			nonce = 1
		}
		nonces = append(nonces, nonce)
		balances = append(balances, *uint256.NewInt(uint64(in[i+1])))
	}
	return
}
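
// An illustrative sketch (hypothetical test) of the pairing above: byte 0
// drives the nonce (zero is bumped to one), byte 1 the balance, and the odd
// trailing byte is discarded.
func TestParseSendersSketch(t *testing.T) {
	nonces, balances := parseSenders([]byte{0, 5, 9})
	assert.Equal(t, []uint64{1}, nonces) // 0 % 8 == 0 is bumped to 1
	assert.Equal(t, uint64(5), balances[0].Uint64())
}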

func poolsFromFuzzBytes(rawTxNonce, rawValues, rawTips, rawFeeCap, rawSender []byte) (sendersInfo map[uint64]*sender, senderIDs map[common.Address]uint64, txs types.TxSlots, ok bool) {
	if len(rawTxNonce) < 1 || len(rawValues) < 1 || len(rawTips) < 1 || len(rawFeeCap) < 1 || len(rawSender) < 1+1 {
		return nil, nil, txs, false
	}
	senderNonce, senderBalance := parseSenders(rawSender)
	txNonce, ok := u8Slice(rawTxNonce)
	if !ok {
		return nil, nil, txs, false
	}
	feeCap, ok := u8Slice(rawFeeCap)
	if !ok {
		return nil, nil, txs, false
	}
	tips, ok := u8Slice(rawTips)
	if !ok {
		return nil, nil, txs, false
	}
	values, ok := u256Slice(rawValues)
	if !ok {
		return nil, nil, txs, false
	}

	sendersInfo = map[uint64]*sender{}
	senderIDs = map[common.Address]uint64{}
	senders := make(types.Addresses, 20*len(senderNonce))
	for i := 0; i < len(senderNonce); i++ {
		senderID := uint64(i + 1) // non-zero expected
		binary.BigEndian.PutUint64(senders.At(i%senders.Len()), senderID)
		sendersInfo[senderID] = newSender(senderNonce[i], senderBalance[i%len(senderBalance)])
		senderIDs[senders.AddressAt(i%senders.Len())] = senderID
	}
	txs.Txs = make([]*types.TxSlot, len(txNonce))
	parseCtx := types.NewTxParseContext(*u256.N1)
	parseCtx.WithSender(false)
	for i := range txNonce {
		txs.Txs[i] = &types.TxSlot{
			Nonce:  txNonce[i],
			Value:  values[i%len(values)],
			Tip:    *uint256.NewInt(tips[i%len(tips)]),
			FeeCap: *uint256.NewInt(feeCap[i%len(feeCap)]),
		}
		txRlp := fakeRlpTx(txs.Txs[i], senders.At(i%senders.Len()))
		_, err := parseCtx.ParseTransaction(txRlp, 0, txs.Txs[i], nil, false /* hasEnvelope */, true /* wrappedWithBlobs */, nil)
		if err != nil {
			panic(err)
		}
		txs.Senders = append(txs.Senders, senders.At(i%senders.Len())...)
		txs.IsLocal = append(txs.IsLocal, true)
	}

	return sendersInfo, senderIDs, txs, true
}

// fakeRlpTx appends `data` (anything that identifies the tx) so the resulting
// hash is unique
func fakeRlpTx(slot *types.TxSlot, data []byte) []byte {
	dataLen := rlp.U64Len(1) + // chainID
		rlp.U64Len(slot.Nonce) + rlp.U256Len(&slot.Tip) + rlp.U256Len(&slot.FeeCap) +
		rlp.U64Len(0) + // gas
		rlp.StringLen([]byte{}) + // dest addr
		rlp.U256Len(&slot.Value) +
		rlp.StringLen(data) + // data
		rlp.ListPrefixLen(0) + // access list
		3 // v, r, s

	buf := make([]byte, 1+rlp.ListPrefixLen(dataLen)+dataLen)
	buf[0] = types.DynamicFeeTxType
	p := 1
	p += rlp.EncodeListPrefix(dataLen, buf[p:])
	p += rlp.EncodeU64(1, buf[p:]) // chainID
	p += rlp.EncodeU64(slot.Nonce, buf[p:])
	bb := bytes.NewBuffer(buf[p:p])
	_ = slot.Tip.EncodeRLP(bb)
	p += rlp.U256Len(&slot.Tip)
	bb = bytes.NewBuffer(buf[p:p])
	_ = slot.FeeCap.EncodeRLP(bb)
	p += rlp.U256Len(&slot.FeeCap)
	p += rlp.EncodeU64(0, buf[p:])           // gas
	p += rlp.EncodeString([]byte{}, buf[p:]) // destination addr
	bb = bytes.NewBuffer(buf[p:p])
	_ = slot.Value.EncodeRLP(bb)
	p += rlp.U256Len(&slot.Value)
	p += rlp.EncodeString(data, buf[p:])  // data
	p += rlp.EncodeListPrefix(0, buf[p:]) // access list
	p += rlp.EncodeU64(1, buf[p:])        // v
	p += rlp.EncodeU64(1, buf[p:])        // r
	p += rlp.EncodeU64(1, buf[p:])        // s
	_ = p
	return buf
}
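
// A small sanity sketch (hypothetical test) mirroring how poolsFromFuzzBytes
// consumes fakeRlpTx: the fabricated dynamic-fee envelope must survive
// ParseTransaction, and the nonce must round-trip.
func TestFakeRlpTxParsesSketch(t *testing.T) {
	slot := &types.TxSlot{
		Nonce:  3,
		Tip:    *uint256.NewInt(2),
		FeeCap: *uint256.NewInt(4),
		Value:  *uint256.NewInt(5),
	}
	rlpTx := fakeRlpTx(slot, make([]byte, 20) /* fake sender bytes as payload */)

	parseCtx := types.NewTxParseContext(*u256.N1)
	parseCtx.WithSender(false)
	parsed := &types.TxSlot{}
	_, err := parseCtx.ParseTransaction(rlpTx, 0, parsed, nil, false /* hasEnvelope */, true /* wrappedWithBlobs */, nil)
	require.NoError(t, err)
	assert.Equal(t, slot.Nonce, parsed.Nonce)
}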

// iterateSubPoolUnordered visits every tx in the subpool without any ordering
// guarantee.
func iterateSubPoolUnordered(subPool *SubPool, f func(tx *metaTx)) {
	for i := 0; i < subPool.best.Len(); i++ {
		f((subPool.best.ms)[i])
	}
}

func splitDataset(in types.TxSlots) (types.TxSlots, types.TxSlots, types.TxSlots, types.TxSlots) {
	p1, p2, p3, p4 := types.TxSlots{}, types.TxSlots{}, types.TxSlots{}, types.TxSlots{}
	l := len(in.Txs) / 4

	p1.Txs = in.Txs[:l]
	p1.IsLocal = in.IsLocal[:l]
	p1.Senders = in.Senders[:l*20]

	p2.Txs = in.Txs[l : 2*l]
	p2.IsLocal = in.IsLocal[l : 2*l]
	p2.Senders = in.Senders[l*20 : 2*l*20]

	p3.Txs = in.Txs[2*l : 3*l]
	p3.IsLocal = in.IsLocal[2*l : 3*l]
	p3.Senders = in.Senders[2*l*20 : 3*l*20]

	p4.Txs = in.Txs[3*l : 4*l]
	p4.IsLocal = in.IsLocal[3*l : 4*l]
	p4.Senders = in.Senders[3*l*20 : 4*l*20]

	return p1, p2, p3, p4
}
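
// Quick usage sketch (hypothetical test): every quarter keeps Txs, IsLocal and
// Senders aligned, at 20 sender-address bytes per tx.
func TestSplitDatasetSketch(t *testing.T) {
	var in types.TxSlots
	for i := 0; i < 4; i++ {
		in.Txs = append(in.Txs, &types.TxSlot{Nonce: uint64(i)})
		in.IsLocal = append(in.IsLocal, true)
		in.Senders = append(in.Senders, make([]byte, 20)...)
	}
	p1, p2, p3, p4 := splitDataset(in)
	assert.Equal(t, 1, len(p1.Txs))
	assert.Equal(t, 20, len(p2.Senders))
	assert.Equal(t, 1, len(p3.IsLocal))
	assert.Equal(t, uint64(3), p4.Txs[0].Nonce)
}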

func FuzzOnNewBlocks(f *testing.F) {
	var u64 = [1 * 4]byte{1}
	var senderAddr = [1 + 1 + 1]byte{1}
	f.Add(u64[:], u64[:], u64[:], u64[:], senderAddr[:], uint8(12))
	f.Add(u64[:], u64[:], u64[:], u64[:], senderAddr[:], uint8(14))
	f.Add(u64[:], u64[:], u64[:], u64[:], senderAddr[:], uint8(123))
	f.Fuzz(func(t *testing.T, txNonce, values, tips, feeCap, senderAddr []byte, pendingBaseFee1 uint8) {
		//t.Parallel()
		ctx := context.Background()

		pendingBaseFee := uint64(pendingBaseFee1%16 + 1)
		if pendingBaseFee == 0 { // defensive only: %16+1 always yields a value in [1,16]
			t.Skip()
		}
		senders, senderIDs, txs, ok := poolsFromFuzzBytes(txNonce, values, tips, feeCap, senderAddr)
		if !ok {
			t.Skip()
		}

		assert, require := assert.New(t), require.New(t)
		assert.NoError(txs.Valid())

		var prevHashes types.Hashes
		ch := make(chan types.Announcements, 100)
		db, coreDB := memdb.NewTestPoolDB(t), memdb.NewTestDB(t)

		cfg := txpoolcfg.DefaultConfig
		sendersCache := kvcache.New(kvcache.DefaultCoherentConfig)
		pool, err := New(ch, coreDB, cfg, sendersCache, *u256.N1, nil, nil, log.New())
		assert.NoError(err)
		pool.senders.senderIDs = senderIDs
		for addr, id := range senderIDs {
			pool.senders.senderID2Addr[id] = addr
		}
		pool.senders.senderID = uint64(len(senderIDs))
		check := func(unwindTxs, minedTxs types.TxSlots, msg string) {
			pending, baseFee, queued := pool.pending, pool.baseFee, pool.queued
			best, worst := pending.Best(), pending.Worst()
			assert.LessOrEqual(pending.Len(), cfg.PendingSubPoolLimit)
			assert.False(worst != nil && best == nil, msg)
			assert.False(worst == nil && best != nil, msg)
			if worst != nil && worst.subPool < 0b1110 {
				t.Fatalf("pending worst too small %b", worst.subPool)
			}
			for _, tx := range pending.best.ms {
				i := tx.Tx
				if tx.subPool&NoNonceGaps > 0 {
					assert.GreaterOrEqual(i.Nonce, senders[i.SenderID].nonce, msg, i.SenderID)
				}
				if tx.subPool&EnoughFeeCapProtocol > 0 {
					assert.LessOrEqual(calcProtocolBaseFee(pendingBaseFee), tx.Tx.FeeCap, msg)
				}
				if tx.subPool&EnoughFeeCapBlock > 0 {
					assert.LessOrEqual(pendingBaseFee, tx.Tx.FeeCap, msg)
				}

				// side data structures must have all txs
				assert.True(pool.all.has(tx), msg)
				_, ok = pool.byHash[string(i.IDHash[:])]
				assert.True(ok)

				// pools can't have more than 1 tx with the same SenderID+Nonce
				iterateSubPoolUnordered(baseFee, func(mtx2 *metaTx) {
					tx2 := mtx2.Tx
					assert.False(tx2.SenderID == i.SenderID && tx2.Nonce == i.Nonce, msg)
				})
				iterateSubPoolUnordered(queued, func(mtx2 *metaTx) {
					tx2 := mtx2.Tx
					assert.False(tx2.SenderID == i.SenderID && tx2.Nonce == i.Nonce, msg)
				})
			}

			best, worst = baseFee.Best(), baseFee.Worst()

			assert.False(worst != nil && best == nil, msg)
			assert.False(worst == nil && best != nil, msg)
			assert.LessOrEqual(baseFee.Len(), cfg.BaseFeeSubPoolLimit, msg)
			if worst != nil && worst.subPool < 0b1100 {
				t.Fatalf("baseFee worst too small %b", worst.subPool)
			}
			iterateSubPoolUnordered(baseFee, func(tx *metaTx) {
				i := tx.Tx
				if tx.subPool&NoNonceGaps > 0 {
					assert.GreaterOrEqual(i.Nonce, senders[i.SenderID].nonce, msg)
				}
				if tx.subPool&EnoughFeeCapProtocol > 0 {
					assert.LessOrEqual(calcProtocolBaseFee(pendingBaseFee), tx.Tx.FeeCap, msg)
				}
				if tx.subPool&EnoughFeeCapBlock > 0 {
					assert.LessOrEqual(pendingBaseFee, tx.Tx.FeeCap, msg)
				}

				assert.True(pool.all.has(tx), msg)
				_, ok = pool.byHash[string(i.IDHash[:])]
				assert.True(ok, msg)
			})

			best, worst = queued.Best(), queued.Worst()
			assert.LessOrEqual(queued.Len(), cfg.QueuedSubPoolLimit)
			assert.False(worst != nil && best == nil, msg)
			assert.False(worst == nil && best != nil, msg)
			iterateSubPoolUnordered(queued, func(tx *metaTx) {
				i := tx.Tx
				if tx.subPool&NoNonceGaps > 0 {
					assert.GreaterOrEqual(i.Nonce, senders[i.SenderID].nonce, msg, i.SenderID, senders[i.SenderID].nonce)
				}
				if tx.subPool&EnoughFeeCapProtocol > 0 {
					assert.LessOrEqual(calcProtocolBaseFee(pendingBaseFee), tx.Tx.FeeCap, msg)
				}
				if tx.subPool&EnoughFeeCapBlock > 0 {
					assert.LessOrEqual(pendingBaseFee, tx.Tx.FeeCap, msg)
				}

				assert.True(pool.all.has(tx), "%s, %d, %x", msg, tx.Tx.Nonce, tx.Tx.IDHash)
				_, ok = pool.byHash[string(i.IDHash[:])]
				assert.True(ok, msg)
				assert.GreaterOrEqual(tx.Tx.FeeCap, pool.cfg.MinFeeCap)
			})

			// all txs in side data structures must be in some queue
			for _, txn := range pool.byHash {
				require.True(txn.bestIndex >= 0, msg)
				assert.True(txn.worstIndex >= 0, msg)
			}
			for id := range senders {
				//assert.True(senders[i].all.Len() > 0)
				pool.all.ascend(id, func(mt *metaTx) bool {
					require.True(mt.worstIndex >= 0, msg)
					assert.True(mt.bestIndex >= 0, msg)
					return true
				})
			}

			// mined txs must be removed
			for i := range minedTxs.Txs {
				_, ok = pool.byHash[string(minedTxs.Txs[i].IDHash[:])]
				assert.False(ok, msg)
			}

			if queued.Len() > 3 {
				// the Less func must be transitive (choose 3 semi-random elements)
				i := queued.Len() - 1
				if queued.best.Less(i, i-1) && queued.best.Less(i-1, i-2) {
					assert.True(queued.best.Less(i, i-2))
				}
			}
		}

		checkNotify := func(unwindTxs, minedTxs types.TxSlots, msg string) {
			select {
			case newAnnouncements := <-ch:
				assert.Greater(newAnnouncements.Len(), 0)
				for i := 0; i < newAnnouncements.Len(); i++ {
					_, _, newHash := newAnnouncements.At(i)
					for j := range unwindTxs.Txs {
						if bytes.Equal(unwindTxs.Txs[j].IDHash[:], newHash) {
							mt := pool.all.get(unwindTxs.Txs[j].SenderID, unwindTxs.Txs[j].Nonce)
							require.True(mt != nil && mt.currentSubPool == PendingSubPool, msg)
						}
					}
					for j := range minedTxs.Txs {
						if bytes.Equal(minedTxs.Txs[j].IDHash[:], newHash) {
							mt := pool.all.get(minedTxs.Txs[j].SenderID, minedTxs.Txs[j].Nonce)
							require.True(mt != nil && mt.currentSubPool == PendingSubPool, msg)
						}
					}
				}
			default: // no notifications - means pools must be unchanged or drop some txs
				pendingHashes := copyHashes(pool.pending)
				require.Zero(extractNewHashes(pendingHashes, prevHashes).Len())
			}
			prevHashes = copyHashes(pool.pending)
		}

		tx, err := db.BeginRw(ctx)
		require.NoError(err)
		defer tx.Rollback()
		// start blocks from 0 and use the empty hash - then kvcache will also work on this
		h0, h22 := gointerfaces.ConvertHashToH256([32]byte{}), gointerfaces.ConvertHashToH256([32]byte{22})

		var txID uint64
		_ = coreDB.View(ctx, func(tx kv.Tx) error {
			txID = tx.ViewID()
			return nil
		})
		change := &remote.StateChangeBatch{
			StateVersionId:      txID,
			PendingBlockBaseFee: pendingBaseFee,
			ChangeBatch: []*remote.StateChange{
				{BlockHeight: 0, BlockHash: h0},
			},
		}
		for id, sender := range senders {
			addr := pool.senders.senderID2Addr[id]
			v := make([]byte, types.EncodeSenderLengthForStorage(sender.nonce, sender.balance))
			types.EncodeSender(sender.nonce, sender.balance, v)
			change.ChangeBatch[0].Changes = append(change.ChangeBatch[0].Changes, &remote.AccountChange{
				Action:  remote.Action_UPSERT,
				Address: gointerfaces.ConvertAddressToH160(addr),
				Data:    v,
			})
		}
		// go to the first fork
		txs1, txs2, p2pReceived, txs3 := splitDataset(txs)
		err = pool.OnNewBlock(ctx, change, txs1, types.TxSlots{}, tx)
		assert.NoError(err)
		check(txs1, types.TxSlots{}, "fork1")
		checkNotify(txs1, types.TxSlots{}, "fork1")

		_, _, _ = p2pReceived, txs2, txs3
		change = &remote.StateChangeBatch{
			StateVersionId:      txID,
			PendingBlockBaseFee: pendingBaseFee,
			ChangeBatch: []*remote.StateChange{
				{BlockHeight: 1, BlockHash: h0},
			},
		}
		err = pool.OnNewBlock(ctx, change, types.TxSlots{}, txs2, tx)
		assert.NoError(err)
		check(types.TxSlots{}, txs2, "fork1 mined")
		checkNotify(types.TxSlots{}, txs2, "fork1 mined")

		// unwind everything and switch to a new fork (mined txs need to be unwound now)
		change = &remote.StateChangeBatch{
			StateVersionId:      txID,
			PendingBlockBaseFee: pendingBaseFee,
			ChangeBatch: []*remote.StateChange{
				{BlockHeight: 0, BlockHash: h0, Direction: remote.Direction_UNWIND},
			},
		}
		err = pool.OnNewBlock(ctx, change, txs2, types.TxSlots{}, tx)
		assert.NoError(err)
		check(txs2, types.TxSlots{}, "fork2")
		checkNotify(txs2, types.TxSlots{}, "fork2")

		change = &remote.StateChangeBatch{
			StateVersionId:      txID,
			PendingBlockBaseFee: pendingBaseFee,
			ChangeBatch: []*remote.StateChange{
				{BlockHeight: 1, BlockHash: h22},
			},
		}
		err = pool.OnNewBlock(ctx, change, types.TxSlots{}, txs3, tx)
		assert.NoError(err)
		check(types.TxSlots{}, txs3, "fork2 mined")
		checkNotify(types.TxSlots{}, txs3, "fork2 mined")

		// add some remote txs from p2p
		pool.AddRemoteTxs(ctx, p2pReceived)
		err = pool.processRemoteTxs(ctx)
		assert.NoError(err)
		check(p2pReceived, types.TxSlots{}, "p2pmsg1")
		checkNotify(p2pReceived, types.TxSlots{}, "p2pmsg1")

		err = pool.flushLocked(tx) // we don't test eviction here, because a dedicated test exists
		require.NoError(err)
		check(p2pReceived, types.TxSlots{}, "after_flush")
		checkNotify(p2pReceived, types.TxSlots{}, "after_flush")

		p2, err := New(ch, coreDB, txpoolcfg.DefaultConfig, sendersCache, *u256.N1, nil, nil, log.New())
		assert.NoError(err)
		p2.senders = pool.senders // senders are not persisted
		err = coreDB.View(ctx, func(coreTx kv.Tx) error { return p2.fromDB(ctx, tx, coreTx) })
		require.NoError(err)
		for _, txn := range p2.byHash {
			assert.Nil(txn.Tx.Rlp)
		}

		check(txs2, types.TxSlots{}, "fromDB")
		checkNotify(txs2, types.TxSlots{}, "fromDB")
		assert.Equal(pool.senders.senderID, p2.senders.senderID)
		assert.Equal(pool.lastSeenBlock.Load(), p2.lastSeenBlock.Load())
		assert.Equal(pool.pending.Len(), p2.pending.Len())
		assert.Equal(pool.baseFee.Len(), p2.baseFee.Len())
		require.Equal(pool.queued.Len(), p2.queued.Len())
		assert.Equal(pool.pendingBaseFee.Load(), p2.pendingBaseFee.Load())
	})
}

// copyHashes flattens the IDHash of every tx in the pending best queue
func copyHashes(p *PendingPool) (hashes types.Hashes) {
	for i := range p.best.ms {
		hashes = append(hashes, p.best.ms[i].Tx.IDHash[:]...)
	}
	return hashes
}

// extractNewHashes returns the hashes from h1 that do not exist in h2
func extractNewHashes(h1, h2 types.Hashes) (result types.Hashes) {
	for i := 0; i < h1.Len(); i++ {
		found := false
		for j := 0; j < h2.Len(); j++ {
			if bytes.Equal(h1.At(i), h2.At(j)) {
				found = true
				break
			}
		}
		if !found {
			result = append(result, h1.At(i)...)
		}
	}
	return result
}
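
// Tiny example (hypothetical test) of the set difference above: with
// h1 = {a, b} and h2 = {a}, only b is new.
func TestExtractNewHashesSketch(t *testing.T) {
	a, b := make(types.Hashes, 32), make(types.Hashes, 32)
	a[0], b[0] = 1, 2
	h1 := append(append(types.Hashes{}, a...), b...)
	res := extractNewHashes(h1, a)
	require.Equal(t, 1, res.Len())
	assert.True(t, bytes.Equal(b, res.At(0)))
}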