github.com/gnolang/gno@v0.0.0-20240520182011-228e9d0192ce/tm2/pkg/bft/mempool/clist_mempool_test.go

package mempool

import (
	"crypto/rand"
	"crypto/sha256"
	"encoding/binary"
	"fmt"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/gnolang/gno/tm2/pkg/bft/abci/example/counter"
	"github.com/gnolang/gno/tm2/pkg/bft/abci/example/kvstore"
	abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types"
	cfg "github.com/gnolang/gno/tm2/pkg/bft/mempool/config"
	"github.com/gnolang/gno/tm2/pkg/bft/proxy"
	"github.com/gnolang/gno/tm2/pkg/bft/types"
	"github.com/gnolang/gno/tm2/pkg/log"
	"github.com/gnolang/gno/tm2/pkg/random"
)

// A cleanupFunc cleans up any config / test files created for a particular
// test.
type cleanupFunc func()

const testMaxTxBytes int64 = 1024

func newMempoolWithApp(cc proxy.ClientCreator) (*CListMempool, cleanupFunc) {
	return newMempoolWithAppAndConfig(cc, cfg.TestMempoolConfig())
}

func newMempoolWithAppAndConfig(cc proxy.ClientCreator, config *cfg.MempoolConfig) (*CListMempool, cleanupFunc) {
	appConnMem, _ := cc.NewABCIClient()
	appConnMem.SetLogger(log.NewNoopLogger().With("module", "abci-client", "connection", "mempool"))
	err := appConnMem.Start()
	if err != nil {
		panic(err)
	}
	mempool := NewCListMempool(config, appConnMem, 0, testMaxTxBytes)
	mempool.SetLogger(log.NewNoopLogger())
	return mempool, func() {
		if config.RootDir != "" {
			os.RemoveAll(config.RootDir)
		}
	}
}

func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
	t.Helper()

	timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
	select {
	case <-ch:
		t.Fatal("Expected not to fire")
	case <-timer.C:
	}
}

func ensureFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
	t.Helper()

	timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
	select {
	case <-ch:
	case <-timer.C:
		t.Fatal("Expected to fire")
	}
}

func checkTxs(t *testing.T, mempool Mempool, count int, peerID uint16, failOnCheckTxError bool) types.Txs {
	t.Helper()

	txs := make(types.Txs, count)
	txInfo := TxInfo{SenderID: peerID}
	for i := 0; i < count; i++ {
		txBytes := make([]byte, 20)
		txs[i] = txBytes
		_, err := rand.Read(txBytes)
		if err != nil {
			t.Error(err)
		}
		if err := mempool.CheckTxWithInfo(txBytes, nil, txInfo); err != nil {
			if failOnCheckTxError {
				t.Fatalf("CheckTx failed: %v while checking #%d tx", err, i)
			} else {
				continue
			}
		}
	}
	return txs
}

func TestReapMaxBytesMaxGas(t *testing.T) {
	app := kvstore.NewKVStoreApplication()
	cc := proxy.NewLocalClientCreator(app)
	mempool, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	// Ensure gas calculation behaves as expected
	checkTxs(t, mempool, 1, UnknownPeerID, true)
	tx0 := mempool.TxsFront().Value.(*mempoolTx)
	// assert that kv store has gas wanted = 1.
	require.Equal(t, app.CheckTx(abci.RequestCheckTx{Tx: tx0.tx}).GasWanted, int64(1), "KVStore had a gas value neq to 1")
	require.Equal(t, tx0.gasWanted, int64(1), "transaction's gas was set incorrectly")
	// ensure each tx is 20 bytes long
	require.Equal(t, len(tx0.tx), 20, "Tx is not 20 bytes")
	mempool.Flush()

	// each table driven test creates numTxsToCreate txs with checkTx, and at the end clears all remaining txs.
	// each tx has 20 bytes and 1 gas
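	// The expected reap count below therefore works out to
	// min(numTxsToCreate, maxDataBytes/20, maxGas), where -1 disables the
	// corresponding limit.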
	tests := []struct {
		numTxsToCreate int
		maxDataBytes   int64
		maxGas         int64
		expectedNumTxs int
	}{
		0:  {20, 1e6, -1, 20},
		1:  {20, 1e6, 0, 0},
		2:  {20, 1e6, 10, 10},
		3:  {20, 1e6, 30, 20},
		4:  {20, 1, -1, 0},
		5:  {20, 1, 10, 0},
		6:  {20, 10, 10, 0},
		7:  {20, 20, 10, 1},
		8:  {20, 200, -1, 10},
		9:  {20, 200, 5, 5},
		10: {20, 200, 10, 10},
		11: {20, 200, 15, 10},
		12: {20, 20000, -1, 20},
		13: {20, 20000, 5, 5},
		14: {20, 20000, 30, 20},
	}
	for tcIndex, tt := range tests {
		checkTxs(t, mempool, tt.numTxsToCreate, UnknownPeerID, false)
		got := mempool.ReapMaxBytesMaxGas(tt.maxDataBytes, tt.maxGas)
		assert.Equal(t, tt.expectedNumTxs, len(got), "Got %d txs, expected %d, tc #%d",
			len(got), tt.expectedNumTxs, tcIndex)
		mempool.Flush()
	}
}

/* XXX test PreCheck filter.
XXX this used to be a PostCheck filter test, so the code doesn't make much sense.
TODO change numTxsToCreate to a slice of tx sizes.
TODO implement PreCheckMaxTxBytes()

func TestMempoolFilters(t *testing.T) {
	app := kvstore.NewKVStoreApplication()
	cc := proxy.NewLocalClientCreator(app)
	mempool, cleanup := newMempoolWithApp(cc)
	defer cleanup()
	emptyTxArr := []types.Tx{[]byte{}}

	nopPreFilter := func(tx types.Tx, res abci.ResponseCheckTx) error { return nil }

	// each table driven test creates numTxsToCreate txs with checkTx, and at the end clears all remaining txs.
	// each tx has 20 bytes and 1 gas
	tests := []struct {
		numTxsToCreate int
		maxTxBytes     int64
		preFilter      PreCheckFunc
		expectedNumTxs int
	}{
		{10, 1024, nopPreFilter, 10},
		{10, 10, nopPreFilter, 0},
		{10, 19, nopPreFilter, 0},
		{10, 20, nopPreFilter, 10},
		{10, 21, nopPreFilter, 10},
		{10, 1024, PreCheckMaxTxBytes(-1), 10},
	}
	for tcIndex, tt := range tests {
		mempool.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), nil), nil, tt.postFilter, tt.maxTxBytes)
		checkTxs(t, mempool, tt.numTxsToCreate, UnknownPeerID, false)
		require.Equal(t, tt.expectedNumTxs, mempool.Size(), "mempool had the incorrect size, on test case %d", tcIndex)
		mempool.Flush()
	}
}
*/
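// TestMempoolUpdate exercises the interplay between the tx cache and the
// mempool proper: committed txs stay in the cache (so resubmitting them
// errors), valid txs are removed from the pool on Update, and invalid txs
// are evicted from both the cache and the pool so they can be resubmitted.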
func TestMempoolUpdate(t *testing.T) {
	app := kvstore.NewKVStoreApplication()
	cc := proxy.NewLocalClientCreator(app)
	mempool, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	// 1. Adds valid txs to the cache
	{
		mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, nil), nil, 0)
		err := mempool.CheckTx([]byte{0x01}, nil)
		if assert.Error(t, err) {
			assert.Equal(t, ErrTxInCache, err)
		}
	}

	// 2. Removes valid txs from the mempool
	{
		err := mempool.CheckTx([]byte{0x02}, nil)
		require.NoError(t, err)
		mempool.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, nil), nil, 0)
		assert.Zero(t, mempool.Size())
	}

	// 3. Removes invalid transactions from the cache and the mempool (if present)
	{
		err := mempool.CheckTx([]byte{0x03}, nil)
		require.NoError(t, err)
		mempool.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, abci.StringError("1")), nil, 0)
		assert.Zero(t, mempool.Size())

		err = mempool.CheckTx([]byte{0x03}, nil)
		assert.NoError(t, err)
	}
}

func TestTxsAvailable(t *testing.T) {
	app := kvstore.NewKVStoreApplication()
	cc := proxy.NewLocalClientCreator(app)
	mempool, cleanup := newMempoolWithApp(cc)
	defer cleanup()
	mempool.EnableTxsAvailable()

	timeoutMS := 500

	// with no txs, it shouldn't fire
	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)

	// send a bunch of txs, it should only fire once
	txs := checkTxs(t, mempool, 100, UnknownPeerID, true)
	ensureFire(t, mempool.TxsAvailable(), timeoutMS)
	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)

	// call update with half the txs.
	// it should fire once now for the new height
	// since there are still txs left
	committedTxs, txs := txs[:50], txs[50:]
	if err := mempool.Update(1, committedTxs, abciResponses(len(committedTxs), nil), nil, 0); err != nil {
		t.Error(err)
	}
	ensureFire(t, mempool.TxsAvailable(), timeoutMS)
	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)

	// send a bunch more txs. we already fired for this height so it shouldn't fire again
	moreTxs := checkTxs(t, mempool, 50, UnknownPeerID, true)
	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)

	// now call update with all the txs. it should not fire as there are no txs left
	committedTxs = append(txs, moreTxs...) //nolint: gocritic
	if err := mempool.Update(2, committedTxs, abciResponses(len(committedTxs), nil), nil, 0); err != nil {
		t.Error(err)
	}
	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)

	// send a bunch more txs, it should only fire once
	checkTxs(t, mempool, 100, UnknownPeerID, true)
	ensureFire(t, mempool.TxsAvailable(), timeoutMS)
	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
}

func TestSerialReap(t *testing.T) {
	app := counter.NewCounterApplication(true)
	app.SetOption(abci.RequestSetOption{Key: "serial", Value: "on"})
	cc := proxy.NewLocalClientCreator(app)

	mempool, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	appConnCon, _ := cc.NewABCIClient()
	appConnCon.SetLogger(log.NewTestingLogger(t).With("module", "abci-client", "connection", "consensus"))
	err := appConnCon.Start()
	require.Nil(t, err)

	cacheMap := make(map[string]struct{})
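	// Each tx is the 8-byte big-endian encoding of its index, which keeps txs
	// unique and, with the counter app in serial mode, lets any index at or
	// above the committed count pass CheckTx.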
	deliverTxsRange := func(start, end int) {
		// Deliver some txs.
		for i := start; i < end; i++ {
			// This will succeed
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			err := mempool.CheckTx(txBytes, nil)
			_, cached := cacheMap[string(txBytes)]
			if cached {
				require.NotNil(t, err, "expected error for cached tx")
			} else {
				require.Nil(t, err, "expected no err for uncached tx")
			}
			cacheMap[string(txBytes)] = struct{}{}

			// Duplicates are cached and should return error
			err = mempool.CheckTx(txBytes, nil)
			require.NotNil(t, err, "Expected error after CheckTx on duplicated tx")
		}
	}

	reapCheck := func(exp int) {
		txs := mempool.ReapMaxBytesMaxGas(-1, -1)
		require.Equal(t, len(txs), exp, fmt.Sprintf("Expected to reap %v txs but got %v", exp, len(txs)))
	}

	updateRange := func(start, end int) {
		txs := make([]types.Tx, 0)
		for i := start; i < end; i++ {
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			txs = append(txs, txBytes)
		}
		if err := mempool.Update(0, txs, abciResponses(len(txs), nil), nil, 0); err != nil {
			t.Error(err)
		}
	}

	commitRange := func(start, end int) {
		// Deliver some txs.
		for i := start; i < end; i++ {
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes})
			if err != nil {
				t.Errorf("Client error committing tx: %v", err)
			}
			if res.IsErr() {
				t.Errorf("Error committing tx. Error:%v result:%X log:%v",
					res.Error, res.Data, res.Log)
			}
		}
		res, err := appConnCon.CommitSync()
		if err != nil {
			t.Errorf("Client error committing: %v", err)
		}
		if len(res.Data) != 8 {
			t.Errorf("Error committing. Hash:%X", res.Data)
		}
	}

	// ----------------------------------------

	// Deliver some txs.
	deliverTxsRange(0, 100)

	// Reap the txs.
	reapCheck(100)

	// Reap again. We should get the same amount
	reapCheck(100)

	// Deliver 0 to 999, we should reap 900 new txs
	// because 100 were already counted.
	deliverTxsRange(0, 1000)

	// Reap the txs.
	reapCheck(1000)

	// Reap again. We should get the same amount
	reapCheck(1000)

	// Commit from the consensus AppConn
	commitRange(0, 500)
	updateRange(0, 500)

	// We should have 500 left.
	reapCheck(500)

	// Deliver 100 invalid txs and 100 valid txs
	deliverTxsRange(900, 1100)

	// We should have 600 now.
	reapCheck(600)
}
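// TestMempoolCloseWAL checks that the mempool WAL records each checked tx and
// that CloseWAL discards the handle, so later writes never reach the file.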
func TestMempoolCloseWAL(t *testing.T) {
	// 1. Create the temporary directory for mempool and WAL testing.
	rootDir := t.TempDir()

	// 2. Ensure that it doesn't contain any elements -- Sanity check
	m1, err := filepath.Glob(filepath.Join(rootDir, "*"))
	require.Nil(t, err, "successful globbing expected")
	require.Equal(t, 0, len(m1), "no matches yet")

	// 3. Create the mempool
	wcfg := cfg.TestMempoolConfig()
	wcfg.RootDir = rootDir
	app := kvstore.NewKVStoreApplication()
	cc := proxy.NewLocalClientCreator(app)
	mempool, cleanup := newMempoolWithAppAndConfig(cc, wcfg)
	defer cleanup()
	mempool.height = 10
	mempool.InitWAL()

	// 4. Ensure that the directory contains the WAL file
	m2, err := filepath.Glob(filepath.Join(rootDir, "*"))
	require.Nil(t, err, "successful globbing expected")
	require.Equal(t, 1, len(m2), "expecting only the wal file to match")

	// 5. Write some contents to the WAL
	mempool.CheckTx(types.Tx([]byte("foo")), nil)
	walFilepath := mempool.wal.Path
	sum1 := checksumFile(t, walFilepath)

	// 6. Sanity check to ensure that the written TX matches the expectation.
	require.Equal(t, sum1, checksumIt([]byte("foo\n")), "foo with a newline should be written")

	// 7. Invoke CloseWAL() and ensure it discards the
	// WAL thus any other write won't go through.
	mempool.CloseWAL()
	mempool.CheckTx(types.Tx([]byte("bar")), nil)
	sum2 := checksumFile(t, walFilepath)
	require.Equal(t, sum1, sum2, "expected no change to the WAL after invoking CloseWAL() since it was discarded")

	// 8. Sanity check to ensure that the WAL file still exists
	m3, err := filepath.Glob(filepath.Join(rootDir, "*"))
	require.Nil(t, err, "successful globbing expected")
	require.Equal(t, 1, len(m3), "expecting only the wal file to match")
}

func TestMempoolMaxMsgSize(t *testing.T) {
	app := kvstore.NewKVStoreApplication()
	cc := proxy.NewLocalClientCreator(app)
	mempl, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	maxTxSize := int(mempl.MaxTxBytes())
	if maxTxSize < 1024 {
		panic("expected some default greater than or equal to 1024")
	}

	testCases := []struct {
		len int
		err bool
	}{
		// check small txs. no error
		{10, false},
		{100, false},
		{1000, false},

		// check around maxTxSize
		// changes from no error to error
		{maxTxSize - 2, false},
		{maxTxSize - 1, false},
		{maxTxSize, false},
		{maxTxSize + 1, true},
		{maxTxSize + 2, true},
	}

	for i, testCase := range testCases {
		caseString := fmt.Sprintf("case %d, len %d", i, testCase.len)

		tx := random.RandBytes(testCase.len)
		err := mempl.CheckTx(tx, nil)
		if !testCase.err {
			require.True(t, len(tx) <= maxTxSize, caseString)
			require.NoError(t, err, caseString)
		} else {
			require.True(t, len(tx) > maxTxSize, caseString)
			require.Equal(t, err, TxTooLargeError{int64(maxTxSize), int64(testCase.len)}, caseString)
		}
	}
}

func TestMempoolMaxPendingTxsBytes(t *testing.T) {
	app := kvstore.NewKVStoreApplication()
	cc := proxy.NewLocalClientCreator(app)
	config := cfg.TestMempoolConfig()
	config.MaxPendingTxsBytes = 10
	mempool, cleanup := newMempoolWithAppAndConfig(cc, config)
	defer cleanup()

	// 1. zero by default
	assert.EqualValues(t, 0, mempool.TxsBytes())

	// 2. len(tx) after CheckTx
	err := mempool.CheckTx([]byte{0x01}, nil)
	require.NoError(t, err)
	assert.EqualValues(t, 1, mempool.TxsBytes())

	// 3. zero again after tx is removed by Update
	mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, nil), nil, 0)
	assert.EqualValues(t, 0, mempool.TxsBytes())

	// 4. zero after Flush
	err = mempool.CheckTx([]byte{0x02, 0x03}, nil)
	require.NoError(t, err)
	assert.EqualValues(t, 2, mempool.TxsBytes())

	mempool.Flush()
	assert.EqualValues(t, 0, mempool.TxsBytes())

	// 5. MempoolIsFullError is returned when/if MaxPendingTxsBytes limit is reached.
	err = mempool.CheckTx([]byte{0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04}, nil)
	require.NoError(t, err)
	err = mempool.CheckTx([]byte{0x05}, nil)
	if assert.Error(t, err) {
		assert.IsType(t, MempoolIsFullError{}, err)
	}
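	// The counter app is used below because it can invalidate a pending tx:
	// once tx 0 has been committed through the consensus connection,
	// rechecking it fails and the mempool drops it, returning TxsBytes to zero.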
	// 6. zero after tx is rechecked and removed due to not being valid anymore
	app2 := counter.NewCounterApplication(true)
	cc = proxy.NewLocalClientCreator(app2)
	mempool, cleanup = newMempoolWithApp(cc)
	defer cleanup()

	txBytes := make([]byte, 8)
	binary.BigEndian.PutUint64(txBytes, uint64(0))

	err = mempool.CheckTx(txBytes, nil)
	require.NoError(t, err)
	assert.EqualValues(t, 8, mempool.TxsBytes())

	appConnCon, _ := cc.NewABCIClient()
	appConnCon.SetLogger(log.NewTestingLogger(t).With("module", "abci-client", "connection", "consensus"))
	err = appConnCon.Start()
	require.Nil(t, err)
	defer appConnCon.Stop()
	res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes})
	require.NoError(t, err)
	require.Nil(t, res.Error)
	res2, err := appConnCon.CommitSync()
	require.NoError(t, err)
	require.NotEmpty(t, res2.Data)

	// Pretend like we committed nothing so txBytes gets rechecked and removed.
	mempool.Update(1, []types.Tx{}, abciResponses(0, nil), nil, 0)
	assert.EqualValues(t, 0, mempool.TxsBytes())
}

func checksumIt(data []byte) string {
	h := sha256.New()
	h.Write(data)
	return fmt.Sprintf("%x", h.Sum(nil))
}

func checksumFile(t *testing.T, p string) string {
	t.Helper()

	data, err := os.ReadFile(p)
	require.Nil(t, err, "expecting successful read of %q", p)
	return checksumIt(data)
}

func abciResponses(n int, err abci.Error) []abci.ResponseDeliverTx {
	responses := make([]abci.ResponseDeliverTx, 0, n)
	for i := 0; i < n; i++ {
		responses = append(responses, abci.ResponseDeliverTx{
			ResponseBase: abci.ResponseBase{
				Error: err,
			},
		})
	}
	return responses
}
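
// A minimal known-answer check for checksumIt (an illustrative addition, not
// part of the original suite): checksumIt is a plain hex-encoded SHA-256
// digest, so the WAL assertion in TestMempoolCloseWAL reduces to comparing
// digests of raw file bytes.
func TestChecksumItKnownVector(t *testing.T) {
	// Expected digest matches `printf 'foo\n' | sha256sum`.
	require.Equal(t,
		"b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c",
		checksumIt([]byte("foo\n")))
}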