github.com/badrootd/celestia-core@v0.0.0-20240305091328-aa4207a4b25d/mempool/v0/clist_mempool_test.go

package v0

import (
	"bytes"
	"crypto/rand"
	"encoding/binary"
	"fmt"
	mrand "math/rand"
	"os"
	"testing"
	"time"

	"github.com/gogo/protobuf/proto"
	gogotypes "github.com/gogo/protobuf/types"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	abciclient "github.com/badrootd/celestia-core/abci/client"
	abciclimocks "github.com/badrootd/celestia-core/abci/client/mocks"
	"github.com/badrootd/celestia-core/abci/example/kvstore"
	abciserver "github.com/badrootd/celestia-core/abci/server"
	abci "github.com/badrootd/celestia-core/abci/types"
	"github.com/badrootd/celestia-core/config"
	"github.com/badrootd/celestia-core/libs/log"
	cmtrand "github.com/badrootd/celestia-core/libs/rand"
	"github.com/badrootd/celestia-core/libs/service"
	"github.com/badrootd/celestia-core/mempool"
	"github.com/badrootd/celestia-core/pkg/consts"
	tmproto "github.com/badrootd/celestia-core/proto/tendermint/types"
	"github.com/badrootd/celestia-core/proxy"
	"github.com/badrootd/celestia-core/types"
)

// A cleanupFunc cleans up any config / test files created for a particular
// test.
type cleanupFunc func()

func newMempoolWithAppMock(cc proxy.ClientCreator, client abciclient.Client) (*CListMempool, cleanupFunc, error) {
	conf := config.ResetTestRoot("mempool_test")

	mp, cu := newMempoolWithAppAndConfigMock(cc, conf, client)
	return mp, cu, nil
}

func newMempoolWithAppAndConfigMock(cc proxy.ClientCreator,
	cfg *config.Config,
	client abciclient.Client) (*CListMempool, cleanupFunc) {
	appConnMem := client
	appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
	err := appConnMem.Start()
	if err != nil {
		panic(err)
	}

	mp := NewCListMempool(cfg.Mempool, appConnMem, 0)
	mp.SetLogger(log.TestingLogger())

	return mp, func() { os.RemoveAll(cfg.RootDir) }
}

func newMempoolWithApp(cc proxy.ClientCreator) (*CListMempool, cleanupFunc) {
	conf := config.ResetTestRoot("mempool_test")

	mp, cu := newMempoolWithAppAndConfig(cc, conf)
	return mp, cu
}

func newMempoolWithAppAndConfig(cc proxy.ClientCreator, cfg *config.Config) (*CListMempool, cleanupFunc) {
	appConnMem, _ := cc.NewABCIClient()
	appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
	err := appConnMem.Start()
	if err != nil {
		panic(err)
	}

	mp := NewCListMempool(cfg.Mempool, appConnMem, 0)
	mp.SetLogger(log.TestingLogger())

	return mp, func() { os.RemoveAll(cfg.RootDir) }
}

func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
	timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
	select {
	case <-ch:
		t.Fatal("Expected not to fire")
	case <-timer.C:
	}
}

func ensureFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
	timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
	select {
	case <-ch:
	case <-timer.C:
		t.Fatal("Expected to fire")
	}
}
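// checkTxs generates `count` random 20-byte transactions, submits each of them
// to the mempool via CheckTx attributed to the given peer ID, and returns
// them. Txs rejected by a precheck filter are skipped rather than treated as
// fatal, so TestMempoolFilters can assert on the resulting mempool size.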
func checkTxs(t *testing.T, mp mempool.Mempool, count int, peerID uint16) types.Txs {
	txs := make(types.Txs, count)
	txInfo := mempool.TxInfo{SenderID: peerID}
	for i := 0; i < count; i++ {
		txBytes := make([]byte, 20)
		txs[i] = txBytes
		_, err := rand.Read(txBytes)
		if err != nil {
			t.Error(err)
		}
		if err := mp.CheckTx(txBytes, nil, txInfo); err != nil {
			// Skip invalid txs.
			// TestMempoolFilters will fail otherwise. It asserts a number of txs
			// returned.
			if mempool.IsPreCheckError(err) {
				continue
			}
			t.Fatalf("CheckTx failed: %v while checking #%d tx", err, i)
		}
	}
	return txs
}

func TestReapMaxBytesMaxGas(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mp, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	// Ensure gas calculation behaves as expected
	checkTxs(t, mp, 1, mempool.UnknownPeerID)
	tx0 := mp.TxsFront().Value.(*mempoolTx)
	// assert that kv store has gas wanted = 1.
	require.Equal(t, app.CheckTx(abci.RequestCheckTx{Tx: tx0.tx}).GasWanted, int64(1), "KVStore had a gas value not equal to 1")
	require.Equal(t, tx0.gasWanted, int64(1), "transaction's gas was set incorrectly")
	// ensure each tx is 20 bytes long
	require.Equal(t, len(tx0.tx), 20, "Tx is not 20 bytes long")
	mp.Flush()

	// Each table-driven test creates numTxsToCreate txs with CheckTx and
	// clears all remaining txs at the end. Each tx is 20 bytes.
	tests := []struct {
		numTxsToCreate int
		maxBytes       int64
		maxGas         int64
		expectedNumTxs int
	}{
		{20, -1, -1, 20},
		{20, -1, 0, 0},
		{20, -1, 10, 10},
		{20, -1, 30, 20},
		{20, 0, -1, 0},
		{20, 0, 10, 0},
		{20, 10, 10, 0},
		{20, 24, 10, 1},
		{20, 240, 5, 5},
		{20, 240, -1, 10},
		{20, 240, 10, 10},
		{20, 240, 15, 10},
		{20, 20000, -1, 20},
		{20, 20000, 5, 5},
		{20, 20000, 30, 20},
	}
	for tcIndex, tt := range tests {
		checkTxs(t, mp, tt.numTxsToCreate, mempool.UnknownPeerID)
		got := mp.ReapMaxBytesMaxGas(tt.maxBytes, tt.maxGas)
		assert.Equal(t, tt.expectedNumTxs, len(got), "Got %d txs, expected %d, tc #%d",
			len(got), tt.expectedNumTxs, tcIndex)
		mp.Flush()
	}
}
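// TestMempoolFilters exercises pre-check and post-check filters through
// Update. The byte thresholds here (and in TestReapMaxBytesMaxGas above)
// appear to account for protobuf encoding overhead rather than raw tx length:
// the expected counts are consistent with each 20-byte tx costing 22 bytes
// once length-prefixed (1-byte field tag + 1-byte varint length + 20-byte
// payload), e.g. PreCheckMaxBytes(26) admits every tx while
// PreCheckMaxBytes(10) admits none, and maxBytes=24 reaps exactly one tx.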
func TestMempoolFilters(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mp, cleanup := newMempoolWithApp(cc)
	defer cleanup()
	emptyTxArr := []types.Tx{[]byte{}}

	nopPreFilter := func(tx types.Tx) error { return nil }
	nopPostFilter := func(tx types.Tx, res *abci.ResponseCheckTx) error { return nil }

	// Each table-driven test creates numTxsToCreate txs with CheckTx and
	// clears all remaining txs at the end. Each tx is 20 bytes.
	tests := []struct {
		numTxsToCreate int
		preFilter      mempool.PreCheckFunc
		postFilter     mempool.PostCheckFunc
		expectedNumTxs int
	}{
		{10, nopPreFilter, nopPostFilter, 10},
		{10, mempool.PreCheckMaxBytes(10), nopPostFilter, 0},
		{10, mempool.PreCheckMaxBytes(26), nopPostFilter, 10},
		{10, nopPreFilter, mempool.PostCheckMaxGas(-1), 10},
		{10, nopPreFilter, mempool.PostCheckMaxGas(0), 0},
		{10, nopPreFilter, mempool.PostCheckMaxGas(1), 10},
		{10, nopPreFilter, mempool.PostCheckMaxGas(3000), 10},
		{10, mempool.PreCheckMaxBytes(10), mempool.PostCheckMaxGas(20), 0},
		{10, mempool.PreCheckMaxBytes(30), mempool.PostCheckMaxGas(20), 10},
		{10, mempool.PreCheckMaxBytes(28), mempool.PostCheckMaxGas(1), 10},
		{10, mempool.PreCheckMaxBytes(22), mempool.PostCheckMaxGas(0), 0},
	}
	for tcIndex, tt := range tests {
		err := mp.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter)
		require.NoError(t, err)
		checkTxs(t, mp, tt.numTxsToCreate, mempool.UnknownPeerID)
		require.Equal(t, tt.expectedNumTxs, mp.Size(), "mempool had the incorrect size, on test case %d", tcIndex)
		mp.Flush()
	}
}

func TestMempoolUpdate(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mp, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	// 1. Adds valid txs to the cache
	{
		err := mp.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
		require.NoError(t, err)
		err = mp.CheckTx([]byte{0x01}, nil, mempool.TxInfo{})
		if assert.Error(t, err) {
			assert.Equal(t, mempool.ErrTxInCache, err)
		}
	}

	// 2. Removes valid txs from the mempool
	{
		err := mp.CheckTx([]byte{0x02}, nil, mempool.TxInfo{})
		require.NoError(t, err)
		err = mp.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
		require.NoError(t, err)
		assert.Zero(t, mp.Size())
	}

	// 3. Removes invalid transactions from the cache and the mempool (if present)
	{
		err := mp.CheckTx([]byte{0x03}, nil, mempool.TxInfo{})
		require.NoError(t, err)
		err = mp.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, 1), nil, nil)
		require.NoError(t, err)
		assert.Zero(t, mp.Size())

		// The invalid tx was also evicted from the cache, so resubmitting it succeeds.
		err = mp.CheckTx([]byte{0x03}, nil, mempool.TxInfo{})
		require.NoError(t, err)
	}
}
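// TestMempoolUpdateDoesNotPanicWhenApplicationMissedTx uses a mocked ABCI
// client to simulate an application that never responds to one of the recheck
// requests issued by Update. It is a regression test: previous versions of
// this code panicked when a recheck-tx response went missing.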
func TestMempoolUpdateDoesNotPanicWhenApplicationMissedTx(t *testing.T) {
	var callback abciclient.Callback
	mockClient := new(abciclimocks.Client)
	mockClient.On("Start").Return(nil)
	mockClient.On("SetLogger", mock.Anything)

	mockClient.On("Error").Return(nil).Times(4)
	mockClient.On("FlushAsync", mock.Anything).Return(abciclient.NewReqRes(abci.ToRequestFlush()), nil)
	mockClient.On("SetResponseCallback", mock.MatchedBy(func(cb abciclient.Callback) bool { callback = cb; return true }))

	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mp, cleanup, err := newMempoolWithAppMock(cc, mockClient)
	require.NoError(t, err)
	defer cleanup()

	// Add 4 transactions to the mempool by calling the mempool's `CheckTx` on each of them.
	txs := []types.Tx{[]byte{0x01}, []byte{0x02}, []byte{0x03}, []byte{0x04}}
	for _, tx := range txs {
		reqRes := abciclient.NewReqRes(abci.ToRequestCheckTx(abci.RequestCheckTx{Tx: tx}))
		reqRes.Response = abci.ToResponseCheckTx(abci.ResponseCheckTx{Code: abci.CodeTypeOK})

		mockClient.On("CheckTxAsync", mock.Anything, mock.Anything).Return(reqRes, nil)
		err := mp.CheckTx(tx, nil, mempool.TxInfo{})
		require.NoError(t, err)

		// ensure that the callback that the mempool sets on the ReqRes is run.
		reqRes.InvokeCallback()
	}

	// Call Update to remove the first transaction from the mempool.
	// This call also triggers the mempool to recheck its remaining transactions.
	err = mp.Update(0, []types.Tx{txs[0]}, abciResponses(1, abci.CodeTypeOK), nil, nil)
	require.Nil(t, err)

	// The mempool has now sent its requests off to the client to be rechecked
	// and is waiting for the corresponding callbacks to be called.
	// We now invoke the mempool-supplied callback for txs[1] and txs[3], the
	// first and third of the rechecked transactions. This simulates the client
	// dropping the recheck response for txs[2]. Previous versions of this code
	// panicked when the ABCI application missed a recheck-tx request.
	resp := abci.ResponseCheckTx{Code: abci.CodeTypeOK}
	req := abci.RequestCheckTx{Tx: txs[1]}
	callback(abci.ToRequestCheckTx(req), abci.ToResponseCheckTx(resp))

	req = abci.RequestCheckTx{Tx: txs[3]}
	callback(abci.ToRequestCheckTx(req), abci.ToResponseCheckTx(resp))
	mockClient.AssertExpectations(t)
}
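// TestMempool_KeepInvalidTxsInCache runs with KeepInvalidTxsInCache enabled:
// a tx that fails DeliverTx in a block must stay in the cache after Update,
// so resubmitting it via CheckTx fails with ErrTxInCache.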
func TestMempool_KeepInvalidTxsInCache(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	wcfg := config.DefaultConfig()
	wcfg.Mempool.KeepInvalidTxsInCache = true
	mp, cleanup := newMempoolWithAppAndConfig(cc, wcfg)
	defer cleanup()

	// 1. An invalid transaction must remain in the cache after Update
	{
		a := make([]byte, 8)
		binary.BigEndian.PutUint64(a, 0)

		b := make([]byte, 8)
		binary.BigEndian.PutUint64(b, 1)

		err := mp.CheckTx(b, nil, mempool.TxInfo{})
		require.NoError(t, err)

		// simulate new block
		_ = app.DeliverTx(abci.RequestDeliverTx{Tx: a})
		_ = app.DeliverTx(abci.RequestDeliverTx{Tx: b})
		err = mp.Update(1, []types.Tx{a, b},
			[]*abci.ResponseDeliverTx{{Code: abci.CodeTypeOK}, {Code: 2}}, nil, nil)
		require.NoError(t, err)

		// a must be added to the cache
		err = mp.CheckTx(a, nil, mempool.TxInfo{})
		if assert.Error(t, err) {
			assert.Equal(t, mempool.ErrTxInCache, err)
		}

		// b must remain in the cache
		err = mp.CheckTx(b, nil, mempool.TxInfo{})
		if assert.Error(t, err) {
			assert.Equal(t, mempool.ErrTxInCache, err)
		}
	}

	// 2. A transaction removed from the cache can be resubmitted via CheckTx
	{
		a := make([]byte, 8)
		binary.BigEndian.PutUint64(a, 0)

		// remove a from the cache to test (2)
		mp.cache.Remove(a)

		err := mp.CheckTx(a, nil, mempool.TxInfo{})
		require.NoError(t, err)
	}
}

func TestTxsAvailable(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mp, cleanup := newMempoolWithApp(cc)
	defer cleanup()
	mp.EnableTxsAvailable()

	timeoutMS := 500

	// with no txs, it shouldn't fire
	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)

	// send a bunch of txs, it should only fire once
	txs := checkTxs(t, mp, 100, mempool.UnknownPeerID)
	ensureFire(t, mp.TxsAvailable(), timeoutMS)
	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)

	// call update with half the txs.
	// it should fire once now for the new height
	// since there are still txs left
	committedTxs, txs := txs[:50], txs[50:]
	if err := mp.Update(1, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil {
		t.Error(err)
	}
	ensureFire(t, mp.TxsAvailable(), timeoutMS)
	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)

	// send a bunch more txs. we already fired for this height so it shouldn't fire again
	moreTxs := checkTxs(t, mp, 50, mempool.UnknownPeerID)
	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)

	// now call update with all the txs. it should not fire as there are no txs left
	committedTxs = append(txs, moreTxs...)
	if err := mp.Update(2, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil {
		t.Error(err)
	}
	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)

	// send a bunch more txs, it should only fire once
	checkTxs(t, mp, 100, mempool.UnknownPeerID)
	ensureFire(t, mp.TxsAvailable(), timeoutMS)
	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)
}
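// TestSerialReap interleaves CheckTx, reaping, and commit/Update cycles,
// driving the kvstore app over both the mempool connection and a separate
// consensus connection, and asserts the expected reap count after each step.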
func TestSerialReap(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)

	mp, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	appConnCon, _ := cc.NewABCIClient()
	appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
	err := appConnCon.Start()
	require.Nil(t, err)

	cacheMap := make(map[string]struct{})
	deliverTxsRange := func(start, end int) {
		// Deliver some txs.
		for i := start; i < end; i++ {
			// This will succeed
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			err := mp.CheckTx(txBytes, nil, mempool.TxInfo{})
			_, cached := cacheMap[string(txBytes)]
			if cached {
				require.NotNil(t, err, "expected error for cached tx")
			} else {
				require.Nil(t, err, "expected no err for uncached tx")
			}
			cacheMap[string(txBytes)] = struct{}{}

			// Duplicates are cached and should return error
			err = mp.CheckTx(txBytes, nil, mempool.TxInfo{})
			require.NotNil(t, err, "Expected error after CheckTx on duplicated tx")
		}
	}

	reapCheck := func(exp int) {
		txs := mp.ReapMaxBytesMaxGas(-1, -1)
		require.Equal(t, len(txs), exp, fmt.Sprintf("Expected to reap %v txs but got %v", exp, len(txs)))
	}

	updateRange := func(start, end int) {
		txs := make([]types.Tx, 0)
		for i := start; i < end; i++ {
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			txs = append(txs, txBytes)
		}
		if err := mp.Update(0, txs, abciResponses(len(txs), abci.CodeTypeOK), nil, nil); err != nil {
			t.Error(err)
		}
	}

	commitRange := func(start, end int) {
		// Deliver some txs.
		for i := start; i < end; i++ {
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes})
			if err != nil {
				t.Errorf("client error committing tx: %v", err)
			}
			if res.IsErr() {
				t.Errorf("error committing tx. Code:%v result:%X log:%v",
					res.Code, res.Data, res.Log)
			}
		}
		res, err := appConnCon.CommitSync()
		if err != nil {
			t.Errorf("client error committing: %v", err)
		}
		if len(res.Data) != 8 {
			t.Errorf("error committing. Hash:%X", res.Data)
		}
	}

	//----------------------------------------

	// Deliver some txs.
	deliverTxsRange(0, 100)

	// Reap the txs.
	reapCheck(100)

	// Reap again. We should get the same amount
	reapCheck(100)

	// Deliver 0 to 999, we should reap 900 new txs
	// because 100 were already counted.
	deliverTxsRange(0, 1000)

	// Reap the txs.
	reapCheck(1000)

	// Reap again. We should get the same amount
	reapCheck(1000)

	// Commit from the consensus AppConn
	commitRange(0, 500)
	updateRange(0, 500)

	// We should have 500 left.
	reapCheck(500)

	// Deliver 100 invalid txs and 100 valid txs
	deliverTxsRange(900, 1100)

	// We should have 600 now.
	reapCheck(600)
}
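// TestMempool_CheckTxChecksTxSize submits txs around the configured
// MaxTxBytes limit. The table is consistent with CheckTx enforcing the limit
// on the raw tx length: a tx of exactly MaxTxBytes is accepted, while one
// byte more is rejected with ErrTxTooLarge.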
func TestMempool_CheckTxChecksTxSize(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)

	mempl, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	maxTxSize := mempl.config.MaxTxBytes

	testCases := []struct {
		len int
		err bool
	}{
		// check small txs. no error
		0: {10, false},
		1: {1000, false},
		2: {1000000, false},

		// check around maxTxSize
		3: {maxTxSize - 1, false},
		4: {maxTxSize, false},
		5: {maxTxSize + 1, true},
	}

	for i, testCase := range testCases {
		caseString := fmt.Sprintf("case %d, len %d", i, testCase.len)

		tx := cmtrand.Bytes(testCase.len)

		err := mempl.CheckTx(tx, nil, mempool.TxInfo{})
		bv := gogotypes.BytesValue{Value: tx}
		bz, err2 := bv.Marshal()
		require.NoError(t, err2)
		require.Equal(t, len(bz), proto.Size(&bv), caseString)

		if !testCase.err {
			require.NoError(t, err, caseString)
		} else {
			require.Equal(t, err, mempool.ErrTxTooLarge{
				Max:    maxTxSize,
				Actual: testCase.len,
			}, caseString)
		}
	}
}

func TestMempoolTxsBytes(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)

	cfg := config.ResetTestRoot("mempool_test")

	cfg.Mempool.MaxTxsBytes = 10
	mp, cleanup := newMempoolWithAppAndConfig(cc, cfg)
	defer cleanup()

	// 1. zero by default
	assert.EqualValues(t, 0, mp.SizeBytes())

	// 2. len(tx) after CheckTx
	err := mp.CheckTx([]byte{0x01}, nil, mempool.TxInfo{})
	require.NoError(t, err)
	assert.EqualValues(t, 1, mp.SizeBytes())

	// 3. zero again after tx is removed by Update
	err = mp.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
	require.NoError(t, err)
	assert.EqualValues(t, 0, mp.SizeBytes())

	// 4. zero after Flush
	err = mp.CheckTx([]byte{0x02, 0x03}, nil, mempool.TxInfo{})
	require.NoError(t, err)
	assert.EqualValues(t, 2, mp.SizeBytes())

	mp.Flush()
	assert.EqualValues(t, 0, mp.SizeBytes())

	// 5. ErrMempoolIsFull is returned when/if MaxTxsBytes limit is reached.
	err = mp.CheckTx(
		[]byte{0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04},
		nil,
		mempool.TxInfo{},
	)
	require.NoError(t, err)

	err = mp.CheckTx([]byte{0x05}, nil, mempool.TxInfo{})
	if assert.Error(t, err) {
		assert.IsType(t, mempool.ErrMempoolIsFull{}, err)
	}

	// 6. zero after tx is rechecked and removed due to not being valid anymore
	app2 := kvstore.NewApplication()
	cc = proxy.NewLocalClientCreator(app2)

	mp, cleanup = newMempoolWithApp(cc)
	defer cleanup()

	txBytes := make([]byte, 8)
	binary.BigEndian.PutUint64(txBytes, uint64(0))

	err = mp.CheckTx(txBytes, nil, mempool.TxInfo{})
	require.NoError(t, err)
	assert.EqualValues(t, 8, mp.SizeBytes())

	appConnCon, _ := cc.NewABCIClient()
	appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
	err = appConnCon.Start()
	require.Nil(t, err)
	t.Cleanup(func() {
		if err := appConnCon.Stop(); err != nil {
			t.Error(err)
		}
	})

	res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes})
	require.NoError(t, err)
	require.EqualValues(t, 0, res.Code)

	res2, err := appConnCon.CommitSync()
	require.NoError(t, err)
	require.NotEmpty(t, res2.Data)

	// Pretend like we committed nothing so txBytes gets rechecked and removed.
	err = mp.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil)
	require.NoError(t, err)
	assert.EqualValues(t, 8, mp.SizeBytes())

	// 7. Test RemoveTxByKey function
	err = mp.CheckTx([]byte{0x06}, nil, mempool.TxInfo{})
	require.NoError(t, err)
	assert.EqualValues(t, 9, mp.SizeBytes())
	assert.Error(t, mp.RemoveTxByKey(types.Tx([]byte{0x07}).Key()))
	assert.EqualValues(t, 9, mp.SizeBytes())
	assert.NoError(t, mp.RemoveTxByKey(types.Tx([]byte{0x06}).Key()))
	assert.EqualValues(t, 8, mp.SizeBytes())
}
// This will non-deterministically catch some concurrency failures like
// https://github.com/cometbft/cometbft/issues/3509
// TODO: all of the tests should probably also run using the remote proxy app
// since otherwise we're not actually testing the concurrency of the mempool here!
func TestMempoolRemoteAppConcurrency(t *testing.T) {
	sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", cmtrand.Str(6))
	app := kvstore.NewApplication()
	_, server := newRemoteApp(t, sockPath, app)
	t.Cleanup(func() {
		if err := server.Stop(); err != nil {
			t.Error(err)
		}
	})

	cfg := config.ResetTestRoot("mempool_test")

	mp, cleanup := newMempoolWithAppAndConfig(proxy.NewRemoteClientCreator(sockPath, "socket", true), cfg)
	defer cleanup()

	// generate a small number of txs
	nTxs := 10
	txLen := 200
	txs := make([]types.Tx, nTxs)
	for i := 0; i < nTxs; i++ {
		txs[i] = cmtrand.Bytes(txLen)
	}

	// simulate a group of peers sending them over and over
	N := cfg.Mempool.Size
	maxPeers := 5
	for i := 0; i < N; i++ {
		peerID := mrand.Intn(maxPeers)
		txNum := mrand.Intn(nTxs)
		tx := txs[txNum]

		// this will err with ErrTxInCache many times ...
		mp.CheckTx(tx, nil, mempool.TxInfo{SenderID: uint16(peerID)}) //nolint: errcheck // will error
	}

	require.NoError(t, mp.FlushAppConn())
}

func TestRemoveBlobTx(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	namespaceOne := bytes.Repeat([]byte{1}, consts.NamespaceIDSize)

	cfg := config.ResetTestRoot("mempool_test")

	cfg.Mempool.MaxTxsBytes = 1000
	mp, cleanup := newMempoolWithAppAndConfig(cc, cfg)
	defer cleanup()

	originalTx := []byte{1, 2, 3, 4}
	indexWrapper, err := types.MarshalIndexWrapper(originalTx, 100)
	require.NoError(t, err)

	// create the blobTx
	b := tmproto.Blob{
		NamespaceId:      namespaceOne,
		Data:             []byte{1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 9},
		ShareVersion:     0,
		NamespaceVersion: 0,
	}
	bTx, err := types.MarshalBlobTx(originalTx, &b)
	require.NoError(t, err)

	err = mp.CheckTx(bTx, nil, mempool.TxInfo{})
	require.NoError(t, err)

	err = mp.Update(1, []types.Tx{indexWrapper}, abciResponses(1, abci.CodeTypeOK), nil, nil)
	require.NoError(t, err)
	assert.EqualValues(t, 0, mp.Size())
	assert.EqualValues(t, 0, mp.SizeBytes())
}

// caller must close server
func newRemoteApp(t *testing.T, addr string, app abci.Application) (abciclient.Client, service.Service) {
	clientCreator, err := abciclient.NewClient(addr, "socket", true)
	require.NoError(t, err)

	// Start server
	server := abciserver.NewSocketServer(addr, app)
	server.SetLogger(log.TestingLogger().With("module", "abci-server"))
	if err := server.Start(); err != nil {
		t.Fatalf("Error starting socket server: %v", err.Error())
	}

	return clientCreator, server
}
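// abciResponses returns n DeliverTx responses, each carrying the given result
// code.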
func abciResponses(n int, code uint32) []*abci.ResponseDeliverTx {
	responses := make([]*abci.ResponseDeliverTx, 0, n)
	for i := 0; i < n; i++ {
		responses = append(responses, &abci.ResponseDeliverTx{Code: code})
	}
	return responses
}
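
// The following is a minimal sketch (not part of the original suite)
// illustrating the 2-byte protobuf framing overhead discussed in the doc
// comment of TestMempoolFilters: a 20-byte payload wrapped in
// gogotypes.BytesValue marshals to 22 bytes.
func TestBytesValueOverheadSketch(t *testing.T) {
	payload := make([]byte, 20) // 20 zero bytes; content does not affect the size
	bv := gogotypes.BytesValue{Value: payload}
	bz, err := bv.Marshal()
	require.NoError(t, err)
	// 1-byte field tag + 1-byte varint length + 20-byte payload = 22 bytes
	require.Equal(t, 22, len(bz))
	require.Equal(t, len(bz), proto.Size(&bv))
}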