github.com/badrootd/nibiru-cometbft@v0.37.5-0.20240307173500-2a75559eee9b/mempool/v0/clist_mempool_test.go

package v0

import (
	"encoding/binary"
	"fmt"
	mrand "math/rand"
	"os"
	"strconv"
	"testing"
	"time"

	"github.com/cosmos/gogoproto/proto"
	gogotypes "github.com/cosmos/gogoproto/types"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	abciclient "github.com/badrootd/nibiru-cometbft/abci/client"
	abciclimocks "github.com/badrootd/nibiru-cometbft/abci/client/mocks"
	"github.com/badrootd/nibiru-cometbft/abci/example/kvstore"
	abciserver "github.com/badrootd/nibiru-cometbft/abci/server"
	abci "github.com/badrootd/nibiru-cometbft/abci/types"
	"github.com/badrootd/nibiru-cometbft/config"
	"github.com/badrootd/nibiru-cometbft/libs/log"
	cmtrand "github.com/badrootd/nibiru-cometbft/libs/rand"
	"github.com/badrootd/nibiru-cometbft/libs/service"
	"github.com/badrootd/nibiru-cometbft/mempool"
	"github.com/badrootd/nibiru-cometbft/proxy"
	"github.com/badrootd/nibiru-cometbft/types"
)

// A cleanupFunc cleans up any config / test files created for a particular
// test.
type cleanupFunc func()

// newMempoolWithAppMock creates a CListMempool backed by the given mocked
// ABCI client, using a fresh test config.
func newMempoolWithAppMock(cc proxy.ClientCreator, client abciclient.Client) (*CListMempool, cleanupFunc, error) {
	conf := config.ResetTestRoot("mempool_test")

	mp, cu := newMempoolWithAppAndConfigMock(cc, conf, client)
	return mp, cu, nil
}

// newMempoolWithAppAndConfigMock starts the given mocked ABCI client and
// wires it into a new CListMempool with the given config.
func newMempoolWithAppAndConfigMock(cc proxy.ClientCreator,
	cfg *config.Config,
	client abciclient.Client) (*CListMempool, cleanupFunc) {
	appConnMem := client
	appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
	err := appConnMem.Start()
	if err != nil {
		panic(err)
	}

	mp := NewCListMempool(cfg.Mempool, appConnMem, 0)
	mp.SetLogger(log.TestingLogger())

	return mp, func() { os.RemoveAll(cfg.RootDir) }
}

func newMempoolWithApp(cc proxy.ClientCreator) (*CListMempool, cleanupFunc) {
	conf := config.ResetTestRoot("mempool_test")

	mp, cu := newMempoolWithAppAndConfig(cc, conf)
	return mp, cu
}

// newMempoolWithAppAndConfig creates a new ABCI client from cc, starts it,
// and wires it into a new CListMempool with the given config.
func newMempoolWithAppAndConfig(cc proxy.ClientCreator, cfg *config.Config) (*CListMempool, cleanupFunc) {
	appConnMem, _ := cc.NewABCIClient()
	appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
	err := appConnMem.Start()
	if err != nil {
		panic(err)
	}

	mp := NewCListMempool(cfg.Mempool, appConnMem, 0)
	mp.SetLogger(log.TestingLogger())

	return mp, func() { os.RemoveAll(cfg.RootDir) }
}

// ensureNoFire fails the test if ch fires within timeoutMS milliseconds.
func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
	timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
	select {
	case <-ch:
		t.Fatal("Expected not to fire")
	case <-timer.C:
	}
}

// ensureFire fails the test if ch does not fire within timeoutMS milliseconds.
func ensureFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
	timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
	select {
	case <-ch:
	case <-timer.C:
		t.Fatal("Expected to fire")
	}
}

// callCheckTx submits each tx in txs to the mempool, skipping txs rejected
// by the pre-check filter.
func callCheckTx(t *testing.T, mp mempool.Mempool, txs types.Txs) {
	txInfo := mempool.TxInfo{SenderID: 0}
	for i, tx := range txs {
		if err := mp.CheckTx(tx, nil, txInfo); err != nil {
			// Skip invalid txs.
			// TestMempoolFilters will fail otherwise. It asserts the number
			// of txs returned.
			if mempool.IsPreCheckError(err) {
				continue
			}
			t.Fatalf("CheckTx failed: %v while checking #%d tx", err, i)
		}
	}
}

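// checkTxs generates `count` random 20-byte txs, submits them to the mempool
// via CheckTx tagged with peerID, and returns them. Txs rejected by the
// pre-check filter are skipped.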
func checkTxs(t *testing.T, mp mempool.Mempool, count int, peerID uint16) types.Txs {
	txs := make(types.Txs, count)
	txInfo := mempool.TxInfo{SenderID: peerID}
	for i := 0; i < count; i++ {
		txBytes := kvstore.NewRandomTx(20)
		txs[i] = txBytes
		if err := mp.CheckTx(txBytes, nil, txInfo); err != nil {
			// Skip invalid txs.
			// TestMempoolFilters will fail otherwise. It asserts the number
			// of txs returned.
			if mempool.IsPreCheckError(err) {
				continue
			}
			t.Fatalf("CheckTx failed: %v while checking #%d tx", err, i)
		}
	}
	return txs
}

func TestReapMaxBytesMaxGas(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mp, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	// Ensure gas calculation behaves as expected.
	checkTxs(t, mp, 1, mempool.UnknownPeerID)
	tx0 := mp.TxsFront().Value.(*mempoolTx)
	// Assert that the kvstore app reports gas wanted = 1.
	require.Equal(t, app.CheckTx(abci.RequestCheckTx{Tx: tx0.tx}).GasWanted, int64(1), "KVStore had a gas value not equal to 1")
	require.Equal(t, tx0.gasWanted, int64(1), "transaction's gas was set incorrectly")
	// Ensure each tx is 20 bytes long.
	require.Equal(t, len(tx0.tx), 20, "Tx is not 20 bytes")
	mp.Flush()

	// Each table-driven test creates numTxsToCreate txs with CheckTx and, at
	// the end, clears all remaining txs. Each tx is 20 bytes.
	tests := []struct {
		numTxsToCreate int
		maxBytes       int64
		maxGas         int64
		expectedNumTxs int
	}{
		{20, -1, -1, 20},
		{20, -1, 0, 0},
		{20, -1, 10, 10},
		{20, -1, 30, 20},
		{20, 0, -1, 0},
		{20, 0, 10, 0},
		{20, 10, 10, 0},
		{20, 24, 10, 1},
		{20, 240, 5, 5},
		{20, 240, -1, 10},
		{20, 240, 10, 10},
		{20, 240, 15, 10},
		{20, 20000, -1, 20},
		{20, 20000, 5, 5},
		{20, 20000, 30, 20},
	}
	for tcIndex, tt := range tests {
		checkTxs(t, mp, tt.numTxsToCreate, mempool.UnknownPeerID)
		got := mp.ReapMaxBytesMaxGas(tt.maxBytes, tt.maxGas)
		assert.Equal(t, tt.expectedNumTxs, len(got), "Got %d txs, expected %d, tc #%d",
			len(got), tt.expectedNumTxs, tcIndex)
		mp.Flush()
	}
}

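// TestMempoolFilters checks that the pre-check and post-check filters passed
// to Update control which txs are admitted into the mempool.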
func TestMempoolFilters(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mp, cleanup := newMempoolWithApp(cc)
	defer cleanup()
	emptyTxArr := []types.Tx{[]byte{}}

	nopPreFilter := func(tx types.Tx) error { return nil }
	nopPostFilter := func(tx types.Tx, res *abci.ResponseCheckTx) error { return nil }

	// Each table-driven test creates numTxsToCreate txs with CheckTx and, at
	// the end, clears all remaining txs. Each tx is 20 bytes.
	tests := []struct {
		numTxsToCreate int
		preFilter      mempool.PreCheckFunc
		postFilter     mempool.PostCheckFunc
		expectedNumTxs int
	}{
		{10, nopPreFilter, nopPostFilter, 10},
		{10, mempool.PreCheckMaxBytes(10), nopPostFilter, 0},
		{10, mempool.PreCheckMaxBytes(22), nopPostFilter, 10},
		{10, nopPreFilter, mempool.PostCheckMaxGas(-1), 10},
		{10, nopPreFilter, mempool.PostCheckMaxGas(0), 0},
		{10, nopPreFilter, mempool.PostCheckMaxGas(1), 10},
		{10, nopPreFilter, mempool.PostCheckMaxGas(3000), 10},
		{10, mempool.PreCheckMaxBytes(10), mempool.PostCheckMaxGas(20), 0},
		{10, mempool.PreCheckMaxBytes(30), mempool.PostCheckMaxGas(20), 10},
		{10, mempool.PreCheckMaxBytes(22), mempool.PostCheckMaxGas(1), 10},
		{10, mempool.PreCheckMaxBytes(22), mempool.PostCheckMaxGas(0), 0},
	}
	for tcIndex, tt := range tests {
		err := mp.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter)
		require.NoError(t, err)
		checkTxs(t, mp, tt.numTxsToCreate, mempool.UnknownPeerID)
		require.Equal(t, tt.expectedNumTxs, mp.Size(), "mempool had the incorrect size, on test case %d", tcIndex)
		mp.Flush()
	}
}

func TestMempoolUpdate(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mp, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	// 1. Adds valid txs to the cache.
	{
		err := mp.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
		require.NoError(t, err)
		err = mp.CheckTx([]byte{0x01}, nil, mempool.TxInfo{})
		if assert.Error(t, err) {
			assert.Equal(t, mempool.ErrTxInCache, err)
		}
	}

	// 2. Removes valid txs from the mempool.
	{
		err := mp.CheckTx([]byte{0x02}, nil, mempool.TxInfo{})
		require.NoError(t, err)
		err = mp.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
		require.NoError(t, err)
		assert.Zero(t, mp.Size())
	}

	// 3. Removes invalid transactions from the cache and the mempool (if present).
	{
		err := mp.CheckTx([]byte{0x03}, nil, mempool.TxInfo{})
		require.NoError(t, err)
		err = mp.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, 1), nil, nil)
		require.NoError(t, err)
		assert.Zero(t, mp.Size())

		err = mp.CheckTx([]byte{0x03}, nil, mempool.TxInfo{})
		require.NoError(t, err)
	}
}

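// TestMempoolUpdateDoesNotPanicWhenApplicationMissedTx exercises the recheck
// path with a mocked ABCI client that never answers one of the recheck
// requests; earlier versions of this code panicked in that case.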
func TestMempoolUpdateDoesNotPanicWhenApplicationMissedTx(t *testing.T) {
	var callback abciclient.Callback
	mockClient := new(abciclimocks.Client)
	mockClient.On("Start").Return(nil)
	mockClient.On("SetLogger", mock.Anything)

	mockClient.On("Error").Return(nil).Times(4)
	mockClient.On("FlushAsync", mock.Anything).Return(abciclient.NewReqRes(abci.ToRequestFlush()), nil)
	mockClient.On("SetResponseCallback", mock.MatchedBy(func(cb abciclient.Callback) bool { callback = cb; return true }))

	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mp, cleanup, err := newMempoolWithAppMock(cc, mockClient)
	require.NoError(t, err)
	defer cleanup()

	// Add 4 transactions to the mempool by calling the mempool's `CheckTx` on each of them.
	txs := []types.Tx{[]byte{0x01}, []byte{0x02}, []byte{0x03}, []byte{0x04}}
	for _, tx := range txs {
		reqRes := abciclient.NewReqRes(abci.ToRequestCheckTx(abci.RequestCheckTx{Tx: tx}))
		reqRes.Response = abci.ToResponseCheckTx(abci.ResponseCheckTx{Code: abci.CodeTypeOK})

		mockClient.On("CheckTxAsync", mock.Anything, mock.Anything).Return(reqRes, nil)
		err := mp.CheckTx(tx, nil, mempool.TxInfo{})
		require.NoError(t, err)

		// Ensure that the callback the mempool sets on the ReqRes is run.
		reqRes.InvokeCallback()
	}

	// Call Update to remove the first transaction from the mempool.
	// This call also triggers the mempool to recheck its remaining transactions.
	err = mp.Update(0, []types.Tx{txs[0]}, abciResponses(1, abci.CodeTypeOK), nil, nil)
	require.Nil(t, err)

	// The mempool has now sent its requests off to the client to be rechecked
	// and is waiting for the corresponding callbacks to be called.
	// We now call the mempool-supplied callback on the first and third transaction.
	// This simulates the client dropping the second request.
	// Previous versions of this code panicked when the ABCI application missed
	// a recheck-tx request.
	resp := abci.ResponseCheckTx{Code: abci.CodeTypeOK}
	req := abci.RequestCheckTx{Tx: txs[1]}
	callback(abci.ToRequestCheckTx(req), abci.ToResponseCheckTx(resp))

	req = abci.RequestCheckTx{Tx: txs[3]}
	callback(abci.ToRequestCheckTx(req), abci.ToResponseCheckTx(resp))
	mockClient.AssertExpectations(t)
}

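// TestMempool_KeepInvalidTxsInCache checks that, with KeepInvalidTxsInCache
// enabled, txs rejected during Update stay in the cache.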
func TestMempool_KeepInvalidTxsInCache(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	wcfg := config.DefaultConfig()
	wcfg.Mempool.KeepInvalidTxsInCache = true
	mp, cleanup := newMempoolWithAppAndConfig(cc, wcfg)
	defer cleanup()

	// 1. An invalid transaction must remain in the cache after Update.
	{
		a := make([]byte, 8)
		binary.BigEndian.PutUint64(a, 0)

		b := make([]byte, 8)
		binary.BigEndian.PutUint64(b, 1)

		err := mp.CheckTx(b, nil, mempool.TxInfo{})
		require.NoError(t, err)

		// Simulate a new block.
		_ = app.DeliverTx(abci.RequestDeliverTx{Tx: a})
		_ = app.DeliverTx(abci.RequestDeliverTx{Tx: b})
		err = mp.Update(1, []types.Tx{a, b},
			[]*abci.ResponseDeliverTx{{Code: abci.CodeTypeOK}, {Code: 2}}, nil, nil)
		require.NoError(t, err)

		// a must be added to the cache.
		err = mp.CheckTx(a, nil, mempool.TxInfo{})
		if assert.Error(t, err) {
			assert.Equal(t, mempool.ErrTxInCache, err)
		}

		// b must remain in the cache.
		err = mp.CheckTx(b, nil, mempool.TxInfo{})
		if assert.Error(t, err) {
			assert.Equal(t, mempool.ErrTxInCache, err)
		}
	}

	// 2. An invalid transaction must remain in the cache.
	{
		a := make([]byte, 8)
		binary.BigEndian.PutUint64(a, 0)

		// Remove a from the cache to test (2).
		mp.cache.Remove(a)

		err := mp.CheckTx(a, nil, mempool.TxInfo{})
		require.NoError(t, err)
	}
}

func TestTxsAvailable(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mp, cleanup := newMempoolWithApp(cc)
	defer cleanup()
	mp.EnableTxsAvailable()

	timeoutMS := 500

	// With no txs, it shouldn't fire.
	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)

	// Send a bunch of txs; it should only fire once.
	txs := checkTxs(t, mp, 100, mempool.UnknownPeerID)
	ensureFire(t, mp.TxsAvailable(), timeoutMS)
	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)

	// Call Update with half the txs.
	// It should fire once now for the new height,
	// since there are still txs left.
	committedTxs, txs := txs[:50], txs[50:]
	if err := mp.Update(1, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil {
		t.Error(err)
	}
	ensureFire(t, mp.TxsAvailable(), timeoutMS)
	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)

	// Send a bunch more txs. We already fired for this height, so it shouldn't fire again.
	moreTxs := checkTxs(t, mp, 50, mempool.UnknownPeerID)
	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)

	// Now call Update with all the txs. It should not fire as there are no txs left.
	committedTxs = append(txs, moreTxs...)
	if err := mp.Update(2, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil {
		t.Error(err)
	}
	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)

	// Send a bunch more txs; it should only fire once.
	checkTxs(t, mp, 100, mempool.UnknownPeerID)
	ensureFire(t, mp.TxsAvailable(), timeoutMS)
	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)
}

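// TestSerialReap interleaves CheckTx, ReapMaxBytesMaxGas, and Update with
// commits on a separate consensus connection, checking the reap count after
// each step.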
func TestSerialReap(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)

	mp, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	appConnCon, _ := cc.NewABCIClient()
	appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
	err := appConnCon.Start()
	require.Nil(t, err)

	cacheMap := make(map[string]struct{})
	deliverTxsRange := func(start, end int) {
		// Deliver some txs.
		for i := start; i < end; i++ {

			// This will succeed.
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			err := mp.CheckTx(txBytes, nil, mempool.TxInfo{})
			_, cached := cacheMap[string(txBytes)]
			if cached {
				require.NotNil(t, err, "expected error for cached tx")
			} else {
				require.Nil(t, err, "expected no err for uncached tx")
			}
			cacheMap[string(txBytes)] = struct{}{}

			// Duplicates are cached and should return an error.
			err = mp.CheckTx(txBytes, nil, mempool.TxInfo{})
			require.NotNil(t, err, "Expected error after CheckTx on duplicated tx")
		}
	}

	reapCheck := func(exp int) {
		txs := mp.ReapMaxBytesMaxGas(-1, -1)
		require.Equal(t, len(txs), exp, fmt.Sprintf("Expected to reap %v txs but got %v", exp, len(txs)))
	}

	updateRange := func(start, end int) {
		txs := make([]types.Tx, 0)
		for i := start; i < end; i++ {
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			txs = append(txs, txBytes)
		}
		if err := mp.Update(0, txs, abciResponses(len(txs), abci.CodeTypeOK), nil, nil); err != nil {
			t.Error(err)
		}
	}

	commitRange := func(start, end int) {
		// Deliver some txs.
		for i := start; i < end; i++ {
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes})
			if err != nil {
				t.Errorf("client error committing tx: %v", err)
			}
			if res.IsErr() {
				t.Errorf("error committing tx. Code:%v result:%X log:%v",
					res.Code, res.Data, res.Log)
			}
		}
		res, err := appConnCon.CommitSync()
		if err != nil {
			t.Errorf("client error committing: %v", err)
		}
		if len(res.Data) != 8 {
			t.Errorf("error committing. Hash:%X", res.Data)
		}
	}

	//----------------------------------------

	// Deliver some txs.
	deliverTxsRange(0, 100)

	// Reap the txs.
	reapCheck(100)

	// Reap again. We should get the same amount.
	reapCheck(100)

	// Deliver 0 to 999; we should reap 900 new txs
	// because 100 were already counted.
	deliverTxsRange(0, 1000)

	// Reap the txs.
	reapCheck(1000)

	// Reap again. We should get the same amount.
	reapCheck(1000)

	// Commit from the consensus AppConn.
	commitRange(0, 500)
	updateRange(0, 500)

	// We should have 500 left.
	reapCheck(500)

	// Deliver 100 invalid txs and 100 valid txs.
	deliverTxsRange(900, 1100)

	// We should have 600 now.
	reapCheck(600)
}

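// TestMempool_CheckTxChecksTxSize checks that CheckTx rejects txs larger than
// the configured MaxTxBytes with ErrTxTooLarge.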
func TestMempool_CheckTxChecksTxSize(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)

	mempl, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	maxTxSize := mempl.config.MaxTxBytes

	testCases := []struct {
		len int
		err bool
	}{
		// Check small txs. No error.
		0: {10, false},
		1: {1000, false},
		2: {1000000, false},

		// Check around maxTxSize.
		3: {maxTxSize - 1, false},
		4: {maxTxSize, false},
		5: {maxTxSize + 1, true},
	}

	for i, testCase := range testCases {
		caseString := fmt.Sprintf("case %d, len %d", i, testCase.len)

		tx := cmtrand.Bytes(testCase.len)

		err := mempl.CheckTx(tx, nil, mempool.TxInfo{})
		bv := gogotypes.BytesValue{Value: tx}
		bz, err2 := bv.Marshal()
		require.NoError(t, err2)
		require.Equal(t, len(bz), proto.Size(&bv), caseString)

		if !testCase.err {
			require.NoError(t, err, caseString)
		} else {
			require.Equal(t, err, mempool.ErrTxTooLarge{
				Max:    maxTxSize,
				Actual: testCase.len,
			}, caseString)
		}
	}
}

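// TestMempoolTxsBytes checks that SizeBytes tracks the total byte size of
// mempool txs through CheckTx, Update, Flush, recheck, and RemoveTxByKey.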
func TestMempoolTxsBytes(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)

	cfg := config.ResetTestRoot("mempool_test")

	cfg.Mempool.MaxTxsBytes = 10
	mp, cleanup := newMempoolWithAppAndConfig(cc, cfg)
	defer cleanup()

	// 1. Zero by default.
	assert.EqualValues(t, 0, mp.SizeBytes())

	// 2. len(tx) after CheckTx.
	err := mp.CheckTx([]byte{0x01}, nil, mempool.TxInfo{})
	require.NoError(t, err)
	assert.EqualValues(t, 1, mp.SizeBytes())

	// 3. Zero again after tx is removed by Update.
	err = mp.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
	require.NoError(t, err)
	assert.EqualValues(t, 0, mp.SizeBytes())

	// 4. Zero after Flush.
	err = mp.CheckTx([]byte{0x02, 0x03}, nil, mempool.TxInfo{})
	require.NoError(t, err)
	assert.EqualValues(t, 2, mp.SizeBytes())

	mp.Flush()
	assert.EqualValues(t, 0, mp.SizeBytes())

	// 5. ErrMempoolIsFull is returned when/if the MaxTxsBytes limit is reached.
	err = mp.CheckTx(
		[]byte{0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04},
		nil,
		mempool.TxInfo{},
	)
	require.NoError(t, err)

	err = mp.CheckTx([]byte{0x05}, nil, mempool.TxInfo{})
	if assert.Error(t, err) {
		assert.IsType(t, mempool.ErrMempoolIsFull{}, err)
	}

	// 6. Zero after tx is rechecked and removed due to not being valid anymore.
	app2 := kvstore.NewApplication()
	cc = proxy.NewLocalClientCreator(app2)

	mp, cleanup = newMempoolWithApp(cc)
	defer cleanup()

	txBytes := make([]byte, 8)
	binary.BigEndian.PutUint64(txBytes, uint64(0))

	err = mp.CheckTx(txBytes, nil, mempool.TxInfo{})
	require.NoError(t, err)
	assert.EqualValues(t, 8, mp.SizeBytes())

	appConnCon, _ := cc.NewABCIClient()
	appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
	err = appConnCon.Start()
	require.Nil(t, err)
	t.Cleanup(func() {
		if err := appConnCon.Stop(); err != nil {
			t.Error(err)
		}
	})

	res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes})
	require.NoError(t, err)
	require.EqualValues(t, 0, res.Code)

	res2, err := appConnCon.CommitSync()
	require.NoError(t, err)
	require.NotEmpty(t, res2.Data)

	// Pretend like we committed nothing so txBytes gets rechecked and removed.
	err = mp.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil)
	require.NoError(t, err)
	assert.EqualValues(t, 8, mp.SizeBytes())

	// 7. Test the RemoveTxByKey function.
	err = mp.CheckTx([]byte{0x06}, nil, mempool.TxInfo{})
	require.NoError(t, err)
	assert.EqualValues(t, 9, mp.SizeBytes())
	assert.Error(t, mp.RemoveTxByKey(types.Tx([]byte{0x07}).Key()))
	assert.EqualValues(t, 9, mp.SizeBytes())
	assert.NoError(t, mp.RemoveTxByKey(types.Tx([]byte{0x06}).Key()))
	assert.EqualValues(t, 8, mp.SizeBytes())
}

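// TestMempoolNoCacheOverflow checks that a tx evicted from the cache by
// overflow and then resubmitted appears only once in the mempool.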
func TestMempoolNoCacheOverflow(t *testing.T) {
	sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", cmtrand.Str(6))
	app := kvstore.NewApplication()
	_, server := newRemoteApp(t, sockPath, app)
	t.Cleanup(func() {
		if err := server.Stop(); err != nil {
			t.Error(err)
		}
	})
	cfg := config.ResetTestRoot("mempool_test")
	mp, cleanup := newMempoolWithAppAndConfig(proxy.NewRemoteClientCreator(sockPath, "socket", true), cfg)
	defer cleanup()

	// Add tx0.
	var tx0 = types.Tx([]byte{0x01})
	err := mp.CheckTx(tx0, nil, mempool.TxInfo{})
	require.NoError(t, err)
	err = mp.FlushAppConn()
	require.NoError(t, err)

	// Saturate the cache to remove tx0.
	for i := 1; i <= mp.config.CacheSize; i++ {
		err = mp.CheckTx(types.Tx([]byte(strconv.Itoa(i))), nil, mempool.TxInfo{})
		require.NoError(t, err)
	}
	err = mp.FlushAppConn()
	require.NoError(t, err)
	assert.False(t, mp.cache.Has(types.Tx([]byte{0x01})))

	// Add tx0 again.
	err = mp.CheckTx(tx0, nil, mempool.TxInfo{})
	require.NoError(t, err)
	err = mp.FlushAppConn()
	require.NoError(t, err)

	// tx0 should appear only once in mp.txs.
	found := 0
	for e := mp.txs.Front(); e != nil; e = e.Next() {
		if types.Tx.Key(e.Value.(*mempoolTx).tx) == types.Tx.Key(tx0) {
			found++
		}
	}
	assert.True(t, found == 1)
}

// This will non-deterministically catch some concurrency failures like
// https://github.com/tendermint/tendermint/issues/3509
// TODO: all of the tests should probably also run using the remote proxy app
// since otherwise we're not actually testing the concurrency of the mempool here!
func TestMempoolRemoteAppConcurrency(t *testing.T) {
	sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", cmtrand.Str(6))
	app := kvstore.NewApplication()
	_, server := newRemoteApp(t, sockPath, app)
	t.Cleanup(func() {
		if err := server.Stop(); err != nil {
			t.Error(err)
		}
	})

	cfg := config.ResetTestRoot("mempool_test")

	mp, cleanup := newMempoolWithAppAndConfig(proxy.NewRemoteClientCreator(sockPath, "socket", true), cfg)
	defer cleanup()

	// Generate a small number of txs.
	nTxs := 10
	txLen := 200
	txs := make([]types.Tx, nTxs)
	for i := 0; i < nTxs; i++ {
		txs[i] = cmtrand.Bytes(txLen)
	}

	// Simulate a group of peers sending them over and over.
	N := cfg.Mempool.Size
	maxPeers := 5
	for i := 0; i < N; i++ {
		peerID := mrand.Intn(maxPeers)
		txNum := mrand.Intn(nTxs)
		tx := txs[txNum]

		// This will err with ErrTxInCache many times ...
		mp.CheckTx(tx, nil, mempool.TxInfo{SenderID: uint16(peerID)}) //nolint: errcheck // will error
	}

	require.NoError(t, mp.FlushAppConn())
}

// newRemoteApp starts a socket server for app at addr and returns a client
// for it. The caller must close the server.
func newRemoteApp(t *testing.T, addr string, app abci.Application) (abciclient.Client, service.Service) {
	clientCreator, err := abciclient.NewClient(addr, "socket", true)
	require.NoError(t, err)

	// Start the server.
	server := abciserver.NewSocketServer(addr, app)
	server.SetLogger(log.TestingLogger().With("module", "abci-server"))
	if err := server.Start(); err != nil {
		t.Fatalf("Error starting socket server: %v", err.Error())
	}

	return clientCreator, server
}

// abciResponses returns n DeliverTx responses, all carrying the given code.
func abciResponses(n int, code uint32) []*abci.ResponseDeliverTx {
	responses := make([]*abci.ResponseDeliverTx, 0, n)
	for i := 0; i < n; i++ {
		responses = append(responses, &abci.ResponseDeliverTx{Code: code})
	}
	return responses
}