github.com/number571/tendermint@v0.34.11-gost/internal/mempool/v0/clist_mempool_test.go

package v0

import (
	"context"
	"crypto/rand"
	"encoding/binary"
	"fmt"
	mrand "math/rand"
	"os"
	"testing"
	"time"

	"github.com/gogo/protobuf/proto"
	gogotypes "github.com/gogo/protobuf/types"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/number571/tendermint/abci/example/kvstore"
	abciserver "github.com/number571/tendermint/abci/server"
	abci "github.com/number571/tendermint/abci/types"
	cfg "github.com/number571/tendermint/config"
	"github.com/number571/tendermint/internal/mempool"
	"github.com/number571/tendermint/libs/log"
	tmrand "github.com/number571/tendermint/libs/rand"
	"github.com/number571/tendermint/libs/service"
	pubmempool "github.com/number571/tendermint/pkg/mempool"
	"github.com/number571/tendermint/proxy"
	"github.com/number571/tendermint/types"
)

// A cleanupFunc cleans up any config / test files created for a particular
// test.
type cleanupFunc func()

func newMempoolWithApp(cc proxy.ClientCreator) (*CListMempool, cleanupFunc) {
	return newMempoolWithAppAndConfig(cc, cfg.ResetTestRoot("mempool_test"))
}

func newMempoolWithAppAndConfig(cc proxy.ClientCreator, config *cfg.Config) (*CListMempool, cleanupFunc) {
	appConnMem, _ := cc.NewABCIClient()
	appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
	err := appConnMem.Start()
	if err != nil {
		panic(err)
	}

	mp := NewCListMempool(config.Mempool, appConnMem, 0)
	mp.SetLogger(log.TestingLogger())

	return mp, func() { os.RemoveAll(config.RootDir) }
}

func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
	timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
	select {
	case <-ch:
		t.Fatal("Expected not to fire")
	case <-timer.C:
	}
}

func ensureFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
	timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
	select {
	case <-ch:
	case <-timer.C:
		t.Fatal("Expected to fire")
	}
}

func checkTxs(t *testing.T, mp mempool.Mempool, count int, peerID uint16) types.Txs {
	txs := make(types.Txs, count)
	txInfo := mempool.TxInfo{SenderID: peerID}
	for i := 0; i < count; i++ {
		txBytes := make([]byte, 20)
		txs[i] = txBytes
		_, err := rand.Read(txBytes)
		if err != nil {
			t.Error(err)
		}
		if err := mp.CheckTx(context.Background(), txBytes, nil, txInfo); err != nil {
			// Skip invalid txs.
			// TestMempoolFilters will fail otherwise. It asserts a number of txs
			// returned.
			if pubmempool.IsPreCheckError(err) {
				continue
			}
			t.Fatalf("CheckTx failed: %v while checking #%d tx", err, i)
		}
	}
	return txs
}

func TestReapMaxBytesMaxGas(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mp, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	// Ensure gas calculation behaves as expected
	checkTxs(t, mp, 1, mempool.UnknownPeerID)
	tx0 := mp.TxsFront().Value.(*mempoolTx)
	// assert that kv store has gas wanted = 1.
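	// (the kvstore example app reports GasWanted=1 for every tx, so gas-limited
	// reaping below should count txs one-for-one)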
	require.Equal(t, app.CheckTx(abci.RequestCheckTx{Tx: tx0.tx}).GasWanted, int64(1), "KVStore had a gas value not equal to 1")
	require.Equal(t, tx0.gasWanted, int64(1), "transaction's gas was set incorrectly")
	// ensure each tx is 20 bytes long
	require.Equal(t, len(tx0.tx), 20, "Tx is not 20 bytes long")
	mp.Flush()

	// Each table-driven test creates numTxsToCreate txs with checkTxs, and at the
	// end clears all remaining txs. Each tx is 20 bytes.
	tests := []struct {
		numTxsToCreate int
		maxBytes       int64
		maxGas         int64
		expectedNumTxs int
	}{
		{20, -1, -1, 20},
		{20, -1, 0, 0},
		{20, -1, 10, 10},
		{20, -1, 30, 20},
		{20, 0, -1, 0},
		{20, 0, 10, 0},
		{20, 10, 10, 0},
		{20, 24, 10, 1},
		{20, 240, 5, 5},
		{20, 240, -1, 10},
		{20, 240, 10, 10},
		{20, 240, 15, 10},
		{20, 20000, -1, 20},
		{20, 20000, 5, 5},
		{20, 20000, 30, 20},
	}
	for tcIndex, tt := range tests {
		checkTxs(t, mp, tt.numTxsToCreate, mempool.UnknownPeerID)
		got := mp.ReapMaxBytesMaxGas(tt.maxBytes, tt.maxGas)
		assert.Equal(t, tt.expectedNumTxs, len(got), "Got %d txs, expected %d, tc #%d",
			len(got), tt.expectedNumTxs, tcIndex)
		mp.Flush()
	}
}

func TestMempoolFilters(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mp, cleanup := newMempoolWithApp(cc)
	defer cleanup()
	emptyTxArr := []types.Tx{[]byte{}}

	nopPreFilter := func(tx types.Tx) error { return nil }
	nopPostFilter := func(tx types.Tx, res *abci.ResponseCheckTx) error { return nil }

	// Each table-driven test creates numTxsToCreate txs with checkTxs, and at the
	// end clears all remaining txs. Each tx is 20 bytes.
	tests := []struct {
		numTxsToCreate int
		preFilter      mempool.PreCheckFunc
		postFilter     mempool.PostCheckFunc
		expectedNumTxs int
	}{
		{10, nopPreFilter, nopPostFilter, 10},
		{10, mempool.PreCheckMaxBytes(10), nopPostFilter, 0},
		{10, mempool.PreCheckMaxBytes(22), nopPostFilter, 10},
		{10, nopPreFilter, mempool.PostCheckMaxGas(-1), 10},
		{10, nopPreFilter, mempool.PostCheckMaxGas(0), 0},
		{10, nopPreFilter, mempool.PostCheckMaxGas(1), 10},
		{10, nopPreFilter, mempool.PostCheckMaxGas(3000), 10},
		{10, mempool.PreCheckMaxBytes(10), mempool.PostCheckMaxGas(20), 0},
		{10, mempool.PreCheckMaxBytes(30), mempool.PostCheckMaxGas(20), 10},
		{10, mempool.PreCheckMaxBytes(22), mempool.PostCheckMaxGas(1), 10},
		{10, mempool.PreCheckMaxBytes(22), mempool.PostCheckMaxGas(0), 0},
	}
	for tcIndex, tt := range tests {
		err := mp.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter)
		require.NoError(t, err)
		checkTxs(t, mp, tt.numTxsToCreate, mempool.UnknownPeerID)
		require.Equal(t, tt.expectedNumTxs, mp.Size(), "mempool had the incorrect size, on test case %d", tcIndex)
		mp.Flush()
	}
}

func TestMempoolUpdate(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mp, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	// 1. Adds valid txs to the cache
	{
		err := mp.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
		require.NoError(t, err)
		err = mp.CheckTx(context.Background(), []byte{0x01}, nil, mempool.TxInfo{})
		require.NoError(t, err)
	}

	// 2. Removes valid txs from the mempool
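	// (a tx that was committed in a block should be evicted from the mempool by Update)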
	{
		err := mp.CheckTx(context.Background(), []byte{0x02}, nil, mempool.TxInfo{})
		require.NoError(t, err)
		err = mp.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
		require.NoError(t, err)
		assert.Zero(t, mp.Size())
	}

	// 3. Removes invalid transactions from the cache and the mempool (if present)
	{
		err := mp.CheckTx(context.Background(), []byte{0x03}, nil, mempool.TxInfo{})
		require.NoError(t, err)
		err = mp.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, 1), nil, nil)
		require.NoError(t, err)
		assert.Zero(t, mp.Size())

		err = mp.CheckTx(context.Background(), []byte{0x03}, nil, mempool.TxInfo{})
		require.NoError(t, err)
	}
}

func TestMempool_KeepInvalidTxsInCache(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	wcfg := cfg.DefaultConfig()
	wcfg.Mempool.KeepInvalidTxsInCache = true
	mp, cleanup := newMempoolWithAppAndConfig(cc, wcfg)
	defer cleanup()

	// 1. An invalid transaction must remain in the cache after Update
	{
		a := make([]byte, 8)
		binary.BigEndian.PutUint64(a, 0)

		b := make([]byte, 8)
		binary.BigEndian.PutUint64(b, 1)

		err := mp.CheckTx(context.Background(), b, nil, mempool.TxInfo{})
		require.NoError(t, err)

		// simulate a new block
		_ = app.DeliverTx(abci.RequestDeliverTx{Tx: a})
		_ = app.DeliverTx(abci.RequestDeliverTx{Tx: b})
		err = mp.Update(1, []types.Tx{a, b},
			[]*abci.ResponseDeliverTx{{Code: abci.CodeTypeOK}, {Code: 2}}, nil, nil)
		require.NoError(t, err)

		// a must be added to the cache
		err = mp.CheckTx(context.Background(), a, nil, mempool.TxInfo{})
		require.NoError(t, err)

		// b must remain in the cache
		err = mp.CheckTx(context.Background(), b, nil, mempool.TxInfo{})
		require.NoError(t, err)
	}

	// 2. An invalid transaction must remain in the cache
	{
		a := make([]byte, 8)
		binary.BigEndian.PutUint64(a, 0)

		// remove a from the cache to test (2)
		mp.cache.Remove(a)

		err := mp.CheckTx(context.Background(), a, nil, mempool.TxInfo{})
		require.NoError(t, err)
	}
}

func TestTxsAvailable(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mp, cleanup := newMempoolWithApp(cc)
	defer cleanup()
	mp.EnableTxsAvailable()

	timeoutMS := 500

	// with no txs, it shouldn't fire
	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)

	// send a bunch of txs, it should only fire once
	txs := checkTxs(t, mp, 100, mempool.UnknownPeerID)
	ensureFire(t, mp.TxsAvailable(), timeoutMS)
	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)

	// call update with half the txs.
	// it should fire once now for the new height
	// since there are still txs left
	committedTxs, txs := txs[:50], txs[50:]
	if err := mp.Update(1, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil {
		t.Error(err)
	}
	ensureFire(t, mp.TxsAvailable(), timeoutMS)
	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)

	// send a bunch more txs. we already fired for this height so it shouldn't fire again
	moreTxs := checkTxs(t, mp, 50, mempool.UnknownPeerID)
	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)

	// now call update with all the txs. it should not fire as there are no txs left
	committedTxs = append(txs, moreTxs...) //nolint: gocritic
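	// (the nolint above silences gocritic, which objects to assigning the result
	// of append(txs, ...) to a different variable; the reuse is intentional here)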
	if err := mp.Update(2, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil {
		t.Error(err)
	}
	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)

	// send a bunch more txs, it should only fire once
	checkTxs(t, mp, 100, mempool.UnknownPeerID)
	ensureFire(t, mp.TxsAvailable(), timeoutMS)
	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)
}

func TestSerialReap(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)

	mp, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	appConnCon, _ := cc.NewABCIClient()
	appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
	err := appConnCon.Start()
	require.Nil(t, err)

	cacheMap := make(map[string]struct{})
	deliverTxsRange := func(start, end int) {
		// Deliver some txs.
		for i := start; i < end; i++ {

			// This will succeed
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			err := mp.CheckTx(context.Background(), txBytes, nil, mempool.TxInfo{})
			_, cached := cacheMap[string(txBytes)]
			if cached {
				require.NotNil(t, err, "expected error for cached tx")
			} else {
				require.Nil(t, err, "expected no err for uncached tx")
			}
			cacheMap[string(txBytes)] = struct{}{}

			// Duplicates are cached and should return an error
			err = mp.CheckTx(context.Background(), txBytes, nil, mempool.TxInfo{})
			require.NotNil(t, err, "Expected error after CheckTx on duplicated tx")
		}
	}

	reapCheck := func(exp int) {
		txs := mp.ReapMaxBytesMaxGas(-1, -1)
		require.Equal(t, len(txs), exp, fmt.Sprintf("Expected to reap %v txs but got %v", exp, len(txs)))
	}

	updateRange := func(start, end int) {
		txs := make([]types.Tx, 0)
		for i := start; i < end; i++ {
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			txs = append(txs, txBytes)
		}
		if err := mp.Update(0, txs, abciResponses(len(txs), abci.CodeTypeOK), nil, nil); err != nil {
			t.Error(err)
		}
	}

	commitRange := func(start, end int) {
		ctx := context.Background()
		// Deliver some txs.
		for i := start; i < end; i++ {
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			res, err := appConnCon.DeliverTxSync(ctx, abci.RequestDeliverTx{Tx: txBytes})
			if err != nil {
				t.Errorf("client error committing tx: %v", err)
			}
			if res.IsErr() {
				t.Errorf("error committing tx. Code:%v result:%X log:%v",
					res.Code, res.Data, res.Log)
			}
		}
		res, err := appConnCon.CommitSync(ctx)
		if err != nil {
			t.Errorf("client error committing: %v", err)
		}
		if len(res.Data) != 8 {
			t.Errorf("error committing. Hash:%X", res.Data)
		}
	}

	//----------------------------------------

	// Deliver some txs.
	deliverTxsRange(0, 100)

	// Reap the txs.
	reapCheck(100)

	// Reap again. We should get the same amount.
	reapCheck(100)

	// Deliver 0 to 999; we should reap 900 new txs
	// because 100 were already counted.
	deliverTxsRange(0, 1000)

	// Reap the txs.
	reapCheck(1000)

	// Reap again. We should get the same amount.
	reapCheck(1000)

	// Commit from the consensus AppConn.
	commitRange(0, 500)
	updateRange(0, 500)

	// We should have 500 left.
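	// (txs 0-499 were committed and removed by Update, leaving txs 500-999)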
	reapCheck(500)

	// Deliver 100 invalid txs and 100 valid txs
	deliverTxsRange(900, 1100)

	// We should have 600 now.
	reapCheck(600)
}

func TestMempool_CheckTxChecksTxSize(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mempl, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	maxTxSize := mempl.config.MaxTxBytes

	testCases := []struct {
		len int
		err bool
	}{
		// check small txs. no error
		0: {10, false},
		1: {1000, false},
		2: {1000000, false},

		// check around maxTxSize
		3: {maxTxSize - 1, false},
		4: {maxTxSize, false},
		5: {maxTxSize + 1, true},
	}

	for i, testCase := range testCases {
		caseString := fmt.Sprintf("case %d, len %d", i, testCase.len)

		tx := tmrand.Bytes(testCase.len)

		err := mempl.CheckTx(context.Background(), tx, nil, mempool.TxInfo{})
		bv := gogotypes.BytesValue{Value: tx}
		bz, err2 := bv.Marshal()
		require.NoError(t, err2)
		require.Equal(t, len(bz), proto.Size(&bv), caseString)

		if !testCase.err {
			require.NoError(t, err, caseString)
		} else {
			require.Equal(t, err, pubmempool.ErrTxTooLarge{
				Max:    maxTxSize,
				Actual: testCase.len,
			}, caseString)
		}
	}
}

func TestMempoolTxsBytes(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	config := cfg.ResetTestRoot("mempool_test")
	config.Mempool.MaxTxsBytes = 10
	mp, cleanup := newMempoolWithAppAndConfig(cc, config)
	defer cleanup()

	// 1. zero by default
	assert.EqualValues(t, 0, mp.SizeBytes())

	// 2. len(tx) after CheckTx
	err := mp.CheckTx(context.Background(), []byte{0x01}, nil, mempool.TxInfo{})
	require.NoError(t, err)
	assert.EqualValues(t, 1, mp.SizeBytes())

	// 3. zero again after tx is removed by Update
	err = mp.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
	require.NoError(t, err)
	assert.EqualValues(t, 0, mp.SizeBytes())

	// 4. zero after Flush
	err = mp.CheckTx(context.Background(), []byte{0x02, 0x03}, nil, mempool.TxInfo{})
	require.NoError(t, err)
	assert.EqualValues(t, 2, mp.SizeBytes())

	mp.Flush()
	assert.EqualValues(t, 0, mp.SizeBytes())

	// 5. ErrMempoolIsFull is returned when the MaxTxsBytes limit is reached
	err = mp.CheckTx(
		context.Background(),
		[]byte{0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04},
		nil,
		mempool.TxInfo{},
	)
	require.NoError(t, err)

	err = mp.CheckTx(context.Background(), []byte{0x05}, nil, mempool.TxInfo{})
	if assert.Error(t, err) {
		assert.IsType(t, pubmempool.ErrMempoolIsFull{}, err)
	}

	// 6. zero after tx is rechecked and removed due to not being valid anymore
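	// (a fresh app and mempool are created so this case starts from a clean state)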
	app2 := kvstore.NewApplication()
	cc = proxy.NewLocalClientCreator(app2)
	mp, cleanup = newMempoolWithApp(cc)
	defer cleanup()

	txBytes := make([]byte, 8)
	binary.BigEndian.PutUint64(txBytes, uint64(0))

	err = mp.CheckTx(context.Background(), txBytes, nil, mempool.TxInfo{})
	require.NoError(t, err)
	assert.EqualValues(t, 8, mp.SizeBytes())

	appConnCon, _ := cc.NewABCIClient()
	appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
	err = appConnCon.Start()
	require.Nil(t, err)
	t.Cleanup(func() {
		if err := appConnCon.Stop(); err != nil {
			t.Error(err)
		}
	})
	ctx := context.Background()
	res, err := appConnCon.DeliverTxSync(ctx, abci.RequestDeliverTx{Tx: txBytes})
	require.NoError(t, err)
	require.EqualValues(t, 0, res.Code)
	res2, err := appConnCon.CommitSync(ctx)
	require.NoError(t, err)
	require.NotEmpty(t, res2.Data)

	// Pretend like we committed nothing so txBytes gets rechecked and removed.
	err = mp.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil)
	require.NoError(t, err)
	assert.EqualValues(t, 8, mp.SizeBytes())

	// 7. Test the RemoveTxByKey function
	err = mp.CheckTx(context.Background(), []byte{0x06}, nil, mempool.TxInfo{})
	require.NoError(t, err)
	assert.EqualValues(t, 9, mp.SizeBytes())
	mp.RemoveTxByKey(mempool.TxKey([]byte{0x07}), true)
	assert.EqualValues(t, 9, mp.SizeBytes())
	mp.RemoveTxByKey(mempool.TxKey([]byte{0x06}), true)
	assert.EqualValues(t, 8, mp.SizeBytes())
}

// This will non-deterministically catch some concurrency failures like
// https://github.com/number571/tendermint/issues/3509
// TODO: all of the tests should probably also run using the remote proxy app
// since otherwise we're not actually testing the concurrency of the mempool here!
func TestMempoolRemoteAppConcurrency(t *testing.T) {
	sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6))
	app := kvstore.NewApplication()
	cc, server := newRemoteApp(t, sockPath, app)
	t.Cleanup(func() {
		if err := server.Stop(); err != nil {
			t.Error(err)
		}
	})
	config := cfg.ResetTestRoot("mempool_test")
	mp, cleanup := newMempoolWithAppAndConfig(cc, config)
	defer cleanup()

	// generate a small number of txs
	nTxs := 10
	txLen := 200
	txs := make([]types.Tx, nTxs)
	for i := 0; i < nTxs; i++ {
		txs[i] = tmrand.Bytes(txLen)
	}

	// simulate a group of peers sending them over and over
	N := config.Mempool.Size
	maxPeers := 5
	for i := 0; i < N; i++ {
		peerID := mrand.Intn(maxPeers)
		txNum := mrand.Intn(nTxs)
		tx := txs[txNum]

		// this will err with ErrTxInCache many times ...
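		// (the errors are ignored on purpose; the loop only exercises concurrent access)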
		mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: uint16(peerID)}) //nolint: errcheck // will error
	}
	err := mp.FlushAppConn()
	require.NoError(t, err)
}

// caller must close server
func newRemoteApp(
	t *testing.T,
	addr string,
	app abci.Application,
) (
	clientCreator proxy.ClientCreator,
	server service.Service,
) {
	clientCreator = proxy.NewRemoteClientCreator(addr, "socket", true)

	// Start server
	server = abciserver.NewSocketServer(addr, app)
	server.SetLogger(log.TestingLogger().With("module", "abci-server"))
	if err := server.Start(); err != nil {
		t.Fatalf("Error starting socket server: %v", err.Error())
	}
	return clientCreator, server
}

func abciResponses(n int, code uint32) []*abci.ResponseDeliverTx {
	responses := make([]*abci.ResponseDeliverTx, 0, n)
	for i := 0; i < n; i++ {
		responses = append(responses, &abci.ResponseDeliverTx{Code: code})
	}
	return responses
}