github.com/evdatsion/aphelion-dpos-bft@v0.32.1/mempool/clist_mempool_test.go

package mempool

import (
    "crypto/rand"
    "crypto/sha256"
    "encoding/binary"
    "fmt"
    "io/ioutil"
    mrand "math/rand"
    "os"
    "path/filepath"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    amino "github.com/evdatsion/go-amino"

    "github.com/evdatsion/aphelion-dpos-bft/abci/example/counter"
    "github.com/evdatsion/aphelion-dpos-bft/abci/example/kvstore"
    abciserver "github.com/evdatsion/aphelion-dpos-bft/abci/server"
    abci "github.com/evdatsion/aphelion-dpos-bft/abci/types"
    cfg "github.com/evdatsion/aphelion-dpos-bft/config"
    cmn "github.com/evdatsion/aphelion-dpos-bft/libs/common"
    "github.com/evdatsion/aphelion-dpos-bft/libs/log"
    "github.com/evdatsion/aphelion-dpos-bft/proxy"
    "github.com/evdatsion/aphelion-dpos-bft/types"
)

// A cleanupFunc cleans up any config / test files created for a particular
// test.
type cleanupFunc func()

func newMempoolWithApp(cc proxy.ClientCreator) (*CListMempool, cleanupFunc) {
    return newMempoolWithAppAndConfig(cc, cfg.ResetTestRoot("mempool_test"))
}

func newMempoolWithAppAndConfig(cc proxy.ClientCreator, config *cfg.Config) (*CListMempool, cleanupFunc) {
    appConnMem, _ := cc.NewABCIClient()
    appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
    err := appConnMem.Start()
    if err != nil {
        panic(err)
    }
    mempool := NewCListMempool(config.Mempool, appConnMem, 0)
    mempool.SetLogger(log.TestingLogger())
    return mempool, func() { os.RemoveAll(config.RootDir) }
}

func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
    timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
    select {
    case <-ch:
        t.Fatal("Expected not to fire")
    case <-timer.C:
    }
}

func ensureFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
    timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
    select {
    case <-ch:
    case <-timer.C:
        t.Fatal("Expected to fire")
    }
}

func checkTxs(t *testing.T, mempool Mempool, count int, peerID uint16) types.Txs {
    txs := make(types.Txs, count)
    txInfo := TxInfo{SenderID: peerID}
    for i := 0; i < count; i++ {
        txBytes := make([]byte, 20)
        txs[i] = txBytes
        _, err := rand.Read(txBytes)
        if err != nil {
            t.Error(err)
        }
        if err := mempool.CheckTxWithInfo(txBytes, nil, txInfo); err != nil {
            // Skip invalid txs.
            // TestMempoolFilters will fail otherwise. It asserts a number of txs
            // returned.
            if IsPreCheckError(err) {
                continue
            }
            t.Fatalf("CheckTx failed: %v while checking #%d tx", err, i)
        }
    }
    return txs
}

func TestReapMaxBytesMaxGas(t *testing.T) {
    app := kvstore.NewKVStoreApplication()
    cc := proxy.NewLocalClientCreator(app)
    mempool, cleanup := newMempoolWithApp(cc)
    defer cleanup()

    // Ensure gas calculation behaves as expected.
    checkTxs(t, mempool, 1, UnknownPeerID)
    tx0 := mempool.TxsFront().Value.(*mempoolTx)
    // Assert that the kvstore app reports gasWanted = 1.
    require.Equal(t, app.CheckTx(abci.RequestCheckTx{Tx: tx0.tx}).GasWanted, int64(1), "KVStore had a gas value not equal to 1")
    require.Equal(t, tx0.gasWanted, int64(1), "transaction's gas was set incorrectly")
    // Ensure each tx is 20 bytes long.
    require.Equal(t, len(tx0.tx), 20, "Tx is not 20 bytes long")
    mempool.Flush()

    // Each table-driven test creates numTxsToCreate txs with CheckTx, and clears all remaining txs at the end.
    // Each tx has 20 bytes + amino overhead = 21 bytes, 1 gas.
    tests := []struct {
        numTxsToCreate int
        maxBytes       int64
        maxGas         int64
        expectedNumTxs int
    }{
        {20, -1, -1, 20},
        {20, -1, 0, 0},
        {20, -1, 10, 10},
        {20, -1, 30, 20},
        {20, 0, -1, 0},
        {20, 0, 10, 0},
        {20, 10, 10, 0},
        {20, 22, 10, 1},
        {20, 220, -1, 10},
        {20, 220, 5, 5},
        {20, 220, 10, 10},
        {20, 220, 15, 10},
        {20, 20000, -1, 20},
        {20, 20000, 5, 5},
        {20, 20000, 30, 20},
    }
    for tcIndex, tt := range tests {
        checkTxs(t, mempool, tt.numTxsToCreate, UnknownPeerID)
        got := mempool.ReapMaxBytesMaxGas(tt.maxBytes, tt.maxGas)
        assert.Equal(t, tt.expectedNumTxs, len(got), "Got %d txs, expected %d, tc #%d",
            len(got), tt.expectedNumTxs, tcIndex)
        mempool.Flush()
    }
}

func TestMempoolFilters(t *testing.T) {
    app := kvstore.NewKVStoreApplication()
    cc := proxy.NewLocalClientCreator(app)
    mempool, cleanup := newMempoolWithApp(cc)
    defer cleanup()
    emptyTxArr := []types.Tx{[]byte{}}

    nopPreFilter := func(tx types.Tx) error { return nil }
    nopPostFilter := func(tx types.Tx, res *abci.ResponseCheckTx) error { return nil }

    // Each table-driven test creates numTxsToCreate txs with CheckTx, and clears all remaining txs at the end.
    // Each tx has 20 bytes + amino overhead = 21 bytes, 1 gas.
    tests := []struct {
        numTxsToCreate int
        preFilter      PreCheckFunc
        postFilter     PostCheckFunc
        expectedNumTxs int
    }{
        {10, nopPreFilter, nopPostFilter, 10},
        {10, PreCheckAminoMaxBytes(10), nopPostFilter, 0},
        {10, PreCheckAminoMaxBytes(20), nopPostFilter, 0},
        {10, PreCheckAminoMaxBytes(22), nopPostFilter, 10},
        {10, nopPreFilter, PostCheckMaxGas(-1), 10},
        {10, nopPreFilter, PostCheckMaxGas(0), 0},
        {10, nopPreFilter, PostCheckMaxGas(1), 10},
        {10, nopPreFilter, PostCheckMaxGas(3000), 10},
        {10, PreCheckAminoMaxBytes(10), PostCheckMaxGas(20), 0},
        {10, PreCheckAminoMaxBytes(30), PostCheckMaxGas(20), 10},
        {10, PreCheckAminoMaxBytes(22), PostCheckMaxGas(1), 10},
        {10, PreCheckAminoMaxBytes(22), PostCheckMaxGas(0), 0},
    }
    for tcIndex, tt := range tests {
        mempool.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter)
        checkTxs(t, mempool, tt.numTxsToCreate, UnknownPeerID)
        require.Equal(t, tt.expectedNumTxs, mempool.Size(), "mempool had the incorrect size, on test case %d", tcIndex)
        mempool.Flush()
    }
}

func TestMempoolUpdate(t *testing.T) {
    app := kvstore.NewKVStoreApplication()
    cc := proxy.NewLocalClientCreator(app)
    mempool, cleanup := newMempoolWithApp(cc)
    defer cleanup()

    // 1. Adds valid txs to the cache.
    {
        mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
        err := mempool.CheckTx([]byte{0x01}, nil)
        if assert.Error(t, err) {
            assert.Equal(t, ErrTxInCache, err)
        }
    }

    // 2. Removes valid txs from the mempool.
    {
        err := mempool.CheckTx([]byte{0x02}, nil)
        require.NoError(t, err)
        mempool.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
        assert.Zero(t, mempool.Size())
    }

    // 3. Removes invalid transactions from the cache and the mempool (if present).
    {
        err := mempool.CheckTx([]byte{0x03}, nil)
        require.NoError(t, err)
        mempool.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, 1), nil, nil)
        assert.Zero(t, mempool.Size())

        err = mempool.CheckTx([]byte{0x03}, nil)
        assert.NoError(t, err)
    }
}

func TestTxsAvailable(t *testing.T) {
    app := kvstore.NewKVStoreApplication()
    cc := proxy.NewLocalClientCreator(app)
    mempool, cleanup := newMempoolWithApp(cc)
    defer cleanup()
    mempool.EnableTxsAvailable()

    timeoutMS := 500

    // With no txs, it shouldn't fire.
    ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)

    // Send a bunch of txs; it should only fire once.
    txs := checkTxs(t, mempool, 100, UnknownPeerID)
    ensureFire(t, mempool.TxsAvailable(), timeoutMS)
    ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)

    // Call Update with half the txs.
    // It should fire once now for the new height,
    // since there are still txs left.
    committedTxs, txs := txs[:50], txs[50:]
    if err := mempool.Update(1, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil {
        t.Error(err)
    }
    ensureFire(t, mempool.TxsAvailable(), timeoutMS)
    ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)

    // Send a bunch more txs. We already fired for this height, so it shouldn't fire again.
    moreTxs := checkTxs(t, mempool, 50, UnknownPeerID)
    ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)

    // Now call Update with all the txs. It should not fire as there are no txs left.
    committedTxs = append(txs, moreTxs...)
    if err := mempool.Update(2, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil {
        t.Error(err)
    }
    ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)

    // Send a bunch more txs; it should only fire once.
    checkTxs(t, mempool, 100, UnknownPeerID)
    ensureFire(t, mempool.TxsAvailable(), timeoutMS)
    ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
}

func TestSerialReap(t *testing.T) {
    app := counter.NewCounterApplication(true)
    app.SetOption(abci.RequestSetOption{Key: "serial", Value: "on"})
    cc := proxy.NewLocalClientCreator(app)

    mempool, cleanup := newMempoolWithApp(cc)
    defer cleanup()

    appConnCon, _ := cc.NewABCIClient()
    appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
    err := appConnCon.Start()
    require.Nil(t, err)

    cacheMap := make(map[string]struct{})
    deliverTxsRange := func(start, end int) {
        // Deliver some txs.
        for i := start; i < end; i++ {

            // CheckTx should succeed for new txs and fail for cached ones.
            txBytes := make([]byte, 8)
            binary.BigEndian.PutUint64(txBytes, uint64(i))
            err := mempool.CheckTx(txBytes, nil)
            _, cached := cacheMap[string(txBytes)]
            if cached {
                require.NotNil(t, err, "expected error for cached tx")
            } else {
                require.Nil(t, err, "expected no err for uncached tx")
            }
            cacheMap[string(txBytes)] = struct{}{}

            // Duplicates are cached and should return an error.
            err = mempool.CheckTx(txBytes, nil)
            require.NotNil(t, err, "Expected error after CheckTx on duplicated tx")
        }
    }

    reapCheck := func(exp int) {
        txs := mempool.ReapMaxBytesMaxGas(-1, -1)
        require.Equal(t, len(txs), exp, fmt.Sprintf("Expected to reap %v txs but got %v", exp, len(txs)))
    }

    updateRange := func(start, end int) {
        txs := make([]types.Tx, 0)
        for i := start; i < end; i++ {
            txBytes := make([]byte, 8)
            binary.BigEndian.PutUint64(txBytes, uint64(i))
            txs = append(txs, txBytes)
        }
        if err := mempool.Update(0, txs, abciResponses(len(txs), abci.CodeTypeOK), nil, nil); err != nil {
            t.Error(err)
        }
    }

    commitRange := func(start, end int) {
        // Deliver some txs.
        for i := start; i < end; i++ {
            txBytes := make([]byte, 8)
            binary.BigEndian.PutUint64(txBytes, uint64(i))
            res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes})
            if err != nil {
                t.Errorf("Client error committing tx: %v", err)
            }
            if res.IsErr() {
                t.Errorf("Error committing tx. Code:%v result:%X log:%v",
                    res.Code, res.Data, res.Log)
            }
        }
        res, err := appConnCon.CommitSync()
        if err != nil {
            t.Errorf("Client error committing: %v", err)
        }
        if len(res.Data) != 8 {
            t.Errorf("Error committing. Hash:%X", res.Data)
        }
    }

    //----------------------------------------

    // Deliver some txs.
    deliverTxsRange(0, 100)

    // Reap the txs.
    reapCheck(100)

    // Reap again. We should get the same amount.
    reapCheck(100)

    // Deliver 0 to 999; we should reap 900 new txs
    // because 100 were already counted.
    deliverTxsRange(0, 1000)

    // Reap the txs.
    reapCheck(1000)

    // Reap again. We should get the same amount.
    reapCheck(1000)

    // Commit from the consensus AppConn.
    commitRange(0, 500)
    updateRange(0, 500)

    // We should have 500 left.
    reapCheck(500)

    // Deliver 100 invalid txs and 100 valid txs.
    deliverTxsRange(900, 1100)

    // We should have 600 now.
    reapCheck(600)
}

func TestMempoolCloseWAL(t *testing.T) {
    // 1. Create the temporary directory for mempool and WAL testing.
    rootDir, err := ioutil.TempDir("", "mempool-test")
    require.Nil(t, err, "expecting successful tmpdir creation")

    // 2. Ensure that it doesn't contain any elements -- sanity check.
    m1, err := filepath.Glob(filepath.Join(rootDir, "*"))
    require.Nil(t, err, "successful globbing expected")
    require.Equal(t, 0, len(m1), "no matches yet")

    // 3. Create the mempool.
    wcfg := cfg.DefaultConfig()
    wcfg.Mempool.RootDir = rootDir
    app := kvstore.NewKVStoreApplication()
    cc := proxy.NewLocalClientCreator(app)
    mempool, cleanup := newMempoolWithAppAndConfig(cc, wcfg)
    defer cleanup()
    mempool.height = 10
    mempool.InitWAL()

    // 4. Ensure that the directory contains the WAL file.
    m2, err := filepath.Glob(filepath.Join(rootDir, "*"))
    require.Nil(t, err, "successful globbing expected")
    require.Equal(t, 1, len(m2), "expected exactly one file: the WAL")

    // 5. Write some contents to the WAL.
    mempool.CheckTx(types.Tx([]byte("foo")), nil)
    walFilepath := mempool.wal.Path
    sum1 := checksumFile(walFilepath, t)

    // 6. Sanity check to ensure that the written TX matches the expectation.
    require.Equal(t, sum1, checksumIt([]byte("foo\n")), "foo with a newline should be written")

    // 7. Invoke CloseWAL() and ensure it discards the WAL,
    // so any subsequent write won't go through.
    mempool.CloseWAL()
    mempool.CheckTx(types.Tx([]byte("bar")), nil)
    sum2 := checksumFile(walFilepath, t)
    require.Equal(t, sum1, sum2, "expected no change to the WAL after invoking CloseWAL() since it was discarded")

    // 8. Sanity check to ensure that the WAL file still exists.
    m3, err := filepath.Glob(filepath.Join(rootDir, "*"))
    require.Nil(t, err, "successful globbing expected")
    require.Equal(t, 1, len(m3), "expected exactly one file: the WAL")
}

// The size of the amino-encoded TxMessage is the length of the
// encoded byte array, plus 1 for the struct field, plus 4
// for the amino prefix.
func txMessageSize(tx types.Tx) int {
    return amino.ByteSliceSize(tx) + 1 + 4
}

func TestMempoolMaxMsgSize(t *testing.T) {
    app := kvstore.NewKVStoreApplication()
    cc := proxy.NewLocalClientCreator(app)
    mempl, cleanup := newMempoolWithApp(cc)
    defer cleanup()

    testCases := []struct {
        len int
        err bool
    }{
        // check small txs. no error
        {10, false},
        {1000, false},
        {1000000, false},

        // check around maxTxSize
        // changes from no error to error
        {maxTxSize - 2, false},
        {maxTxSize - 1, false},
        {maxTxSize, false},
        {maxTxSize + 1, true},
        {maxTxSize + 2, true},

        // check around maxMsgSize. all error
        {maxMsgSize - 1, true},
        {maxMsgSize, true},
        {maxMsgSize + 1, true},
    }

    for i, testCase := range testCases {
        caseString := fmt.Sprintf("case %d, len %d", i, testCase.len)

        tx := cmn.RandBytes(testCase.len)
        err := mempl.CheckTx(tx, nil)
        msg := &TxMessage{tx}
        encoded := cdc.MustMarshalBinaryBare(msg)
        require.Equal(t, len(encoded), txMessageSize(tx), caseString)
        if !testCase.err {
            require.True(t, len(encoded) <= maxMsgSize, caseString)
            require.NoError(t, err, caseString)
        } else {
            require.True(t, len(encoded) > maxMsgSize, caseString)
            require.Equal(t, err, ErrTxTooLarge, caseString)
        }
    }
}

func TestMempoolTxsBytes(t *testing.T) {
    app := kvstore.NewKVStoreApplication()
    cc := proxy.NewLocalClientCreator(app)
    config := cfg.ResetTestRoot("mempool_test")
    config.Mempool.MaxTxsBytes = 10
    mempool, cleanup := newMempoolWithAppAndConfig(cc, config)
    defer cleanup()

    // 1. zero by default
    assert.EqualValues(t, 0, mempool.TxsBytes())

    // 2. len(tx) after CheckTx
    err := mempool.CheckTx([]byte{0x01}, nil)
    require.NoError(t, err)
    assert.EqualValues(t, 1, mempool.TxsBytes())

    // 3. zero again after tx is removed by Update
    mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
    assert.EqualValues(t, 0, mempool.TxsBytes())

    // 4. zero after Flush
    err = mempool.CheckTx([]byte{0x02, 0x03}, nil)
    require.NoError(t, err)
    assert.EqualValues(t, 2, mempool.TxsBytes())

    mempool.Flush()
    assert.EqualValues(t, 0, mempool.TxsBytes())

    // 5. ErrMempoolIsFull is returned when the MaxTxsBytes limit is reached.
    err = mempool.CheckTx([]byte{0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04}, nil)
    require.NoError(t, err)
    err = mempool.CheckTx([]byte{0x05}, nil)
    if assert.Error(t, err) {
        assert.IsType(t, ErrMempoolIsFull{}, err)
    }

    // 6. zero after tx is rechecked and removed due to not being valid anymore
    app2 := counter.NewCounterApplication(true)
    cc = proxy.NewLocalClientCreator(app2)
    mempool, cleanup = newMempoolWithApp(cc)
    defer cleanup()

    txBytes := make([]byte, 8)
    binary.BigEndian.PutUint64(txBytes, uint64(0))

    err = mempool.CheckTx(txBytes, nil)
    require.NoError(t, err)
    assert.EqualValues(t, 8, mempool.TxsBytes())

    appConnCon, _ := cc.NewABCIClient()
    appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
    err = appConnCon.Start()
    require.Nil(t, err)
    defer appConnCon.Stop()
    res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes})
    require.NoError(t, err)
    require.EqualValues(t, 0, res.Code)
    res2, err := appConnCon.CommitSync()
    require.NoError(t, err)
    require.NotEmpty(t, res2.Data)

    // Pretend we committed nothing so txBytes gets rechecked and removed.
    mempool.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil)
    assert.EqualValues(t, 0, mempool.TxsBytes())
}

// This will non-deterministically catch some concurrency failures like
// https://github.com/evdatsion/aphelion-dpos-bft/issues/3509
// TODO: all of the tests should probably also run using the remote proxy app
// since otherwise we're not actually testing the concurrency of the mempool here!
func TestMempoolRemoteAppConcurrency(t *testing.T) {
    sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", cmn.RandStr(6))
    app := kvstore.NewKVStoreApplication()
    cc, server := newRemoteApp(t, sockPath, app)
    defer server.Stop()
    config := cfg.ResetTestRoot("mempool_test")
    mempool, cleanup := newMempoolWithAppAndConfig(cc, config)
    defer cleanup()

    // Generate a small number of txs.
    nTxs := 10
    txLen := 200
    txs := make([]types.Tx, nTxs)
    for i := 0; i < nTxs; i++ {
        txs[i] = cmn.RandBytes(txLen)
    }

    // Simulate a group of peers sending them over and over.
    N := config.Mempool.Size
    maxPeers := 5
    for i := 0; i < N; i++ {
        peerID := mrand.Intn(maxPeers)
        txNum := mrand.Intn(nTxs)
        tx := txs[txNum]

        // This will err with ErrTxInCache many times ...
        mempool.CheckTxWithInfo(tx, nil, TxInfo{SenderID: uint16(peerID)})
    }
    err := mempool.FlushAppConn()
    require.NoError(t, err)
}

// Caller must close the server.
func newRemoteApp(t *testing.T, addr string, app abci.Application) (clientCreator proxy.ClientCreator, server cmn.Service) {
    clientCreator = proxy.NewRemoteClientCreator(addr, "socket", true)

    // Start the server.
    server = abciserver.NewSocketServer(addr, app)
    server.SetLogger(log.TestingLogger().With("module", "abci-server"))
    if err := server.Start(); err != nil {
        t.Fatalf("Error starting socket server: %v", err.Error())
    }
    return clientCreator, server
}

func checksumIt(data []byte) string {
    h := sha256.New()
    h.Write(data)
    return fmt.Sprintf("%x", h.Sum(nil))
}

func checksumFile(p string, t *testing.T) string {
    data, err := ioutil.ReadFile(p)
    require.Nil(t, err, "expecting successful read of %q", p)
    return checksumIt(data)
}

func abciResponses(n int, code uint32) []*abci.ResponseDeliverTx {
    responses := make([]*abci.ResponseDeliverTx, 0, n)
    for i := 0; i < n; i++ {
        responses = append(responses, &abci.ResponseDeliverTx{Code: code})
    }
    return responses
}
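
// A minimal worked example of the txMessageSize arithmetic above, for a
// concrete 20-byte tx. It assumes amino's one-byte uvarint length prefix
// (valid for lengths < 128) and the 4-byte type prefix registered for
// TxMessage; the test itself is an illustrative sketch, not part of the
// original suite.
func TestTxMessageSizeWorkedExample(t *testing.T) {
    tx := types.Tx(make([]byte, 20))
    // One-byte length prefix + 20 payload bytes = 21, matching the
    // "20 bytes + amino overhead = 21 bytes" figure quoted in
    // TestReapMaxBytesMaxGas.
    require.Equal(t, 21, amino.ByteSliceSize(tx))
    // Plus 1 for the struct field key and 4 for the amino prefix = 26.
    require.Equal(t, 26, txMessageSize(tx))
}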