github.com/number571/tendermint@v0.34.11-gost/rpc/client/rpc_test.go

package client_test

import (
	"context"
	"encoding/base64"
	"fmt"
	"math"
	"net/http"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	abci "github.com/number571/tendermint/abci/types"
	"github.com/number571/tendermint/config"
	mempl "github.com/number571/tendermint/internal/mempool"
	tmjson "github.com/number571/tendermint/libs/json"
	"github.com/number571/tendermint/libs/log"
	tmmath "github.com/number571/tendermint/libs/math"
	"github.com/number571/tendermint/libs/service"
	"github.com/number571/tendermint/rpc/client"
	rpchttp "github.com/number571/tendermint/rpc/client/http"
	rpclocal "github.com/number571/tendermint/rpc/client/local"
	ctypes "github.com/number571/tendermint/rpc/core/types"
	rpcclient "github.com/number571/tendermint/rpc/jsonrpc/client"
	"github.com/number571/tendermint/types"
)

func getHTTPClient(t *testing.T, conf *config.Config) *rpchttp.HTTP {
	t.Helper()

	rpcAddr := conf.RPC.ListenAddress
	c, err := rpchttp.New(rpcAddr)
	require.NoError(t, err)

	c.SetLogger(log.TestingLogger())
	return c
}

func getHTTPClientWithTimeout(t *testing.T, conf *config.Config, timeout time.Duration) *rpchttp.HTTP {
	t.Helper()

	rpcAddr := conf.RPC.ListenAddress
	c, err := rpchttp.NewWithTimeout(rpcAddr, timeout)
	require.NoError(t, err)

	c.SetLogger(log.TestingLogger())

	return c
}

// GetClients returns a slice of clients for table-driven tests
func GetClients(t *testing.T, ns service.Service, conf *config.Config) []client.Client {
	t.Helper()

	node, ok := ns.(rpclocal.NodeService)
	require.True(t, ok)

	ncl, err := rpclocal.New(node)
	require.NoError(t, err)

	return []client.Client{
		getHTTPClient(t, conf),
		ncl,
	}
}

func TestNilCustomHTTPClient(t *testing.T) {
	require.Panics(t, func() {
		_, _ = rpchttp.NewWithClient("http://example.com", nil)
	})
	require.Panics(t, func() {
		_, _ = rpcclient.NewWithHTTPClient("http://example.com", nil)
	})
}

func TestParseInvalidAddress(t *testing.T) {
	_, conf := NodeSuite(t)
	// should remove trailing /
	invalidRemote := conf.RPC.ListenAddress + "/"
	_, err := rpchttp.New(invalidRemote)
	require.NoError(t, err)
}

func TestCustomHTTPClient(t *testing.T) {
	_, conf := NodeSuite(t)
	remote := conf.RPC.ListenAddress
	c, err := rpchttp.NewWithClient(remote, http.DefaultClient)
	require.Nil(t, err)
	status, err := c.Status(context.Background())
	require.NoError(t, err)
	require.NotNil(t, status)
}

func TestCorsEnabled(t *testing.T) {
	_, conf := NodeSuite(t)
	origin := conf.RPC.CORSAllowedOrigins[0]
	remote := strings.ReplaceAll(conf.RPC.ListenAddress, "tcp", "http")

	req, err := http.NewRequest("GET", remote, nil)
	require.Nil(t, err, "%+v", err)
	req.Header.Set("Origin", origin)
	c := &http.Client{}
	resp, err := c.Do(req)
	require.Nil(t, err, "%+v", err)
	defer resp.Body.Close()

	assert.Equal(t, resp.Header.Get("Access-Control-Allow-Origin"), origin)
}

// Make sure status is correct (we connect properly)
func TestStatus(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n, conf := NodeSuite(t)
	for i, c := range GetClients(t, n, conf) {
		moniker := conf.Moniker
		status, err := c.Status(ctx)
		require.Nil(t, err, "%d: %+v", i, err)
		assert.Equal(t, moniker, status.NodeInfo.Moniker)
	}
}

// Make sure info is correct (we connect properly)
func TestInfo(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	n, conf := NodeSuite(t)

	for i, c := range GetClients(t, n, conf) {
		// status, err := c.Status()
		// require.Nil(t, err, "%+v", err)
		info, err := c.ABCIInfo(ctx)
		require.Nil(t, err, "%d: %+v", i, err)
		// TODO: this is not correct - fix merkleeyes!
		// assert.EqualValues(t, status.SyncInfo.LatestBlockHeight, info.Response.LastBlockHeight)
		assert.True(t, strings.Contains(info.Response.Data, "size"))
	}
}

func TestNetInfo(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n, conf := NodeSuite(t)
	for i, c := range GetClients(t, n, conf) {
		nc, ok := c.(client.NetworkClient)
		require.True(t, ok, "%d", i)
		netinfo, err := nc.NetInfo(ctx)
		require.Nil(t, err, "%d: %+v", i, err)
		assert.True(t, netinfo.Listening)
		assert.Equal(t, 0, len(netinfo.Peers))
	}
}

func TestDumpConsensusState(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n, conf := NodeSuite(t)
	for i, c := range GetClients(t, n, conf) {
		// FIXME: fix server so it doesn't panic on invalid input
		nc, ok := c.(client.NetworkClient)
		require.True(t, ok, "%d", i)
		cons, err := nc.DumpConsensusState(ctx)
		require.Nil(t, err, "%d: %+v", i, err)
		assert.NotEmpty(t, cons.RoundState)
		assert.Empty(t, cons.Peers)
	}
}

func TestConsensusState(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n, conf := NodeSuite(t)

	for i, c := range GetClients(t, n, conf) {
		// FIXME: fix server so it doesn't panic on invalid input
		nc, ok := c.(client.NetworkClient)
		require.True(t, ok, "%d", i)
		cons, err := nc.ConsensusState(ctx)
		require.Nil(t, err, "%d: %+v", i, err)
		assert.NotEmpty(t, cons.RoundState)
	}
}

func TestHealth(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n, conf := NodeSuite(t)

	for i, c := range GetClients(t, n, conf) {
		nc, ok := c.(client.NetworkClient)
		require.True(t, ok, "%d", i)
		_, err := nc.Health(ctx)
		require.Nil(t, err, "%d: %+v", i, err)
	}
}

func TestGenesisAndValidators(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n, conf := NodeSuite(t)
	for i, c := range GetClients(t, n, conf) {

		// make sure this is the right genesis file
		gen, err := c.Genesis(ctx)
		require.Nil(t, err, "%d: %+v", i, err)
		// get the genesis validator
		require.Equal(t, 1, len(gen.Genesis.Validators))
		gval := gen.Genesis.Validators[0]

		// get the current validators
		h := int64(1)
		vals, err := c.Validators(ctx, &h, nil, nil)
		require.Nil(t, err, "%d: %+v", i, err)
		require.Equal(t, 1, len(vals.Validators))
		require.Equal(t, 1, vals.Count)
		require.Equal(t, 1, vals.Total)
		val := vals.Validators[0]

		// make sure the current set is also the genesis set
		assert.Equal(t, gval.Power, val.VotingPower)
		assert.Equal(t, gval.PubKey, val.PubKey)
	}
}

func TestGenesisChunked(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n, conf := NodeSuite(t)

	for _, c := range GetClients(t, n, conf) {
		first, err := c.GenesisChunked(ctx, 0)
		require.NoError(t, err)

		decoded := make([]string, 0, first.TotalChunks)
		for i := 0; i < first.TotalChunks; i++ {
			chunk, err := c.GenesisChunked(ctx, uint(i))
			require.NoError(t, err)
			data, err := base64.StdEncoding.DecodeString(chunk.Data)
			require.NoError(t, err)
			decoded = append(decoded, string(data))
		}
		doc := []byte(strings.Join(decoded, ""))

		var out types.GenesisDoc
		require.NoError(t, tmjson.Unmarshal(doc, &out),
			"first: %+v, doc: %s", first, string(doc))
	}
}

func TestABCIQuery(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n, conf := NodeSuite(t)

	for i, c := range GetClients(t, n, conf) {
		// write something
		k, v, tx := MakeTxKV()
		bres, err := c.BroadcastTxCommit(ctx, tx)
		require.Nil(t, err, "%d: %+v", i, err)
		apph := bres.Height + 1 // this is where the tx will be applied to the state

		// wait before querying
		err = client.WaitForHeight(c, apph, nil)
		require.NoError(t, err)
		res, err := c.ABCIQuery(ctx, "/key", k)
		qres := res.Response
		if assert.Nil(t, err) && assert.True(t, qres.IsOK()) {
			assert.EqualValues(t, v, qres.Value)
		}
	}
}

// Make some app checks
func TestAppCalls(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n, conf := NodeSuite(t)

	for i, c := range GetClients(t, n, conf) {

		// get an offset of height to avoid racing and guessing
		s, err := c.Status(ctx)
		require.NoError(t, err)
		// sh is start height or status height
		sh := s.SyncInfo.LatestBlockHeight

		// look for the future
		h := sh + 20
		_, err = c.Block(ctx, &h)
		require.Error(t, err) // no block yet

		// write something
		k, v, tx := MakeTxKV()
		bres, err := c.BroadcastTxCommit(ctx, tx)
		require.NoError(t, err)
		require.True(t, bres.DeliverTx.IsOK())
		txh := bres.Height
		apph := txh + 1 // this is where the tx will be applied to the state

		// wait before querying
		err = client.WaitForHeight(c, apph, nil)
		require.NoError(t, err)

		_qres, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: false})
		require.NoError(t, err)
		qres := _qres.Response
		if assert.True(t, qres.IsOK()) {
			assert.Equal(t, k, qres.Key)
			assert.EqualValues(t, v, qres.Value)
		}

		// make sure we can lookup the tx with proof
		ptx, err := c.Tx(ctx, bres.Hash, true)
		require.NoError(t, err)
		assert.EqualValues(t, txh, ptx.Height)
		assert.EqualValues(t, tx, ptx.Tx)

		// and we can even check the block is added
		block, err := c.Block(ctx, &apph)
		require.NoError(t, err)
		appHash := block.Block.Header.AppHash
		assert.True(t, len(appHash) > 0)
		assert.EqualValues(t, apph, block.Block.Header.Height)

		blockByHash, err := c.BlockByHash(ctx, block.BlockID.Hash)
		require.NoError(t, err)
		require.Equal(t, block, blockByHash)

		// now check the results
		blockResults, err := c.BlockResults(ctx, &txh)
		require.NoError(t, err, "%d: %+v", i, err)
		assert.Equal(t, txh, blockResults.Height)
		if assert.Equal(t, 1, len(blockResults.TxsResults)) {
			// check success code
			assert.EqualValues(t, 0, blockResults.TxsResults[0].Code)
		}

		// check blockchain info, now that we know there is info
		info, err := c.BlockchainInfo(ctx, apph, apph)
		require.NoError(t, err)
		assert.True(t, info.LastHeight >= apph)
		if assert.Equal(t, 1, len(info.BlockMetas)) {
			lastMeta := info.BlockMetas[0]
			assert.EqualValues(t, apph, lastMeta.Header.Height)
			blockData := block.Block
			assert.Equal(t, blockData.Header.AppHash, lastMeta.Header.AppHash)
			assert.Equal(t, block.BlockID, lastMeta.BlockID)
		}

		// and get the corresponding commit with the same apphash
		commit, err := c.Commit(ctx, &apph)
		require.NoError(t, err)
		cappHash := commit.Header.AppHash
		assert.Equal(t, appHash, cappHash)
		assert.NotNil(t, commit.Commit)

		// compare the commits (note Commit(2) has commit from Block(3))
		h = apph - 1
		commit2, err := c.Commit(ctx, &h)
		require.NoError(t, err)
		assert.Equal(t, block.Block.LastCommitHash, commit2.Commit.Hash())

		// and we got a proof that works!
		_pres, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: true})
		require.NoError(t, err)
		pres := _pres.Response
		assert.True(t, pres.IsOK())

		// XXX Test proof
	}
}

func TestBlockchainInfo(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n, conf := NodeSuite(t)

	for i, c := range GetClients(t, n, conf) {
		err := client.WaitForHeight(c, 10, nil)
		require.NoError(t, err)

		res, err := c.BlockchainInfo(ctx, 0, 0)
		require.Nil(t, err, "%d: %+v", i, err)
		assert.True(t, res.LastHeight > 0)
		assert.True(t, len(res.BlockMetas) > 0)

		res, err = c.BlockchainInfo(ctx, 1, 1)
		require.Nil(t, err, "%d: %+v", i, err)
		assert.True(t, res.LastHeight > 0)
		assert.True(t, len(res.BlockMetas) == 1)

		res, err = c.BlockchainInfo(ctx, 1, 10000)
		require.Nil(t, err, "%d: %+v", i, err)
		assert.True(t, res.LastHeight > 0)
		assert.True(t, len(res.BlockMetas) < 100)
		for _, m := range res.BlockMetas {
			assert.NotNil(t, m)
		}

		res, err = c.BlockchainInfo(ctx, 10000, 1)
		require.NotNil(t, err)
		assert.Nil(t, res)
		assert.Contains(t, err.Error(), "can't be greater than max")
	}
}

func TestBroadcastTxSync(t *testing.T) {
	n, conf := NodeSuite(t)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// TODO (melekes): use mempool which is set on RPC rather than getting it from node
	mempool := getMempool(t, n)
	initMempoolSize := mempool.Size()

	for i, c := range GetClients(t, n, conf) {
		_, _, tx := MakeTxKV()
		bres, err := c.BroadcastTxSync(ctx, tx)
		require.Nil(t, err, "%d: %+v", i, err)
		require.Equal(t, bres.Code, abci.CodeTypeOK) // FIXME

		require.Equal(t, initMempoolSize+1, mempool.Size())

		txs := mempool.ReapMaxTxs(len(tx))
		require.EqualValues(t, tx, txs[0])
		mempool.Flush()
	}
}

func getMempool(t *testing.T, srv service.Service) mempl.Mempool {
	t.Helper()
	n, ok := srv.(interface {
		Mempool() mempl.Mempool
	})
	require.True(t, ok)
	return n.Mempool()
}

func TestBroadcastTxCommit(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n, conf := NodeSuite(t)

	mempool := getMempool(t, n)
	for i, c := range GetClients(t, n, conf) {
		_, _, tx := MakeTxKV()
		bres, err := c.BroadcastTxCommit(ctx, tx)
		require.Nil(t, err, "%d: %+v", i, err)
		require.True(t, bres.CheckTx.IsOK())
		require.True(t, bres.DeliverTx.IsOK())

		require.Equal(t, 0, mempool.Size())
	}
}

func TestUnconfirmedTxs(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	_, _, tx := MakeTxKV()
	ch := make(chan *abci.Response, 1)

	n, conf := NodeSuite(t)
	mempool := getMempool(t, n)
	err := mempool.CheckTx(ctx, tx, func(resp *abci.Response) { ch <- resp }, mempl.TxInfo{})

	require.NoError(t, err)

	// wait for tx to arrive in the mempool.
	select {
	case <-ch:
	case <-time.After(5 * time.Second):
		t.Error("Timed out waiting for CheckTx callback")
	}

	for _, c := range GetClients(t, n, conf) {
		mc := c.(client.MempoolClient)
		limit := 1
		res, err := mc.UnconfirmedTxs(ctx, &limit)
		require.NoError(t, err)

		assert.Equal(t, 1, res.Count)
		assert.Equal(t, 1, res.Total)
		assert.Equal(t, mempool.SizeBytes(), res.TotalBytes)
		assert.Exactly(t, types.Txs{tx}, types.Txs(res.Txs))
	}

	mempool.Flush()
}

func TestNumUnconfirmedTxs(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	_, _, tx := MakeTxKV()

	n, conf := NodeSuite(t)
	ch := make(chan *abci.Response, 1)
	mempool := getMempool(t, n)

	err := mempool.CheckTx(ctx, tx, func(resp *abci.Response) { ch <- resp }, mempl.TxInfo{})
	require.NoError(t, err)

	// wait for tx to arrive in the mempool.
	select {
	case <-ch:
	case <-time.After(5 * time.Second):
		t.Error("Timed out waiting for CheckTx callback")
	}

	mempoolSize := mempool.Size()
	for i, c := range GetClients(t, n, conf) {
		mc, ok := c.(client.MempoolClient)
		require.True(t, ok, "%d", i)
		res, err := mc.NumUnconfirmedTxs(ctx)
		require.Nil(t, err, "%d: %+v", i, err)

		assert.Equal(t, mempoolSize, res.Count)
		assert.Equal(t, mempoolSize, res.Total)
		assert.Equal(t, mempool.SizeBytes(), res.TotalBytes)
	}

	mempool.Flush()
}

func TestCheckTx(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n, conf := NodeSuite(t)
	mempool := getMempool(t, n)

	for _, c := range GetClients(t, n, conf) {
		_, _, tx := MakeTxKV()

		res, err := c.CheckTx(ctx, tx)
		require.NoError(t, err)
		assert.Equal(t, abci.CodeTypeOK, res.Code)

		assert.Equal(t, 0, mempool.Size(), "mempool must be empty")
	}
}

func TestTx(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	n, conf := NodeSuite(t)

	c := getHTTPClient(t, conf)

	// first we broadcast a tx
	_, _, tx := MakeTxKV()
	bres, err := c.BroadcastTxCommit(ctx, tx)
	require.Nil(t, err, "%+v", err)

	txHeight := bres.Height
	txHash := bres.Hash

	anotherTxHash := types.Tx("a different tx").Hash()

	cases := []struct {
		valid bool
		prove bool
		hash  []byte
	}{
		// only valid if correct hash provided
		{true, false, txHash},
		{true, true, txHash},
		{false, false, anotherTxHash},
		{false, true, anotherTxHash},
		{false, false, nil},
		{false, true, nil},
	}

	for i, c := range GetClients(t, n, conf) {
		for j, tc := range cases {
			t.Logf("client %d, case %d", i, j)

			// now we query for the tx.
			// since there's only one tx, we know index=0.
			ptx, err := c.Tx(ctx, tc.hash, tc.prove)

			if !tc.valid {
				require.NotNil(t, err)
			} else {
				require.Nil(t, err, "%+v", err)
				assert.EqualValues(t, txHeight, ptx.Height)
				assert.EqualValues(t, tx, ptx.Tx)
				assert.Zero(t, ptx.Index)
				assert.True(t, ptx.TxResult.IsOK())
				assert.EqualValues(t, txHash, ptx.Hash)

				// time to verify the proof
				proof := ptx.Proof
				if tc.prove && assert.EqualValues(t, tx, proof.Data) {
					assert.NoError(t, proof.Proof.Verify(proof.RootHash, txHash))
				}
			}
		}
	}
}

func TestTxSearchWithTimeout(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	_, conf := NodeSuite(t)
	timeoutClient := getHTTPClientWithTimeout(t, conf, 10*time.Second)

	_, _, tx := MakeTxKV()
	_, err := timeoutClient.BroadcastTxCommit(ctx, tx)
	require.NoError(t, err)

	// query using a compositeKey (see kvstore application)
	result, err := timeoutClient.TxSearch(ctx, "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc")
	require.Nil(t, err)
	require.Greater(t, len(result.Txs), 0, "expected a lot of transactions")
}

func TestTxSearch(t *testing.T) {
	n, conf := NodeSuite(t)
	c := getHTTPClient(t, conf)

	// first we broadcast a few txs
	for i := 0; i < 10; i++ {
		_, _, tx := MakeTxKV()
		_, err := c.BroadcastTxCommit(context.Background(), tx)
		require.NoError(t, err)
	}

	// since we're not using an isolated test server, we'll have lingering transactions
	// from other tests as well
	result, err := c.TxSearch(context.Background(), "tx.height >= 0", true, nil, nil, "asc")
	require.NoError(t, err)
	txCount := len(result.Txs)

	// pick out the last tx to have something to search for in tests
	find := result.Txs[len(result.Txs)-1]
	anotherTxHash := types.Tx("a different tx").Hash()

	for i, c := range GetClients(t, n, conf) {
		t.Logf("client %d", i)

		// now we query for the tx.
		result, err := c.TxSearch(context.Background(), fmt.Sprintf("tx.hash='%v'", find.Hash), true, nil, nil, "asc")
		require.Nil(t, err)
		require.Len(t, result.Txs, 1)
		require.Equal(t, find.Hash, result.Txs[0].Hash)

		ptx := result.Txs[0]
		assert.EqualValues(t, find.Height, ptx.Height)
		assert.EqualValues(t, find.Tx, ptx.Tx)
		assert.Zero(t, ptx.Index)
		assert.True(t, ptx.TxResult.IsOK())
		assert.EqualValues(t, find.Hash, ptx.Hash)

		// time to verify the proof
		if assert.EqualValues(t, find.Tx, ptx.Proof.Data) {
			assert.NoError(t, ptx.Proof.Proof.Verify(ptx.Proof.RootHash, find.Hash))
		}

		// query by height
		result, err = c.TxSearch(context.Background(), fmt.Sprintf("tx.height=%d", find.Height), true, nil, nil, "asc")
		require.Nil(t, err)
		require.Len(t, result.Txs, 1)

		// query for non existing tx
		result, err = c.TxSearch(context.Background(), fmt.Sprintf("tx.hash='%X'", anotherTxHash), false, nil, nil, "asc")
		require.Nil(t, err)
		require.Len(t, result.Txs, 0)

		// query using a compositeKey (see kvstore application)
		result, err = c.TxSearch(context.Background(), "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc")
		require.Nil(t, err)
		require.Greater(t, len(result.Txs), 0, "expected a lot of transactions")

		// query using an index key
		result, err = c.TxSearch(context.Background(), "app.index_key='index is working'", false, nil, nil, "asc")
		require.Nil(t, err)
		require.Greater(t, len(result.Txs), 0, "expected a lot of transactions")

		// query using a noindex key
		result, err = c.TxSearch(context.Background(), "app.noindex_key='index is working'", false, nil, nil, "asc")
		require.Nil(t, err)
		require.Equal(t, len(result.Txs), 0, "expected no transactions")

		// query using a compositeKey (see kvstore application) and height
		result, err = c.TxSearch(context.Background(),
			"app.creator='Cosmoshi Netowoko' AND tx.height<10000", true, nil, nil, "asc")
		require.Nil(t, err)
		require.Greater(t, len(result.Txs), 0, "expected a lot of transactions")

		// query a non existing tx with page 1 and txsPerPage 1
		perPage := 1
		result, err = c.TxSearch(context.Background(), "app.creator='Cosmoshi Neetowoko'", true, nil, &perPage, "asc")
		require.Nil(t, err)
		require.Len(t, result.Txs, 0)

		// check sorting
		result, err = c.TxSearch(context.Background(), "tx.height >= 1", false, nil, nil, "asc")
		require.Nil(t, err)
		for k := 0; k < len(result.Txs)-1; k++ {
			require.LessOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height)
			require.LessOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index)
		}

		result, err = c.TxSearch(context.Background(), "tx.height >= 1", false, nil, nil, "desc")
		require.Nil(t, err)
		for k := 0; k < len(result.Txs)-1; k++ {
			require.GreaterOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height)
			require.GreaterOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index)
		}

		// check pagination
		perPage = 3
		var (
			seen      = map[int64]bool{}
			maxHeight int64
			pages     = int(math.Ceil(float64(txCount) / float64(perPage)))
		)

		for page := 1; page <= pages; page++ {
			page := page
			result, err := c.TxSearch(context.Background(), "tx.height >= 1", false, &page, &perPage, "asc")
			require.NoError(t, err)
			if page < pages {
				require.Len(t, result.Txs, perPage)
			} else {
				require.LessOrEqual(t, len(result.Txs), perPage)
			}
			require.Equal(t, txCount, result.TotalCount)
			for _, tx := range result.Txs {
				require.False(t, seen[tx.Height],
					"Found duplicate height %v in page %v", tx.Height, page)
				require.Greater(t, tx.Height, maxHeight,
					"Found decreasing height %v (max seen %v) in page %v", tx.Height, maxHeight, page)
				seen[tx.Height] = true
				maxHeight = tx.Height
			}
		}
		require.Len(t, seen, txCount)
	}
}

func TestBatchedJSONRPCCalls(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	_, conf := NodeSuite(t)
	c := getHTTPClient(t, conf)
	testBatchedJSONRPCCalls(ctx, t, c)
}

func testBatchedJSONRPCCalls(ctx context.Context, t *testing.T, c *rpchttp.HTTP) {
	k1, v1, tx1 := MakeTxKV()
	k2, v2, tx2 := MakeTxKV()

	batch := c.NewBatch()
	r1, err := batch.BroadcastTxCommit(ctx, tx1)
	require.NoError(t, err)
	r2, err := batch.BroadcastTxCommit(ctx, tx2)
	require.NoError(t, err)
	require.Equal(t, 2, batch.Count())
	bresults, err := batch.Send(ctx)
	require.NoError(t, err)
	require.Len(t, bresults, 2)
	require.Equal(t, 0, batch.Count())

	bresult1, ok := bresults[0].(*ctypes.ResultBroadcastTxCommit)
	require.True(t, ok)
	require.Equal(t, *bresult1, *r1)
	bresult2, ok := bresults[1].(*ctypes.ResultBroadcastTxCommit)
	require.True(t, ok)
	require.Equal(t, *bresult2, *r2)
	apph := tmmath.MaxInt64(bresult1.Height, bresult2.Height) + 1

	err = client.WaitForHeight(c, apph, nil)
	require.NoError(t, err)

	q1, err := batch.ABCIQuery(ctx, "/key", k1)
	require.NoError(t, err)
	q2, err := batch.ABCIQuery(ctx, "/key", k2)
	require.NoError(t, err)
	require.Equal(t, 2, batch.Count())
	qresults, err := batch.Send(ctx)
	require.NoError(t, err)
	require.Len(t, qresults, 2)
	require.Equal(t, 0, batch.Count())

	qresult1, ok := qresults[0].(*ctypes.ResultABCIQuery)
	require.True(t, ok)
	require.Equal(t, *qresult1, *q1)
	qresult2, ok := qresults[1].(*ctypes.ResultABCIQuery)
	require.True(t, ok)
	require.Equal(t, *qresult2, *q2)

	require.Equal(t, qresult1.Response.Key, k1)
	require.Equal(t, qresult2.Response.Key, k2)
	require.Equal(t, qresult1.Response.Value, v1)
	require.Equal(t, qresult2.Response.Value, v2)
}

func TestBatchedJSONRPCCallsCancellation(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	_, conf := NodeSuite(t)
	c := getHTTPClient(t, conf)
	_, _, tx1 := MakeTxKV()
	_, _, tx2 := MakeTxKV()

	batch := c.NewBatch()
	_, err := batch.BroadcastTxCommit(ctx, tx1)
	require.NoError(t, err)
	_, err = batch.BroadcastTxCommit(ctx, tx2)
	require.NoError(t, err)
	// we should have 2 requests waiting
	require.Equal(t, 2, batch.Count())
	// we want to make sure we cleared 2 pending requests
	require.Equal(t, 2, batch.Clear())
	// now there should be no batched requests
	require.Equal(t, 0, batch.Count())
}

func TestSendingEmptyRequestBatch(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	_, conf := NodeSuite(t)
	c := getHTTPClient(t, conf)
	batch := c.NewBatch()
	_, err := batch.Send(ctx)
	require.Error(t, err, "sending an empty batch of JSON RPC requests should result in an error")
}

func TestClearingEmptyRequestBatch(t *testing.T) {
	_, conf := NodeSuite(t)
	c := getHTTPClient(t, conf)
	batch := c.NewBatch()
	require.Zero(t, batch.Clear(), "clearing an empty batch of JSON RPC requests should result in a 0 result")
}

func TestConcurrentJSONRPCBatching(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	_, conf := NodeSuite(t)
	var wg sync.WaitGroup
	c := getHTTPClient(t, conf)
	for i := 0; i < 50; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			testBatchedJSONRPCCalls(ctx, t, c)
		}()
	}
	wg.Wait()
}