github.com/line/ostracon@v1.0.10-0.20230328032236-7f20145f065d/mempool/clist_mempool.go

package mempool

import (
	"container/list"
	"crypto/sha256"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	abci "github.com/tendermint/tendermint/abci/types"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"

	ocabci "github.com/line/ostracon/abci/types"
	cfg "github.com/line/ostracon/config"
	auto "github.com/line/ostracon/libs/autofile"
	"github.com/line/ostracon/libs/clist"
	"github.com/line/ostracon/libs/log"
	tmmath "github.com/line/ostracon/libs/math"
	tmos "github.com/line/ostracon/libs/os"
	tmsync "github.com/line/ostracon/libs/sync"
	"github.com/line/ostracon/p2p"
	"github.com/line/ostracon/proxy"
	"github.com/line/ostracon/types"
)

// TxKeySize is the size of the transaction key index.
const TxKeySize = sha256.Size

var newline = []byte("\n")

//--------------------------------------------------------------------------------

// CListMempool is an ordered in-memory pool for transactions before they are
// proposed in a consensus round. Transaction validity is checked using the
// CheckTx abci message before the transaction is added to the pool. The
// mempool uses a concurrent list structure for storing transactions that can
// be efficiently accessed by multiple concurrent readers.
type CListMempool struct {
	// Atomic integers
	height   int64 // the last block Update()'d to
	txsBytes int64 // total size of mempool, in bytes

	reserved      int   // number of txs currently being checked; counted when deciding whether the mempool is full
	reservedBytes int64 // total size of txs currently being checked; counted when deciding whether the mempool is full
	reservedMtx   sync.Mutex

	// notify listeners (ie. consensus) when txs are available
	notifiedTxsAvailable bool
	txsAvailable         chan struct{} // fires once for each height, when the mempool is not empty

	config *cfg.MempoolConfig

	// Exclusive mutex for Update method to prevent concurrent execution of
	// CheckTx or ReapMaxBytesMaxGas(ReapMaxTxs) methods.
	updateMtx tmsync.RWMutex
	preCheck  PreCheckFunc

	chReqCheckTx chan *requestCheckTxAsync

	postCheck PostCheckFunc

	wal          *auto.AutoFile // a log of mempool txs
	txs          *clist.CList   // concurrent linked-list of good txs
	proxyAppConn proxy.AppConnMempool

	// Map for quick access to txs to record sender in CheckTx.
	// txsMap: txKey -> CElement
	txsMap sync.Map

	// Keep a cache of already-seen txs.
	// This reduces the pressure on the proxyApp.
	cache txCache

	logger log.Logger

	metrics *Metrics
}

type requestCheckTxAsync struct {
	tx        types.Tx
	txInfo    TxInfo
	prepareCb func(error)
	checkTxCb func(*ocabci.Response)
}

var _ Mempool = &CListMempool{}

// CListMempoolOption sets an optional parameter on the mempool.
type CListMempoolOption func(*CListMempool)

// NewCListMempool returns a new mempool with the given configuration and
// connection to an application.
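//
// Illustrative sketch (not from the upstream source): one way a caller might
// wire the mempool up, assuming an already-established proxy.AppConnMempool
// (appConnMem), that cfg.DefaultMempoolConfig exists as in the Tendermint
// config package this fork derives from, and that TxInfo carries a SenderID
// field:
//
//	memCfg := cfg.DefaultMempoolConfig()
//	mp := NewCListMempool(memCfg, appConnMem, 0, WithMetrics(NopMetrics()))
//	mp.SetLogger(log.NewNopLogger())
//	res, err := mp.CheckTxSync(types.Tx("k=v"), TxInfo{SenderID: 0})
//	if err == nil && res.GetCheckTx().Code == ocabci.CodeTypeOK {
//		txs := mp.ReapMaxBytesMaxGas(-1, -1) // reap everything currently in the pool
//		_ = txs
//	}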
func NewCListMempool(
	config *cfg.MempoolConfig,
	proxyAppConn proxy.AppConnMempool,
	height int64,
	options ...CListMempoolOption,
) *CListMempool {
	mempool := &CListMempool{
		config:       config,
		proxyAppConn: proxyAppConn,
		txs:          clist.New(),
		height:       height,
		chReqCheckTx: make(chan *requestCheckTxAsync, config.Size),
		logger:       log.NewNopLogger(),
		metrics:      NopMetrics(),
	}
	if config.CacheSize > 0 {
		mempool.cache = newMapTxCache(config.CacheSize)
	} else {
		mempool.cache = nopTxCache{}
	}
	proxyAppConn.SetGlobalCallback(mempool.globalCb)
	for _, option := range options {
		option(mempool)
	}
	go mempool.checkTxAsyncReactor()
	return mempool
}

// NOTE: not thread safe - should only be called once, on startup
func (mem *CListMempool) EnableTxsAvailable() {
	mem.txsAvailable = make(chan struct{}, 1)
}

// SetLogger sets the Logger.
func (mem *CListMempool) SetLogger(l log.Logger) {
	mem.logger = l
}

// WithPreCheck sets a filter for the mempool to reject a tx if f(tx) returns
// false. This is run before CheckTx. Only applies to the first created block.
// After that, Update overwrites the existing value.
func WithPreCheck(f PreCheckFunc) CListMempoolOption {
	return func(mem *CListMempool) { mem.preCheck = f }
}

// WithPostCheck sets a filter for the mempool to reject a tx if f(tx) returns
// false. This is run after CheckTx. Only applies to the first created block.
// After that, Update overwrites the existing value.
func WithPostCheck(f PostCheckFunc) CListMempoolOption {
	return func(mem *CListMempool) { mem.postCheck = f }
}

// WithMetrics sets the metrics.
func WithMetrics(metrics *Metrics) CListMempoolOption {
	return func(mem *CListMempool) { mem.metrics = metrics }
}

func (mem *CListMempool) InitWAL() error {
	var (
		walDir  = mem.config.WalDir()
		walFile = walDir + "/wal"
	)

	const perm = 0700
	if err := tmos.EnsureDir(walDir, perm); err != nil {
		return err
	}

	af, err := auto.OpenAutoFile(walFile)
	if err != nil {
		return fmt.Errorf("can't open autofile %s: %w", walFile, err)
	}

	mem.wal = af
	return nil
}

func (mem *CListMempool) CloseWAL() {
	if err := mem.wal.Close(); err != nil {
		mem.logger.Error("Error closing WAL", "err", err)
	}
	mem.wal = nil
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) Lock() {
	mem.updateMtx.Lock()
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) Unlock() {
	mem.updateMtx.Unlock()
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) Size() int {
	return mem.txs.Len()
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) TxsBytes() int64 {
	return atomic.LoadInt64(&mem.txsBytes)
}

// Lock() must be held by the caller during execution.
func (mem *CListMempool) FlushAppConn() error {
	_, err := mem.proxyAppConn.FlushSync()
	return err
}

// XXX: Unsafe! Calling Flush may leave mempool in inconsistent state.
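// Flush removes every tx from the list, clears the tx map and the seen-tx
// cache, and resets the byte counter; reservations made for in-flight CheckTx
// requests are left untouched and are released by their own callbacks.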
func (mem *CListMempool) Flush() {
	mem.updateMtx.Lock()
	defer mem.updateMtx.Unlock()

	_ = atomic.SwapInt64(&mem.txsBytes, 0)
	mem.cache.Reset()

	for e := mem.txs.Front(); e != nil; e = e.Next() {
		mem.txs.Remove(e)
		e.DetachPrev()
	}

	mem.txsMap.Range(func(key, _ interface{}) bool {
		mem.txsMap.Delete(key)
		return true
	})
}

// TxsFront returns the first transaction in the ordered list for peer
// goroutines to call .NextWait() on.
// FIXME: leaking implementation details!
//
// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) TxsFront() *clist.CElement {
	return mem.txs.Front()
}

// TxsWaitChan returns a channel to wait on transactions. It will be closed
// once the mempool is not empty (ie. the internal `mem.txs` has at least one
// element)
//
// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) TxsWaitChan() <-chan struct{} {
	return mem.txs.WaitChan()
}

// It blocks if we're waiting on Update() or Reap().
// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) CheckTxSync(tx types.Tx, txInfo TxInfo) (res *ocabci.Response, err error) {
	mem.updateMtx.RLock()
	// use defer to unlock mutex because application (*local client*) might panic
	defer mem.updateMtx.RUnlock()

	if err = mem.prepareCheckTx(tx, txInfo); err != nil {
		return res, err
	}

	// CONTRACT: `app.CheckTxSync()` should check whether `GasWanted` is valid (0 <= GasWanted <= block.maxGas)
	var r *ocabci.ResponseCheckTx
	r, err = mem.proxyAppConn.CheckTxSync(abci.RequestCheckTx{Tx: tx})
	if err != nil {
		return res, err
	}

	res = ocabci.ToResponseCheckTx(*r)
	mem.reqResCb(tx, txInfo.SenderID, txInfo.SenderP2PID, res, nil)
	return res, err
}

// cb: A callback from the CheckTx command.
// It gets called from another goroutine.
//
// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) CheckTxAsync(tx types.Tx, txInfo TxInfo, prepareCb func(error),
	checkTxCb func(*ocabci.Response)) {
	mem.chReqCheckTx <- &requestCheckTxAsync{tx: tx, txInfo: txInfo, prepareCb: prepareCb, checkTxCb: checkTxCb}
}

func (mem *CListMempool) checkTxAsyncReactor() {
	for req := range mem.chReqCheckTx {
		mem.checkTxAsync(req.tx, req.txInfo, req.prepareCb, req.checkTxCb)
	}
}

// It blocks if we're waiting on Update() or Reap().
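//
// Note on locking: the read lock taken here is not released by a plain defer.
// On the happy path it is released inside the CheckTxAsync response callback
// (after checkTxCb runs); it is released early only when prepareCheckTx fails
// or when the application client panics.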
func (mem *CListMempool) checkTxAsync(tx types.Tx, txInfo TxInfo, prepareCb func(error),
	checkTxCb func(*ocabci.Response)) {
	mem.updateMtx.RLock()
	defer func() {
		if r := recover(); r != nil {
			mem.updateMtx.RUnlock()
			panic(r)
		}
	}()

	err := mem.prepareCheckTx(tx, txInfo)
	if prepareCb != nil {
		prepareCb(err)
	}
	if err != nil {
		mem.updateMtx.RUnlock()
		return
	}

	// CONTRACT: `app.CheckTxAsync()` should check whether `GasWanted` is valid (0 <= GasWanted <= block.maxGas)
	mem.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{Tx: tx}, func(res *ocabci.Response) {
		mem.reqResCb(tx, txInfo.SenderID, txInfo.SenderP2PID, res, func(response *ocabci.Response) {
			if checkTxCb != nil {
				checkTxCb(response)
			}
			mem.updateMtx.RUnlock()
		})
	})
}

// CONTRACT: the caller must hold `mem.updateMtx.RLock()`.
func (mem *CListMempool) prepareCheckTx(tx types.Tx, txInfo TxInfo) error {
	// For keeping the consistency between `mem.txs` and `mem.txsMap`
	if _, ok := mem.txsMap.Load(TxKey(tx)); ok {
		return ErrTxInMap
	}

	txSize := len(tx)

	if err := mem.isFull(txSize); err != nil {
		return err
	}

	if txSize > mem.config.MaxTxBytes {
		return ErrTxTooLarge{mem.config.MaxTxBytes, txSize}
	}

	if mem.preCheck != nil {
		if err := mem.preCheck(tx); err != nil {
			return ErrPreCheck{err}
		}
	}

	// NOTE: writing to the WAL and calling proxy must be done before adding tx
	// to the cache. otherwise, if either of them fails, next time CheckTx is
	// called with tx, ErrTxInCache will be returned without tx being checked at
	// all even once.
	if mem.wal != nil {
		// TODO: Notify administrators when WAL fails
		_, err := mem.wal.Write(append([]byte(tx), newline...))
		if err != nil {
			return fmt.Errorf("wal.Write: %w", err)
		}
	}

	// NOTE: proxyAppConn may error if tx buffer is full
	if err := mem.proxyAppConn.Error(); err != nil {
		return err
	}

	if !mem.cache.Push(tx) {
		// Record a new sender for a tx we've already seen.
		// Note it's possible a tx is still in the cache but no longer in the mempool
		// (eg. after committing a block, txs are removed from mempool but not cache),
		// so we only record the sender for txs still in the mempool.
		if e, ok := mem.txsMap.Load(TxKey(tx)); ok {
			memTx := e.(*clist.CElement).Value.(*mempoolTx)
			memTx.senders.LoadOrStore(txInfo.SenderID, true)
			// TODO: consider punishing peer for dups;
			// it's non-trivial since invalid txs can become valid,
			// but they can spam the same tx with little cost to them atm.
		}

		return ErrTxInCache
	}

	// Reserve mempool capacity; this must happen just before calling `mem.proxyAppConn.CheckTxAsync()`.
	if err := mem.reserve(int64(txSize)); err != nil {
		// remove from cache
		mem.cache.Remove(tx)
		return err
	}

	return nil
}

// Global callback that will be called after every ABCI response.
// Having a single global callback avoids needing to set a callback for each request.
// However, processing the checkTx response requires the peerID (so we can track which peer sent us which txs),
// and peerID is not included in the ABCI request, so we have to set request-specific callbacks that
// include this information. If we're not in the midst of a recheck, this function will just return,
// so the request-specific callback can do the work.
//
// When rechecking, we don't need the peerID, so the recheck callback happens
// here.
func (mem *CListMempool) globalCb(req *ocabci.Request, res *ocabci.Response) {
	checkTxReq := req.GetCheckTx()
	if checkTxReq == nil {
		return
	}

	if checkTxReq.Type == abci.CheckTxType_Recheck {
		mem.metrics.RecheckCount.Add(1)
		mem.resCbRecheck(req, res)

		// update metrics
		mem.metrics.Size.Set(float64(mem.Size()))
	}
}

// Request-specific callback that should be set on individual reqRes objects
// to incorporate local information when processing the response.
// This allows us to track the peer that sent us this tx, so we can avoid sending it back to them.
// NOTE: alternatively, we could include this information in the ABCI request itself.
//
// External callers of CheckTx, like the RPC, can also pass an externalCb through here that is called
// when all other response processing is complete.
//
// Used in CheckTx to record the PeerID who sent us the tx.
func (mem *CListMempool) reqResCb(
	tx []byte,
	peerID uint16,
	peerP2PID p2p.ID,
	res *ocabci.Response,
	externalCb func(*ocabci.Response),
) {
	mem.resCbFirstTime(tx, peerID, peerP2PID, res)

	// update metrics
	mem.metrics.Size.Set(float64(mem.Size()))

	// passed in by the caller of CheckTx, eg. the RPC
	if externalCb != nil {
		externalCb(res)
	}
}

// Called from:
//  - resCbFirstTime (lock not held) if tx is valid
func (mem *CListMempool) addTx(memTx *mempoolTx) {
	e := mem.txs.PushBack(memTx)
	mem.txsMap.Store(TxKey(memTx.tx), e)
	atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx)))
	mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx)))
}

// Called from:
//  - Update (lock held) if tx was committed
//  - resCbRecheck (lock not held) if tx was invalidated
func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) {
	mem.txs.Remove(elem)
	elem.DetachPrev()
	mem.txsMap.Delete(TxKey(tx))
	atomic.AddInt64(&mem.txsBytes, int64(-len(tx)))

	if removeFromCache {
		mem.cache.Remove(tx)
	}
}

// RemoveTxByKey removes a transaction from the mempool by its TxKey index.
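// If removeFromCache is true, the tx is also evicted from the seen-tx cache so
// it can be resubmitted later. For example, a caller holding the raw tx bytes
// could drop it (keeping it in the cache) with:
//
//	mem.RemoveTxByKey(TxKey(tx), false)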
func (mem *CListMempool) RemoveTxByKey(txKey [TxKeySize]byte, removeFromCache bool) {
	if e, ok := mem.txsMap.Load(txKey); ok {
		memTx := e.(*clist.CElement).Value.(*mempoolTx)
		if memTx != nil {
			mem.removeTx(memTx.tx, e.(*clist.CElement), removeFromCache)
		}
	}
}

func (mem *CListMempool) isFull(txSize int) error {
	var (
		memSize  = mem.Size()
		txsBytes = mem.TxsBytes()
	)

	if memSize >= mem.config.Size || int64(txSize)+txsBytes > mem.config.MaxTxsBytes {
		return ErrMempoolIsFull{
			memSize, mem.config.Size,
			txsBytes, mem.config.MaxTxsBytes,
		}
	}

	return nil
}

func (mem *CListMempool) reserve(txSize int64) error {
	mem.reservedMtx.Lock()
	defer mem.reservedMtx.Unlock()

	var (
		memSize  = mem.Size()
		txsBytes = mem.TxsBytes()
	)

	if memSize+mem.reserved >= mem.config.Size || txSize+mem.reservedBytes+txsBytes > mem.config.MaxTxsBytes {
		return ErrMempoolIsFull{
			memSize + mem.reserved, mem.config.Size,
			txsBytes + mem.reservedBytes, mem.config.MaxTxsBytes,
		}
	}

	mem.reserved++
	mem.reservedBytes += txSize
	return nil
}

func (mem *CListMempool) releaseReserve(txSize int64) {
	mem.reservedMtx.Lock()
	defer mem.reservedMtx.Unlock()

	mem.reserved--
	mem.reservedBytes -= txSize
}

// callback, which is called after the app checked the tx for the first time.
//
// The case where the app checks the tx for the second and subsequent times is
// handled by the resCbRecheck callback.
func (mem *CListMempool) resCbFirstTime(
	tx []byte,
	peerID uint16,
	peerP2PID p2p.ID,
	res *ocabci.Response,
) {
	switch r := res.Value.(type) {
	case *ocabci.Response_CheckTx:
		if r.CheckTx.Code == ocabci.CodeTypeOK {
			memTx := &mempoolTx{
				height:    mem.height,
				gasWanted: r.CheckTx.GasWanted,
				tx:        tx,
			}
			memTx.senders.Store(peerID, true)
			mem.addTx(memTx)
			mem.logger.Debug("added good transaction",
				"tx", txID(tx),
				"res", r,
				"height", memTx.height,
				"total", mem.Size(),
			)
			mem.notifyTxsAvailable()
		} else {
			// ignore bad transaction
			mem.logger.Debug("rejected bad transaction",
				"tx", txID(tx), "peerID", peerP2PID, "res", r)
			mem.metrics.FailedTxs.Add(1)
			if !mem.config.KeepInvalidTxsInCache {
				// remove from cache (it might be good later)
				mem.cache.Remove(tx)
			}
		}

		// release the reservation regardless of whether the tx was accepted or not
		mem.releaseReserve(int64(len(tx)))
	default:
		// ignore other messages
	}
}

// callback, which is called after the app rechecked the tx.
//
// The case where the app checks the tx for the first time is handled by the
// resCbFirstTime callback.
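//
// If the recheck result is non-OK, or the configured postCheck filter rejects
// the tx, the tx is removed from the mempool (and from the cache unless
// KeepInvalidTxsInCache is set).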
func (mem *CListMempool) resCbRecheck(req *ocabci.Request, res *ocabci.Response) {
	switch r := res.Value.(type) {
	case *ocabci.Response_CheckTx:
		tx := req.GetCheckTx().Tx
		txHash := TxKey(tx)
		e, ok := mem.txsMap.Load(txHash)
		if !ok {
			mem.logger.Debug("re-CheckTx transaction does not exist", "expected", types.Tx(tx))
			return
		}
		var postCheckErr error
		if r.CheckTx.Code == ocabci.CodeTypeOK {
			if mem.postCheck == nil {
				return
			}
			postCheckErr = mem.postCheck(tx, r.CheckTx)
			if postCheckErr == nil {
				return
			}
			r.CheckTx.MempoolError = postCheckErr.Error()
		}
		celem := e.(*clist.CElement)
		// Tx became invalidated due to newly committed block.
		mem.logger.Debug("tx is no longer valid", "tx", txID(tx), "res", r, "err", postCheckErr)
		// NOTE: we remove tx from the cache because it might be good later
		mem.removeTx(tx, celem, !mem.config.KeepInvalidTxsInCache)
	default:
		// ignore other messages
	}
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) TxsAvailable() <-chan struct{} {
	return mem.txsAvailable
}

func (mem *CListMempool) notifyTxsAvailable() {
	if mem.Size() == 0 {
		mem.logger.Info("notified txs available but mempool is empty!")
	}
	if mem.txsAvailable != nil && !mem.notifiedTxsAvailable {
		// channel cap is 1, so this will send once
		mem.notifiedTxsAvailable = true
		select {
		case mem.txsAvailable <- struct{}{}:
		default:
		}
	}
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs {
	mem.updateMtx.RLock()
	defer mem.updateMtx.RUnlock()

	var totalGas int64

	// TODO: we will get a performance boost if we have a good estimate of avg
	// size per tx, and set the initial capacity based off of that.
	// txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max/mem.avgTxSize))
	txs := make([]types.Tx, 0, mem.txs.Len())
	protoTxs := tmproto.Data{}
	for e := mem.txs.Front(); e != nil; e = e.Next() {
		memTx := e.Value.(*mempoolTx)

		protoTxs.Txs = append(protoTxs.Txs, memTx.tx)
		// Check total size requirement
		if maxBytes > -1 && int64(protoTxs.Size()) > maxBytes {
			return txs
		}
		// Check total gas requirement.
		// If maxGas is negative, skip this check.
		// Since newTotalGas < maxGas, which
		// must be non-negative, it follows that this won't overflow.
		newTotalGas := totalGas + memTx.gasWanted
		if maxGas > -1 && newTotalGas > maxGas {
			return txs
		}
		totalGas = newTotalGas
		txs = append(txs, memTx.tx)
	}
	return txs
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) ReapMaxBytesMaxGasMaxTxs(maxBytes, maxGas, maxTxs int64) types.Txs {
	mem.updateMtx.RLock()
	defer mem.updateMtx.RUnlock()

	var totalGas int64

	if maxTxs <= 0 {
		maxTxs = int64(mem.txs.Len())
	}

	// TODO: we will get a performance boost if we have a good estimate of avg
	// size per tx, and set the initial capacity based off of that.
	// txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max/mem.avgTxSize))
	txs := make([]types.Tx, 0, mem.txs.Len())
	protoTxs := tmproto.Data{}
	for e := mem.txs.Front(); e != nil && len(txs) < int(maxTxs); e = e.Next() {
		memTx := e.Value.(*mempoolTx)

		protoTxs.Txs = append(protoTxs.Txs, memTx.tx)
		// Check total size requirement
		if maxBytes > -1 && int64(protoTxs.Size()) > maxBytes {
			return txs
		}
		// Check total gas requirement.
		// If maxGas is negative, skip this check.
		// Since newTotalGas < maxGas, which
		// must be non-negative, it follows that this won't overflow.
		newTotalGas := totalGas + memTx.gasWanted
		if maxGas > -1 && newTotalGas > maxGas {
			return txs
		}
		totalGas = newTotalGas
		txs = append(txs, memTx.tx)
	}
	return txs
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) ReapMaxTxs(max int) types.Txs {
	mem.updateMtx.RLock()
	defer mem.updateMtx.RUnlock()

	if max < 0 {
		max = mem.txs.Len()
	}

	txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max))
	for e := mem.txs.Front(); e != nil && len(txs) <= max; e = e.Next() {
		memTx := e.Value.(*mempoolTx)
		txs = append(txs, memTx.tx)
	}
	return txs
}

// Lock() must be held by the caller during execution.
func (mem *CListMempool) Update(
	block *types.Block,
	deliverTxResponses []*abci.ResponseDeliverTx,
	preCheck PreCheckFunc,
	postCheck PostCheckFunc,
) (err error) {
	// Set height
	mem.height = block.Height
	mem.notifiedTxsAvailable = false

	if preCheck != nil {
		mem.preCheck = preCheck
	}
	if postCheck != nil {
		mem.postCheck = postCheck
	}

	for i, tx := range block.Txs {
		if deliverTxResponses[i].Code == ocabci.CodeTypeOK {
			// Add valid committed tx to the cache (if missing).
			_ = mem.cache.Push(tx)
		} else if !mem.config.KeepInvalidTxsInCache {
			// Allow invalid transactions to be resubmitted.
			mem.cache.Remove(tx)
		}

		// Remove committed tx from the mempool.
		//
		// Note an evil proposer can drop valid txs!
		// Mempool before:
		//   100 -> 101 -> 102
		// Block, proposed by an evil proposer:
		//   101 -> 102
		// Mempool after:
		//   100
		// https://github.com/tendermint/tendermint/issues/3322.
		if e, ok := mem.txsMap.Load(TxKey(tx)); ok {
			mem.removeTx(tx, e.(*clist.CElement), false)
		}
	}

	if mem.config.Recheck {
		// recheck non-committed txs to see if they became invalid
		recheckStartTime := time.Now().UnixNano()

		_, err = mem.proxyAppConn.BeginRecheckTxSync(ocabci.RequestBeginRecheckTx{
			Header: types.OC2PB.Header(&block.Header),
		})
		if err != nil {
			mem.logger.Error("error in proxyAppConn.BeginRecheckTxSync", "err", err)
		}
		mem.logger.Debug("recheck txs", "numtxs", mem.Size(), "height", block.Height)
		mem.recheckTxs()
		_, err = mem.proxyAppConn.EndRecheckTxSync(ocabci.RequestEndRecheckTx{Height: block.Height})
		if err != nil {
			mem.logger.Error("error in proxyAppConn.EndRecheckTxSync", "err", err)
		}

		recheckEndTime := time.Now().UnixNano()

		recheckTimeMs := float64(recheckEndTime-recheckStartTime) / 1000000
		mem.metrics.RecheckTime.Set(recheckTimeMs)
	}

	// Notify that there are some txs left.
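	// Since notifiedTxsAvailable was reset above for the new height, this
	// fires at most once per height.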
	if mem.Size() > 0 {
		mem.notifyTxsAvailable()
	}

	// Update metrics
	mem.metrics.Size.Set(float64(mem.Size()))

	return err
}

func (mem *CListMempool) recheckTxs() {
	if mem.Size() == 0 {
		return
	}

	wg := sync.WaitGroup{}

	// Push txs to proxyAppConn
	// NOTE: globalCb may be called concurrently.
	for e := mem.txs.Front(); e != nil; e = e.Next() {
		wg.Add(1)

		memTx := e.Value.(*mempoolTx)
		req := abci.RequestCheckTx{
			Tx:   memTx.tx,
			Type: abci.CheckTxType_Recheck,
		}

		mem.proxyAppConn.CheckTxAsync(req, func(res *ocabci.Response) {
			wg.Done()
		})
	}

	mem.proxyAppConn.FlushAsync(func(res *ocabci.Response) {})
	wg.Wait()
}

//--------------------------------------------------------------------------------

// mempoolTx is a transaction that successfully ran
type mempoolTx struct {
	height    int64    // height that this tx had been validated in
	gasWanted int64    // amount of gas this tx states it will require
	tx        types.Tx //

	// ids of peers who've sent us this tx (as a map for quick lookups).
	// senders: PeerID -> bool
	senders sync.Map
}

// Height returns the height for this transaction
func (memTx *mempoolTx) Height() int64 {
	return atomic.LoadInt64(&memTx.height)
}

//--------------------------------------------------------------------------------

type txCache interface {
	Reset()
	Push(tx types.Tx) bool
	Remove(tx types.Tx)
}

// mapTxCache maintains an LRU cache of transactions. This only stores the hash
// of the tx, due to memory concerns.
type mapTxCache struct {
	mtx      tmsync.Mutex
	size     int
	cacheMap map[[TxKeySize]byte]*list.Element
	list     *list.List
}

var _ txCache = (*mapTxCache)(nil)

// newMapTxCache returns a new mapTxCache.
func newMapTxCache(cacheSize int) *mapTxCache {
	return &mapTxCache{
		size:     cacheSize,
		cacheMap: make(map[[TxKeySize]byte]*list.Element, cacheSize),
		list:     list.New(),
	}
}

// Reset resets the cache to an empty state.
func (cache *mapTxCache) Reset() {
	cache.mtx.Lock()
	cache.cacheMap = make(map[[TxKeySize]byte]*list.Element, cache.size)
	cache.list.Init()
	cache.mtx.Unlock()
}

// Push adds the given tx to the cache and returns true. It returns
// false if tx is already in the cache.
func (cache *mapTxCache) Push(tx types.Tx) bool {
	cache.mtx.Lock()
	defer cache.mtx.Unlock()

	// Use the tx hash in the cache
	txHash := TxKey(tx)
	if moved, exists := cache.cacheMap[txHash]; exists {
		cache.list.MoveToBack(moved)
		return false
	}

	if cache.list.Len() >= cache.size {
		popped := cache.list.Front()
		if popped != nil {
			poppedTxHash := popped.Value.([TxKeySize]byte)
			delete(cache.cacheMap, poppedTxHash)
			cache.list.Remove(popped)
		}
	}
	e := cache.list.PushBack(txHash)
	cache.cacheMap[txHash] = e
	return true
}

// Remove removes the given tx from the cache.
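// It is a no-op if the tx is not in the cache.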
func (cache *mapTxCache) Remove(tx types.Tx) {
	cache.mtx.Lock()
	txHash := TxKey(tx)
	popped := cache.cacheMap[txHash]
	delete(cache.cacheMap, txHash)
	if popped != nil {
		cache.list.Remove(popped)
	}

	cache.mtx.Unlock()
}

type nopTxCache struct{}

var _ txCache = (*nopTxCache)(nil)

func (nopTxCache) Reset()             {}
func (nopTxCache) Push(types.Tx) bool { return true }
func (nopTxCache) Remove(types.Tx)    {}

//--------------------------------------------------------------------------------

// TxKey is the fixed length array hash used as the key in maps.
func TxKey(tx types.Tx) [TxKeySize]byte {
	return sha256.Sum256(tx)
}

// txID is a hash of the Tx.
func txID(tx []byte) []byte {
	return types.Tx(tx).Hash()
}
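// Illustrative sketch (not part of the upstream file): how a reactor's
// broadcast goroutine typically consumes the clist exposed by TxsFront and
// TxsWaitChan above. The clist.CElement methods used (Value, NextWait) are the
// ones referenced in the TxsFront comment; error handling and peer plumbing
// are omitted.
//
//	var next *clist.CElement
//	for {
//		if next == nil {
//			<-mem.TxsWaitChan() // blocks until the mempool is non-empty
//			if next = mem.TxsFront(); next == nil {
//				continue
//			}
//		}
//		memTx := next.Value.(*mempoolTx)
//		// ... send memTx.tx to the peer ...
//		next = next.NextWait() // blocks until a next element exists or this one is removed
//	}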