github.com/badrootd/celestia-core@v0.0.0-20240305091328-aa4207a4b25d/mempool/v0/clist_mempool.go

package v0

import (
	"bytes"
	"errors"
	"sync"
	"sync/atomic"

	abci "github.com/badrootd/celestia-core/abci/types"
	"github.com/badrootd/celestia-core/config"
	"github.com/badrootd/celestia-core/libs/clist"
	"github.com/badrootd/celestia-core/libs/log"
	cmtmath "github.com/badrootd/celestia-core/libs/math"
	cmtsync "github.com/badrootd/celestia-core/libs/sync"
	"github.com/badrootd/celestia-core/mempool"
	"github.com/badrootd/celestia-core/p2p"
	"github.com/badrootd/celestia-core/proxy"
	"github.com/badrootd/celestia-core/types"
)

// CListMempool is an ordered in-memory pool for transactions before they are
// proposed in a consensus round. Transaction validity is checked using the
// CheckTx abci message before the transaction is added to the pool. The
// mempool uses a concurrent list structure for storing transactions that can
// be efficiently accessed by multiple concurrent readers.
type CListMempool struct {
	// Atomic integers
	height   int64 // the last block Update()'d to
	txsBytes int64 // total size of mempool, in bytes

	// notify listeners (ie. consensus) when txs are available
	notifiedTxsAvailable bool
	txsAvailable         chan struct{} // fires once for each height, when the mempool is not empty

	config *config.MempoolConfig

	// Exclusive mutex for Update method to prevent concurrent execution of
	// CheckTx or ReapMaxBytesMaxGas(ReapMaxTxs) methods.
	updateMtx cmtsync.RWMutex
	preCheck  mempool.PreCheckFunc
	postCheck mempool.PostCheckFunc

	txs          *clist.CList // concurrent linked-list of good txs
	proxyAppConn proxy.AppConnMempool

	// Track whether we're rechecking txs.
	// These are not protected by a mutex and are expected to be mutated in
	// serial (ie. by abci responses which are called in serial).
	recheckCursor *clist.CElement // next expected response
	recheckEnd    *clist.CElement // re-checking stops here

	// Map for quick access to txs to record sender in CheckTx.
	// txsMap: txKey -> CElement
	txsMap sync.Map

	// Keep a cache of already-seen txs.
	// This reduces the pressure on the proxyApp.
	cache mempool.TxCache

	logger  log.Logger
	metrics *mempool.Metrics
}

var _ mempool.Mempool = &CListMempool{}

// CListMempoolOption sets an optional parameter on the mempool.
type CListMempoolOption func(*CListMempool)

// NewCListMempool returns a new mempool with the given configuration and
// connection to an application.
func NewCListMempool(
	cfg *config.MempoolConfig,
	proxyAppConn proxy.AppConnMempool,
	height int64,
	options ...CListMempoolOption,
) *CListMempool {
	mp := &CListMempool{
		config:        cfg,
		proxyAppConn:  proxyAppConn,
		txs:           clist.New(),
		height:        height,
		recheckCursor: nil,
		recheckEnd:    nil,
		logger:        log.NewNopLogger(),
		metrics:       mempool.NopMetrics(),
	}

	if cfg.CacheSize > 0 {
		mp.cache = mempool.NewLRUTxCache(cfg.CacheSize)
	} else {
		mp.cache = mempool.NopTxCache{}
	}

	proxyAppConn.SetResponseCallback(mp.globalCb)

	for _, option := range options {
		option(mp)
	}

	return mp
}

// NOTE: not thread safe - should only be called once, on startup
func (mem *CListMempool) EnableTxsAvailable() {
	mem.txsAvailable = make(chan struct{}, 1)
}
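// exampleNewMempool is an illustrative sketch (not part of the upstream file)
// of how a node might wire up a CListMempool. The config, proxy connection,
// logger, and starting height are assumed to come from the caller's own node
// setup; the option values are examples only.
func exampleNewMempool(
	cfg *config.MempoolConfig,
	appConn proxy.AppConnMempool,
	lastBlockHeight int64,
	logger log.Logger,
) *CListMempool {
	mp := NewCListMempool(
		cfg,
		appConn,
		lastBlockHeight,
		WithMetrics(mempool.NopMetrics()),
		// A trivial pre-check that accepts everything; real nodes usually
		// install a size-based filter here.
		WithPreCheck(func(tx types.Tx) error { return nil }),
	)
	mp.SetLogger(logger)
	mp.EnableTxsAvailable() // opt in to TxsAvailable() notifications before serving traffic
	return mp
}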
// SetLogger sets the Logger.
func (mem *CListMempool) SetLogger(l log.Logger) {
	mem.logger = l
}

// WithPreCheck sets a filter for the mempool to reject a tx if f(tx) returns
// false. This is run before CheckTx. Only applies to the first created block.
// After that, Update overwrites the existing value.
func WithPreCheck(f mempool.PreCheckFunc) CListMempoolOption {
	return func(mem *CListMempool) { mem.preCheck = f }
}

// WithPostCheck sets a filter for the mempool to reject a tx if f(tx) returns
// false. This is run after CheckTx. Only applies to the first created block.
// After that, Update overwrites the existing value.
func WithPostCheck(f mempool.PostCheckFunc) CListMempoolOption {
	return func(mem *CListMempool) { mem.postCheck = f }
}

// WithMetrics sets the metrics.
func WithMetrics(metrics *mempool.Metrics) CListMempoolOption {
	return func(mem *CListMempool) { mem.metrics = metrics }
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) Lock() {
	mem.updateMtx.Lock()
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) Unlock() {
	mem.updateMtx.Unlock()
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) Size() int {
	return mem.txs.Len()
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) SizeBytes() int64 {
	return atomic.LoadInt64(&mem.txsBytes)
}

// Lock() must be held by the caller during execution.
func (mem *CListMempool) FlushAppConn() error {
	return mem.proxyAppConn.FlushSync()
}

// XXX: Unsafe! Calling Flush may leave mempool in inconsistent state.
func (mem *CListMempool) Flush() {
	mem.updateMtx.RLock()
	defer mem.updateMtx.RUnlock()

	_ = atomic.SwapInt64(&mem.txsBytes, 0)
	mem.cache.Reset()

	for e := mem.txs.Front(); e != nil; e = e.Next() {
		mem.txs.Remove(e)
		e.DetachPrev()
	}

	mem.txsMap.Range(func(key, _ interface{}) bool {
		mem.txsMap.Delete(key)
		return true
	})
}

// TxsFront returns the first transaction in the ordered list for peer
// goroutines to call .NextWait() on.
// FIXME: leaking implementation details!
//
// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) TxsFront() *clist.CElement {
	return mem.txs.Front()
}

// TxsWaitChan returns a channel to wait on transactions. It will be closed
// once the mempool is not empty (ie. the internal `mem.txs` has at least one
// element).
//
// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) TxsWaitChan() <-chan struct{} {
	return mem.txs.WaitChan()
}
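// exampleBroadcastLoop is an illustrative sketch (not part of the upstream
// file) of how a peer goroutine, such as the mempool reactor, typically
// consumes the list exposed by TxsWaitChan/TxsFront. The quit channel and
// send callback are hypothetical stand-ins for the reactor's own plumbing.
func exampleBroadcastLoop(mem *CListMempool, quit <-chan struct{}, send func(types.Tx) bool) {
	var next *clist.CElement
	for {
		if next == nil {
			select {
			case <-mem.TxsWaitChan(): // closed once the mempool is non-empty
				// If the element we were tracking was removed, start over from
				// the front (per-peer send tracking avoids duplicates upstream).
				if next = mem.TxsFront(); next == nil {
					continue
				}
			case <-quit:
				return
			}
		}

		memTx := next.Value.(*mempoolTx)
		if !send(memTx.tx) {
			return
		}

		select {
		case <-next.NextWaitChan(): // fires once a next element exists (or this one is removed)
			next = next.Next()
		case <-quit:
			return
		}
	}
}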
// It blocks if we're waiting on Update() or Reap().
// cb: A callback from the CheckTx command.
//
// It gets called from another goroutine.
//
// CONTRACT: Either cb will get called, or err returned.
//
// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) CheckTx(
	tx types.Tx,
	cb func(*abci.Response),
	txInfo mempool.TxInfo,
) error {
	mem.updateMtx.RLock()
	// use defer to unlock mutex because application (*local client*) might panic
	defer mem.updateMtx.RUnlock()

	txSize := len(tx)

	if err := mem.isFull(txSize); err != nil {
		return err
	}

	if txSize > mem.config.MaxTxBytes {
		return mempool.ErrTxTooLarge{
			Max:    mem.config.MaxTxBytes,
			Actual: txSize,
		}
	}

	if mem.preCheck != nil {
		if err := mem.preCheck(tx); err != nil {
			return mempool.ErrPreCheck{
				Reason: err,
			}
		}
	}

	// NOTE: proxyAppConn may error if tx buffer is full
	if err := mem.proxyAppConn.Error(); err != nil {
		return err
	}

	if !mem.cache.Push(tx) { // if the transaction already exists in the cache
		// Record a new sender for a tx we've already seen.
		// Note it's possible a tx is still in the cache but no longer in the mempool
		// (eg. after committing a block, txs are removed from mempool but not cache),
		// so we only record the sender for txs still in the mempool.
		if e, ok := mem.txsMap.Load(tx.Key()); ok {
			mem.metrics.AlreadySeenTxs.Add(1)
			memTx := e.(*clist.CElement).Value.(*mempoolTx)
			memTx.senders.LoadOrStore(txInfo.SenderID, true)
			// TODO: consider punishing peer for dups,
			// it's non-trivial since invalid txs can become valid,
			// but they can spam the same tx with little cost to them atm.
		}
		return mempool.ErrTxInCache
	}

	reqRes := mem.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{Tx: tx})
	reqRes.SetCallback(mem.reqResCb(tx, txInfo.SenderID, txInfo.SenderP2PID, cb))

	return nil
}
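// exampleCheckTx is an illustrative sketch (not part of the upstream file) of
// an RPC-style caller submitting a tx and waiting for the CheckTx result via
// the callback, as described in the CONTRACT above. The empty mempool.TxInfo
// marks the tx as locally submitted rather than received from a peer.
func exampleCheckTx(mem *CListMempool, tx types.Tx) (*abci.ResponseCheckTx, error) {
	resCh := make(chan *abci.Response, 1)
	err := mem.CheckTx(tx, func(res *abci.Response) { resCh <- res }, mempool.TxInfo{})
	if err != nil {
		// the tx was rejected before reaching the app (full mempool, too large,
		// failed pre-check, or already in the cache)
		return nil, err
	}
	res := <-resCh
	return res.GetCheckTx(), nil
}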
// Global callback that will be called after every ABCI response.
// Having a single global callback avoids needing to set a callback for each request.
// However, processing the checkTx response requires the peerID (so we can track which txs we heard from whom),
// and peerID is not included in the ABCI request, so we have to set request-specific callbacks that
// include this information. If we're not in the midst of a recheck, this function will just return,
// so the request-specific callback can do the work.
//
// When rechecking, we don't need the peerID, so the recheck callback happens
// here.
func (mem *CListMempool) globalCb(req *abci.Request, res *abci.Response) {
	if mem.recheckCursor == nil {
		return
	}

	mem.metrics.RecheckTimes.Add(1)
	mem.resCbRecheck(req, res)

	// update metrics
	mem.metrics.Size.Set(float64(mem.Size()))
	mem.metrics.SizeBytes.Set(float64(mem.SizeBytes()))
}

// Request-specific callback that should be set on individual reqRes objects
// to incorporate local information when processing the response.
// This allows us to track the peer that sent us this tx, so we can avoid sending it back to them.
// NOTE: alternatively, we could include this information in the ABCI request itself.
//
// External callers of CheckTx, like the RPC, can also pass an externalCb through here that is called
// when all other response processing is complete.
//
// Used in CheckTx to record the PeerID who sent us the tx.
func (mem *CListMempool) reqResCb(
	tx []byte,
	peerID uint16,
	peerP2PID p2p.ID,
	externalCb func(*abci.Response),
) func(res *abci.Response) {
	return func(res *abci.Response) {
		if mem.recheckCursor != nil {
			// this should never happen
			panic("recheck cursor is not nil in reqResCb")
		}

		mem.resCbFirstTime(tx, peerID, peerP2PID, res)

		// update metrics
		mem.metrics.Size.Set(float64(mem.Size()))
		mem.metrics.SizeBytes.Set(float64(mem.SizeBytes()))

		// passed in by the caller of CheckTx, eg. the RPC
		if externalCb != nil {
			externalCb(res)
		}
	}
}

// Called from:
//   - resCbFirstTime (lock not held) if tx is valid
func (mem *CListMempool) addTx(memTx *mempoolTx) {
	e := mem.txs.PushBack(memTx)
	mem.txsMap.Store(memTx.tx.Key(), e)
	atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx)))
	mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx)))
}

// Called from:
//   - Update (lock held) if tx was committed
//   - resCbRecheck (lock not held) if tx was invalidated
func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) {
	mem.txs.Remove(elem)
	elem.DetachPrev()
	mem.txsMap.Delete(tx.Key())
	if memtx, ok := elem.Value.(*mempoolTx); ok {
		tx = memtx.tx
	}
	atomic.AddInt64(&mem.txsBytes, int64(-len(tx)))

	if removeFromCache {
		mem.cache.Remove(tx)
	}
}

// RemoveTxByKey removes a transaction from the mempool by its TxKey index.
func (mem *CListMempool) RemoveTxByKey(txKey types.TxKey) error {
	if e, ok := mem.txsMap.Load(txKey); ok {
		memTx := e.(*clist.CElement).Value.(*mempoolTx)
		if memTx != nil {
			mem.removeTx(memTx.tx, e.(*clist.CElement), false)
			return nil
		}
		return errors.New("transaction not found")
	}
	return errors.New("invalid transaction found")
}

func (mem *CListMempool) isFull(txSize int) error {
	var (
		memSize  = mem.Size()
		txsBytes = mem.SizeBytes()
	)

	if memSize >= mem.config.Size || int64(txSize)+txsBytes > mem.config.MaxTxsBytes {
		return mempool.ErrMempoolIsFull{
			NumTxs:      memSize,
			MaxTxs:      mem.config.Size,
			TxsBytes:    txsBytes,
			MaxTxsBytes: mem.config.MaxTxsBytes,
		}
	}

	return nil
}
// callback, which is called after the app checked the tx for the first time.
//
// The case where the app checks the tx for the second and subsequent times is
// handled by the resCbRecheck callback.
func (mem *CListMempool) resCbFirstTime(
	tx []byte,
	peerID uint16,
	peerP2PID p2p.ID,
	res *abci.Response,
) {
	switch r := res.Value.(type) {
	case *abci.Response_CheckTx:
		var postCheckErr error
		if mem.postCheck != nil {
			postCheckErr = mem.postCheck(tx, r.CheckTx)
		}
		if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil {
			// Check mempool isn't full again to reduce the chance of exceeding the
			// limits.
			if err := mem.isFull(len(tx)); err != nil {
				// remove from cache (mempool might have space later)
				mem.cache.Remove(tx)
				mem.logger.Error(err.Error())
				return
			}

			memTx := &mempoolTx{
				height:    mem.height,
				gasWanted: r.CheckTx.GasWanted,
				tx:        tx,
			}
			memTx.senders.Store(peerID, true)
			mem.addTx(memTx)
			mem.logger.Debug(
				"added good transaction",
				"tx", types.Tx(tx).Hash(),
				"res", r,
				"height", memTx.height,
				"total", mem.Size(),
			)
			mem.notifyTxsAvailable()
		} else {
			// ignore bad transaction
			mem.logger.Debug(
				"rejected bad transaction",
				"tx", types.Tx(tx).Hash(),
				"peerID", peerP2PID,
				"res", r,
				"err", postCheckErr,
			)
			mem.metrics.FailedTxs.Add(1)

			if !mem.config.KeepInvalidTxsInCache {
				// remove from cache (it might be good later)
				mem.cache.Remove(tx)
			}
		}

	default:
		// ignore other messages
	}
}

// callback, which is called after the app rechecked the tx.
//
// The case where the app checks the tx for the first time is handled by the
// resCbFirstTime callback.
func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) {
	switch r := res.Value.(type) {
	case *abci.Response_CheckTx:
		tx := req.GetCheckTx().Tx
		memTx := mem.recheckCursor.Value.(*mempoolTx)

		// Search through the remaining list of txs to recheck for a transaction that matches
		// the one we received from the ABCI application.
		for {
			if bytes.Equal(tx, memTx.tx) {
				// We've found a tx in the recheck list that matches the tx that we
				// received from the ABCI application.
				// Break, and use this transaction for further checks.
				break
			}

			mem.logger.Error(
				"re-CheckTx transaction mismatch",
				"got", types.Tx(tx),
				"expected", memTx.tx,
			)

			if mem.recheckCursor == mem.recheckEnd {
				// we reached the end of the recheckTx list without finding a tx
				// matching the one we received from the ABCI application.
				// Return without processing any tx.
				mem.recheckCursor = nil
				return
			}

			mem.recheckCursor = mem.recheckCursor.Next()
			memTx = mem.recheckCursor.Value.(*mempoolTx)
		}

		var postCheckErr error
		if mem.postCheck != nil {
			postCheckErr = mem.postCheck(tx, r.CheckTx)
		}

		if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil {
			// Good, nothing to do.
		} else {
			// Tx became invalidated due to newly committed block.
			mem.logger.Debug("tx is no longer valid", "tx", types.Tx(tx).Hash(), "res", r, "err", postCheckErr)
			// NOTE: we may remove the tx from the cache because it might be good later
			mem.removeTx(tx, mem.recheckCursor, !mem.config.KeepInvalidTxsInCache)
		}
		if mem.recheckCursor == mem.recheckEnd {
			mem.recheckCursor = nil
		} else {
			mem.recheckCursor = mem.recheckCursor.Next()
		}
		if mem.recheckCursor == nil {
			// Done!
			mem.logger.Debug("done rechecking txs")

			// in case the recheck removed all txs
			if mem.Size() > 0 {
				mem.notifyTxsAvailable()
			}
		}
	default:
		// ignore other messages
	}
}
// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) TxsAvailable() <-chan struct{} {
	return mem.txsAvailable
}

func (mem *CListMempool) notifyTxsAvailable() {
	if mem.Size() == 0 {
		panic("notified txs available but mempool is empty!")
	}
	if mem.txsAvailable != nil && !mem.notifiedTxsAvailable {
		// channel cap is 1, so this will send once
		mem.notifiedTxsAvailable = true
		select {
		case mem.txsAvailable <- struct{}{}:
		default:
		}
	}
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs {
	mem.updateMtx.RLock()
	defer mem.updateMtx.RUnlock()

	var (
		totalGas    int64
		runningSize int64
	)

	// TODO: we will get a performance boost if we have a good estimate of avg
	// size per tx, and set the initial capacity based off of that.
	// txs := make([]types.Tx, 0, cmtmath.MinInt(mem.txs.Len(), max/mem.avgTxSize))
	txs := make([]types.Tx, 0, mem.txs.Len())
	for e := mem.txs.Front(); e != nil; e = e.Next() {
		memTx := e.Value.(*mempoolTx)

		txs = append(txs, memTx.tx)

		dataSize := types.ComputeProtoSizeForTxs([]types.Tx{memTx.tx})

		// Check total size requirement
		if maxBytes > -1 && runningSize+dataSize > maxBytes {
			return txs[:len(txs)-1]
		}

		runningSize += dataSize

		// Check total gas requirement.
		// If maxGas is negative, skip this check.
		// Since newTotalGas < maxGas, which
		// must be non-negative, it follows that this won't overflow.
		newTotalGas := totalGas + memTx.gasWanted
		if maxGas > -1 && newTotalGas > maxGas {
			return txs[:len(txs)-1]
		}
		totalGas = newTotalGas
	}
	return txs
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) ReapMaxTxs(max int) types.Txs {
	mem.updateMtx.RLock()
	defer mem.updateMtx.RUnlock()

	if max < 0 {
		max = mem.txs.Len()
	}

	txs := make([]types.Tx, 0, cmtmath.MinInt(mem.txs.Len(), max))
	for e := mem.txs.Front(); e != nil && len(txs) <= max; e = e.Next() {
		memTx := e.Value.(*mempoolTx)
		txs = append(txs, memTx.tx)
	}
	return txs
}
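// exampleProposeLoop is an illustrative sketch (not part of the upstream file)
// of a proposer-style caller that waits for the "txs available" signal and then
// reaps a block's worth of transactions. maxBlockBytes and maxBlockGas are
// assumed to come from the chain's consensus parameters; pass -1 to disable
// either limit. Requires EnableTxsAvailable() to have been called at startup.
func exampleProposeLoop(mem *CListMempool, maxBlockBytes, maxBlockGas int64, quit <-chan struct{}) {
	for {
		select {
		case <-mem.TxsAvailable(): // fires at most once per height
			// Reap only takes the read side of updateMtx, so it can run
			// concurrently with incoming CheckTx calls.
			txs := mem.ReapMaxBytesMaxGas(maxBlockBytes, maxBlockGas)
			_ = txs // hand txs to block construction here
		case <-quit:
			return
		}
	}
}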
// Lock() must be held by the caller during execution.
func (mem *CListMempool) Update(
	height int64,
	txs types.Txs,
	deliverTxResponses []*abci.ResponseDeliverTx,
	preCheck mempool.PreCheckFunc,
	postCheck mempool.PostCheckFunc,
) error {
	// Set height
	mem.height = height
	mem.notifiedTxsAvailable = false

	if preCheck != nil {
		mem.preCheck = preCheck
	}
	if postCheck != nil {
		mem.postCheck = postCheck
	}

	mem.metrics.SuccessfulTxs.Add(float64(len(txs)))
	for i, tx := range txs {
		if deliverTxResponses[i].Code == abci.CodeTypeOK {
			// Add valid committed tx to the cache (if missing).
			_ = mem.cache.Push(tx)
		} else if !mem.config.KeepInvalidTxsInCache {
			// Allow invalid transactions to be resubmitted.
			mem.cache.Remove(tx)
		}

		// Remove committed tx from the mempool.
		//
		// Note an evil proposer can drop valid txs!
		// Mempool before:
		//   100 -> 101 -> 102
		// Block, proposed by an evil proposer:
		//   101 -> 102
		// Mempool after:
		//   100
		// https://github.com/cometbft/cometbft/issues/3322.
		if e, ok := mem.txsMap.Load(tx.Key()); ok {
			mem.removeTx(tx, e.(*clist.CElement), false)
		}
	}

	// Either recheck non-committed txs to see if they became invalid
	// or just notify that there are some txs left.
	if mem.Size() > 0 {
		if mem.config.Recheck {
			mem.logger.Debug("recheck txs", "numtxs", mem.Size(), "height", height)
			mem.recheckTxs()
			// At this point, mem.txs are being rechecked.
			// mem.recheckCursor re-scans mem.txs and possibly removes some txs.
			// Before mem.Reap(), we should wait for mem.recheckCursor to be nil.
		} else {
			mem.notifyTxsAvailable()
		}
	}

	// Update metrics
	mem.metrics.Size.Set(float64(mem.Size()))
	mem.metrics.SizeBytes.Set(float64(mem.SizeBytes()))

	return nil
}

func (mem *CListMempool) recheckTxs() {
	if mem.Size() == 0 {
		panic("recheckTxs is called, but the mempool is empty")
	}

	mem.recheckCursor = mem.txs.Front()
	mem.recheckEnd = mem.txs.Back()

	// Push txs to proxyAppConn
	// NOTE: globalCb may be called concurrently.
	for e := mem.txs.Front(); e != nil; e = e.Next() {
		memTx := e.Value.(*mempoolTx)
		mem.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{
			Tx:   memTx.tx,
			Type: abci.CheckTxType_Recheck,
		})
	}

	mem.proxyAppConn.FlushAsync()
}

//--------------------------------------------------------------------------------

// mempoolTx is a transaction that successfully ran
type mempoolTx struct {
	height    int64    // height that this tx had been validated in
	gasWanted int64    // amount of gas this tx states it will require
	tx        types.Tx

	// ids of peers who've sent us this tx (as a map for quick lookups).
	// senders: PeerID -> bool
	senders sync.Map
}

// Height returns the height for this transaction
func (memTx *mempoolTx) Height() int64 {
	return atomic.LoadInt64(&memTx.height)
}
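// exampleUpdateAfterCommit is an illustrative sketch (not part of the upstream
// file) of the locking contract around Update, as a consensus-side caller would
// follow it after committing a block. blockTxs and deliverTxResponses are
// assumed to come from the block that was just executed.
func exampleUpdateAfterCommit(
	mem *CListMempool,
	height int64,
	blockTxs types.Txs,
	deliverTxResponses []*abci.ResponseDeliverTx,
) error {
	mem.Lock() // blocks new CheckTx and Reap calls while the mempool is updated
	defer mem.Unlock()

	// Flush any in-flight CheckTx requests so the app and mempool agree on
	// state before committed txs are removed and the rest (optionally) rechecked.
	if err := mem.FlushAppConn(); err != nil {
		return err
	}
	return mem.Update(height, blockTxs, deliverTxResponses, nil, nil)
}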