// bitbucket.org/number571/tendermint@v0.8.14/internal/mempool/v0/clist_mempool.go

package v0

import (
	"bytes"
	"context"
	"fmt"
	"sync"
	"sync/atomic"

	abci "bitbucket.org/number571/tendermint/abci/types"
	cfg "bitbucket.org/number571/tendermint/config"
	"bitbucket.org/number571/tendermint/internal/libs/clist"
	tmsync "bitbucket.org/number571/tendermint/internal/libs/sync"
	"bitbucket.org/number571/tendermint/internal/mempool"
	"bitbucket.org/number571/tendermint/libs/log"
	tmmath "bitbucket.org/number571/tendermint/libs/math"
	pubmempool "bitbucket.org/number571/tendermint/pkg/mempool"
	"bitbucket.org/number571/tendermint/proxy"
	"bitbucket.org/number571/tendermint/types"
)

// CListMempool is an ordered in-memory pool for transactions before they are
// proposed in a consensus round. Transaction validity is checked using the
// CheckTx abci message before the transaction is added to the pool. The
// mempool uses a concurrent list structure for storing transactions that can
// be efficiently accessed by multiple concurrent readers.
type CListMempool struct {
	// Atomic integers
	height   int64 // the last block Update()'d to
	txsBytes int64 // total size of mempool, in bytes

	// notify listeners (ie. consensus) when txs are available
	notifiedTxsAvailable bool
	txsAvailable         chan struct{} // fires once for each height, when the mempool is not empty

	config *cfg.MempoolConfig

	// Exclusive mutex for Update method to prevent concurrent execution of
	// CheckTx or ReapMaxBytesMaxGas(ReapMaxTxs) methods.
	updateMtx tmsync.RWMutex
	preCheck  mempool.PreCheckFunc
	postCheck mempool.PostCheckFunc

	txs          *clist.CList // concurrent linked-list of good txs
	proxyAppConn proxy.AppConnMempool

	// Track whether we're rechecking txs.
	// These are not protected by a mutex and are expected to be mutated in
	// serial (ie. by abci responses which are called in serial).
	recheckCursor *clist.CElement // next expected response
	recheckEnd    *clist.CElement // re-checking stops here

	// Map for quick access to txs to record sender in CheckTx.
	// txsMap: txKey -> CElement
	txsMap sync.Map

	// Keep a cache of already-seen txs.
	// This reduces the pressure on the proxyApp.
	cache mempool.TxCache

	logger  log.Logger
	metrics *mempool.Metrics
}

var _ mempool.Mempool = &CListMempool{}

// CListMempoolOption sets an optional parameter on the mempool.
type CListMempoolOption func(*CListMempool)
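// Illustrative sketch (not part of the original source): the typical lifecycle
// of this mempool from a caller's point of view. appConn is a hypothetical
// proxy.AppConnMempool, and DefaultMempoolConfig is assumed to be the config
// package's stock constructor; error handling is elided.
//
//	mp := NewCListMempool(cfg.DefaultMempoolConfig(), appConn, 0)
//	mp.SetLogger(log.NewNopLogger())
//	_ = mp.CheckTx(context.Background(), types.Tx("k=v"), nil, mempool.TxInfo{})
//	proposal := mp.ReapMaxBytesMaxGas(-1, -1) // gather txs for the next block
//	_ = proposal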
// NewCListMempool returns a new mempool with the given configuration and
// connection to an application.
func NewCListMempool(
	config *cfg.MempoolConfig,
	proxyAppConn proxy.AppConnMempool,
	height int64,
	options ...CListMempoolOption,
) *CListMempool {

	mp := &CListMempool{
		config:        config,
		proxyAppConn:  proxyAppConn,
		txs:           clist.New(),
		height:        height,
		recheckCursor: nil,
		recheckEnd:    nil,
		logger:        log.NewNopLogger(),
		metrics:       mempool.NopMetrics(),
	}

	if config.CacheSize > 0 {
		mp.cache = mempool.NewLRUTxCache(config.CacheSize)
	} else {
		mp.cache = mempool.NopTxCache{}
	}

	proxyAppConn.SetResponseCallback(mp.globalCb)

	for _, option := range options {
		option(mp)
	}

	return mp
}

// NOTE: not thread safe - should only be called once, on startup
func (mem *CListMempool) EnableTxsAvailable() {
	mem.txsAvailable = make(chan struct{}, 1)
}

// SetLogger sets the Logger.
func (mem *CListMempool) SetLogger(l log.Logger) {
	mem.logger = l
}

// WithPreCheck sets a filter for the mempool to reject a tx if f(tx) returns
// false. This is run before CheckTx. Only applies to the first created block.
// After that, Update overwrites the existing value.
func WithPreCheck(f mempool.PreCheckFunc) CListMempoolOption {
	return func(mem *CListMempool) { mem.preCheck = f }
}

// WithPostCheck sets a filter for the mempool to reject a tx if f(tx) returns
// false. This is run after CheckTx. Only applies to the first created block.
// After that, Update overwrites the existing value.
func WithPostCheck(f mempool.PostCheckFunc) CListMempoolOption {
	return func(mem *CListMempool) { mem.postCheck = f }
}

// WithMetrics sets the metrics.
func WithMetrics(metrics *mempool.Metrics) CListMempoolOption {
	return func(mem *CListMempool) { mem.metrics = metrics }
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) Lock() {
	mem.updateMtx.Lock()
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) Unlock() {
	mem.updateMtx.Unlock()
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) Size() int {
	return mem.txs.Len()
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) SizeBytes() int64 {
	return atomic.LoadInt64(&mem.txsBytes)
}

// Lock() must be held by the caller during execution.
func (mem *CListMempool) FlushAppConn() error {
	return mem.proxyAppConn.FlushSync(context.Background())
}

// XXX: Unsafe! Calling Flush may leave mempool in inconsistent state.
func (mem *CListMempool) Flush() {
	mem.updateMtx.RLock()
	defer mem.updateMtx.RUnlock()

	_ = atomic.SwapInt64(&mem.txsBytes, 0)
	mem.cache.Reset()

	for e := mem.txs.Front(); e != nil; e = e.Next() {
		mem.txs.Remove(e)
		e.DetachPrev()
	}

	mem.txsMap.Range(func(key, _ interface{}) bool {
		mem.txsMap.Delete(key)
		return true
	})
}

// TxsFront returns the first transaction in the ordered list for peer
// goroutines to call .NextWait() on.
// FIXME: leaking implementation details!
//
// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) TxsFront() *clist.CElement {
	return mem.txs.Front()
}
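// Illustrative sketch (not part of the original source): wiring the functional
// options above into the constructor. PreCheckMaxBytes is assumed to be the
// stock helper exposed by the internal mempool package; appConn is a
// hypothetical proxy.AppConnMempool.
//
//	mp := NewCListMempool(
//		config,
//		appConn,
//		height,
//		WithPreCheck(mempool.PreCheckMaxBytes(1024)),
//		WithMetrics(mempool.NopMetrics()),
//	)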
// TxsWaitChan returns a channel to wait on transactions. It will be closed
// once the mempool is not empty (ie. the internal `mem.txs` has at least one
// element)
//
// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) TxsWaitChan() <-chan struct{} {
	return mem.txs.WaitChan()
}

// It blocks if we're waiting on Update() or Reap().
// cb: A callback from the CheckTx command.
// It gets called from another goroutine.
// CONTRACT: Either cb will get called, or err returned.
//
// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) CheckTx(
	ctx context.Context,
	tx types.Tx,
	cb func(*abci.Response),
	txInfo mempool.TxInfo,
) error {

	mem.updateMtx.RLock()
	// use defer to unlock mutex because application (*local client*) might panic
	defer mem.updateMtx.RUnlock()

	txSize := len(tx)

	if err := mem.isFull(txSize); err != nil {
		return err
	}

	if txSize > mem.config.MaxTxBytes {
		return pubmempool.ErrTxTooLarge{
			Max:    mem.config.MaxTxBytes,
			Actual: txSize,
		}
	}

	if mem.preCheck != nil {
		if err := mem.preCheck(tx); err != nil {
			return pubmempool.ErrPreCheck{
				Reason: err,
			}
		}
	}

	// NOTE: proxyAppConn may error if tx buffer is full
	if err := mem.proxyAppConn.Error(); err != nil {
		return err
	}

	if !mem.cache.Push(tx) { // if the transaction already exists in the cache
		// Record a new sender for a tx we've already seen.
		// Note it's possible a tx is still in the cache but no longer in the mempool
		// (eg. after committing a block, txs are removed from mempool but not cache),
		// so we only record the sender for txs still in the mempool.
		if e, ok := mem.txsMap.Load(mempool.TxKey(tx)); ok {
			memTx := e.(*clist.CElement).Value.(*mempoolTx)
			_, loaded := memTx.senders.LoadOrStore(txInfo.SenderID, true)
			// TODO: consider punishing peer for dups,
			// it's non-trivial since invalid txs can become valid,
			// but they can spam the same tx with little cost to them atm.
			if loaded {
				return pubmempool.ErrTxInCache
			}
		}

		mem.logger.Debug("tx exists already in cache", "tx_hash", tx.Hash())
		return nil
	}

	if ctx == nil {
		ctx = context.Background()
	}

	reqRes, err := mem.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{Tx: tx})
	if err != nil {
		mem.cache.Remove(tx)
		return err
	}
	reqRes.SetCallback(mem.reqResCb(tx, txInfo.SenderID, txInfo.SenderNodeID, cb))

	return nil
}
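// Illustrative sketch (not part of the original source): calling CheckTx with
// an external callback, roughly how an RPC handler might surface the ABCI
// result. The response handling shown is a hedged example, not the RPC's
// actual code; mp, ctx, and tx are assumed to be in scope.
//
//	err := mp.CheckTx(ctx, tx, func(res *abci.Response) {
//		checkTxRes := res.GetCheckTx()
//		if checkTxRes.Code != abci.CodeTypeOK {
//			// the application rejected the tx
//		}
//	}, mempool.TxInfo{SenderID: 0})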
// Global callback that will be called after every ABCI response.
// Having a single global callback avoids needing to set a callback for each request.
// However, processing the checkTx response requires the peerID (so we can track which txs we heard from whom),
// and peerID is not included in the ABCI request, so we have to set request-specific callbacks that
// include this information. If we're not in the midst of a recheck, this function will just return,
// so the request-specific callback can do the work.
//
// When rechecking, we don't need the peerID, so the recheck callback happens
// here.
func (mem *CListMempool) globalCb(req *abci.Request, res *abci.Response) {
	if mem.recheckCursor == nil {
		return
	}

	mem.metrics.RecheckTimes.Add(1)
	mem.resCbRecheck(req, res)

	// update metrics
	mem.metrics.Size.Set(float64(mem.Size()))
}

// Request-specific callback that should be set on individual reqRes objects
// to incorporate local information when processing the response.
// This allows us to track the peer that sent us this tx, so we can avoid sending it back to them.
// NOTE: alternatively, we could include this information in the ABCI request itself.
//
// External callers of CheckTx, like the RPC, can also pass an externalCb through here that is called
// when all other response processing is complete.
//
// Used in CheckTx to record the PeerID who sent us the tx.
func (mem *CListMempool) reqResCb(
	tx []byte,
	peerID uint16,
	peerP2PID types.NodeID,
	externalCb func(*abci.Response),
) func(res *abci.Response) {
	return func(res *abci.Response) {
		if mem.recheckCursor != nil {
			// this should never happen
			panic("recheck cursor is not nil in reqResCb")
		}

		mem.resCbFirstTime(tx, peerID, peerP2PID, res)

		// update metrics
		mem.metrics.Size.Set(float64(mem.Size()))

		// passed in by the caller of CheckTx, eg. the RPC
		if externalCb != nil {
			externalCb(res)
		}
	}
}

// Called from:
//   - resCbFirstTime (lock not held) if tx is valid
func (mem *CListMempool) addTx(memTx *mempoolTx) {
	e := mem.txs.PushBack(memTx)
	mem.txsMap.Store(mempool.TxKey(memTx.tx), e)
	atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx)))
	mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx)))
}

// Called from:
//   - Update (lock held) if tx was committed
//   - resCbRecheck (lock not held) if tx was invalidated
func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) {
	mem.txs.Remove(elem)
	elem.DetachPrev()
	mem.txsMap.Delete(mempool.TxKey(tx))
	atomic.AddInt64(&mem.txsBytes, int64(-len(tx)))

	if removeFromCache {
		mem.cache.Remove(tx)
	}
}

// RemoveTxByKey removes a transaction from the mempool by its TxKey index.
func (mem *CListMempool) RemoveTxByKey(txKey [mempool.TxKeySize]byte, removeFromCache bool) {
	if e, ok := mem.txsMap.Load(txKey); ok {
		memTx := e.(*clist.CElement).Value.(*mempoolTx)
		if memTx != nil {
			mem.removeTx(memTx.tx, e.(*clist.CElement), removeFromCache)
		}
	}
}

func (mem *CListMempool) isFull(txSize int) error {
	var (
		memSize  = mem.Size()
		txsBytes = mem.SizeBytes()
	)

	if memSize >= mem.config.Size || int64(txSize)+txsBytes > mem.config.MaxTxsBytes {
		return pubmempool.ErrMempoolIsFull{
			NumTxs:      memSize,
			MaxTxs:      mem.config.Size,
			TxsBytes:    txsBytes,
			MaxTxsBytes: mem.config.MaxTxsBytes,
		}
	}

	return nil
}
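// Illustrative sketch (not part of the original source): evicting a known tx
// via RemoveTxByKey, e.g. after an application-level replacement. It assumes
// mempool.TxKey is the same key function used for txsMap above, and that mp
// and tx are in scope.
//
//	key := mempool.TxKey(tx)
//	mp.RemoveTxByKey(key, true) // also drop it from the cache so it can be resubmitted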
// callback, which is called after the app checked the tx for the first time.
//
// The case where the app checks the tx for the second and subsequent times is
// handled by the resCbRecheck callback.
func (mem *CListMempool) resCbFirstTime(
	tx []byte,
	peerID uint16,
	peerP2PID types.NodeID,
	res *abci.Response,
) {
	switch r := res.Value.(type) {
	case *abci.Response_CheckTx:
		var postCheckErr error
		if mem.postCheck != nil {
			postCheckErr = mem.postCheck(tx, r.CheckTx)
		}
		if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil {
			// Check mempool isn't full again to reduce the chance of exceeding the
			// limits.
			if err := mem.isFull(len(tx)); err != nil {
				// remove from cache (mempool might have space later)
				mem.cache.Remove(tx)
				mem.logger.Error(err.Error())
				return
			}

			memTx := &mempoolTx{
				height:    mem.height,
				gasWanted: r.CheckTx.GasWanted,
				tx:        tx,
			}
			memTx.senders.Store(peerID, true)
			mem.addTx(memTx)
			mem.logger.Debug(
				"added good transaction",
				"tx", mempool.TxHashFromBytes(tx),
				"res", r,
				"height", memTx.height,
				"total", mem.Size(),
			)
			mem.notifyTxsAvailable()
		} else {
			// ignore bad transaction
			mem.logger.Debug(
				"rejected bad transaction",
				"tx", mempool.TxHashFromBytes(tx),
				"peerID", peerP2PID,
				"res", r,
				"err", postCheckErr,
			)
			mem.metrics.FailedTxs.Add(1)

			if !mem.config.KeepInvalidTxsInCache {
				// remove from cache (it might be good later)
				mem.cache.Remove(tx)
			}
		}

	default:
		// ignore other messages
	}
}

// callback, which is called after the app rechecked the tx.
//
// The case where the app checks the tx for the first time is handled by the
// resCbFirstTime callback.
func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) {
	switch r := res.Value.(type) {
	case *abci.Response_CheckTx:
		tx := req.GetCheckTx().Tx
		memTx := mem.recheckCursor.Value.(*mempoolTx)
		if !bytes.Equal(tx, memTx.tx) {
			panic(fmt.Sprintf(
				"Unexpected tx response from proxy during recheck\nExpected %X, got %X",
				memTx.tx,
				tx))
		}
		var postCheckErr error
		if mem.postCheck != nil {
			postCheckErr = mem.postCheck(tx, r.CheckTx)
		}
		if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil {
			// Good, nothing to do.
		} else {
			// Tx became invalidated due to newly committed block.
			mem.logger.Debug("tx is no longer valid", "tx", mempool.TxHashFromBytes(tx), "res", r, "err", postCheckErr)
			// NOTE: we remove tx from the cache because it might be good later
			mem.removeTx(tx, mem.recheckCursor, !mem.config.KeepInvalidTxsInCache)
		}
		if mem.recheckCursor == mem.recheckEnd {
			mem.recheckCursor = nil
		} else {
			mem.recheckCursor = mem.recheckCursor.Next()
		}
		if mem.recheckCursor == nil {
			// Done!
			mem.logger.Debug("done rechecking txs")

			// in case the recheck removed all txs
			if mem.Size() > 0 {
				mem.notifyTxsAvailable()
			}
		}
	default:
		// ignore other messages
	}
}
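// Illustrative sketch (not part of the original source): a custom postCheck
// filter of the shape consumed by resCbFirstTime/resCbRecheck above, shown
// only to make the postCheckErr flow concrete. The gas limit value is a
// placeholder.
//
//	postCheck := func(tx types.Tx, res *abci.ResponseCheckTx) error {
//		if res.GasWanted > 100000 {
//			return fmt.Errorf("gas wanted %d exceeds limit", res.GasWanted)
//		}
//		return nil
//	}
//	// install it at construction (WithPostCheck) or via Update.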
// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) TxsAvailable() <-chan struct{} {
	return mem.txsAvailable
}

func (mem *CListMempool) notifyTxsAvailable() {
	if mem.Size() == 0 {
		panic("notified txs available but mempool is empty!")
	}
	if mem.txsAvailable != nil && !mem.notifiedTxsAvailable {
		// channel cap is 1, so this will send once
		mem.notifiedTxsAvailable = true
		select {
		case mem.txsAvailable <- struct{}{}:
		default:
		}
	}
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs {
	mem.updateMtx.RLock()
	defer mem.updateMtx.RUnlock()

	var (
		totalGas    int64
		runningSize int64
	)

	// TODO: we will get a performance boost if we have a good estimate of avg
	// size per tx, and set the initial capacity based off of that.
	// txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max/mem.avgTxSize))
	txs := make([]types.Tx, 0, mem.txs.Len())
	for e := mem.txs.Front(); e != nil; e = e.Next() {
		memTx := e.Value.(*mempoolTx)

		txs = append(txs, memTx.tx)

		dataSize := types.ComputeProtoSizeForTxs([]types.Tx{memTx.tx})

		// Check total size requirement
		if maxBytes > -1 && runningSize+dataSize > maxBytes {
			return txs[:len(txs)-1]
		}

		runningSize += dataSize

		// Check total gas requirement.
		// If maxGas is negative, skip this check.
		// Since newTotalGas < maxGas, which
		// must be non-negative, it follows that this won't overflow.
		newTotalGas := totalGas + memTx.gasWanted
		if maxGas > -1 && newTotalGas > maxGas {
			return txs[:len(txs)-1]
		}
		totalGas = newTotalGas
	}
	return txs
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) ReapMaxTxs(max int) types.Txs {
	mem.updateMtx.RLock()
	defer mem.updateMtx.RUnlock()

	if max < 0 {
		max = mem.txs.Len()
	}

	txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max))
	for e := mem.txs.Front(); e != nil && len(txs) <= max; e = e.Next() {
		memTx := e.Value.(*mempoolTx)
		txs = append(txs, memTx.tx)
	}
	return txs
}
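// Illustrative sketch (not part of the original source): how a consensus-style
// consumer might combine EnableTxsAvailable/TxsAvailable with reaping. The
// maxBytes/maxGas values are placeholders and mp is assumed to be in scope.
//
//	mp.EnableTxsAvailable() // once, at startup
//	go func() {
//		for range mp.TxsAvailable() { // fires once per height when txs exist
//			block := mp.ReapMaxBytesMaxGas(1<<20, 10_000_000)
//			_ = block // hand txs to the proposer...
//		}
//	}()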
// Lock() must be held by the caller during execution.
func (mem *CListMempool) Update(
	height int64,
	txs types.Txs,
	deliverTxResponses []*abci.ResponseDeliverTx,
	preCheck mempool.PreCheckFunc,
	postCheck mempool.PostCheckFunc,
) error {
	// Set height
	mem.height = height
	mem.notifiedTxsAvailable = false

	if preCheck != nil {
		mem.preCheck = preCheck
	}
	if postCheck != nil {
		mem.postCheck = postCheck
	}

	for i, tx := range txs {
		if deliverTxResponses[i].Code == abci.CodeTypeOK {
			// Add valid committed tx to the cache (if missing).
			_ = mem.cache.Push(tx)
		} else if !mem.config.KeepInvalidTxsInCache {
			// Allow invalid transactions to be resubmitted.
			mem.cache.Remove(tx)
		}

		// Remove committed tx from the mempool.
		//
		// Note an evil proposer can drop valid txs!
		// Mempool before:
		//   100 -> 101 -> 102
		// Block, proposed by an evil proposer:
		//   101 -> 102
		// Mempool after:
		//   100
		// https://bitbucket.org/number571/tendermint/issues/3322.
		if e, ok := mem.txsMap.Load(mempool.TxKey(tx)); ok {
			mem.removeTx(tx, e.(*clist.CElement), false)
		}
	}

	// Either recheck non-committed txs to see if they became invalid
	// or just notify that there are some txs left.
	if mem.Size() > 0 {
		if mem.config.Recheck {
			mem.logger.Debug("recheck txs", "numtxs", mem.Size(), "height", height)
			mem.recheckTxs()
			// At this point, mem.txs are being rechecked.
			// mem.recheckCursor re-scans mem.txs and possibly removes some txs.
			// Before mem.Reap(), we should wait for mem.recheckCursor to be nil.
		} else {
			mem.notifyTxsAvailable()
		}
	}

	// Update metrics
	mem.metrics.Size.Set(float64(mem.Size()))

	return nil
}

func (mem *CListMempool) recheckTxs() {
	if mem.Size() == 0 {
		panic("recheckTxs is called, but the mempool is empty")
	}

	mem.recheckCursor = mem.txs.Front()
	mem.recheckEnd = mem.txs.Back()

	ctx := context.Background()

	// Push txs to proxyAppConn
	// NOTE: globalCb may be called concurrently.
	for e := mem.txs.Front(); e != nil; e = e.Next() {
		memTx := e.Value.(*mempoolTx)
		_, err := mem.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{
			Tx:   memTx.tx,
			Type: abci.CheckTxType_Recheck,
		})
		if err != nil {
			// No need to retry, since memTx will be rechecked after the next block.
			mem.logger.Error("Can't check tx", "err", err)
		}
	}

	_, err := mem.proxyAppConn.FlushAsync(ctx)
	if err != nil {
		mem.logger.Error("Can't flush txs", "err", err)
	}
}

//--------------------------------------------------------------------------------

// mempoolTx is a transaction that successfully ran
type mempoolTx struct {
	height    int64 // height that this tx had been validated in
	gasWanted int64 // amount of gas this tx states it will require
	tx        types.Tx

	// ids of peers who've sent us this tx (as a map for quick lookups).
	// senders: PeerID -> bool
	senders sync.Map
}

// Height returns the height for this transaction
func (memTx *mempoolTx) Height() int64 {
	return atomic.LoadInt64(&memTx.height)
}
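// Illustrative sketch (not part of the original source): the caller-side
// locking contract for Update after a block commit, per the "Lock() must be
// held" comments above. committedTxs and deliverTxResponses are hypothetical
// values produced by block execution; error handling is elided.
//
//	mp.Lock()
//	_ = mp.FlushAppConn() // drain in-flight CheckTx responses first
//	_ = mp.Update(height, committedTxs, deliverTxResponses, nil, nil)
//	mp.Unlock()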