github.com/mydexchain/tendermint@v0.0.4/mempool/clist_mempool.go

package mempool

import (
	"bytes"
	"container/list"
	"crypto/sha256"
	"fmt"
	"sync"
	"sync/atomic"

	abci "github.com/mydexchain/tendermint/abci/types"
	cfg "github.com/mydexchain/tendermint/config"
	auto "github.com/mydexchain/tendermint/libs/autofile"
	"github.com/mydexchain/tendermint/libs/clist"
	"github.com/mydexchain/tendermint/libs/log"
	tmmath "github.com/mydexchain/tendermint/libs/math"
	tmos "github.com/mydexchain/tendermint/libs/os"
	tmsync "github.com/mydexchain/tendermint/libs/sync"
	"github.com/mydexchain/tendermint/p2p"
	"github.com/mydexchain/tendermint/proxy"
	"github.com/mydexchain/tendermint/types"
)

// TxKeySize is the size of the transaction key index.
const TxKeySize = sha256.Size

var newline = []byte("\n")

//--------------------------------------------------------------------------------

// CListMempool is an ordered in-memory pool for transactions before they are
// proposed in a consensus round. Transaction validity is checked using the
// CheckTx abci message before the transaction is added to the pool. The
// mempool uses a concurrent list structure for storing transactions that can
// be efficiently accessed by multiple concurrent readers.
type CListMempool struct {
	// Atomic integers
	height   int64 // the last block Update()'d to
	txsBytes int64 // total size of mempool, in bytes

	// notify listeners (ie. consensus) when txs are available
	notifiedTxsAvailable bool
	txsAvailable         chan struct{} // fires once for each height, when the mempool is not empty

	config *cfg.MempoolConfig

	// Exclusive mutex for Update method to prevent concurrent execution of
	// CheckTx or ReapMaxBytesMaxGas(ReapMaxTxs) methods.
	updateMtx tmsync.RWMutex
	preCheck  PreCheckFunc
	postCheck PostCheckFunc

	wal          *auto.AutoFile // a log of mempool txs
	txs          *clist.CList   // concurrent linked-list of good txs
	proxyAppConn proxy.AppConnMempool

	// Track whether we're rechecking txs.
	// These are not protected by a mutex and are expected to be mutated in
	// serial (ie. by abci responses which are called in serial).
	recheckCursor *clist.CElement // next expected response
	recheckEnd    *clist.CElement // re-checking stops here

	// Map for quick access to txs to record sender in CheckTx.
	// txsMap: txKey -> CElement
	txsMap sync.Map

	// Keep a cache of already-seen txs.
	// This reduces the pressure on the proxyApp.
	cache txCache

	logger log.Logger

	metrics *Metrics
}

var _ Mempool = &CListMempool{}

// CListMempoolOption sets an optional parameter on the mempool.
type CListMempoolOption func(*CListMempool)
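// withNopCache is a hypothetical example (not part of the original file)
// showing the pattern the With* option constructors below follow: an option
// is just a closure that mutates the mempool inside NewCListMempool before it
// is returned to the caller.
func withNopCache() CListMempoolOption {
	return func(mem *CListMempool) { mem.cache = nopTxCache{} }
}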
// NewCListMempool returns a new mempool with the given configuration and
// connection to an application.
func NewCListMempool(
	config *cfg.MempoolConfig,
	proxyAppConn proxy.AppConnMempool,
	height int64,
	options ...CListMempoolOption,
) *CListMempool {
	mempool := &CListMempool{
		config:        config,
		proxyAppConn:  proxyAppConn,
		txs:           clist.New(),
		height:        height,
		recheckCursor: nil,
		recheckEnd:    nil,
		logger:        log.NewNopLogger(),
		metrics:       NopMetrics(),
	}
	if config.CacheSize > 0 {
		mempool.cache = newMapTxCache(config.CacheSize)
	} else {
		mempool.cache = nopTxCache{}
	}
	proxyAppConn.SetResponseCallback(mempool.globalCb)
	for _, option := range options {
		option(mempool)
	}
	return mempool
}

// NOTE: not thread safe - should only be called once, on startup
func (mem *CListMempool) EnableTxsAvailable() {
	mem.txsAvailable = make(chan struct{}, 1)
}

// SetLogger sets the Logger.
func (mem *CListMempool) SetLogger(l log.Logger) {
	mem.logger = l
}

// WithPreCheck sets a filter for the mempool to reject a tx if f(tx) returns
// false. This is run before CheckTx. Only applies to the first created block.
// After that, Update overwrites the existing value.
func WithPreCheck(f PreCheckFunc) CListMempoolOption {
	return func(mem *CListMempool) { mem.preCheck = f }
}

// WithPostCheck sets a filter for the mempool to reject a tx if f(tx) returns
// false. This is run after CheckTx. Only applies to the first created block.
// After that, Update overwrites the existing value.
func WithPostCheck(f PostCheckFunc) CListMempoolOption {
	return func(mem *CListMempool) { mem.postCheck = f }
}

// WithMetrics sets the metrics.
func WithMetrics(metrics *Metrics) CListMempoolOption {
	return func(mem *CListMempool) { mem.metrics = metrics }
}

func (mem *CListMempool) InitWAL() error {
	var (
		walDir  = mem.config.WalDir()
		walFile = walDir + "/wal"
	)

	const perm = 0700
	if err := tmos.EnsureDir(walDir, perm); err != nil {
		return err
	}

	af, err := auto.OpenAutoFile(walFile)
	if err != nil {
		return fmt.Errorf("can't open autofile %s: %w", walFile, err)
	}

	mem.wal = af
	return nil
}

func (mem *CListMempool) CloseWAL() {
	if err := mem.wal.Close(); err != nil {
		mem.logger.Error("Error closing WAL", "err", err)
	}
	mem.wal = nil
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) Lock() {
	mem.updateMtx.Lock()
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) Unlock() {
	mem.updateMtx.Unlock()
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) Size() int {
	return mem.txs.Len()
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) TxsBytes() int64 {
	return atomic.LoadInt64(&mem.txsBytes)
}

// Lock() must be held by the caller during execution.
func (mem *CListMempool) FlushAppConn() error {
	return mem.proxyAppConn.FlushSync()
}
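// newMempoolSketch is a minimal construction sketch (hypothetical helper, not
// part of the original file). It assumes an already-connected
// proxy.AppConnMempool and shows where the options, the logger, and the
// optional WAL fit in; config.WalEnabled() is assumed from the config
// package.
func newMempoolSketch(config *cfg.MempoolConfig, appConn proxy.AppConnMempool, l log.Logger) (*CListMempool, error) {
	mp := NewCListMempool(config, appConn, 0, WithMetrics(NopMetrics()))
	mp.SetLogger(l)
	if config.WalEnabled() {
		// InitWAL opens <WalDir>/wal via the autofile package.
		if err := mp.InitWAL(); err != nil {
			return nil, err
		}
	}
	return mp, nil
}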
// XXX: Unsafe! Calling Flush may leave mempool in inconsistent state.
func (mem *CListMempool) Flush() {
	mem.updateMtx.RLock()
	defer mem.updateMtx.RUnlock()

	_ = atomic.SwapInt64(&mem.txsBytes, 0)
	mem.cache.Reset()

	for e := mem.txs.Front(); e != nil; e = e.Next() {
		mem.txs.Remove(e)
		e.DetachPrev()
	}

	mem.txsMap.Range(func(key, _ interface{}) bool {
		mem.txsMap.Delete(key)
		return true
	})
}

// TxsFront returns the first transaction in the ordered list for peer
// goroutines to call .NextWait() on.
// FIXME: leaking implementation details!
//
// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) TxsFront() *clist.CElement {
	return mem.txs.Front()
}

// TxsWaitChan returns a channel to wait on transactions. It will be closed
// once the mempool is not empty (ie. the internal `mem.txs` has at least one
// element).
//
// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) TxsWaitChan() <-chan struct{} {
	return mem.txs.WaitChan()
}

// It blocks if we're waiting on Update() or Reap().
// cb: A callback from the CheckTx command.
// It gets called from another goroutine.
// CONTRACT: Either cb will get called, or err returned.
//
// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo TxInfo) error {
	mem.updateMtx.RLock()
	// use defer to unlock mutex because application (*local client*) might panic
	defer mem.updateMtx.RUnlock()

	txSize := len(tx)

	if err := mem.isFull(txSize); err != nil {
		return err
	}

	if txSize > mem.config.MaxTxBytes {
		return ErrTxTooLarge{mem.config.MaxTxBytes, txSize}
	}

	if mem.preCheck != nil {
		if err := mem.preCheck(tx); err != nil {
			return ErrPreCheck{err}
		}
	}

	// NOTE: writing to the WAL and calling proxy must be done before adding tx
	// to the cache. Otherwise, if either of them fails, next time CheckTx is
	// called with tx, ErrTxInCache will be returned without tx being checked at
	// all even once.
	if mem.wal != nil {
		// TODO: Notify administrators when WAL fails
		_, err := mem.wal.Write(append([]byte(tx), newline...))
		if err != nil {
			return fmt.Errorf("wal.Write: %w", err)
		}
	}

	// NOTE: proxyAppConn may error if tx buffer is full
	if err := mem.proxyAppConn.Error(); err != nil {
		return err
	}

	if !mem.cache.Push(tx) {
		// Record a new sender for a tx we've already seen.
		// Note it's possible a tx is still in the cache but no longer in the mempool
		// (eg. after committing a block, txs are removed from mempool but not cache),
		// so we only record the sender for txs still in the mempool.
		if e, ok := mem.txsMap.Load(TxKey(tx)); ok {
			memTx := e.(*clist.CElement).Value.(*mempoolTx)
			memTx.senders.LoadOrStore(txInfo.SenderID, true)
			// TODO: consider punishing peer for dups.
			// It's non-trivial since invalid txs can become valid,
			// but they can spam the same tx with little cost to them atm.
		}

		return ErrTxInCache
	}

	reqRes := mem.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{Tx: tx})
	reqRes.SetCallback(mem.reqResCb(tx, txInfo.SenderID, txInfo.SenderP2PID, cb))

	return nil
}
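// checkTxSketch is a hypothetical caller of CheckTx (not part of the original
// file), roughly what an RPC handler does: submit the tx and inspect the ABCI
// result in the callback. UnknownPeerID is assumed from this package's
// reactor and marks a tx that did not arrive from a peer.
func checkTxSketch(mem *CListMempool, tx types.Tx) error {
	return mem.CheckTx(tx, func(res *abci.Response) {
		// Fires after resCbFirstTime has processed the app's response.
		if r, ok := res.Value.(*abci.Response_CheckTx); ok {
			_ = r.CheckTx.Code // abci.CodeTypeOK means the tx was admitted
		}
	}, TxInfo{SenderID: UnknownPeerID})
}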
// Global callback that will be called after every ABCI response.
// Having a single global callback avoids needing to set a callback for each request.
// However, processing the CheckTx response requires the peerID (so we can
// track which peers sent us which txs), and the peerID is not included in the
// ABCI request, so we have to set request-specific callbacks that include this
// information. If we're not in the midst of a recheck, this function just
// returns, so the request-specific callback can do the work.
//
// When rechecking, we don't need the peerID, so the recheck callback happens
// here.
func (mem *CListMempool) globalCb(req *abci.Request, res *abci.Response) {
	if mem.recheckCursor == nil {
		return
	}

	mem.metrics.RecheckTimes.Add(1)
	mem.resCbRecheck(req, res)

	// update metrics
	mem.metrics.Size.Set(float64(mem.Size()))
}

// Request-specific callback that should be set on individual reqRes objects
// to incorporate local information when processing the response.
// This allows us to track the peer that sent us this tx, so we can avoid sending it back to them.
// NOTE: alternatively, we could include this information in the ABCI request itself.
//
// External callers of CheckTx, like the RPC, can also pass an externalCb through here that is called
// when all other response processing is complete.
//
// Used in CheckTx to record the PeerID who sent us the tx.
func (mem *CListMempool) reqResCb(
	tx []byte,
	peerID uint16,
	peerP2PID p2p.ID,
	externalCb func(*abci.Response),
) func(res *abci.Response) {
	return func(res *abci.Response) {
		if mem.recheckCursor != nil {
			// this should never happen
			panic("recheck cursor is not nil in reqResCb")
		}

		mem.resCbFirstTime(tx, peerID, peerP2PID, res)

		// update metrics
		mem.metrics.Size.Set(float64(mem.Size()))

		// passed in by the caller of CheckTx, eg. the RPC
		if externalCb != nil {
			externalCb(res)
		}
	}
}

// Called from:
//  - resCbFirstTime (lock not held) if tx is valid
func (mem *CListMempool) addTx(memTx *mempoolTx) {
	e := mem.txs.PushBack(memTx)
	mem.txsMap.Store(TxKey(memTx.tx), e)
	atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx)))
	mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx)))
}

// Called from:
//  - Update (lock held) if tx was committed
//  - resCbRecheck (lock not held) if tx was invalidated
func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) {
	mem.txs.Remove(elem)
	elem.DetachPrev()
	mem.txsMap.Delete(TxKey(tx))
	atomic.AddInt64(&mem.txsBytes, int64(-len(tx)))

	if removeFromCache {
		mem.cache.Remove(tx)
	}
}

// RemoveTxByKey removes a transaction from the mempool by its TxKey index.
func (mem *CListMempool) RemoveTxByKey(txKey [TxKeySize]byte, removeFromCache bool) {
	if e, ok := mem.txsMap.Load(txKey); ok {
		memTx := e.(*clist.CElement).Value.(*mempoolTx)
		if memTx != nil {
			mem.removeTx(memTx.tx, e.(*clist.CElement), removeFromCache)
		}
	}
}

func (mem *CListMempool) isFull(txSize int) error {
	var (
		memSize  = mem.Size()
		txsBytes = mem.TxsBytes()
	)

	if memSize >= mem.config.Size || int64(txSize)+txsBytes > mem.config.MaxTxsBytes {
		return ErrMempoolIsFull{
			memSize, mem.config.Size,
			txsBytes, mem.config.MaxTxsBytes,
		}
	}

	return nil
}
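// removeByKeySketch (hypothetical, not part of the original file) shows how
// the TxKey index defined at the bottom of this file pairs with RemoveTxByKey:
// a caller holding only the raw tx bytes can evict it without walking the
// clist. Passing removeFromCache=false leaves the tx in the cache, so a
// re-submission will be rejected with ErrTxInCache.
func removeByKeySketch(mem *CListMempool, tx types.Tx) {
	mem.RemoveTxByKey(TxKey(tx), false)
}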
// resCbFirstTime is the callback invoked after the app has checked the tx for
// the first time.
//
// The case where the app checks the tx for the second and subsequent times is
// handled by the resCbRecheck callback.
func (mem *CListMempool) resCbFirstTime(
	tx []byte,
	peerID uint16,
	peerP2PID p2p.ID,
	res *abci.Response,
) {
	switch r := res.Value.(type) {
	case *abci.Response_CheckTx:
		var postCheckErr error
		if mem.postCheck != nil {
			postCheckErr = mem.postCheck(tx, r.CheckTx)
		}
		if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil {
			// Check mempool isn't full again to reduce the chance of exceeding the
			// limits.
			if err := mem.isFull(len(tx)); err != nil {
				// remove from cache (mempool might have space later)
				mem.cache.Remove(tx)
				mem.logger.Error(err.Error())
				return
			}

			memTx := &mempoolTx{
				height:    mem.height,
				gasWanted: r.CheckTx.GasWanted,
				tx:        tx,
			}
			memTx.senders.Store(peerID, true)
			mem.addTx(memTx)
			mem.logger.Debug("added good transaction",
				"tx", txID(tx),
				"res", r,
				"height", memTx.height,
				"total", mem.Size(),
			)
			mem.notifyTxsAvailable()
		} else {
			// ignore bad transaction
			mem.logger.Debug("rejected bad transaction",
				"tx", txID(tx), "peerID", peerP2PID, "res", r, "err", postCheckErr)
			mem.metrics.FailedTxs.Add(1)
			if !mem.config.KeepInvalidTxsInCache {
				// remove from cache (it might be good later)
				mem.cache.Remove(tx)
			}
		}
	default:
		// ignore other messages
	}
}

// resCbRecheck is the callback invoked after the app has rechecked the tx.
//
// The case where the app checks the tx for the first time is handled by the
// resCbFirstTime callback.
func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) {
	switch r := res.Value.(type) {
	case *abci.Response_CheckTx:
		tx := req.GetCheckTx().Tx
		memTx := mem.recheckCursor.Value.(*mempoolTx)
		if !bytes.Equal(tx, memTx.tx) {
			panic(fmt.Sprintf(
				"Unexpected tx response from proxy during recheck\nExpected %X, got %X",
				memTx.tx,
				tx))
		}
		var postCheckErr error
		if mem.postCheck != nil {
			postCheckErr = mem.postCheck(tx, r.CheckTx)
		}
		if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil {
			// Good, nothing to do.
		} else {
			// Tx became invalidated due to newly committed block.
			mem.logger.Debug("tx is no longer valid", "tx", txID(tx), "res", r, "err", postCheckErr)
			// NOTE: we remove tx from the cache because it might be good later
			mem.removeTx(tx, mem.recheckCursor, !mem.config.KeepInvalidTxsInCache)
		}
		if mem.recheckCursor == mem.recheckEnd {
			mem.recheckCursor = nil
		} else {
			mem.recheckCursor = mem.recheckCursor.Next()
		}
		if mem.recheckCursor == nil {
			// Done!
			mem.logger.Debug("done rechecking txs")

			// In case the recheck removed all txs
			if mem.Size() > 0 {
				mem.notifyTxsAvailable()
			}
		}
	default:
		// ignore other messages
	}
}
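// postCheckMaxGasSketch is a sketch (not part of the original file) of a
// PostCheckFunc like the ones consulted in resCbFirstTime/resCbRecheck above,
// assuming the package's signature func(types.Tx, *abci.ResponseCheckTx)
// error: reject any tx whose reported GasWanted exceeds a fixed per-tx limit.
// It would be wired in via WithPostCheck or Update.
func postCheckMaxGasSketch(maxGas int64) PostCheckFunc {
	return func(tx types.Tx, res *abci.ResponseCheckTx) error {
		if res.GasWanted > maxGas {
			return fmt.Errorf("gas wanted %d exceeds per-tx limit %d", res.GasWanted, maxGas)
		}
		return nil
	}
}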
// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) TxsAvailable() <-chan struct{} {
	return mem.txsAvailable
}

func (mem *CListMempool) notifyTxsAvailable() {
	if mem.Size() == 0 {
		panic("notified txs available but mempool is empty!")
	}
	if mem.txsAvailable != nil && !mem.notifiedTxsAvailable {
		// channel cap is 1, so this will send once
		mem.notifiedTxsAvailable = true
		select {
		case mem.txsAvailable <- struct{}{}:
		default:
		}
	}
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs {
	mem.updateMtx.RLock()
	defer mem.updateMtx.RUnlock()

	var totalGas int64

	// TODO: we will get a performance boost if we have a good estimate of avg
	// size per tx, and set the initial capacity based off of that.
	// txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max/mem.avgTxSize))
	txs := make([]types.Tx, 0, mem.txs.Len())
	for e := mem.txs.Front(); e != nil; e = e.Next() {
		memTx := e.Value.(*mempoolTx)

		dataSize := types.ComputeProtoSizeForTxs(append(txs, memTx.tx))

		// Check total size requirement
		if maxBytes > -1 && dataSize > maxBytes {
			return txs
		}
		// Check total gas requirement.
		// If maxGas is negative, skip this check.
		// Since newTotalGas < maxGas, which must be non-negative, it follows
		// that this won't overflow.
		newTotalGas := totalGas + memTx.gasWanted
		if maxGas > -1 && newTotalGas > maxGas {
			return txs
		}
		totalGas = newTotalGas
		txs = append(txs, memTx.tx)
	}
	return txs
}

// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) ReapMaxTxs(max int) types.Txs {
	mem.updateMtx.RLock()
	defer mem.updateMtx.RUnlock()

	if max < 0 {
		max = mem.txs.Len()
	}

	txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max))
	for e := mem.txs.Front(); e != nil && len(txs) < max; e = e.Next() {
		memTx := e.Value.(*mempoolTx)
		txs = append(txs, memTx.tx)
	}
	return txs
}
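// reapSketch (hypothetical, not part of the original file) shows the
// proposer-side use of ReapMaxBytesMaxGas. Passing -1 for either limit
// disables that check; the real caller derives maxBytes from the consensus
// block-size parameters.
func reapSketch(mem *CListMempool, maxBytes, maxGas int64) types.Txs {
	// Txs come back in FIFO (insertion) order, stopping before the first tx
	// that would push the batch over either limit.
	return mem.ReapMaxBytesMaxGas(maxBytes, maxGas)
}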
// Lock() must be held by the caller during execution.
func (mem *CListMempool) Update(
	height int64,
	txs types.Txs,
	deliverTxResponses []*abci.ResponseDeliverTx,
	preCheck PreCheckFunc,
	postCheck PostCheckFunc,
) error {
	// Set height
	mem.height = height
	mem.notifiedTxsAvailable = false

	if preCheck != nil {
		mem.preCheck = preCheck
	}
	if postCheck != nil {
		mem.postCheck = postCheck
	}

	for i, tx := range txs {
		if deliverTxResponses[i].Code == abci.CodeTypeOK {
			// Add valid committed tx to the cache (if missing).
			_ = mem.cache.Push(tx)
		} else if !mem.config.KeepInvalidTxsInCache {
			// Allow invalid transactions to be resubmitted.
			mem.cache.Remove(tx)
		}

		// Remove committed tx from the mempool.
		//
		// Note an evil proposer can drop valid txs!
		// Mempool before:
		//   100 -> 101 -> 102
		// Block, proposed by an evil proposer:
		//   101 -> 102
		// Mempool after:
		//   100
		// https://github.com/mydexchain/tendermint/issues/3322.
		if e, ok := mem.txsMap.Load(TxKey(tx)); ok {
			mem.removeTx(tx, e.(*clist.CElement), false)
		}
	}

	// Either recheck non-committed txs to see if they became invalid,
	// or just notify that there are some txs left.
	if mem.Size() > 0 {
		if mem.config.Recheck {
			mem.logger.Debug("recheck txs", "numtxs", mem.Size(), "height", height)
			mem.recheckTxs()
			// At this point, mem.txs are being rechecked.
			// mem.recheckCursor re-scans mem.txs and possibly removes some txs.
			// Before mem.Reap(), we should wait for mem.recheckCursor to be nil.
		} else {
			mem.notifyTxsAvailable()
		}
	}

	// Update metrics
	mem.metrics.Size.Set(float64(mem.Size()))

	return nil
}

func (mem *CListMempool) recheckTxs() {
	if mem.Size() == 0 {
		panic("recheckTxs is called, but the mempool is empty")
	}

	mem.recheckCursor = mem.txs.Front()
	mem.recheckEnd = mem.txs.Back()

	// Push txs to proxyAppConn
	// NOTE: globalCb may be called concurrently.
	for e := mem.txs.Front(); e != nil; e = e.Next() {
		memTx := e.Value.(*mempoolTx)
		mem.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{
			Tx:   memTx.tx,
			Type: abci.CheckTxType_Recheck,
		})
	}

	mem.proxyAppConn.FlushAsync()
}

//--------------------------------------------------------------------------------

// mempoolTx is a transaction that successfully ran.
type mempoolTx struct {
	height    int64    // height that this tx had been validated in
	gasWanted int64    // amount of gas this tx states it will require
	tx        types.Tx

	// ids of peers who've sent us this tx (as a map for quick lookups).
	// senders: PeerID -> bool
	senders sync.Map
}

// Height returns the height for this transaction.
func (memTx *mempoolTx) Height() int64 {
	return atomic.LoadInt64(&memTx.height)
}

//--------------------------------------------------------------------------------

type txCache interface {
	Reset()
	Push(tx types.Tx) bool
	Remove(tx types.Tx)
}

// mapTxCache maintains an LRU cache of transactions. This only stores the hash
// of the tx, due to memory concerns.
type mapTxCache struct {
	mtx      tmsync.Mutex
	size     int
	cacheMap map[[TxKeySize]byte]*list.Element
	list     *list.List
}

var _ txCache = (*mapTxCache)(nil)

// newMapTxCache returns a new mapTxCache.
func newMapTxCache(cacheSize int) *mapTxCache {
	return &mapTxCache{
		size:     cacheSize,
		cacheMap: make(map[[TxKeySize]byte]*list.Element, cacheSize),
		list:     list.New(),
	}
}

// Reset resets the cache to an empty state.
func (cache *mapTxCache) Reset() {
	cache.mtx.Lock()
	cache.cacheMap = make(map[[TxKeySize]byte]*list.Element, cache.size)
	cache.list.Init()
	cache.mtx.Unlock()
}

// Push adds the given tx to the cache and returns true. It returns
// false if tx is already in the cache.
func (cache *mapTxCache) Push(tx types.Tx) bool {
	cache.mtx.Lock()
	defer cache.mtx.Unlock()

	// Use the tx hash in the cache
	txHash := TxKey(tx)
	if moved, exists := cache.cacheMap[txHash]; exists {
		cache.list.MoveToBack(moved)
		return false
	}

	if cache.list.Len() >= cache.size {
		popped := cache.list.Front()
		if popped != nil {
			poppedTxHash := popped.Value.([TxKeySize]byte)
			delete(cache.cacheMap, poppedTxHash)
			cache.list.Remove(popped)
		}
	}
	e := cache.list.PushBack(txHash)
	cache.cacheMap[txHash] = e
	return true
}
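// cacheSketch (hypothetical, not part of the original file) illustrates the
// LRU contract of mapTxCache: a duplicate Push returns false and refreshes
// recency, and once the cache is at capacity the least recently seen hash is
// evicted.
func cacheSketch() {
	c := newMapTxCache(2)
	_ = c.Push(types.Tx("a")) // true: new entry
	_ = c.Push(types.Tx("b")) // true: new entry
	_ = c.Push(types.Tx("a")) // false: duplicate; "a" moves to the back
	_ = c.Push(types.Tx("c")) // true: evicts "b", the least recently seen
}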
// Remove removes the given tx from the cache.
func (cache *mapTxCache) Remove(tx types.Tx) {
	cache.mtx.Lock()
	txHash := TxKey(tx)
	popped := cache.cacheMap[txHash]
	delete(cache.cacheMap, txHash)
	if popped != nil {
		cache.list.Remove(popped)
	}

	cache.mtx.Unlock()
}

type nopTxCache struct{}

var _ txCache = (*nopTxCache)(nil)

func (nopTxCache) Reset()             {}
func (nopTxCache) Push(types.Tx) bool { return true }
func (nopTxCache) Remove(types.Tx)    {}

//--------------------------------------------------------------------------------

// TxKey is the fixed length array hash used as the key in maps.
func TxKey(tx types.Tx) [TxKeySize]byte {
	return sha256.Sum256(tx)
}

// txID is a hash of the Tx.
func txID(tx []byte) []byte {
	return types.Tx(tx).Hash()
}
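// commitFlowSketch (hypothetical, not part of the original file) spells out
// the caller-side contract noted on FlushAppConn and Update: take the
// exclusive lock, flush the app connection so no CheckTx responses are in
// flight, then Update against the committed block.
func commitFlowSketch(mem *CListMempool, height int64, txs types.Txs, results []*abci.ResponseDeliverTx) error {
	mem.Lock()
	defer mem.Unlock()

	if err := mem.FlushAppConn(); err != nil {
		return err
	}
	return mem.Update(height, txs, results, nil, nil)
}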