github.com/MetalBlockchain/subnet-evm@v0.4.9/core/tx_cacher.go

// (c) 2019-2020, Ava Labs, Inc.
//
// This file is a derived work, based on the go-ethereum library whose original
// notices appear below.
//
// It is distributed under a license compatible with the licensing terms of the
// original code from which it is derived.
//
// Much love to the original authors for their work.
// **********
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	"sync"

	"github.com/MetalBlockchain/subnet-evm/core/types"
)

// txSenderCacherRequest is a request for recovering transaction senders with a
// specific signature scheme and caching them into the transactions themselves.
//
// The inc field defines the number of transactions to skip after each recovery,
// which is used to feed the same underlying input array to different threads but
// ensure they process the early transactions fast.
type txSenderCacherRequest struct {
	signer types.Signer
	txs    []*types.Transaction
	inc    int
}

// TxSenderCacher is a helper structure to concurrently ecrecover transaction
// senders from digital signatures on background threads.
type TxSenderCacher struct {
	threads int
	tasks   chan *txSenderCacherRequest

	// synchronization & cleanup
	wg      sync.WaitGroup
	tasksMu sync.RWMutex
}

// newTxSenderCacher creates a new transaction sender background cacher and starts
// the requested number of processing goroutines on construction.
func newTxSenderCacher(threads int) *TxSenderCacher {
	cacher := &TxSenderCacher{
		tasks:   make(chan *txSenderCacherRequest, threads),
		threads: threads,
	}
	for i := 0; i < threads; i++ {
		cacher.wg.Add(1)
		go func() {
			defer cacher.wg.Done()
			cacher.cache()
		}()
	}
	return cacher
}

// cache is an infinite loop, caching transaction senders from various forms of
// data structures.
func (cacher *TxSenderCacher) cache() {
	for task := range cacher.tasks {
		for i := 0; i < len(task.txs); i += task.inc {
			types.Sender(task.signer, task.txs[i])
		}
	}
}

// Recover recovers the senders from a batch of transactions and caches them
// back into the same data structures. There is no validation being done, nor
// any reaction to invalid signatures. That is up to calling code later.
func (cacher *TxSenderCacher) Recover(signer types.Signer, txs []*types.Transaction) {
	// Hold a read lock on tasksMu to make sure we don't close
	// the channel in the middle of this call during Shutdown.
	cacher.tasksMu.RLock()
	defer cacher.tasksMu.RUnlock()

	// If there's nothing to recover, abort
	if len(txs) == 0 {
		return
	}
	// If we're shutting down, abort
	if cacher.tasks == nil {
		return
	}

	// Ensure we have meaningful task sizes and schedule the recoveries
	tasks := cacher.threads
	if len(txs) < tasks*4 {
		tasks = (len(txs) + 3) / 4
	}
	for i := 0; i < tasks; i++ {
		cacher.tasks <- &txSenderCacherRequest{
			signer: signer,
			txs:    txs[i:],
			inc:    tasks,
		}
	}
}

// Shutdown stops the threads started by newTxSenderCacher.
func (cacher *TxSenderCacher) Shutdown() {
	// Hold the lock on tasksMu to make sure we don't close
	// the channel in the middle of Recover, which would
	// cause it to write to a closed channel.
	cacher.tasksMu.Lock()
	defer cacher.tasksMu.Unlock()

	close(cacher.tasks)
	cacher.wg.Wait()
	cacher.tasks = nil
}
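
// Usage sketch (not part of the original file): a caller would typically
// pre-warm the sender cache for a batch of transactions so that later
// types.Sender calls return the cached address instead of redoing ecrecover.
// The chainID value and the LatestSignerForChainID choice below are
// illustrative assumptions; real callers derive the signer from the chain
// configuration of the block being processed.
//
//	cacher := newTxSenderCacher(runtime.NumCPU())
//	defer cacher.Shutdown()
//
//	signer := types.LatestSignerForChainID(chainID)
//	cacher.Recover(signer, block.Transactions())
//
// Scheduling note: with, say, 8 transactions and 4 worker threads, Recover
// computes tasks = 2 and enqueues the slices txs[0:] and txs[1:], each with
// inc = 2, so the two workers stride through disjoint halves of the batch
// (indices 0,2,4,6 and 1,3,5,7) while the earliest transactions are
// recovered first.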