github.com/bcnmy/go-ethereum@v1.10.27/eth/catalyst/queue.go

// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package catalyst

import (
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/beacon"
	"github.com/ethereum/go-ethereum/core/types"
)

// maxTrackedPayloads is the maximum number of prepared payloads the execution
// engine tracks before evicting old ones. Ideally we should only ever track the
// latest one; but have a slight wiggle room for non-ideal conditions.
const maxTrackedPayloads = 10

// maxTrackedHeaders is the maximum number of executed payloads the execution
// engine tracks before evicting old ones. Ideally we should only ever track the
// latest one; but have a slight wiggle room for non-ideal conditions.
const maxTrackedHeaders = 10

// payload wraps the miner's block production channel, allowing the mined block
// to be retrieved later upon the GetPayload engine API call.
type payload struct {
	lock   sync.Mutex
	done   bool
	empty  *types.Block
	block  *types.Block
	result chan *types.Block
}

// resolve extracts the generated full block from the given channel if possible
// or falls back to the empty block as an alternative.
func (req *payload) resolve() *beacon.ExecutableDataV1 {
	// this function can be called concurrently, prevent any
	// concurrency issue in the first place.
	req.lock.Lock()
	defer req.lock.Unlock()

	// Try to resolve the full block first if it's not obtained
	// yet. The returned block can be nil if the generation fails.
	if !req.done {
		timeout := time.NewTimer(500 * time.Millisecond)
		defer timeout.Stop()

		select {
		case req.block = <-req.result:
			req.done = true
		case <-timeout.C:
			// TODO(rjl493456442, Marius), should we keep this
			// 500ms timeout allowance? Why not just use the
			// default and then fallback to empty directly?
		}
	}

	if req.block != nil {
		return beacon.BlockToExecutableData(req.block)
	}
	return beacon.BlockToExecutableData(req.empty)
}

// payloadQueueItem represents an id->payload tuple to store until it's retrieved
// or evicted.
type payloadQueueItem struct {
	id   beacon.PayloadID
	data *payload
}

// payloadQueue tracks the latest handful of constructed payloads to be retrieved
// by the beacon chain if block production is requested.
type payloadQueue struct {
	payloads []*payloadQueueItem
	lock     sync.RWMutex
}

// newPayloadQueue creates a pre-initialized queue with a fixed number of slots
// all containing empty items.
func newPayloadQueue() *payloadQueue {
	return &payloadQueue{
		payloads: make([]*payloadQueueItem, maxTrackedPayloads),
	}
}

// put inserts a new payload into the queue at the given id.
func (q *payloadQueue) put(id beacon.PayloadID, data *payload) {
	q.lock.Lock()
	defer q.lock.Unlock()

	copy(q.payloads[1:], q.payloads)
	q.payloads[0] = &payloadQueueItem{
		id:   id,
		data: data,
	}
}

// get retrieves a previously stored payload item or nil if it does not exist.
func (q *payloadQueue) get(id beacon.PayloadID) *beacon.ExecutableDataV1 {
	q.lock.RLock()
	defer q.lock.RUnlock()

	for _, item := range q.payloads {
		if item == nil {
			return nil // no more items
		}
		if item.id == id {
			return item.data.resolve()
		}
	}
	return nil
}

// headerQueueItem represents a hash->header tuple to store until it's retrieved
// or evicted.
type headerQueueItem struct {
	hash   common.Hash
	header *types.Header
}

// headerQueue tracks the latest handful of constructed headers to be retrieved
// by the beacon chain if block production is requested.
type headerQueue struct {
	headers []*headerQueueItem
	lock    sync.RWMutex
}

// newHeaderQueue creates a pre-initialized queue with a fixed number of slots
// all containing empty items.
func newHeaderQueue() *headerQueue {
	return &headerQueue{
		headers: make([]*headerQueueItem, maxTrackedHeaders),
	}
}

// put inserts a new header into the queue at the given hash.
func (q *headerQueue) put(hash common.Hash, data *types.Header) {
	q.lock.Lock()
	defer q.lock.Unlock()

	copy(q.headers[1:], q.headers)
	q.headers[0] = &headerQueueItem{
		hash:   hash,
		header: data,
	}
}

// get retrieves a previously stored header item or nil if it does not exist.
func (q *headerQueue) get(hash common.Hash) *types.Header {
	q.lock.RLock()
	defer q.lock.RUnlock()

	for _, item := range q.headers {
		if item == nil {
			return nil // no more items
		}
		if item.hash == hash {
			return item.header
		}
	}
	return nil
}
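
As a minimal usage sketch (not part of queue.go itself), the snippet below shows how code in the same catalyst package might register an in-flight payload and later resolve it through the queue. The function name examplePayloadRoundTrip and the parameters emptyBlock and fullBlockCh are hypothetical placeholders for illustration; the real callers are the engine API handlers elsewhere in this package.

// examplePayloadRoundTrip is an illustrative helper (hypothetical, not part of
// the original file). It stores an in-flight payload under an id and then
// retrieves it, yielding the full block if it arrived within resolve's timeout
// or the empty fallback block otherwise.
func examplePayloadRoundTrip(id beacon.PayloadID, emptyBlock *types.Block, fullBlockCh chan *types.Block) *beacon.ExecutableDataV1 {
	q := newPayloadQueue()

	// Register the payload: the empty block is the immediate fallback, while
	// the full block is expected to arrive asynchronously on fullBlockCh.
	q.put(id, &payload{
		empty:  emptyBlock,
		result: fullBlockCh,
	})

	// On retrieval, resolve waits briefly for the full block and otherwise
	// falls back to the empty one. Unknown ids return nil.
	return q.get(id)
}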