github.com/MetalBlockchain/subnet-evm@v0.4.9/core/blockchain_iterator.go

// (c) 2019-2020, Ava Labs, Inc.
//
// This file is a derived work, based on the go-ethereum library whose original
// notices appear below.
//
// It is distributed under a license compatible with the licensing terms of the
// original code from which it is derived.
//
// Much love to the original authors for their work.
// **********
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package core implements the Ethereum consensus protocol.
package core

import (
	"context"
	"errors"
	"fmt"
	"sync"

	"github.com/MetalBlockchain/subnet-evm/core/types"
)

// blockAndState pairs a block with whether its state root is available on
// disk, or carries the error encountered while fetching the block.
type blockAndState struct {
	block    *types.Block
	hasState bool
	err      error
}

type blockChainIterator struct {
	bc *BlockChain

	nextReadBlockHeight   uint64
	nextBlockHeightToRead uint64
	blocks                []*blockAndState
	blocksRead            chan *blockAndState
	heightsToRead         chan uint64

	wg        sync.WaitGroup
	closeOnce sync.Once
	onClose   chan struct{}
}

func newBlockChainIterator(bc *BlockChain, start uint64, parallelism int) *blockChainIterator {
	i := &blockChainIterator{
		bc: bc,

		nextReadBlockHeight:   start,
		nextBlockHeightToRead: start,
		blocks:                make([]*blockAndState, parallelism),
		blocksRead:            make(chan *blockAndState),
		heightsToRead:         make(chan uint64),
		onClose:               make(chan struct{}),
	}

	i.wg.Add(parallelism)

	// Start [parallelism] worker threads to read block information
	for j := 0; j < parallelism; j++ {
		// Start a goroutine to read incoming heights from [heightsToRead],
		// fetch the corresponding block information, and place it on the
		// [blocksRead] channel.
		go func() {
			defer i.wg.Done()

			for {
				// Read heights in from [heightsToRead]
				var height uint64
				select {
				case height = <-i.heightsToRead:
				case <-i.onClose:
					return
				}

				block := bc.GetBlockByNumber(height)
				if block == nil {
					select {
					case i.blocksRead <- &blockAndState{err: fmt.Errorf("missing block:%d", height)}:
						continue
					case <-i.onClose:
						return
					}
				}

				select {
				case i.blocksRead <- &blockAndState{block: block, hasState: bc.HasState(block.Root())}:
					continue
				case <-i.onClose:
					return
				}
			}
		}()
	}
	lastAccepted := i.bc.LastAcceptedBlock().NumberU64()
	// Queue the initial tranche of heights for the [parallelism] workers.
	i.populateReaders(lastAccepted)
	return i
}
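
// Note on buffering (editorial comment, inferred from the code below):
// [blocks] acts as a fixed ring buffer of size [parallelism].
// populateReaders never requests a height at or beyond
// [nextReadBlockHeight] + len([blocks]), so every in-flight height maps to a
// unique slot:
//
//	index := height % uint64(len(i.blocks))
//
// and a worker can never overwrite a block that Next has not yet consumed.
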
// populateReaders adds the heights for the next [parallelism] blocks to
// [heightsToRead]. This is called piecewise to ensure that each of the blocks
// is read within Next and set in [blocks] before moving on to the next tranche
// of blocks.
func (i *blockChainIterator) populateReaders(lastAccepted uint64) {
	maxHeightToRead := i.nextReadBlockHeight + uint64(len(i.blocks))
	for {
		if i.nextBlockHeightToRead > lastAccepted {
			return
		}
		if maxHeightToRead <= i.nextBlockHeightToRead {
			return
		}
		select {
		case i.heightsToRead <- i.nextBlockHeightToRead:
			i.nextBlockHeightToRead++
		case <-i.onClose:
			return
		}
	}
}

// Next retrieves the next consecutive block in the iteration. It returns the
// block, whether the block's state is available, and an optional error.
func (i *blockChainIterator) Next(ctx context.Context) (*types.Block, bool, error) {
	lastAccepted := i.bc.LastAcceptedBlock().NumberU64()
	if i.nextReadBlockHeight > lastAccepted {
		return nil, false, errors.New("no more blocks")
	}
	i.populateReaders(lastAccepted)

	nextIndex := int(i.nextReadBlockHeight % uint64(len(i.blocks)))
	for {
		nextBlock := i.blocks[nextIndex]
		// If the next block in the iteration has already been populated,
		// return it immediately.
		if nextBlock != nil {
			i.blocks[nextIndex] = nil
			i.nextReadBlockHeight++
			i.populateReaders(lastAccepted)
			return nextBlock.block, nextBlock.hasState, nil
		}

		// Otherwise, keep reading block info from [blocksRead] and populating
		// the [blocks] buffer until we hit the actual next block in the
		// iteration.
		select {
		case block := <-i.blocksRead:
			if block.err != nil {
				i.Stop()
				return nil, false, block.err
			}

			index := int(block.block.NumberU64() % uint64(len(i.blocks)))
			i.blocks[index] = block
		case <-ctx.Done():
			return nil, false, ctx.Err()
		case <-i.onClose:
			return nil, false, errors.New("closed")
		}
	}
}

// Stop closes the [onClose] channel, signalling all worker threads to exit,
// and waits for all of the worker threads to finish.
func (i *blockChainIterator) Stop() {
	i.closeOnce.Do(func() {
		close(i.onClose)
	})
	i.wg.Wait()
}
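
// The function below is an illustrative sketch, not part of the upstream
// file: it shows one way a caller might drive the iterator across every
// accepted block. The start height of 0, the parallelism of 8, and the
// [visit] callback are hypothetical choices.
func iterateAcceptedBlocks(ctx context.Context, bc *BlockChain, visit func(*types.Block, bool)) error {
	it := newBlockChainIterator(bc, 0, 8)
	// Stop is safe to call even after Next has already stopped the iterator,
	// since [closeOnce] guards the close of [onClose].
	defer it.Stop()

	for {
		block, hasState, err := it.Next(ctx)
		if err != nil {
			// Next reports both genuine failures (a missing block, context
			// cancellation) and normal exhaustion ("no more blocks") as
			// errors; callers decide which they consider fatal.
			return err
		}
		visit(block, hasState)
	}
}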