github.com/klaytn/klaytn@v1.12.1/blockchain/bloombits/scheduler.go

// Modifications Copyright 2018 The klaytn Authors
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//
// This file is derived from core/bloombits/scheduler.go (2018/06/04).
// Modified and improved for the klaytn development.

package bloombits

import "sync"

// request represents a bloom retrieval task to prioritize and pull from the local
// database or remotely from the network.
type request struct {
	section uint64 // Section index to retrieve the bit-vector from
	bit     uint   // Bit index within the section to retrieve the vector of
}

// response represents the state of a requested bit-vector through a scheduler.
type response struct {
	cached []byte        // Cached bits to dedup multiple requests
	done   chan struct{} // Channel to allow waiting for completion
}

// scheduler handles the scheduling of bloom-filter retrieval operations for
// entire section-batches belonging to a single bloom bit. Besides scheduling the
// retrieval operations, this struct also deduplicates the requests and caches
// the results to minimize network/database overhead even in complex filtering
// scenarios.
type scheduler struct {
	bit       uint                 // Index of the bit in the bloom filter this scheduler is responsible for
	responses map[uint64]*response // Currently pending retrieval requests or already cached responses
	lock      sync.Mutex           // Lock protecting the responses from concurrent access
}

// newScheduler creates a new bloom-filter retrieval scheduler for a specific
// bit index.
func newScheduler(idx uint) *scheduler {
	return &scheduler{
		bit:       idx,
		responses: make(map[uint64]*response),
	}
}

// run creates a retrieval pipeline, receiving section indexes from sections and
// returning the results in the same order through the done channel. Concurrent
// runs of the same scheduler are allowed, leading to retrieval task deduplication.
func (s *scheduler) run(sections chan uint64, dist chan *request, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) {
	// Create a forwarder channel between requests and responses of the same size as
	// the distribution channel (since that will block the pipeline anyway).
	pend := make(chan uint64, cap(dist))

	// Start the pipeline schedulers to forward between user -> distributor -> user
	wg.Add(2)
	go s.scheduleRequests(sections, dist, pend, quit, wg)
	go s.scheduleDeliveries(pend, done, quit, wg)
}

// reset cleans up any leftovers from previous runs. This is required before a
// restart to ensure that no previously requested but never delivered state can
// cause a lockup.
func (s *scheduler) reset() {
	s.lock.Lock()
	defer s.lock.Unlock()

	for section, res := range s.responses {
		if res.cached == nil {
			delete(s.responses, section)
		}
	}
}

// scheduleRequests reads section retrieval requests from the input channel,
// deduplicates the stream and pushes unique retrieval tasks into the distribution
// channel for a database or network layer to honour.
func (s *scheduler) scheduleRequests(reqs chan uint64, dist chan *request, pend chan uint64, quit chan struct{}, wg *sync.WaitGroup) {
	// Clean up the goroutine and pipeline when done
	defer wg.Done()
	defer close(pend)

	// Keep reading and scheduling section requests
	for {
		select {
		case <-quit:
			return

		case section, ok := <-reqs:
			// New section retrieval requested
			if !ok {
				return
			}
			// Deduplicate retrieval requests
			unique := false

			s.lock.Lock()
			if s.responses[section] == nil {
				s.responses[section] = &response{
					done: make(chan struct{}),
				}
				unique = true
			}
			s.lock.Unlock()

			// Schedule the section for retrieval and notify the deliverer to expect this section
			if unique {
				select {
				case <-quit:
					return
				case dist <- &request{bit: s.bit, section: section}:
				}
			}
			select {
			case <-quit:
				return
			case pend <- section:
			}
		}
	}
}

// scheduleDeliveries reads section acceptance notifications and waits for them
// to be delivered, pushing them into the output data buffer.
func (s *scheduler) scheduleDeliveries(pend chan uint64, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) {
	// Clean up the goroutine and pipeline when done
	defer wg.Done()
	defer close(done)

	// Keep reading notifications and scheduling deliveries
	for {
		select {
		case <-quit:
			return

		case idx, ok := <-pend:
			// New section retrieval pending
			if !ok {
				return
			}
			// Wait until the request is honoured
			s.lock.Lock()
			res := s.responses[idx]
			s.lock.Unlock()

			select {
			case <-quit:
				return
			case <-res.done:
			}
			// Deliver the result
			select {
			case <-quit:
				return
			case done <- res.cached:
			}
		}
	}
}

// deliver is called by the request distributor when a reply to a request arrives.
func (s *scheduler) deliver(sections []uint64, data [][]byte) {
	s.lock.Lock()
	defer s.lock.Unlock()

	for i, section := range sections {
		if res := s.responses[section]; res != nil && res.cached == nil { // Avoid non-requests and double deliveries
			res.cached = data[i]
			close(res.done)
		}
	}
}
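Below is a minimal usage sketch, not part of the original file, showing how a caller might wire run and deliver together. It assumes the snippet sits in package bloombits (the types are unexported), and fetchBitVector plus exampleRun are hypothetical names standing in for the real database/network lookup and driver logic that the Matcher provides in the actual codebase.

package bloombits

import "sync"

// fetchBitVector is a hypothetical backend returning the bit-vector of one
// bloom bit for one section; a real implementation would hit the database or
// the network.
func fetchBitVector(bit uint, section uint64) []byte {
	return []byte{byte(bit), byte(section)} // placeholder payload
}

// exampleRun drives a single scheduler end to end.
func exampleRun() {
	var (
		wg       sync.WaitGroup
		sections = make(chan uint64)       // section indexes to retrieve
		dist     = make(chan *request, 16) // deduplicated retrieval tasks
		done     = make(chan []byte, 16)   // results, delivered in request order
		quit     = make(chan struct{})
	)
	s := newScheduler(0) // scheduler responsible for bloom bit #0
	s.run(sections, dist, done, quit, &wg)

	// Serve the distribution channel: fetch each requested section and hand
	// the result back through deliver, which unblocks the delivery pipeline.
	go func() {
		for req := range dist {
			s.deliver([]uint64{req.section}, [][]byte{fetchBitVector(req.bit, req.section)})
		}
	}()

	// Request a few sections; duplicate section indexes would be fetched only
	// once but would still yield one result each on the done channel.
	go func() {
		for i := uint64(0); i < 4; i++ {
			sections <- i
		}
		close(sections) // no more requests: the pipeline drains and closes done
	}()

	for vector := range done {
		_ = vector // consume the retrieved bit-vectors in order
	}
	close(quit)
	wg.Wait()
	close(dist) // safe once both pipeline goroutines have exited
}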