github.com/phillinzzz/newBsc@v1.1.6/core/state/trie_prefetcher.go

// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package state

import (
	"sync"

	"github.com/phillinzzz/newBsc/common"
	"github.com/phillinzzz/newBsc/common/gopool"
	"github.com/phillinzzz/newBsc/log"
	"github.com/phillinzzz/newBsc/metrics"
)

const abortChanSize = 64

var (
	// triePrefetchMetricsPrefix is the prefix under which to publish the metrics.
	triePrefetchMetricsPrefix = "trie/prefetch/"
)

// triePrefetcher is an active prefetcher, which receives accounts or storage
// items and does trie-loading of them. The goal is to get as much useful content
// into the caches as possible.
//
// Note, the prefetcher's API is not thread safe.
type triePrefetcher struct {
	db       Database                    // Database to fetch trie nodes through
	root     common.Hash                 // Root hash of the account trie for metrics
	fetches  map[common.Hash]Trie        // Partially or fully fetched tries
	fetchers map[common.Hash]*subfetcher // Subfetchers for each trie

	abortChan chan *subfetcher
	closeChan chan struct{}

	deliveryMissMeter metrics.Meter
	accountLoadMeter  metrics.Meter
	accountDupMeter   metrics.Meter
	accountSkipMeter  metrics.Meter
	accountWasteMeter metrics.Meter
	storageLoadMeter  metrics.Meter
	storageDupMeter   metrics.Meter
	storageSkipMeter  metrics.Meter
	storageWasteMeter metrics.Meter
}

// newTriePrefetcher creates an active trie prefetcher rooted at the given state
// root, publishing its metrics under the given namespace.
func newTriePrefetcher(db Database, root common.Hash, namespace string) *triePrefetcher {
	prefix := triePrefetchMetricsPrefix + namespace
	p := &triePrefetcher{
		db:        db,
		root:      root,
		fetchers:  make(map[common.Hash]*subfetcher), // Active prefetchers use the fetchers map
		abortChan: make(chan *subfetcher, abortChanSize),
		closeChan: make(chan struct{}),

		deliveryMissMeter: metrics.GetOrRegisterMeter(prefix+"/deliverymiss", nil),
		accountLoadMeter:  metrics.GetOrRegisterMeter(prefix+"/account/load", nil),
		accountDupMeter:   metrics.GetOrRegisterMeter(prefix+"/account/dup", nil),
		accountSkipMeter:  metrics.GetOrRegisterMeter(prefix+"/account/skip", nil),
		accountWasteMeter: metrics.GetOrRegisterMeter(prefix+"/account/waste", nil),
		storageLoadMeter:  metrics.GetOrRegisterMeter(prefix+"/storage/load", nil),
		storageDupMeter:   metrics.GetOrRegisterMeter(prefix+"/storage/dup", nil),
		storageSkipMeter:  metrics.GetOrRegisterMeter(prefix+"/storage/skip", nil),
		storageWasteMeter: metrics.GetOrRegisterMeter(prefix+"/storage/waste", nil),
	}
	go p.abortLoop()
	return p
}
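// Usage sketch: the call sequence a StateDB-style caller is expected to follow.
// The db, stateRoot, accountKeys and usedKeys values below are placeholders for
// illustration only; passing emptyAddr as the account hash is what marks the
// request as targeting the account trie (see subfetcher.loop below).
//
//	prefetcher := newTriePrefetcher(db, stateRoot, "example")
//	prefetcher.prefetch(stateRoot, accountKeys, emptyAddr) // schedule account-trie keys for preloading
//	trie := prefetcher.trie(stateRoot)                     // abort the subfetcher, take a copy of the preloaded trie
//	prefetcher.used(stateRoot, usedKeys)                   // mark which keys were actually consumed (feeds the waste meters)
//	prefetcher.close()                                     // abort everything and report the metrics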
func (p *triePrefetcher) abortLoop() {
	for {
		select {
		case fetcher := <-p.abortChan:
			fetcher.abort()
		case <-p.closeChan:
			// Drain the fetcher channel before quitting
			for {
				select {
				case fetcher := <-p.abortChan:
					fetcher.abort()
				default:
					return
				}
			}
		}
	}
}

// close iterates over all the subfetchers, aborts any that were left spinning
// and reports the stats to the metrics subsystem.
func (p *triePrefetcher) close() {
	for _, fetcher := range p.fetchers {
		p.abortChan <- fetcher // safe to do multiple times
		<-fetcher.term
		if metrics.Enabled {
			if fetcher.root == p.root {
				p.accountLoadMeter.Mark(int64(len(fetcher.seen)))
				p.accountDupMeter.Mark(int64(fetcher.dups))
				p.accountSkipMeter.Mark(int64(len(fetcher.tasks)))

				for _, key := range fetcher.used {
					delete(fetcher.seen, string(key))
				}
				p.accountWasteMeter.Mark(int64(len(fetcher.seen)))
			} else {
				p.storageLoadMeter.Mark(int64(len(fetcher.seen)))
				p.storageDupMeter.Mark(int64(fetcher.dups))
				p.storageSkipMeter.Mark(int64(len(fetcher.tasks)))

				for _, key := range fetcher.used {
					delete(fetcher.seen, string(key))
				}
				p.storageWasteMeter.Mark(int64(len(fetcher.seen)))
			}
		}
	}
	close(p.closeChan)
	// Clear out all fetchers (will crash on a second call, deliberate)
	p.fetchers = nil
}

// copy creates a deep-but-inactive copy of the trie prefetcher. Any trie data
// already loaded will be copied over, but no goroutines will be started. This
// is mostly used in the miner, which creates a copy of its actively mutated
// state to be sealed while it may further mutate the state.
func (p *triePrefetcher) copy() *triePrefetcher {
	copy := &triePrefetcher{
		db:      p.db,
		root:    p.root,
		fetches: make(map[common.Hash]Trie), // Inactive (copied) prefetchers use the fetches map

		deliveryMissMeter: p.deliveryMissMeter,
		accountLoadMeter:  p.accountLoadMeter,
		accountDupMeter:   p.accountDupMeter,
		accountSkipMeter:  p.accountSkipMeter,
		accountWasteMeter: p.accountWasteMeter,
		storageLoadMeter:  p.storageLoadMeter,
		storageDupMeter:   p.storageDupMeter,
		storageSkipMeter:  p.storageSkipMeter,
		storageWasteMeter: p.storageWasteMeter,
	}
	// If the prefetcher is already a copy, duplicate the data
	if p.fetches != nil {
		for root, fetch := range p.fetches {
			copy.fetches[root] = p.db.CopyTrie(fetch)
		}
		return copy
	}
	// Otherwise we're copying an active fetcher, retrieve the current states
	for root, fetcher := range p.fetchers {
		copy.fetches[root] = fetcher.peek()
	}
	return copy
}

// prefetch schedules a batch of trie items to prefetch.
func (p *triePrefetcher) prefetch(root common.Hash, keys [][]byte, accountHash common.Hash) {
	// If the prefetcher is an inactive one, bail out
	if p.fetches != nil {
		return
	}
	// Active fetcher, schedule the retrievals
	fetcher := p.fetchers[root]
	if fetcher == nil {
		fetcher = newSubfetcher(p.db, root, accountHash)
		p.fetchers[root] = fetcher
	}
	fetcher.schedule(keys)
}

// trie returns the trie matching the root hash, or nil if the prefetcher doesn't
// have it.
func (p *triePrefetcher) trie(root common.Hash) Trie {
	// If the prefetcher is inactive, return from existing deep copies
	if p.fetches != nil {
		trie := p.fetches[root]
		if trie == nil {
			p.deliveryMissMeter.Mark(1)
			return nil
		}
		return p.db.CopyTrie(trie)
	}
	// Otherwise the prefetcher is active, bail if no trie was prefetched for this root
	fetcher := p.fetchers[root]
	if fetcher == nil {
		p.deliveryMissMeter.Mark(1)
		return nil
	}
	// Interrupt the prefetcher if it's by any chance still running and return
	// a copy of any pre-loaded trie.
	p.abortChan <- fetcher // safe to do multiple times

	trie := fetcher.peek()
	if trie == nil {
		p.deliveryMissMeter.Mark(1)
		return nil
	}
	return trie
}

// used marks a batch of state items used to allow creating statistics as to
// how useful or wasteful the prefetcher is.
func (p *triePrefetcher) used(root common.Hash, used [][]byte) {
	if fetcher := p.fetchers[root]; fetcher != nil {
		fetcher.used = used
	}
}

// subfetcher is a trie fetcher goroutine responsible for pulling entries for a
// single trie. It is spawned when a new root is encountered and lives until the
// main prefetcher is paused and either all requested items are processed or the
// trie being worked on is retrieved from the prefetcher.
type subfetcher struct {
	db   Database    // Database to load trie nodes through
	root common.Hash // Root hash of the trie to prefetch
	trie Trie        // Trie being populated with nodes

	tasks [][]byte   // Items queued up for retrieval
	lock  sync.Mutex // Lock protecting the task queue

	wake chan struct{}  // Wake channel if a new task is scheduled
	stop chan struct{}  // Channel to interrupt processing
	term chan struct{}  // Channel to signal interruption
	copy chan chan Trie // Channel to request a copy of the current trie

	seen map[string]struct{} // Tracks the entries already loaded
	dups int                 // Number of duplicate preload tasks
	used [][]byte            // Tracks the entries used in the end

	accountHash common.Hash
}

// newSubfetcher creates a goroutine to prefetch state items belonging to a
// particular root hash.
func newSubfetcher(db Database, root common.Hash, accountHash common.Hash) *subfetcher {
	sf := &subfetcher{
		db:          db,
		root:        root,
		wake:        make(chan struct{}, 1),
		stop:        make(chan struct{}),
		term:        make(chan struct{}),
		copy:        make(chan chan Trie),
		seen:        make(map[string]struct{}),
		accountHash: accountHash,
	}
	gopool.Submit(func() {
		sf.loop()
	})
	return sf
}

// schedule adds a batch of trie keys to the queue to prefetch.
func (sf *subfetcher) schedule(keys [][]byte) {
	// Append the tasks to the current queue
	sf.lock.Lock()
	sf.tasks = append(sf.tasks, keys...)
	sf.lock.Unlock()

	// Notify the prefetcher, it's fine if it's already terminated
	select {
	case sf.wake <- struct{}{}:
	default:
	}
}

// peek tries to retrieve a deep copy of the fetcher's trie in whatever form it
// is currently.
func (sf *subfetcher) peek() Trie {
	ch := make(chan Trie)
	select {
	case sf.copy <- ch:
		// Subfetcher still alive, return copy from it
		return <-ch

	case <-sf.term:
		// Subfetcher already terminated, return a copy directly
		if sf.trie == nil {
			return nil
		}
		return sf.db.CopyTrie(sf.trie)
	}
}

// abort interrupts the subfetcher immediately. It is safe to call abort multiple
// times but it is not thread safe.
func (sf *subfetcher) abort() {
	select {
	case <-sf.stop:
	default:
		close(sf.stop)
	}
	<-sf.term
}

// loop waits for new tasks to be scheduled and keeps loading them until it runs
// out of tasks or its underlying trie is retrieved for committing.
func (sf *subfetcher) loop() {
	// No matter how the loop stops, signal anyone waiting that it's terminated
	defer close(sf.term)

	// Start by opening the trie and stop processing if it fails
	var trie Trie
	var err error
	if sf.accountHash == emptyAddr {
		trie, err = sf.db.OpenTrie(sf.root)
	} else {
		// Storage tries are opened by account hash; the plain address is not needed here
		trie, err = sf.db.OpenStorageTrie(sf.accountHash, sf.root)
	}
	if err != nil {
		log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err)
		return
	}
	sf.trie = trie

	// Trie opened successfully, keep prefetching items
	for {
		select {
		case <-sf.wake:
			// Subfetcher was woken up, retrieve any tasks to avoid spinning the lock
			sf.lock.Lock()
			tasks := sf.tasks
			sf.tasks = nil
			sf.lock.Unlock()

			// Prefetch any tasks until the loop is interrupted
			for i, task := range tasks {
				select {
				case <-sf.stop:
					// If termination is requested, add any leftover back and return
					sf.lock.Lock()
					sf.tasks = append(sf.tasks, tasks[i:]...)
					sf.lock.Unlock()
					return

				case ch := <-sf.copy:
					// Somebody wants a copy of the current trie, grant them
					ch <- sf.db.CopyTrie(sf.trie)

				default:
					// No termination request yet, prefetch the next entry
					taskid := string(task)
					if _, ok := sf.seen[taskid]; ok {
						sf.dups++
					} else {
						sf.trie.TryGet(task)
						sf.seen[taskid] = struct{}{}
					}
				}
			}

		case ch := <-sf.copy:
			// Somebody wants a copy of the current trie, grant them
			ch <- sf.db.CopyTrie(sf.trie)

		case <-sf.stop:
			// Termination is requested, abort and leave remaining tasks
			return
		}
	}
}
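
// Usage sketch for the subfetcher itself, as driven by the prefetcher above.
// The db, root, keyA and keyB values are placeholders for illustration only.
//
//	sf := newSubfetcher(db, root, emptyAddr) // spawns loop() on the goroutine pool
//	sf.schedule([][]byte{keyA, keyB})        // queue keys; loop() wakes and preloads them via TryGet
//	trie := sf.peek()                        // deep copy of whatever has been loaded so far
//	sf.abort()                               // idempotent: closes stop and blocks until term is closed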