github.com/aswedchain/aswed@v1.0.1/eth/downloader/statesync.go

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"fmt"
	"hash"
	"sync"
	"time"

	"github.com/aswedchain/aswed/common"
	"github.com/aswedchain/aswed/core/rawdb"
	"github.com/aswedchain/aswed/core/state"
	"github.com/aswedchain/aswed/ethdb"
	"github.com/aswedchain/aswed/log"
	"github.com/aswedchain/aswed/trie"
	"golang.org/x/crypto/sha3"
)

// stateReq represents a batch of state fetch requests grouped together into
// a single data retrieval network packet.
type stateReq struct {
	nItems    uint16                    // Number of items requested for download (max is 384, so uint16 is sufficient)
	trieTasks map[common.Hash]*trieTask // Trie node download tasks to track previous attempts
	codeTasks map[common.Hash]*codeTask // Byte code download tasks to track previous attempts
	timeout   time.Duration             // Maximum round trip time for this to complete
	timer     *time.Timer               // Timer to fire when the RTT timeout expires
	peer      *peerConnection           // Peer that we're requesting from
	delivered time.Time                 // Time when the packet was delivered (independent of when we process it)
	response  [][]byte                  // Response data of the peer (nil for timeouts)
	dropped   bool                      // Flag whether the peer dropped off early
}

// timedOut reports whether this request timed out.
func (req *stateReq) timedOut() bool {
	return req.response == nil
}

// stateSyncStats is a collection of progress stats to report during a state trie
// sync to RPC requests as well as to display in user logs.
type stateSyncStats struct {
	processed  uint64 // Number of state entries processed
	duplicate  uint64 // Number of state entries downloaded twice
	unexpected uint64 // Number of non-requested state entries received
	pending    uint64 // Number of still pending state entries
}

// syncState starts downloading state with the given root hash.
func (d *Downloader) syncState(root common.Hash) *stateSync {
	// Create the state sync
	s := newStateSync(d, root)
	select {
	case d.stateSyncStart <- s:
		// If we tell the statesync to restart with a new root, we also need
		// to wait for it to actually also start -- when old requests have timed
		// out or been delivered
		<-s.started
	case <-d.quitCh:
		s.err = errCancelStateFetch
		close(s.done)
	}
	return s
}

// stateFetcher manages the active state sync and accepts requests
// on its behalf.
func (d *Downloader) stateFetcher() {
	for {
		select {
		case s := <-d.stateSyncStart:
			for next := s; next != nil; {
				next = d.runStateSync(next)
			}
		case <-d.stateCh:
			// Ignore state responses while no sync is running.
		case <-d.quitCh:
			return
		}
	}
}
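// As a hedged usage sketch only: a caller inside this package would drive a
// sync roughly as below. `latestHeader` is a hypothetical variable standing in
// for whatever pivot header the caller has chosen; it is not part of this file.
//
//	sync := d.syncState(latestHeader.Root)
//	defer sync.Cancel()
//	if err := sync.Wait(); err != nil {
//		log.Error("State sync failed", "err", err)
//	}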
// runStateSync runs a state synchronisation until it completes or another root
// hash is requested to be switched over to.
func (d *Downloader) runStateSync(s *stateSync) *stateSync {
	var (
		active   = make(map[string]*stateReq) // Currently in-flight requests
		finished []*stateReq                  // Completed or failed requests
		timeout  = make(chan *stateReq)       // Timed out active requests
	)
	// Run the state sync.
	log.Trace("State sync starting", "root", s.root)
	go s.run()
	defer s.Cancel()

	// Listen for peer departure events to cancel assigned tasks
	peerDrop := make(chan *peerConnection, 1024)
	peerSub := s.d.peers.SubscribePeerDrops(peerDrop)
	defer peerSub.Unsubscribe()

	for {
		// Enable sending of the first buffered element if there is one.
		var (
			deliverReq   *stateReq
			deliverReqCh chan *stateReq
		)
		if len(finished) > 0 {
			deliverReq = finished[0]
			deliverReqCh = s.deliver
		}

		select {
		// The stateSync lifecycle:
		case next := <-d.stateSyncStart:
			d.spindownStateSync(active, finished, timeout, peerDrop)
			return next

		case <-s.done:
			d.spindownStateSync(active, finished, timeout, peerDrop)
			return nil

		// Send the next finished request to the current sync:
		case deliverReqCh <- deliverReq:
			// Shift out the first request, but also set the emptied slot to nil for GC
			copy(finished, finished[1:])
			finished[len(finished)-1] = nil
			finished = finished[:len(finished)-1]

		// Handle incoming state packs:
		case pack := <-d.stateCh:
			// Discard any data not requested (or previously timed out)
			req := active[pack.PeerId()]
			if req == nil {
				log.Debug("Unrequested node data", "peer", pack.PeerId(), "len", pack.Items())
				continue
			}
			// Finalize the request and queue up for processing
			req.timer.Stop()
			req.response = pack.(*statePack).states
			req.delivered = time.Now()

			finished = append(finished, req)
			delete(active, pack.PeerId())

		// Handle dropped peer connections:
		case p := <-peerDrop:
			// Skip if no request is currently pending
			req := active[p.id]
			if req == nil {
				continue
			}
			// Finalize the request and queue up for processing
			req.timer.Stop()
			req.dropped = true
			req.delivered = time.Now()

			finished = append(finished, req)
			delete(active, p.id)

		// Handle timed-out requests:
		case req := <-timeout:
			// If the peer is already requesting something else, ignore the stale timeout.
			// This can happen when the timeout and the delivery happen simultaneously,
			// causing both pathways to trigger.
			if active[req.peer.id] != req {
				continue
			}
			req.delivered = time.Now()
			// Move the timed-out data back into the download queue
			finished = append(finished, req)
			delete(active, req.peer.id)

		// Track outgoing state requests:
		case req := <-d.trackStateReq:
			// If an active request already exists for this peer, we have a problem. In
			// theory the trie node schedule must never assign two requests to the same
			// peer. In practice however, a peer might receive a request, disconnect and
			// immediately reconnect before the previous request times out. In this case
			// the first request is never honored; alas, we must not silently overwrite
			// it either, as that causes valid requests to go missing and sync to get stuck.
			if old := active[req.peer.id]; old != nil {
				log.Warn("Busy peer assigned new state fetch", "peer", old.peer.id)
				// Move the previous request to the finished set
				old.timer.Stop()
				old.dropped = true
				old.delivered = time.Now()
				finished = append(finished, old)
			}
			// Start a timer to notify the sync loop if the peer stalled.
			req.timer = time.AfterFunc(req.timeout, func() {
				timeout <- req
			})
			active[req.peer.id] = req
		}
	}
}
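// runStateSync above leans on a classic Go idiom: sending on a nil channel
// blocks forever, so leaving deliverReqCh nil while `finished` is empty
// disables that select case entirely. A minimal, self-contained sketch of the
// same pattern (everything here is illustrative, not part of this package):
//
//	out := make(chan int)
//	go func() { // hypothetical consumer
//		for v := range out {
//			fmt.Println(v)
//		}
//	}()
//	queue := []int{1, 2, 3}
//	for len(queue) > 0 {
//		var sendCh chan int // nil: the send case below cannot fire
//		var next int
//		if len(queue) > 0 {
//			sendCh, next = out, queue[0] // non-nil: send case enabled
//		}
//		select {
//		case sendCh <- next:
//			queue = queue[1:]
//		}
//	}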
// spindownStateSync 'drains' the outstanding requests; some will be delivered
// and others will time out. This is to ensure that when the next stateSync
// starts working, all peers are marked as idle and de facto _are_ idle.
func (d *Downloader) spindownStateSync(active map[string]*stateReq, finished []*stateReq, timeout chan *stateReq, peerDrop chan *peerConnection) {
	log.Trace("State sync spinning down", "active", len(active), "finished", len(finished))
	for len(active) > 0 {
		var (
			req    *stateReq
			reason string
		)
		select {
		// Handle (drop) incoming state packs:
		case pack := <-d.stateCh:
			req = active[pack.PeerId()]
			reason = "delivered"
		// Handle dropped peer connections:
		case p := <-peerDrop:
			req = active[p.id]
			reason = "peerdrop"
		// Handle timed-out requests:
		case req = <-timeout:
			reason = "timeout"
		}
		if req == nil {
			continue
		}
		req.peer.log.Trace("State peer marked idle (spindown)", "req.items", int(req.nItems), "reason", reason)
		req.timer.Stop()
		delete(active, req.peer.id)
		req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
	}
	// The 'finished' set contains deliveries that we were going to pass to processing.
	// Those are now moot, but we still need to set those peers as idle, which would
	// otherwise have been done after processing.
	for _, req := range finished {
		req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
	}
}

// stateSync schedules requests for downloading a particular state trie defined
// by a given state root.
type stateSync struct {
	d *Downloader // Downloader instance to access and manage current peerset

	sched  *trie.Sync // State trie sync scheduler defining the tasks
	keccak hash.Hash  // Keccak256 hasher to verify deliveries with

	trieTasks map[common.Hash]*trieTask // Set of trie node tasks currently queued for retrieval
	codeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval

	numUncommitted   int
	bytesUncommitted int

	started chan struct{} // Started is signalled once the sync loop starts

	deliver    chan *stateReq // Delivery channel multiplexing peer responses
	cancel     chan struct{}  // Channel to signal a termination request
	cancelOnce sync.Once      // Ensures cancel only ever gets called once
	done       chan struct{}  // Channel to signal termination completion
	err        error          // Any error hit during sync (set before completion)

	root common.Hash
}

// trieTask represents a single trie node download task, containing a set of
// peers already attempted retrieval from to detect stalled syncs and abort.
type trieTask struct {
	path     [][]byte
	attempts map[string]struct{}
}

// codeTask represents a single byte code download task, containing a set of
// peers already attempted retrieval from to detect stalled syncs and abort.
type codeTask struct {
	attempts map[string]struct{}
}
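// The attempts sets in trieTask and codeTask implement per-peer retry
// bookkeeping: a task is only handed to peers that have not tried it yet, and
// once every connected peer has failed it, the sync aborts (see fillTasks and
// process below). A hedged sketch of the pattern in isolation, where peerID
// and peerCount are hypothetical:
//
//	task := &codeTask{attempts: make(map[string]struct{})}
//	if _, tried := task.attempts[peerID]; !tried {
//		task.attempts[peerID] = struct{}{} // remember who was asked
//		// ... dispatch the request to peerID ...
//	}
//	if len(task.attempts) >= peerCount {
//		// every peer has failed this task: abort instead of looping forever
//	}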
// newStateSync creates a new state trie download scheduler. This method does not
// yet start the sync. The user needs to call run to initiate it.
func newStateSync(d *Downloader, root common.Hash) *stateSync {
	return &stateSync{
		d:         d,
		sched:     state.NewStateSync(root, d.stateDB, d.stateBloom),
		keccak:    sha3.NewLegacyKeccak256(),
		trieTasks: make(map[common.Hash]*trieTask),
		codeTasks: make(map[common.Hash]*codeTask),
		deliver:   make(chan *stateReq),
		cancel:    make(chan struct{}),
		done:      make(chan struct{}),
		started:   make(chan struct{}),
		root:      root,
	}
}

// run starts the task assignment and response processing loop, blocking until
// it finishes, and finally notifying any goroutines waiting for the loop to
// finish.
func (s *stateSync) run() {
	s.err = s.loop()
	close(s.done)
}

// Wait blocks until the sync is done or canceled.
func (s *stateSync) Wait() error {
	<-s.done
	return s.err
}

// Cancel cancels the sync and waits until it has shut down.
func (s *stateSync) Cancel() error {
	s.cancelOnce.Do(func() { close(s.cancel) })
	return s.Wait()
}

// loop is the main event loop of a state trie sync. It is responsible for the
// assignment of new tasks to peers (including sending them out) as well as for
// the processing of inbound data. Note that the loop does not directly receive
// data from peers, rather those deliveries are buffered up in the downloader
// and pushed here asynchronously. The reason is to decouple processing from
// data receipt and timeouts.
func (s *stateSync) loop() (err error) {
	close(s.started)
	// Listen for new peer events to assign tasks to them
	newPeer := make(chan *peerConnection, 1024)
	peerSub := s.d.peers.SubscribeNewPeers(newPeer)
	defer peerSub.Unsubscribe()
	defer func() {
		cerr := s.commit(true)
		if err == nil {
			err = cerr
		}
	}()

	// Keep assigning new tasks until the sync completes or aborts
	for s.sched.Pending() > 0 {
		if err = s.commit(false); err != nil {
			return err
		}
		s.assignTasks()
		// Tasks assigned, wait for something to happen
		select {
		case <-newPeer:
			// New peer arrived, try to assign it download tasks

		case <-s.cancel:
			return errCancelStateFetch

		case <-s.d.cancelCh:
			return errCanceled

		case req := <-s.deliver:
			// Response, disconnect or timeout triggered, drop the peer if stalling
			log.Trace("Received node data response", "peer", req.peer.id, "count", len(req.response), "dropped", req.dropped, "timeout", !req.dropped && req.timedOut())
			if req.nItems <= 2 && !req.dropped && req.timedOut() {
				// 2 items are the minimum requested; if even that times out, we've no use
				// for this peer at the moment.
				log.Warn("Stalling state sync, dropping peer", "peer", req.peer.id)
				if s.d.dropPeer == nil {
					// The dropPeer method is nil when `--copydb` is used for a local copy.
					// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
					req.peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", req.peer.id)
				} else {
					s.d.dropPeer(req.peer.id)

					// If this peer was the master peer, abort sync immediately
					s.d.cancelLock.RLock()
					master := req.peer.id == s.d.cancelPeer
					s.d.cancelLock.RUnlock()

					if master {
						s.d.cancel()
						return errTimeout
					}
				}
			}
			// Process all the received blobs and check for stale delivery
			delivered, err := s.process(req)
			req.peer.SetNodeDataIdle(delivered, req.delivered)
			if err != nil {
				log.Warn("Node data write error", "err", err)
				return err
			}
		}
	}
	return nil
}
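// commit below flushes accumulated trie data only once roughly
// ethdb.IdealBatchSize bytes are pending, amortising database writes. A hedged
// sketch of the same batch-then-flush flow against a generic ethdb.Database
// (db, key and value are assumed to exist):
//
//	batch := db.NewBatch()
//	if err := batch.Put(key, value); err != nil {
//		return err
//	}
//	if batch.ValueSize() >= ethdb.IdealBatchSize {
//		if err := batch.Write(); err != nil {
//			return err
//		}
//		batch.Reset()
//	}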
func (s *stateSync) commit(force bool) error {
	if !force && s.bytesUncommitted < ethdb.IdealBatchSize {
		return nil
	}
	start := time.Now()
	b := s.d.stateDB.NewBatch()
	if err := s.sched.Commit(b); err != nil {
		return err
	}
	if err := b.Write(); err != nil {
		return fmt.Errorf("DB write error: %v", err)
	}
	s.updateStats(s.numUncommitted, 0, 0, time.Since(start))
	s.numUncommitted = 0
	s.bytesUncommitted = 0
	return nil
}

// assignTasks attempts to assign new tasks to all idle peers, either from the
// batch currently being retried, or fetching new data from the trie sync itself.
func (s *stateSync) assignTasks() {
	// Iterate over all idle peers and try to assign them state fetches
	peers, _ := s.d.peers.NodeDataIdlePeers()
	for _, p := range peers {
		// Assign a batch of fetches proportional to the estimated latency/bandwidth
		cap := p.NodeDataCapacity(s.d.requestRTT())
		req := &stateReq{peer: p, timeout: s.d.requestTTL()}

		nodes, _, codes := s.fillTasks(cap, req)

		// If the peer was assigned tasks to fetch, send the network request
		if len(nodes)+len(codes) > 0 {
			req.peer.log.Trace("Requesting batch of state data", "nodes", len(nodes), "codes", len(codes), "root", s.root)
			select {
			case s.d.trackStateReq <- req:
				req.peer.FetchNodeData(append(nodes, codes...)) // Unified retrieval under eth/6x
			case <-s.cancel:
			case <-s.d.cancelCh:
			}
		}
	}
}

// fillTasks fills the given request object with a maximum of n state download
// tasks to send to the remote peer.
func (s *stateSync) fillTasks(n int, req *stateReq) (nodes []common.Hash, paths []trie.SyncPath, codes []common.Hash) {
	// Refill available tasks from the scheduler.
	if fill := n - (len(s.trieTasks) + len(s.codeTasks)); fill > 0 {
		nodes, paths, codes := s.sched.Missing(fill)
		for i, hash := range nodes {
			s.trieTasks[hash] = &trieTask{
				path:     paths[i],
				attempts: make(map[string]struct{}),
			}
		}
		for _, hash := range codes {
			s.codeTasks[hash] = &codeTask{
				attempts: make(map[string]struct{}),
			}
		}
	}
	// Find tasks that haven't been tried with the request's peer. Prefer code
	// over trie nodes as those can be written to disk and forgotten about.
	nodes = make([]common.Hash, 0, n)
	paths = make([]trie.SyncPath, 0, n)
	codes = make([]common.Hash, 0, n)

	req.trieTasks = make(map[common.Hash]*trieTask, n)
	req.codeTasks = make(map[common.Hash]*codeTask, n)

	for hash, t := range s.codeTasks {
		// Stop when we've gathered enough requests
		if len(nodes)+len(codes) == n {
			break
		}
		// Skip any requests we've already tried from this peer
		if _, ok := t.attempts[req.peer.id]; ok {
			continue
		}
		// Assign the request to this peer
		t.attempts[req.peer.id] = struct{}{}
		codes = append(codes, hash)
		req.codeTasks[hash] = t
		delete(s.codeTasks, hash)
	}
	for hash, t := range s.trieTasks {
		// Stop when we've gathered enough requests
		if len(nodes)+len(codes) == n {
			break
		}
		// Skip any requests we've already tried from this peer
		if _, ok := t.attempts[req.peer.id]; ok {
			continue
		}
		// Assign the request to this peer
		t.attempts[req.peer.id] = struct{}{}

		nodes = append(nodes, hash)
		paths = append(paths, t.path)

		req.trieTasks[hash] = t
		delete(s.trieTasks, hash)
	}
	req.nItems = uint16(len(nodes) + len(codes))
	return nodes, paths, codes
}
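// fillTasks above tops up its retry queues from the scheduler via
// sched.Missing, which returns up to the requested number of trie node hashes
// (with their sync paths) plus contract code hashes. A hedged sketch of that
// refill step on its own, with sched a *trie.Sync as in this file:
//
//	nodes, paths, codes := sched.Missing(16)
//	for i, hash := range nodes {
//		fmt.Println("trie node", hash, "path", paths[i])
//	}
//	for _, hash := range codes {
//		fmt.Println("bytecode", hash)
//	}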
// process iterates over a batch of delivered state data, injecting each item
// into a running state sync, re-queuing any items that were requested but not
// delivered. Returns the number of useful items the peer actually managed to
// deliver, and any error that occurred.
func (s *stateSync) process(req *stateReq) (int, error) {
	// Collect processing stats and update progress if valid data was received
	duplicate, unexpected, successful := 0, 0, 0

	defer func(start time.Time) {
		if duplicate > 0 || unexpected > 0 {
			s.updateStats(0, duplicate, unexpected, time.Since(start))
		}
	}(time.Now())

	// Iterate over all the delivered data and inject one-by-one into the trie
	for _, blob := range req.response {
		hash, err := s.processNodeData(blob)
		switch err {
		case nil:
			s.numUncommitted++
			s.bytesUncommitted += len(blob)
			successful++
		case trie.ErrNotRequested:
			unexpected++
		case trie.ErrAlreadyProcessed:
			duplicate++
		default:
			return successful, fmt.Errorf("invalid state node %s: %v", hash.TerminalString(), err)
		}
		// Delete from both queues (one delivery is enough for the syncer)
		delete(req.trieTasks, hash)
		delete(req.codeTasks, hash)
	}
	// Put unfulfilled tasks back into the retry queue
	npeers := s.d.peers.Len()
	for hash, task := range req.trieTasks {
		// If the node did deliver something, missing items may be due to a protocol
		// limit or a previous timeout + delayed delivery. Both cases should permit
		// the node to retry the missing items (to avoid single-peer stalls).
		if len(req.response) > 0 || req.timedOut() {
			delete(task.attempts, req.peer.id)
		}
		// If we've requested the node too many times already, it may be a malicious
		// sync where nobody has the right data. Abort.
		if len(task.attempts) >= npeers {
			return successful, fmt.Errorf("trie node %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers)
		}
		// Missing item, place into the retry queue.
		s.trieTasks[hash] = task
	}
	for hash, task := range req.codeTasks {
		// If the node did deliver something, missing items may be due to a protocol
		// limit or a previous timeout + delayed delivery. Both cases should permit
		// the node to retry the missing items (to avoid single-peer stalls).
		if len(req.response) > 0 || req.timedOut() {
			delete(task.attempts, req.peer.id)
		}
		// If we've requested the node too many times already, it may be a malicious
		// sync where nobody has the right data. Abort.
		if len(task.attempts) >= npeers {
			return successful, fmt.Errorf("byte code %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers)
		}
		// Missing item, place into the retry queue.
		s.codeTasks[hash] = task
	}
	return successful, nil
}
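// processNodeData below identifies each delivered blob purely by its Keccak256
// hash: the scheduler only accepts data whose hash matches a node it actually
// requested, which is what lets process classify blobs as unexpected or
// duplicate. A hedged, standalone sketch of that verification (requested is a
// hypothetical set of pending hashes):
//
//	hasher := sha3.NewLegacyKeccak256()
//	hasher.Write(blob)
//	var h common.Hash
//	hasher.Sum(h[:0]) // writes the digest in place, no extra allocation
//	if _, ok := requested[h]; !ok {
//		// unsolicited data: ignore it or penalise the peer
//	}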
// processNodeData tries to inject a trie node data blob delivered from a remote
// peer into the state trie, returning the computed hash of the blob and any
// error that occurred.
func (s *stateSync) processNodeData(blob []byte) (common.Hash, error) {
	res := trie.SyncResult{Data: blob}
	s.keccak.Reset()
	s.keccak.Write(blob)
	s.keccak.Sum(res.Hash[:0])
	err := s.sched.Process(res)
	return res.Hash, err
}

// updateStats bumps the various state sync progress counters and displays a log
// message for the user to see.
func (s *stateSync) updateStats(written, duplicate, unexpected int, duration time.Duration) {
	s.d.syncStatsLock.Lock()
	defer s.d.syncStatsLock.Unlock()

	s.d.syncStatsState.pending = uint64(s.sched.Pending())
	s.d.syncStatsState.processed += uint64(written)
	s.d.syncStatsState.duplicate += uint64(duplicate)
	s.d.syncStatsState.unexpected += uint64(unexpected)

	if written > 0 || duplicate > 0 || unexpected > 0 {
		log.Info("Imported new state entries", "count", written, "elapsed", common.PrettyDuration(duration), "processed", s.d.syncStatsState.processed, "pending", s.d.syncStatsState.pending, "trieretry", len(s.trieTasks), "coderetry", len(s.codeTasks), "duplicate", s.d.syncStatsState.duplicate, "unexpected", s.d.syncStatsState.unexpected)
	}
	if written > 0 {
		rawdb.WriteFastTrieProgress(s.d.stateDB, s.d.syncStatsState.processed)
	}
}
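// updateStats persists the processed-entry counter through
// rawdb.WriteFastTrieProgress so that an interrupted fast sync can restore its
// bookkeeping on restart. A hedged sketch of the round trip, assuming this
// fork keeps go-ethereum's matching reader (db is any ethdb.Database):
//
//	rawdb.WriteFastTrieProgress(db, 1000000)
//	processed := rawdb.ReadFastTrieProgress(db)
//	fmt.Println("state entries processed so far:", processed)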