github.com/klaytn/klaytn@v1.12.1/datasync/downloader/statesync.go

// Modifications Copyright 2018 The klaytn Authors
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//
// This file is derived from eth/downloader/statesync.go (2018/06/04).
// Modified and improved for the klaytn development.

package downloader

import (
	"fmt"
	"hash"
	"sync"
	"time"

	"github.com/klaytn/klaytn/blockchain/state"
	"github.com/klaytn/klaytn/common"
	"github.com/klaytn/klaytn/crypto/sha3"
	"github.com/klaytn/klaytn/storage/database"
	"github.com/klaytn/klaytn/storage/statedb"
)

// stateReq represents a batch of state fetch requests grouped together into
// a single data retrieval network packet.
type stateReq struct {
	nItems    uint16                    // Number of items requested for download (max is 384, so uint16 is sufficient)
	trieTasks map[common.Hash]*trieTask // Trie node download tasks to track previous attempts
	codeTasks map[common.Hash]*codeTask // Byte code download tasks to track previous attempts
	timeout   time.Duration             // Maximum round trip time for this request to complete
	timer     *time.Timer               // Timer to fire when the RTT timeout expires
	peer      *peerConnection           // Peer that we're requesting from
	delivered time.Time                 // Time when the packet was delivered (independent of when we process it)
	response  [][]byte                  // Response data of the peer (nil for timeouts)
	dropped   bool                      // Flag whether the peer dropped off early
}

// timedOut returns whether this request timed out.
func (req *stateReq) timedOut() bool {
	return req.response == nil
}

// stateSyncStats is a collection of progress stats to report during a state trie
// sync to RPC requests as well as to display in user logs.
type stateSyncStats struct {
	processed  uint64 // Number of state entries processed
	duplicate  uint64 // Number of state entries downloaded twice
	unexpected uint64 // Number of non-requested state entries received
	pending    uint64 // Number of still pending state entries
}

// syncState starts downloading state with the given root hash.
func (d *Downloader) syncState(root common.Hash) *stateSync {
	s := newStateSync(d, root)
	select {
	case d.stateSyncStart <- s:
		// If we tell the statesync to restart with a new root, we also need
		// to wait for it to actually also start -- when old requests have timed
		// out or been delivered
		<-s.started
	case <-d.quitCh:
		s.err = errCancelStateFetch
		close(s.done)
	}
	return s
}
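// Illustrative sketch (not part of the original file): how a caller might
// drive syncState, assuming a *Downloader d and a target root are in scope.
// Wait blocks until the sync loop exits; Cancel is safe to call repeatedly.
//
//	sync := d.syncState(root)
//	defer sync.Cancel()
//	if err := sync.Wait(); err != nil {
//		logger.Error("State sync failed", "err", err)
//	}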
// stateFetcher manages the active state sync and accepts requests
// on its behalf.
func (d *Downloader) stateFetcher() {
	for {
		select {
		case s := <-d.stateSyncStart:
			for next := s; next != nil; {
				next = d.runStateSync(next)
			}
		case <-d.stateCh:
			// Ignore state responses while no sync is running.
		case <-d.quitCh:
			return
		}
	}
}

// runStateSync runs a state synchronisation until it completes or another root
// hash is requested to be switched over to.
func (d *Downloader) runStateSync(s *stateSync) *stateSync {
	var (
		active   = make(map[string]*stateReq) // Currently in-flight requests
		finished []*stateReq                  // Completed or failed requests
		timeout  = make(chan *stateReq)       // Timed out active requests
	)

	// Run the state sync.
	logger.Trace("State sync starting", "root", s.root)

	defer func() {
		// Cancel active request timers on exit. Also set peers to idle so they're
		// available for the next sync.
		for _, req := range active {
			req.timer.Stop()
			req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
		}
	}()
	go s.run()
	defer s.Cancel()

	// Listen for peer departure events to cancel assigned tasks
	peerDrop := make(chan *peerConnection, 1024)
	peerSub := s.d.peers.SubscribePeerDrops(peerDrop)
	defer peerSub.Unsubscribe()

	for {
		// Enable sending of the first buffered element if there is one.
		var (
			deliverReq   *stateReq
			deliverReqCh chan *stateReq
		)
		if len(finished) > 0 {
			deliverReq = finished[0]
			deliverReqCh = s.deliver
		}

		select {
		// The stateSync lifecycle:
		case next := <-d.stateSyncStart:
			d.spindownStateSync(active, finished, timeout, peerDrop)
			return next

		case <-s.done:
			d.spindownStateSync(active, finished, timeout, peerDrop)
			return nil

		// Send the next finished request to the current sync:
		case deliverReqCh <- deliverReq:
			// Shift out the first request, but also set the emptied slot to nil for GC
			copy(finished, finished[1:])
			finished[len(finished)-1] = nil
			finished = finished[:len(finished)-1]

		// Handle incoming state packs:
		case pack := <-d.stateCh:
			// Discard any data not requested (or previously timed out)
			req := active[pack.PeerId()]
			if req == nil {
				logger.Debug("Unrequested node data", "peer", pack.PeerId(), "len", pack.Items())
				continue
			}
			// Finalize the request and queue up for processing
			req.timer.Stop()
			req.response = pack.(*statePack).states
			req.delivered = time.Now()

			finished = append(finished, req)
			delete(active, pack.PeerId())

		// Handle dropped peer connections:
		case p := <-peerDrop:
			// Skip if no request is currently pending
			req := active[p.id]
			if req == nil {
				continue
			}
			// Finalize the request and queue up for processing
			req.timer.Stop()
			req.dropped = true
			req.delivered = time.Now()

			finished = append(finished, req)
			delete(active, p.id)

		// Handle timed-out requests:
		case req := <-timeout:
			// If the peer is already requesting something else, ignore the stale timeout.
			// This can happen when the timeout and the delivery happen simultaneously,
			// causing both pathways to trigger.
			if active[req.peer.id] != req {
				continue
			}
			req.delivered = time.Now()
			// Move the timed out data back into the download queue
			finished = append(finished, req)
			delete(active, req.peer.id)

		// Track outgoing state requests:
		case req := <-d.trackStateReq:
			// If an active request already exists for this peer, we have a problem. In
			// theory the trie node schedule must never assign two requests to the same
			// peer. In practice however, a peer might receive a request, disconnect and
			// immediately reconnect before the previous request times out. In this case
			// the first request is never honored; alas, we must not silently overwrite
			// it, as that causes valid requests to go missing and sync to get stuck.
			if old := active[req.peer.id]; old != nil {
				logger.Warn("Busy peer assigned new state fetch", "peer", old.peer.id)

				// Move the previous request to the finished set
				old.timer.Stop()
				old.dropped = true
				old.delivered = time.Now()
				finished = append(finished, old)
			}
			// Start a timer to notify the sync loop if the peer stalled.
			req.timer = time.AfterFunc(req.timeout, func() {
				timeout <- req
			})
			active[req.peer.id] = req
		}
	}
}

// spindownStateSync 'drains' the outstanding requests; some will be delivered
// and others will time out. This ensures that when the next stateSync starts
// working, all peers are marked as idle and de facto _are_ idle.
func (d *Downloader) spindownStateSync(active map[string]*stateReq, finished []*stateReq, timeout chan *stateReq, peerDrop chan *peerConnection) {
	logger.Trace("State sync spinning down", "active", len(active), "finished", len(finished))

	for len(active) > 0 {
		var (
			req    *stateReq
			reason string
		)
		select {
		// Handle (drop) incoming state packs:
		case pack := <-d.stateCh:
			req = active[pack.PeerId()]
			reason = "delivered"
		// Handle dropped peer connections:
		case p := <-peerDrop:
			req = active[p.id]
			reason = "peerdrop"
		// Handle timed-out requests:
		case req = <-timeout:
			reason = "timeout"
		}
		if req == nil {
			continue
		}
		req.peer.logger.Trace("State peer marked idle (spindown)", "req.items", int(req.nItems), "reason", reason)
		req.timer.Stop()
		delete(active, req.peer.id)
		req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
	}
	// The 'finished' set contains deliveries that we were going to pass to processing.
	// Those are now moot, but we still need to set those peers as idle, which would
	// otherwise have been done after processing.
	for _, req := range finished {
		req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
	}
}
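// Illustrative sketch of the nil-out-and-reslice idiom runStateSync uses when
// popping the first finished request. popFront is a hypothetical helper, not
// part of this package: zeroing the vacated tail slot drops the duplicate
// reference so the popped element can be garbage collected even though the
// slice's backing array stays alive.
//
//	func popFront(queue []*stateReq) (*stateReq, []*stateReq) {
//		head := queue[0]
//		copy(queue, queue[1:])    // shift the remaining requests left
//		queue[len(queue)-1] = nil // clear the now-duplicate tail slot for GC
//		return head, queue[:len(queue)-1]
//	}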
// stateSync schedules requests for downloading a particular state trie defined
// by a given state root.
type stateSync struct {
	d *Downloader // Downloader instance to access and manage current peerset

	root   common.Hash       // State root currently being synced
	sched  *statedb.TrieSync // State trie sync scheduler defining the tasks
	keccak hash.Hash         // Keccak256 hasher to verify deliveries with

	trieTasks map[common.Hash]*trieTask // Set of trie node tasks currently queued for retrieval
	codeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval

	numUncommitted   int
	bytesUncommitted int

	started    chan struct{}  // Signalled once the sync loop starts
	deliver    chan *stateReq // Delivery channel multiplexing peer responses
	cancel     chan struct{}  // Channel to signal a termination request
	cancelOnce sync.Once      // Ensures cancel only ever gets called once
	done       chan struct{}  // Channel to signal termination completion
	err        error          // Any error hit during sync (set before completion)
}

// trieTask represents a single trie node download task, containing a set of
// peers from which retrieval has already been attempted, used to detect
// stalled syncs and abort.
type trieTask struct {
	path     [][]byte
	attempts map[string]struct{}
}

// codeTask represents a single byte code download task, containing a set of
// peers from which retrieval has already been attempted, used to detect
// stalled syncs and abort.
type codeTask struct {
	attempts map[string]struct{}
}

// newStateSync creates a new state trie download scheduler. This method does not
// yet start the sync. The user needs to call run to initiate it.
func newStateSync(d *Downloader, root common.Hash) *stateSync {
	return &stateSync{
		d:         d,
		root:      root,
		sched:     state.NewStateSync(root, d.stateDB, d.stateBloom, nil, nil),
		keccak:    sha3.NewKeccak256(),
		trieTasks: make(map[common.Hash]*trieTask),
		codeTasks: make(map[common.Hash]*codeTask),
		deliver:   make(chan *stateReq),
		cancel:    make(chan struct{}),
		done:      make(chan struct{}),
		started:   make(chan struct{}),
	}
}

// run starts the task assignment and response processing loop, blocking until
// it finishes, and finally notifying any goroutines waiting for the loop to
// finish.
func (s *stateSync) run() {
	close(s.started)
	if s.d.snapSync {
		s.err = s.d.SnapSyncer.Sync(s.root, s.cancel)
	} else {
		s.err = s.loop()
	}
	close(s.done)
}

// Wait blocks until the sync is done or canceled.
func (s *stateSync) Wait() error {
	<-s.done
	return s.err
}

// Cancel cancels the sync and waits until it has shut down.
func (s *stateSync) Cancel() error {
	s.cancelOnce.Do(func() { close(s.cancel) })
	return s.Wait()
}
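// Illustrative sketch of the close-once idiom Cancel relies on. Closing an
// already-closed channel panics, so the sync.Once guard makes concurrent or
// repeated Cancel calls safe; every call after the first is a no-op that
// still waits for shutdown via Wait.
//
//	var once sync.Once
//	cancel := make(chan struct{})
//	stop := func() { once.Do(func() { close(cancel) }) }
//	stop() // closes the channel
//	stop() // safe: the Once swallows the second close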
// loop is the main event loop of a state trie sync. It is responsible for the
// assignment of new tasks to peers (including sending them out) as well as for
// the processing of inbound data. Note that the loop does not directly receive
// data from peers; rather, those are buffered up in the downloader and pushed
// here asynchronously. The reason is to decouple processing from data receipt
// and timeouts.
func (s *stateSync) loop() (err error) {
	// Listen for new peer events to assign tasks to them
	newPeer := make(chan *peerConnection, 1024)
	peerSub := s.d.peers.SubscribeNewPeers(newPeer)
	defer peerSub.Unsubscribe()
	defer func() {
		cerr := s.commit(true)
		if err == nil {
			err = cerr
		}
	}()

	// Keep assigning new tasks until the sync completes or aborts
	for s.sched.Pending() > 0 {
		if err = s.commit(false); err != nil {
			return err
		}
		s.assignTasks()
		// Tasks assigned, wait for something to happen
		select {
		case <-newPeer:
			// New peer arrived, try to assign it download tasks

		case <-s.cancel:
			return errCancelStateFetch

		case <-s.d.cancelCh:
			return errCanceled

		case req := <-s.deliver:
			// Response, disconnect or timeout triggered, drop the peer if stalling
			logger.Trace("Received node data response", "peer", req.peer.id, "count", len(req.response), "dropped", req.dropped, "timeout", !req.dropped && req.timedOut())
			if req.nItems <= 2 && !req.dropped && req.timedOut() {
				// 2 items are the minimum requested; if even that times out, we've no use
				// for this peer at the moment.
				logger.Warn("Stalling state sync, dropping peer", "peer", req.peer.id)
				if s.d.dropPeer == nil {
					req.peer.logger.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", req.peer.id)
				} else {
					s.d.dropPeer(req.peer.id)

					// If this peer was the master peer, abort sync immediately
					s.d.cancelLock.RLock()
					master := req.peer.id == s.d.cancelPeer
					s.d.cancelLock.RUnlock()

					if master {
						s.d.cancel()
						return errTimeout
					}
				}
			}
			// Process all the received blobs and check for stale delivery
			delivered, err := s.process(req)
			req.peer.SetNodeDataIdle(delivered, req.delivered)
			if err != nil {
				logger.Error("Node data write error", "err", err)
				return err
			}
		}
	}
	return nil
}

// commit flushes the accumulated, processed state entries to the state trie
// database, either when the batch has grown past the ideal size or when the
// caller forces a write.
func (s *stateSync) commit(force bool) error {
	if !force && s.bytesUncommitted < database.IdealBatchSize {
		return nil
	}
	start := time.Now()
	stateTrieBatch := s.d.stateDB.NewBatch(database.StateTrieDB)
	defer stateTrieBatch.Release()
	if written, err := s.sched.Commit(stateTrieBatch); written == 0 || err != nil {
		return err
	}
	if err := stateTrieBatch.Write(); err != nil {
		return fmt.Errorf("DB write error: %v", err)
	}
	s.updateStats(s.numUncommitted, 0, 0, time.Since(start))
	s.numUncommitted = 0
	s.bytesUncommitted = 0
	return nil
}
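// Illustrative sketch of the size-thresholded batching that commit implements,
// with a hypothetical accumulate helper that is not part of this package.
// Processed nodes only bump the uncommitted counters; an actual database write
// happens once bytesUncommitted crosses database.IdealBatchSize, or on the
// forced flush in loop's deferred commit(true).
//
//	func (s *stateSync) accumulate(blob []byte) error {
//		s.numUncommitted++
//		s.bytesUncommitted += len(blob)
//		return s.commit(false) // no-op until the batch passes the size threshold
//	}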
// assignTasks attempts to assign new tasks to all idle peers, either from the
// batch currently being retried, or by fetching new data from the trie sync itself.
func (s *stateSync) assignTasks() {
	// Iterate over all idle peers and try to assign them state fetches
	peers, _ := s.d.peers.NodeDataIdlePeers()
	for _, p := range peers {
		// Assign a batch of fetches proportional to the estimated latency/bandwidth
		cap := p.NodeDataCapacity(s.d.requestRTT())
		req := &stateReq{peer: p, timeout: s.d.requestTTL()}
		nodes, _, codes := s.fillTasks(cap, req)

		// If the peer was assigned tasks to fetch, send the network request
		if len(nodes)+len(codes) > 0 {
			req.peer.logger.Trace("Requesting batch of state data", "nodes", len(nodes), "codes", len(codes), "root", s.root)
			select {
			case s.d.trackStateReq <- req:
				req.peer.FetchNodeData(append(nodes, codes...)) // Unified retrieval under klay/6x
			case <-s.cancel:
			case <-s.d.cancelCh:
			}
		}
	}
}

// fillTasks fills the given request object with a maximum of n state download
// tasks to send to the remote peer.
func (s *stateSync) fillTasks(n int, req *stateReq) (nodes []common.Hash, paths []statedb.SyncPath, codes []common.Hash) {
	// Refill available tasks from the scheduler.
	if fill := n - (len(s.trieTasks) + len(s.codeTasks)); fill > 0 {
		nodes, paths, codes := s.sched.Missing(fill)
		for i, hash := range nodes {
			s.trieTasks[hash] = &trieTask{
				path:     paths[i],
				attempts: make(map[string]struct{}),
			}
		}
		for _, hash := range codes {
			s.codeTasks[hash] = &codeTask{
				attempts: make(map[string]struct{}),
			}
		}
	}
	// Find tasks that haven't been tried with the request's peer. Prefer code
	// over trie nodes as those can be written to disk and forgotten about.
	nodes = make([]common.Hash, 0, n)
	paths = make([]statedb.SyncPath, 0, n)
	codes = make([]common.Hash, 0, n)

	req.trieTasks = make(map[common.Hash]*trieTask, n)
	req.codeTasks = make(map[common.Hash]*codeTask, n)

	for hash, t := range s.codeTasks {
		// Stop when we've gathered enough requests
		if len(nodes)+len(codes) == n {
			break
		}
		// Skip any requests we've already tried from this peer
		if _, ok := t.attempts[req.peer.id]; ok {
			continue
		}
		// Assign the request to this peer
		t.attempts[req.peer.id] = struct{}{}
		codes = append(codes, hash)
		req.codeTasks[hash] = t
		delete(s.codeTasks, hash)
	}
	for hash, t := range s.trieTasks {
		// Stop when we've gathered enough requests
		if len(nodes)+len(codes) == n {
			break
		}
		// Skip any requests we've already tried from this peer
		if _, ok := t.attempts[req.peer.id]; ok {
			continue
		}
		// Assign the request to this peer
		t.attempts[req.peer.id] = struct{}{}

		nodes = append(nodes, hash)
		paths = append(paths, t.path)

		req.trieTasks[hash] = t
		delete(s.trieTasks, hash)
	}
	req.nItems = uint16(len(nodes) + len(codes))
	return nodes, paths, codes
}
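// Illustrative sketch of the per-peer attempt bookkeeping fillTasks performs,
// reduced to a hypothetical predicate (assignable is not part of this package).
// Each task records every peer it was handed to, so a retried task is only
// reassigned to peers that have not yet failed to deliver it; the same set is
// what lets process detect the "failed with all peers" condition.
//
//	func assignable(t *trieTask, peerID string) bool {
//		if _, tried := t.attempts[peerID]; tried {
//			return false // this peer was already asked for the node
//		}
//		t.attempts[peerID] = struct{}{} // remember the assignment
//		return true
//	}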
// process iterates over a batch of delivered state data, injecting each item
// into a running state sync, re-queuing any items that were requested but not
// delivered. It returns the number of items the peer actually managed to
// deliver, and any error that occurred.
func (s *stateSync) process(req *stateReq) (int, error) {
	// Collect processing stats and update progress if valid data was received
	duplicate, unexpected, successful := 0, 0, 0

	defer func(start time.Time) {
		if duplicate > 0 || unexpected > 0 {
			s.updateStats(0, duplicate, unexpected, time.Since(start))
		}
	}(time.Now())

	// Iterate over all the delivered data and inject one-by-one into the trie
	for _, blob := range req.response {
		hash, err := s.processNodeData(blob)
		switch err {
		case nil:
			s.numUncommitted++
			s.bytesUncommitted += len(blob)
			successful++
		case statedb.ErrNotRequested:
			unexpected++
		case statedb.ErrAlreadyProcessed:
			duplicate++
		default:
			return successful, fmt.Errorf("invalid state node %s: %v", hash.TerminalString(), err)
		}
		// Delete from both queues (one delivery is enough for the syncer)
		delete(req.trieTasks, hash)
		delete(req.codeTasks, hash)
	}
	// Put unfulfilled tasks back into the retry queue
	npeers := s.d.peers.Len()
	for hash, task := range req.trieTasks {
		// If the node did deliver something, missing items may be due to a protocol
		// limit or a previous timeout + delayed delivery. Both cases should permit
		// the node to retry the missing items (to avoid single-peer stalls).
		if len(req.response) > 0 || req.timedOut() {
			delete(task.attempts, req.peer.id)
		}
		// If we've requested the node too many times already, it may be a malicious
		// sync where nobody has the right data. Abort.
		if len(task.attempts) >= npeers {
			return successful, fmt.Errorf("trie node %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers)
		}
		// Missing item, place into the retry queue.
		s.trieTasks[hash] = task
	}
	for hash, task := range req.codeTasks {
		// If the node did deliver something, missing items may be due to a protocol
		// limit or a previous timeout + delayed delivery. Both cases should permit
		// the node to retry the missing items (to avoid single-peer stalls).
		if len(req.response) > 0 || req.timedOut() {
			delete(task.attempts, req.peer.id)
		}
		// If we've requested the node too many times already, it may be a malicious
		// sync where nobody has the right data. Abort.
		if len(task.attempts) >= npeers {
			return successful, fmt.Errorf("byte code %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers)
		}
		// Missing item, place into the retry queue.
		s.codeTasks[hash] = task
	}
	return successful, nil
}

// processNodeData tries to inject a trie node data blob delivered from a remote
// peer into the state trie, returning whether anything useful was written or any
// error occurred.
func (s *stateSync) processNodeData(blob []byte) (common.Hash, error) {
	res := statedb.SyncResult{Data: blob}
	s.keccak.Reset()
	s.keccak.Write(blob)
	s.keccak.Sum(res.Hash[:0])
	err := s.sched.Process(res)
	return res.Hash, err
}
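// Illustrative sketch of the allocation-free hashing in processNodeData.
// hash.Hash.Sum appends the digest to the slice it is given, so passing a
// zero-length slice backed by the common.Hash array makes the 32-byte
// Keccak256 digest land directly in the array with no extra allocation.
//
//	hasher := sha3.NewKeccak256()
//	var digest common.Hash
//	hasher.Write([]byte("some trie node blob"))
//	hasher.Sum(digest[:0]) // fills digest's backing array in place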
// updateStats bumps the various state sync progress counters and displays a log
// message for the user to see.
func (s *stateSync) updateStats(written, duplicate, unexpected int, duration time.Duration) {
	s.d.syncStatsLock.Lock()
	defer s.d.syncStatsLock.Unlock()

	s.d.syncStatsState.pending = uint64(s.sched.Pending())
	s.d.syncStatsState.processed += uint64(written)
	s.d.syncStatsState.duplicate += uint64(duplicate)
	s.d.syncStatsState.unexpected += uint64(unexpected)

	if written > 0 || duplicate > 0 || unexpected > 0 {
		logger.Info("Imported new state entries", "count", written, "elapsed", common.PrettyDuration(duration), "processed", s.d.syncStatsState.processed, "pending", s.d.syncStatsState.pending, "trieretry", len(s.trieTasks), "coderetry", len(s.codeTasks), "duplicate", s.d.syncStatsState.duplicate, "unexpected", s.d.syncStatsState.unexpected)
	}
	if written > 0 {
		s.d.stateDB.WriteFastTrieProgress(s.d.syncStatsState.processed)
	}
}
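// Illustrative sketch of a read-side counterpart to updateStats, assuming (as
// in the upstream downloader) that syncStatsLock is a sync.RWMutex.
// stateSyncProgress is hypothetical and not part of this package; the point is
// that syncStatsState is mutated concurrently by the sync loop, so every
// reader must take the lock as well.
//
//	func (d *Downloader) stateSyncProgress() (processed, pending uint64) {
//		d.syncStatsLock.RLock()
//		defer d.syncStatsLock.RUnlock()
//		return d.syncStatsState.processed, d.syncStatsState.pending
//	}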