// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the active peer-set of the downloader, maintaining both failures
// as well as reputation metrics to prioritize the block retrievals.

package downloader

import (
	"errors"
	"fmt"
	"math"
	"math/big"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereumproject/go-ethereum/common"
	"github.com/ethereumproject/go-ethereum/event"
	"github.com/ethereumproject/go-ethereum/logger"
	"github.com/ethereumproject/go-ethereum/logger/glog"
)

const (
	maxLackingHashes  = 4096 // Maximum number of entries allowed on the list of lacking items
	measurementImpact = 0.1  // The impact a single measurement has on a peer's final throughput value.
)

// currentHeadRetrievalFn is the callback type used to fetch the head hash and
// total difficulty currently advertised by a peer.
type currentHeadRetrievalFn func() (common.Hash, *big.Int)

// Block header and body fetchers belonging to eth/62 and above.
type relativeHeaderFetcherFn func(common.Hash, int, int, bool) error
type absoluteHeaderFetcherFn func(uint64, int, int, bool) error
type blockBodyFetcherFn func([]common.Hash) error
type receiptFetcherFn func([]common.Hash) error
type stateFetcherFn func([]common.Hash) error

var (
	errAlreadyFetching   = errors.New("already fetching blocks from peer")
	errAlreadyRegistered = errors.New("peer is already registered")
	errNotRegistered     = errors.New("peer is not registered")
)

// peer represents an active peer from which hashes and blocks are retrieved.
type peer struct {
	id string // Unique identifier of the peer

	// Activity flags, toggled atomically (idle = 0, active = 1); one per data type
	headerIdle  int32 // Current header activity state of the peer
	blockIdle   int32 // Current block activity state of the peer
	receiptIdle int32 // Current receipt activity state of the peer
	stateIdle   int32 // Current node data activity state of the peer

	// Exponentially weighted throughput estimates, guarded by lock
	headerThroughput  float64 // Number of headers measured to be retrievable per second
	blockThroughput   float64 // Number of blocks (bodies) measured to be retrievable per second
	receiptThroughput float64 // Number of receipts measured to be retrievable per second
	stateThroughput   float64 // Number of node data pieces measured to be retrievable per second

	rtt time.Duration // Request round trip time to track responsiveness (QoS), guarded by lock

	headerStarted  time.Time // Time instance when the last header fetch was started
	blockStarted   time.Time // Time instance when the last block (body) fetch was started
	receiptStarted time.Time // Time instance when the last receipt fetch was started
	stateStarted   time.Time // Time instance when the last node data fetch was started

	lacking map[common.Hash]struct{} // Set of hashes not to request (didn't have previously), guarded by lock

	currentHead currentHeadRetrievalFn // Method to fetch the currently known head of the peer

	getRelHeaders  relativeHeaderFetcherFn // [eth/62] Method to retrieve a batch of headers from an origin hash
	getAbsHeaders  absoluteHeaderFetcherFn // [eth/62] Method to retrieve a batch of headers from an absolute position
	getBlockBodies blockBodyFetcherFn      // [eth/62] Method to retrieve a batch of block bodies

	getReceipts receiptFetcherFn // [eth/63] Method to retrieve a batch of block transaction receipts
	getNodeData stateFetcherFn   // [eth/63] Method to retrieve a batch of state trie data

	version int    // Eth protocol version number to switch strategies
	name    string // Client name advertised by the peer
	lock    sync.RWMutex
}

// newPeer creates a new downloader peer, with specific hash and block retrieval
// mechanisms. Throughput estimates start at zero; peerSet.Register seeds them
// from the averages of already-connected peers.
func newPeer(id string, version int, name string, currentHead currentHeadRetrievalFn,
	getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn,
	getReceipts receiptFetcherFn, getNodeData stateFetcherFn) *peer {
	return &peer{
		id:      id,
		lacking: make(map[common.Hash]struct{}),

		currentHead:    currentHead,
		getRelHeaders:  getRelHeaders,
		getAbsHeaders:  getAbsHeaders,
		getBlockBodies: getBlockBodies,

		getReceipts: getReceipts,
		getNodeData: getNodeData,

		version: version,
		name:    name,
	}
}

// Reset clears the internal state of a peer entity.
119 func (p *peer) Reset() { 120 p.lock.Lock() 121 defer p.lock.Unlock() 122 123 atomic.StoreInt32(&p.headerIdle, 0) 124 atomic.StoreInt32(&p.blockIdle, 0) 125 atomic.StoreInt32(&p.receiptIdle, 0) 126 atomic.StoreInt32(&p.stateIdle, 0) 127 128 p.headerThroughput = 0 129 p.blockThroughput = 0 130 p.receiptThroughput = 0 131 p.stateThroughput = 0 132 133 p.lacking = make(map[common.Hash]struct{}) 134 } 135 136 // FetchHeaders sends a header retrieval request to the remote peer. 137 func (p *peer) FetchHeaders(from uint64, count int) error { 138 // Sanity check the protocol version 139 if p.version < 62 { 140 panic(fmt.Sprintf("header fetch [eth/62+] requested on eth/%d", p.version)) 141 } 142 // Short circuit if the peer is already fetching 143 if !atomic.CompareAndSwapInt32(&p.headerIdle, 0, 1) { 144 return errAlreadyFetching 145 } 146 p.headerStarted = time.Now() 147 148 // Issue the header retrieval request (absolut upwards without gaps) 149 go p.getAbsHeaders(from, count, 0, false) 150 151 return nil 152 } 153 154 // FetchBodies sends a block body retrieval request to the remote peer. 155 func (p *peer) FetchBodies(request *fetchRequest) error { 156 // Sanity check the protocol version 157 if p.version < 62 { 158 panic(fmt.Sprintf("body fetch [eth/62+] requested on eth/%d", p.version)) 159 } 160 // Short circuit if the peer is already fetching 161 if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) { 162 return errAlreadyFetching 163 } 164 p.blockStarted = time.Now() 165 166 // Convert the header set to a retrievable slice 167 hashes := make([]common.Hash, 0, len(request.Headers)) 168 for _, header := range request.Headers { 169 hashes = append(hashes, header.Hash()) 170 } 171 go p.getBlockBodies(hashes) 172 173 return nil 174 } 175 176 // FetchReceipts sends a receipt retrieval request to the remote peer. 
177 func (p *peer) FetchReceipts(request *fetchRequest) error { 178 // Sanity check the protocol version 179 if p.version < 63 { 180 panic(fmt.Sprintf("body fetch [eth/63+] requested on eth/%d", p.version)) 181 } 182 // Short circuit if the peer is already fetching 183 if !atomic.CompareAndSwapInt32(&p.receiptIdle, 0, 1) { 184 return errAlreadyFetching 185 } 186 p.receiptStarted = time.Now() 187 188 // Convert the header set to a retrievable slice 189 hashes := make([]common.Hash, 0, len(request.Headers)) 190 for _, header := range request.Headers { 191 hashes = append(hashes, header.Hash()) 192 } 193 go p.getReceipts(hashes) 194 195 return nil 196 } 197 198 // FetchNodeData sends a node state data retrieval request to the remote peer. 199 func (p *peer) FetchNodeData(hashes []common.Hash) error { 200 // Sanity check the protocol version 201 if p.version < 63 { 202 panic(fmt.Sprintf("node data fetch [eth/63+] requested on eth/%d", p.version)) 203 } 204 // Short circuit if the peer is already fetching 205 if !atomic.CompareAndSwapInt32(&p.stateIdle, 0, 1) { 206 return errAlreadyFetching 207 } 208 p.stateStarted = time.Now() 209 go p.getNodeData(hashes) 210 return nil 211 } 212 213 // SetHeadersIdle sets the peer to idle, allowing it to execute new header retrieval 214 // requests. Its estimated header retrieval throughput is updated with that measured 215 // just now. 216 func (p *peer) SetHeadersIdle(delivered int) { 217 p.setIdle(p.headerStarted, delivered, &p.headerThroughput, &p.headerIdle) 218 } 219 220 // SetBlocksIdle sets the peer to idle, allowing it to execute new block retrieval 221 // requests. Its estimated block retrieval throughput is updated with that measured 222 // just now. 223 func (p *peer) SetBlocksIdle(delivered int) { 224 p.setIdle(p.blockStarted, delivered, &p.blockThroughput, &p.blockIdle) 225 } 226 227 // SetBodiesIdle sets the peer to idle, allowing it to execute block body retrieval 228 // requests. 
Its estimated body retrieval throughput is updated with that measured 229 // just now. 230 func (p *peer) SetBodiesIdle(delivered int) { 231 p.setIdle(p.blockStarted, delivered, &p.blockThroughput, &p.blockIdle) 232 } 233 234 // SetReceiptsIdle sets the peer to idle, allowing it to execute new receipt 235 // retrieval requests. Its estimated receipt retrieval throughput is updated 236 // with that measured just now. 237 func (p *peer) SetReceiptsIdle(delivered int) { 238 p.setIdle(p.receiptStarted, delivered, &p.receiptThroughput, &p.receiptIdle) 239 } 240 241 // SetNodeDataIdle sets the peer to idle, allowing it to execute new state trie 242 // data retrieval requests. Its estimated state retrieval throughput is updated 243 // with that measured just now. 244 func (p *peer) SetNodeDataIdle(delivered int) { 245 p.setIdle(p.stateStarted, delivered, &p.stateThroughput, &p.stateIdle) 246 } 247 248 // setIdle sets the peer to idle, allowing it to execute new retrieval requests. 249 // Its estimated retrieval throughput is updated with that measured just now. 
func (p *peer) setIdle(started time.Time, delivered int, throughput *float64, idle *int32) {
	// Irrelevant of the scaling, make sure the peer ends up idle.
	// NOTE: deferred calls run LIFO, so this store executes AFTER the unlock
	// below — the peer is only flagged idle once the new throughput and RTT
	// values are published under the lock.
	defer atomic.StoreInt32(idle, 0)

	p.lock.Lock()
	defer p.lock.Unlock()

	// If nothing was delivered (hard timeout / unavailable data), reduce throughput to minimum
	if delivered == 0 {
		*throughput = 0
		return
	}
	// Otherwise update the throughput with a new measurement: an exponentially
	// weighted moving average where a single sample contributes measurementImpact.
	elapsed := time.Since(started) + 1 // +1 (ns) to ensure non-zero divisor
	measured := float64(delivered) / (float64(elapsed) / float64(time.Second))

	*throughput = (1-measurementImpact)*(*throughput) + measurementImpact*measured
	// Fold the same sample into the QoS round-trip-time estimate too
	p.rtt = time.Duration((1-measurementImpact)*float64(p.rtt) + measurementImpact*float64(elapsed))

	glog.V(logger.Debug).Infoln("Peer throughput measurements updated:",
		"hps", p.headerThroughput, "bps", p.blockThroughput,
		"rps", p.receiptThroughput, "sps", p.stateThroughput,
		"miss", len(p.lacking), "rtt", p.rtt)
}

// HeaderCapacity retrieves the peers header download allowance based on its
// previously discovered throughput.
func (p *peer) HeaderCapacity(targetRTT time.Duration) int {
	p.lock.RLock()
	defer p.lock.RUnlock()

	// Allowance = throughput * targetRTT, floored at 2 and capped at the protocol maximum
	return int(math.Min(1+math.Max(1, p.headerThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxHeaderFetch)))
}

// BlockCapacity retrieves the peers block download allowance based on its
// previously discovered throughput.
func (p *peer) BlockCapacity(targetRTT time.Duration) int {
	p.lock.RLock()
	defer p.lock.RUnlock()

	// Allowance = throughput * targetRTT, floored at 2 and capped at the protocol maximum
	return int(math.Min(1+math.Max(1, p.blockThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxBlockFetch)))
}

// ReceiptCapacity retrieves the peers receipt download allowance based on its
// previously discovered throughput.
295 func (p *peer) ReceiptCapacity(targetRTT time.Duration) int { 296 p.lock.RLock() 297 defer p.lock.RUnlock() 298 299 return int(math.Min(1+math.Max(1, p.receiptThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxReceiptFetch))) 300 } 301 302 // NodeDataCapacity retrieves the peers state download allowance based on its 303 // previously discovered throughput. 304 func (p *peer) NodeDataCapacity(targetRTT time.Duration) int { 305 p.lock.RLock() 306 defer p.lock.RUnlock() 307 308 return int(math.Min(1+math.Max(1, p.stateThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxStateFetch))) 309 } 310 311 // MarkLacking appends a new entity to the set of items (blocks, receipts, states) 312 // that a peer is known not to have (i.e. have been requested before). If the 313 // set reaches its maximum allowed capacity, items are randomly dropped off. 314 func (p *peer) MarkLacking(hash common.Hash) { 315 p.lock.Lock() 316 defer p.lock.Unlock() 317 318 for len(p.lacking) >= maxLackingHashes { 319 for drop := range p.lacking { 320 delete(p.lacking, drop) 321 break 322 } 323 } 324 p.lacking[hash] = struct{}{} 325 } 326 327 // Lacks retrieves whether the hash of a blockchain item is on the peers lacking 328 // list (i.e. whether we know that the peer does not have it). 329 func (p *peer) Lacks(hash common.Hash) bool { 330 p.lock.RLock() 331 defer p.lock.RUnlock() 332 333 _, ok := p.lacking[hash] 334 return ok 335 } 336 337 // String implements fmt.Stringer. 
338 func (p *peer) String() string { 339 p.lock.RLock() 340 defer p.lock.RUnlock() 341 342 return fmt.Sprintf("peer:%s@[%s] eth/%d", p.id, p.name, p.version) 343 // strings.Join([]string{ 344 // fmt.Sprintf("hs %3.2f/s", p.headerThroughput), 345 // fmt.Sprintf("bs %3.2f/s", p.blockThroughput), 346 // fmt.Sprintf("rs %3.2f/s", p.receiptThroughput), 347 // fmt.Sprintf("ss %3.2f/s", p.stateThroughput), 348 // fmt.Sprintf("miss %4d", len(p.lacking)), 349 // fmt.Sprintf("rtt %v", p.rtt), 350 //}, ", ") 351 // ) 352 } 353 354 // peerSet represents the collection of active peer participating in the chain 355 // download procedure. 356 type peerSet struct { 357 peers map[string]*peer 358 newPeerFeed event.Feed 359 peerDropFeed event.Feed 360 lock sync.RWMutex 361 } 362 363 // newPeerSet creates a new peer set top track the active download sources. 364 func newPeerSet() *peerSet { 365 return &peerSet{ 366 peers: make(map[string]*peer), 367 } 368 } 369 370 // SubscribeNewPeers subscribes to peer arrival events. 371 func (ps *peerSet) SubscribeNewPeers(ch chan<- *peer) event.Subscription { 372 return ps.newPeerFeed.Subscribe(ch) 373 } 374 375 // SubscribePeerDrops subscribes to peer departure events. 376 func (ps *peerSet) SubscribePeerDrops(ch chan<- *peer) event.Subscription { 377 return ps.peerDropFeed.Subscribe(ch) 378 } 379 380 // Reset iterates over the current peer set, and resets each of the known peers 381 // to prepare for a next batch of block retrieval. 382 func (ps *peerSet) Reset() { 383 ps.lock.RLock() 384 defer ps.lock.RUnlock() 385 386 for _, peer := range ps.peers { 387 peer.Reset() 388 } 389 } 390 391 // Register injects a new peer into the working set, or returns an error if the 392 // peer is already known. 393 // 394 // The method also sets the starting throughput values of the new peer to the 395 // average of all existing peers, to give it a realistic chance of being used 396 // for data retrievals. 
397 func (ps *peerSet) Register(p *peer) error { 398 // Retrieve the current median RTT as a sane default 399 p.rtt = ps.medianRTT() 400 401 // Register the new peer with some meaningful defaults 402 ps.lock.Lock() 403 defer ps.lock.Unlock() 404 405 if _, ok := ps.peers[p.id]; ok { 406 return errAlreadyRegistered 407 } 408 if len(ps.peers) > 0 { 409 p.headerThroughput, p.blockThroughput, p.receiptThroughput, p.stateThroughput = 0, 0, 0, 0 410 411 for _, peer := range ps.peers { 412 peer.lock.RLock() 413 p.headerThroughput += peer.headerThroughput 414 p.blockThroughput += peer.blockThroughput 415 p.receiptThroughput += peer.receiptThroughput 416 p.stateThroughput += peer.stateThroughput 417 peer.lock.RUnlock() 418 } 419 p.headerThroughput /= float64(len(ps.peers)) 420 p.blockThroughput /= float64(len(ps.peers)) 421 p.receiptThroughput /= float64(len(ps.peers)) 422 p.stateThroughput /= float64(len(ps.peers)) 423 } 424 ps.peers[p.id] = p 425 426 return nil 427 } 428 429 // Unregister removes a remote peer from the active set, disabling any further 430 // actions to/from that particular entity. 431 func (ps *peerSet) Unregister(id string) error { 432 ps.lock.Lock() 433 p, ok := ps.peers[id] 434 if !ok { 435 defer ps.lock.Unlock() 436 return errNotRegistered 437 } 438 delete(ps.peers, id) 439 ps.lock.Unlock() 440 441 ps.peerDropFeed.Send(p) 442 return nil 443 } 444 445 // Peer retrieves the registered peer with the given id. 446 func (ps *peerSet) Peer(id string) *peer { 447 ps.lock.RLock() 448 defer ps.lock.RUnlock() 449 450 return ps.peers[id] 451 } 452 453 // Len returns if the current number of peers in the set. 454 func (ps *peerSet) Len() int { 455 ps.lock.RLock() 456 defer ps.lock.RUnlock() 457 458 return len(ps.peers) 459 } 460 461 // AllPeers retrieves a flat list of all the peers within the set. 
462 func (ps *peerSet) AllPeers() []*peer { 463 ps.lock.RLock() 464 defer ps.lock.RUnlock() 465 466 list := make([]*peer, 0, len(ps.peers)) 467 for _, p := range ps.peers { 468 list = append(list, p) 469 } 470 return list 471 } 472 473 // BlockIdlePeers retrieves a flat list of all the currently idle peers within the 474 // active peer set, ordered by their reputation. 475 func (ps *peerSet) BlockIdlePeers() ([]*peer, int) { 476 idle := func(p *peer) bool { 477 return atomic.LoadInt32(&p.blockIdle) == 0 478 } 479 throughput := func(p *peer) float64 { 480 p.lock.RLock() 481 defer p.lock.RUnlock() 482 return p.blockThroughput 483 } 484 return ps.idlePeers(61, 61, idle, throughput) 485 } 486 487 // HeaderIdlePeers retrieves a flat list of all the currently header-idle peers 488 // within the active peer set, ordered by their reputation. 489 func (ps *peerSet) HeaderIdlePeers() ([]*peer, int) { 490 idle := func(p *peer) bool { 491 return atomic.LoadInt32(&p.headerIdle) == 0 492 } 493 throughput := func(p *peer) float64 { 494 p.lock.RLock() 495 defer p.lock.RUnlock() 496 return p.headerThroughput 497 } 498 return ps.idlePeers(62, 64, idle, throughput) 499 } 500 501 // BodyIdlePeers retrieves a flat list of all the currently body-idle peers within 502 // the active peer set, ordered by their reputation. 503 func (ps *peerSet) BodyIdlePeers() ([]*peer, int) { 504 idle := func(p *peer) bool { 505 return atomic.LoadInt32(&p.blockIdle) == 0 506 } 507 throughput := func(p *peer) float64 { 508 p.lock.RLock() 509 defer p.lock.RUnlock() 510 return p.blockThroughput 511 } 512 return ps.idlePeers(62, 64, idle, throughput) 513 } 514 515 // ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers 516 // within the active peer set, ordered by their reputation. 
517 func (ps *peerSet) ReceiptIdlePeers() ([]*peer, int) { 518 idle := func(p *peer) bool { 519 return atomic.LoadInt32(&p.receiptIdle) == 0 520 } 521 throughput := func(p *peer) float64 { 522 p.lock.RLock() 523 defer p.lock.RUnlock() 524 return p.receiptThroughput 525 } 526 return ps.idlePeers(63, 64, idle, throughput) 527 } 528 529 // NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle 530 // peers within the active peer set, ordered by their reputation. 531 func (ps *peerSet) NodeDataIdlePeers() ([]*peer, int) { 532 idle := func(p *peer) bool { 533 return atomic.LoadInt32(&p.stateIdle) == 0 534 } 535 throughput := func(p *peer) float64 { 536 p.lock.RLock() 537 defer p.lock.RUnlock() 538 return p.stateThroughput 539 } 540 return ps.idlePeers(63, 64, idle, throughput) 541 } 542 543 // idlePeers retrieves a flat list of all currently idle peers satisfying the 544 // protocol version constraints, using the provided function to check idleness. 545 // The resulting set of peers are sorted by their measure throughput. 546 func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peer) bool, throughput func(*peer) float64) ([]*peer, int) { 547 ps.lock.RLock() 548 defer ps.lock.RUnlock() 549 550 idle, total := make([]*peer, 0, len(ps.peers)), 0 551 for _, p := range ps.peers { 552 if p.version >= minProtocol && p.version <= maxProtocol { 553 if idleCheck(p) { 554 idle = append(idle, p) 555 } 556 total++ 557 } 558 } 559 for i := 0; i < len(idle); i++ { 560 for j := i + 1; j < len(idle); j++ { 561 if throughput(idle[i]) < throughput(idle[j]) { 562 idle[i], idle[j] = idle[j], idle[i] 563 } 564 } 565 } 566 return idle, total 567 } 568 569 // medianRTT returns the median RTT of the peerset, considering only the tuning 570 // peers if there are more peers available. 
571 func (ps *peerSet) medianRTT() time.Duration { 572 // Gather all the currently measured round trip times 573 ps.lock.RLock() 574 defer ps.lock.RUnlock() 575 576 rtts := make([]float64, 0, len(ps.peers)) 577 for _, p := range ps.peers { 578 p.lock.RLock() 579 rtts = append(rtts, float64(p.rtt)) 580 p.lock.RUnlock() 581 } 582 sort.Float64s(rtts) 583 584 median := rttMaxEstimate 585 if qosTuningPeers <= len(rtts) { 586 median = time.Duration(rtts[qosTuningPeers/2]) // Median of our tuning peers 587 } else if len(rtts) > 0 { 588 median = time.Duration(rtts[len(rtts)/2]) // Median of our connected peers (maintain even like this some baseline qos) 589 } 590 // Restrict the RTT into some QoS defaults, irrelevant of true RTT 591 if median < rttMinEstimate { 592 median = rttMinEstimate 593 } 594 if median > rttMaxEstimate { 595 median = rttMaxEstimate 596 } 597 return median 598 }