// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the active peer-set of the downloader, maintaining both failures
// as well as reputation metrics to prioritize the block retrievals.

package downloader

import (
	"errors"
	"fmt"
	"math"
	"math/big"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
)

const (
	maxLackingHashes  = 4096 // Maximum number of entries allowed on the list of lacking items
	measurementImpact = 0.1  // The impact a single measurement has on a peer's final throughput value.
)

// Head hash and total difficulty retriever for the remote peer.
type currentHeadRetrievalFn func() (common.Hash, *big.Int)

// Block header and body fetchers belonging to eth/62 and above.
type relativeHeaderFetcherFn func(common.Hash, int, int, bool) error
type absoluteHeaderFetcherFn func(uint64, int, int, bool) error
type blockBodyFetcherFn func([]common.Hash) error
type receiptFetcherFn func([]common.Hash) error
type stateFetcherFn func([]common.Hash) error

var (
	errAlreadyFetching   = errors.New("already fetching blocks from peer")
	errAlreadyRegistered = errors.New("peer is already registered")
	errNotRegistered     = errors.New("peer is not registered")
)

// peer represents an active peer from which hashes and blocks are retrieved.
type peer struct {
	id string // Unique identifier of the peer

	// Idle flags are manipulated with sync/atomic: 0 = idle, 1 = a fetch is in flight.
	headerIdle  int32 // Current header activity state of the peer (idle = 0, active = 1)
	blockIdle   int32 // Current block activity state of the peer (idle = 0, active = 1)
	receiptIdle int32 // Current receipt activity state of the peer (idle = 0, active = 1)
	stateIdle   int32 // Current node data activity state of the peer (idle = 0, active = 1)

	// Throughput fields are exponential moving averages, guarded by lock (see setIdle).
	headerThroughput  float64 // Number of headers measured to be retrievable per second
	blockThroughput   float64 // Number of blocks (bodies) measured to be retrievable per second
	receiptThroughput float64 // Number of receipts measured to be retrievable per second
	stateThroughput   float64 // Number of node data pieces measured to be retrievable per second

	rtt time.Duration // Request round trip time to track responsiveness (QoS)

	headerStarted  time.Time // Time instance when the last header fetch was started
	blockStarted   time.Time // Time instance when the last block (body) fetch was started
	receiptStarted time.Time // Time instance when the last receipt fetch was started
	stateStarted   time.Time // Time instance when the last node data fetch was started

	lacking map[common.Hash]struct{} // Set of hashes not to request (didn't have previously)

	currentHead currentHeadRetrievalFn // Method to fetch the currently known head of the peer

	getRelHeaders  relativeHeaderFetcherFn // [eth/62] Method to retrieve a batch of headers from an origin hash
	getAbsHeaders  absoluteHeaderFetcherFn // [eth/62] Method to retrieve a batch of headers from an absolute position
	getBlockBodies blockBodyFetcherFn      // [eth/62] Method to retrieve a batch of block bodies

	getReceipts receiptFetcherFn // [eth/63] Method to retrieve a batch of block transaction receipts
	getNodeData stateFetcherFn   // [eth/63] Method to retrieve a batch of state trie data

	version int        // Eth protocol version number to switch strategies
	log     log.Logger // Contextual logger to add extra infos to peer logs
	lock    sync.RWMutex
}

// newPeer creates a new downloader peer, with specific hash and block retrieval
// mechanisms. Throughput estimates start at zero; Register seeds them from the
// existing peer set.
func newPeer(id string, version int, currentHead currentHeadRetrievalFn,
	getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn,
	getReceipts receiptFetcherFn, getNodeData stateFetcherFn, logger log.Logger) *peer {

	return &peer{
		id:      id,
		lacking: make(map[common.Hash]struct{}),

		currentHead:    currentHead,
		getRelHeaders:  getRelHeaders,
		getAbsHeaders:  getAbsHeaders,
		getBlockBodies: getBlockBodies,

		getReceipts: getReceipts,
		getNodeData: getNodeData,

		version: version,
		log:     logger,
	}
}

// Reset clears the internal state of a peer entity.
119 func (p *peer) Reset() { 120 p.lock.Lock() 121 defer p.lock.Unlock() 122 123 atomic.StoreInt32(&p.headerIdle, 0) 124 atomic.StoreInt32(&p.blockIdle, 0) 125 atomic.StoreInt32(&p.receiptIdle, 0) 126 atomic.StoreInt32(&p.stateIdle, 0) 127 128 p.headerThroughput = 0 129 p.blockThroughput = 0 130 p.receiptThroughput = 0 131 p.stateThroughput = 0 132 133 p.lacking = make(map[common.Hash]struct{}) 134 } 135 136 // FetchHeaders sends a header retrieval request to the remote peer. 137 func (p *peer) FetchHeaders(from uint64, count int) error { 138 // Sanity check the protocol version 139 if p.version < 62 { 140 panic(fmt.Sprintf("header fetch [eth/62+] requested on eth/%d", p.version)) 141 } 142 // Short circuit if the peer is already fetching 143 if !atomic.CompareAndSwapInt32(&p.headerIdle, 0, 1) { 144 return errAlreadyFetching 145 } 146 p.headerStarted = time.Now() 147 148 // Issue the header retrieval request (absolut upwards without gaps) 149 go p.getAbsHeaders(from, count, 0, false) 150 151 return nil 152 } 153 154 // FetchBodies sends a block body retrieval request to the remote peer. 155 func (p *peer) FetchBodies(request *fetchRequest) error { 156 // Sanity check the protocol version 157 if p.version < 62 { 158 panic(fmt.Sprintf("body fetch [eth/62+] requested on eth/%d", p.version)) 159 } 160 // Short circuit if the peer is already fetching 161 if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) { 162 return errAlreadyFetching 163 } 164 p.blockStarted = time.Now() 165 166 // Convert the header set to a retrievable slice 167 hashes := make([]common.Hash, 0, len(request.Headers)) 168 for _, header := range request.Headers { 169 hashes = append(hashes, header.Hash()) 170 } 171 go p.getBlockBodies(hashes) 172 173 return nil 174 } 175 176 // FetchReceipts sends a receipt retrieval request to the remote peer. 
177 func (p *peer) FetchReceipts(request *fetchRequest) error { 178 // Sanity check the protocol version 179 if p.version < 63 { 180 panic(fmt.Sprintf("body fetch [eth/63+] requested on eth/%d", p.version)) 181 } 182 // Short circuit if the peer is already fetching 183 if !atomic.CompareAndSwapInt32(&p.receiptIdle, 0, 1) { 184 return errAlreadyFetching 185 } 186 p.receiptStarted = time.Now() 187 188 // Convert the header set to a retrievable slice 189 hashes := make([]common.Hash, 0, len(request.Headers)) 190 for _, header := range request.Headers { 191 hashes = append(hashes, header.Hash()) 192 } 193 go p.getReceipts(hashes) 194 195 return nil 196 } 197 198 // FetchNodeData sends a node state data retrieval request to the remote peer. 199 func (p *peer) FetchNodeData(hashes []common.Hash) error { 200 // Sanity check the protocol version 201 if p.version < 63 { 202 panic(fmt.Sprintf("node data fetch [eth/63+] requested on eth/%d", p.version)) 203 } 204 // Short circuit if the peer is already fetching 205 if !atomic.CompareAndSwapInt32(&p.stateIdle, 0, 1) { 206 return errAlreadyFetching 207 } 208 p.stateStarted = time.Now() 209 go p.getNodeData(hashes) 210 return nil 211 } 212 213 // SetHeadersIdle sets the peer to idle, allowing it to execute new header retrieval 214 // requests. Its estimated header retrieval throughput is updated with that measured 215 // just now. 216 func (p *peer) SetHeadersIdle(delivered int) { 217 p.setIdle(p.headerStarted, delivered, &p.headerThroughput, &p.headerIdle) 218 } 219 220 // SetBlocksIdle sets the peer to idle, allowing it to execute new block retrieval 221 // requests. Its estimated block retrieval throughput is updated with that measured 222 // just now. 223 func (p *peer) SetBlocksIdle(delivered int) { 224 p.setIdle(p.blockStarted, delivered, &p.blockThroughput, &p.blockIdle) 225 } 226 227 // SetBodiesIdle sets the peer to idle, allowing it to execute block body retrieval 228 // requests. 
Its estimated body retrieval throughput is updated with that measured 229 // just now. 230 func (p *peer) SetBodiesIdle(delivered int) { 231 p.setIdle(p.blockStarted, delivered, &p.blockThroughput, &p.blockIdle) 232 } 233 234 // SetReceiptsIdle sets the peer to idle, allowing it to execute new receipt 235 // retrieval requests. Its estimated receipt retrieval throughput is updated 236 // with that measured just now. 237 func (p *peer) SetReceiptsIdle(delivered int) { 238 p.setIdle(p.receiptStarted, delivered, &p.receiptThroughput, &p.receiptIdle) 239 } 240 241 // SetNodeDataIdle sets the peer to idle, allowing it to execute new state trie 242 // data retrieval requests. Its estimated state retrieval throughput is updated 243 // with that measured just now. 244 func (p *peer) SetNodeDataIdle(delivered int) { 245 p.setIdle(p.stateStarted, delivered, &p.stateThroughput, &p.stateIdle) 246 } 247 248 // setIdle sets the peer to idle, allowing it to execute new retrieval requests. 249 // Its estimated retrieval throughput is updated with that measured just now. 
func (p *peer) setIdle(started time.Time, delivered int, throughput *float64, idle *int32) {
	// Irrelevant of the scaling, make sure the peer ends up idle
	defer atomic.StoreInt32(idle, 0)

	p.lock.Lock()
	defer p.lock.Unlock()

	// If nothing was delivered (hard timeout / unavailable data), reduce throughput to minimum
	if delivered == 0 {
		*throughput = 0
		return
	}
	// Otherwise update the throughput with a new measurement
	elapsed := time.Since(started) + 1 // +1 (ns) to ensure non-zero divisor
	measured := float64(delivered) / (float64(elapsed) / float64(time.Second))

	// Blend the new sample into the exponential moving averages for both the
	// item throughput and the round trip time, weighted by measurementImpact.
	*throughput = (1-measurementImpact)*(*throughput) + measurementImpact*measured
	p.rtt = time.Duration((1-measurementImpact)*float64(p.rtt) + measurementImpact*float64(elapsed))

	p.log.Trace("Peer throughput measurements updated",
		"hps", p.headerThroughput, "bps", p.blockThroughput,
		"rps", p.receiptThroughput, "sps", p.stateThroughput,
		"miss", len(p.lacking), "rtt", p.rtt)
}

// HeaderCapacity retrieves the peer's header download allowance based on its
// previously discovered throughput.
func (p *peer) HeaderCapacity(targetRTT time.Duration) int {
	p.lock.RLock()
	defer p.lock.RUnlock()

	// Scale throughput to the target RTT window, floor at 1 and cap at the protocol maximum
	return int(math.Min(1+math.Max(1, p.headerThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxHeaderFetch)))
}

// BlockCapacity retrieves the peer's block download allowance based on its
// previously discovered throughput.
func (p *peer) BlockCapacity(targetRTT time.Duration) int {
	p.lock.RLock()
	defer p.lock.RUnlock()

	// Scale throughput to the target RTT window, floor at 1 and cap at the protocol maximum
	return int(math.Min(1+math.Max(1, p.blockThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxBlockFetch)))
}

// ReceiptCapacity retrieves the peer's receipt download allowance based on its
// previously discovered throughput.
func (p *peer) ReceiptCapacity(targetRTT time.Duration) int {
	p.lock.RLock()
	defer p.lock.RUnlock()

	// Scale throughput to the target RTT window, floor at 1 and cap at the protocol maximum
	return int(math.Min(1+math.Max(1, p.receiptThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxReceiptFetch)))
}

// NodeDataCapacity retrieves the peer's state download allowance based on its
// previously discovered throughput.
func (p *peer) NodeDataCapacity(targetRTT time.Duration) int {
	p.lock.RLock()
	defer p.lock.RUnlock()

	// Scale throughput to the target RTT window, floor at 1 and cap at the protocol maximum
	return int(math.Min(1+math.Max(1, p.stateThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxStateFetch)))
}

// MarkLacking appends a new entity to the set of items (blocks, receipts, states)
// that a peer is known not to have (i.e. have been requested before). If the
// set reaches its maximum allowed capacity, items are randomly dropped off.
func (p *peer) MarkLacking(hash common.Hash) {
	p.lock.Lock()
	defer p.lock.Unlock()

	// Evict arbitrary entries (map iteration order is random) until below capacity
	for len(p.lacking) >= maxLackingHashes {
		for drop := range p.lacking {
			delete(p.lacking, drop)
			break
		}
	}
	p.lacking[hash] = struct{}{}
}

// Lacks retrieves whether the hash of a blockchain item is on the peers lacking
// list (i.e. whether we know that the peer does not have it).
func (p *peer) Lacks(hash common.Hash) bool {
	p.lock.RLock()
	defer p.lock.RUnlock()

	_, ok := p.lacking[hash]
	return ok
}

// peerSet represents the collection of active peers participating in the chain
// download procedure.
type peerSet struct {
	peers       map[string]*peer
	newPeerFeed event.Feed // Publishes every newly registered peer to subscribers
	lock        sync.RWMutex
}

// newPeerSet creates a new peer set to track the active download sources.
func newPeerSet() *peerSet {
	return &peerSet{
		peers: make(map[string]*peer),
	}
}

// SubscribeNewPeers subscribes the given channel to peer registration events.
func (ps *peerSet) SubscribeNewPeers(ch chan<- *peer) event.Subscription {
	return ps.newPeerFeed.Subscribe(ch)
}

// Reset iterates over the current peer set, and resets each of the known peers
// to prepare for a next batch of block retrieval.
func (ps *peerSet) Reset() {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	for _, peer := range ps.peers {
		peer.Reset()
	}
}

// Register injects a new peer into the working set, or returns an error if the
// peer is already known.
//
// The method also sets the starting throughput values of the new peer to the
// average of all existing peers, to give it a realistic chance of being used
// for data retrievals.
func (ps *peerSet) Register(p *peer) error {
	// Retrieve the current median RTT as a sane default
	// (done before taking ps.lock: medianRTT acquires it itself)
	p.rtt = ps.medianRTT()

	// Register the new peer with some meaningful defaults
	ps.lock.Lock()
	if _, ok := ps.peers[p.id]; ok {
		ps.lock.Unlock()
		return errAlreadyRegistered
	}
	if len(ps.peers) > 0 {
		// Seed the newcomer's throughput with the mean of the existing peers
		p.headerThroughput, p.blockThroughput, p.receiptThroughput, p.stateThroughput = 0, 0, 0, 0

		for _, peer := range ps.peers {
			peer.lock.RLock()
			p.headerThroughput += peer.headerThroughput
			p.blockThroughput += peer.blockThroughput
			p.receiptThroughput += peer.receiptThroughput
			p.stateThroughput += peer.stateThroughput
			peer.lock.RUnlock()
		}
		p.headerThroughput /= float64(len(ps.peers))
		p.blockThroughput /= float64(len(ps.peers))
		p.receiptThroughput /= float64(len(ps.peers))
		p.stateThroughput /= float64(len(ps.peers))
	}
	ps.peers[p.id] = p
	ps.lock.Unlock()

	// Notify subscribers outside the lock to avoid blocking registration
	ps.newPeerFeed.Send(p)
	return nil
}

// Unregister removes a remote peer from the active set, disabling any further
// actions to/from that particular entity.
408 func (ps *peerSet) Unregister(id string) error { 409 ps.lock.Lock() 410 defer ps.lock.Unlock() 411 412 if _, ok := ps.peers[id]; !ok { 413 return errNotRegistered 414 } 415 delete(ps.peers, id) 416 return nil 417 } 418 419 // Peer retrieves the registered peer with the given id. 420 func (ps *peerSet) Peer(id string) *peer { 421 ps.lock.RLock() 422 defer ps.lock.RUnlock() 423 424 return ps.peers[id] 425 } 426 427 // Len returns if the current number of peers in the set. 428 func (ps *peerSet) Len() int { 429 ps.lock.RLock() 430 defer ps.lock.RUnlock() 431 432 return len(ps.peers) 433 } 434 435 // AllPeers retrieves a flat list of all the peers within the set. 436 func (ps *peerSet) AllPeers() []*peer { 437 ps.lock.RLock() 438 defer ps.lock.RUnlock() 439 440 list := make([]*peer, 0, len(ps.peers)) 441 for _, p := range ps.peers { 442 list = append(list, p) 443 } 444 return list 445 } 446 447 // HeaderIdlePeers retrieves a flat list of all the currently header-idle peers 448 // within the active peer set, ordered by their reputation. 449 func (ps *peerSet) HeaderIdlePeers() ([]*peer, int) { 450 idle := func(p *peer) bool { 451 return atomic.LoadInt32(&p.headerIdle) == 0 452 } 453 throughput := func(p *peer) float64 { 454 p.lock.RLock() 455 defer p.lock.RUnlock() 456 return p.headerThroughput 457 } 458 return ps.idlePeers(62, 64, idle, throughput) 459 } 460 461 // BodyIdlePeers retrieves a flat list of all the currently body-idle peers within 462 // the active peer set, ordered by their reputation. 463 func (ps *peerSet) BodyIdlePeers() ([]*peer, int) { 464 idle := func(p *peer) bool { 465 return atomic.LoadInt32(&p.blockIdle) == 0 466 } 467 throughput := func(p *peer) float64 { 468 p.lock.RLock() 469 defer p.lock.RUnlock() 470 return p.blockThroughput 471 } 472 return ps.idlePeers(62, 64, idle, throughput) 473 } 474 475 // ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers 476 // within the active peer set, ordered by their reputation. 
477 func (ps *peerSet) ReceiptIdlePeers() ([]*peer, int) { 478 idle := func(p *peer) bool { 479 return atomic.LoadInt32(&p.receiptIdle) == 0 480 } 481 throughput := func(p *peer) float64 { 482 p.lock.RLock() 483 defer p.lock.RUnlock() 484 return p.receiptThroughput 485 } 486 return ps.idlePeers(63, 64, idle, throughput) 487 } 488 489 // NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle 490 // peers within the active peer set, ordered by their reputation. 491 func (ps *peerSet) NodeDataIdlePeers() ([]*peer, int) { 492 idle := func(p *peer) bool { 493 return atomic.LoadInt32(&p.stateIdle) == 0 494 } 495 throughput := func(p *peer) float64 { 496 p.lock.RLock() 497 defer p.lock.RUnlock() 498 return p.stateThroughput 499 } 500 return ps.idlePeers(63, 64, idle, throughput) 501 } 502 503 // idlePeers retrieves a flat list of all currently idle peers satisfying the 504 // protocol version constraints, using the provided function to check idleness. 505 // The resulting set of peers are sorted by their measure throughput. 506 func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peer) bool, throughput func(*peer) float64) ([]*peer, int) { 507 ps.lock.RLock() 508 defer ps.lock.RUnlock() 509 510 idle, total := make([]*peer, 0, len(ps.peers)), 0 511 for _, p := range ps.peers { 512 if p.version >= minProtocol && p.version <= maxProtocol { 513 if idleCheck(p) { 514 idle = append(idle, p) 515 } 516 total++ 517 } 518 } 519 for i := 0; i < len(idle); i++ { 520 for j := i + 1; j < len(idle); j++ { 521 if throughput(idle[i]) < throughput(idle[j]) { 522 idle[i], idle[j] = idle[j], idle[i] 523 } 524 } 525 } 526 return idle, total 527 } 528 529 // medianRTT returns the median RTT of te peerset, considering only the tuning 530 // peers if there are more peers available. 
func (ps *peerSet) medianRTT() time.Duration {
	// Gather all the currently measured round trip times
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	rtts := make([]float64, 0, len(ps.peers))
	for _, p := range ps.peers {
		p.lock.RLock()
		rtts = append(rtts, float64(p.rtt))
		p.lock.RUnlock()
	}
	sort.Float64s(rtts)

	// Fall back to the maximum estimate when no peers are connected at all
	median := rttMaxEstimate
	if qosTuningPeers <= len(rtts) {
		median = time.Duration(rtts[qosTuningPeers/2]) // Median of our tuning peers
	} else if len(rtts) > 0 {
		median = time.Duration(rtts[len(rtts)/2]) // Median of our connected peers (maintain even like this some baseline qos)
	}
	// Restrict the RTT into some QoS defaults, irrelevant of true RTT
	if median < rttMinEstimate {
		median = rttMinEstimate
	}
	if median > rttMaxEstimate {
		median = rttMaxEstimate
	}
	return median
}