// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the active peer-set of the downloader, maintaining both failures
// as well as reputation metrics to prioritize the block retrievals.

package downloader

import (
	"errors"
	"fmt"
	"math"
	"math/big"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/atheioschain/go-atheios/common"
)

const (
	maxLackingHashes  = 4096 // Maximum number of entries allowed on the list of lacking items
	measurementImpact = 0.1  // The impact a single measurement has on a peer's final throughput value.
)

// currentHeadRetrievalFn is a retriever for the head hash and total difficulty
// currently advertised by a remote peer.
type currentHeadRetrievalFn func() (common.Hash, *big.Int)

// Block header and body fetchers belonging to eth/62 and above.
type relativeHeaderFetcherFn func(common.Hash, int, int, bool) error // retrieve headers starting from an origin hash
type absoluteHeaderFetcherFn func(uint64, int, int, bool) error      // retrieve headers starting from an absolute block number
type blockBodyFetcherFn func([]common.Hash) error                    // retrieve block bodies for the given hashes
type receiptFetcherFn func([]common.Hash) error                      // retrieve transaction receipts for the given hashes
type stateFetcherFn func([]common.Hash) error                        // retrieve state trie nodes for the given hashes

var (
	errAlreadyFetching   = errors.New("already fetching blocks from peer")
	errAlreadyRegistered = errors.New("peer is already registered")
	errNotRegistered     = errors.New("peer is not registered")
)

// peer represents an active peer from which hashes and blocks are retrieved.
type peer struct {
	id string // Unique identifier of the peer

	headerIdle  int32 // Current header activity state of the peer (idle = 0, active = 1)
	blockIdle   int32 // Current block activity state of the peer (idle = 0, active = 1)
	receiptIdle int32 // Current receipt activity state of the peer (idle = 0, active = 1)
	stateIdle   int32 // Current node data activity state of the peer (idle = 0, active = 1)

	headerThroughput  float64 // Number of headers measured to be retrievable per second
	blockThroughput   float64 // Number of blocks (bodies) measured to be retrievable per second
	receiptThroughput float64 // Number of receipts measured to be retrievable per second
	stateThroughput   float64 // Number of node data pieces measured to be retrievable per second

	rtt time.Duration // Request round trip time to track responsiveness (QoS)

	headerStarted  time.Time // Time instance when the last header fetch was started
	blockStarted   time.Time // Time instance when the last block (body) fetch was started
	receiptStarted time.Time // Time instance when the last receipt fetch was started
	stateStarted   time.Time // Time instance when the last node data fetch was started

	lacking map[common.Hash]struct{} // Set of hashes not to request (didn't have previously)

	currentHead currentHeadRetrievalFn // Method to fetch the currently known head of the peer

	getRelHeaders  relativeHeaderFetcherFn // [eth/62] Method to retrieve a batch of headers from an origin hash
	getAbsHeaders  absoluteHeaderFetcherFn // [eth/62] Method to retrieve a batch of headers from an absolute position
	getBlockBodies blockBodyFetcherFn      // [eth/62] Method to retrieve a batch of block bodies

	getReceipts receiptFetcherFn // [eth/63] Method to retrieve a batch of block transaction receipts
	getNodeData stateFetcherFn   // [eth/63] Method to retrieve a batch of state trie data

	version int          // Eth protocol version number to switch strategies
	lock    sync.RWMutex // Guards the throughput, rtt and lacking fields above
}

// newPeer creates a new downloader peer, with specific hash and block retrieval
// mechanisms. Throughput estimates start at zero; the peer set seeds them with
// the current averages when the peer is registered.
func newPeer(id string, version int, currentHead currentHeadRetrievalFn,
	getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn,
	getReceipts receiptFetcherFn, getNodeData stateFetcherFn) *peer {
	return &peer{
		id:      id,
		lacking: make(map[common.Hash]struct{}),

		currentHead:    currentHead,
		getRelHeaders:  getRelHeaders,
		getAbsHeaders:  getAbsHeaders,
		getBlockBodies: getBlockBodies,

		getReceipts: getReceipts,
		getNodeData: getNodeData,

		version: version,
	}
}

// Reset clears the internal state of a peer entity.
115 func (p *peer) Reset() { 116 p.lock.Lock() 117 defer p.lock.Unlock() 118 119 atomic.StoreInt32(&p.headerIdle, 0) 120 atomic.StoreInt32(&p.blockIdle, 0) 121 atomic.StoreInt32(&p.receiptIdle, 0) 122 atomic.StoreInt32(&p.stateIdle, 0) 123 124 p.headerThroughput = 0 125 p.blockThroughput = 0 126 p.receiptThroughput = 0 127 p.stateThroughput = 0 128 129 p.lacking = make(map[common.Hash]struct{}) 130 } 131 132 // FetchHeaders sends a header retrieval request to the remote peer. 133 func (p *peer) FetchHeaders(from uint64, count int) error { 134 // Sanity check the protocol version 135 if p.version < 62 { 136 panic(fmt.Sprintf("header fetch [eth/62+] requested on eth/%d", p.version)) 137 } 138 // Short circuit if the peer is already fetching 139 if !atomic.CompareAndSwapInt32(&p.headerIdle, 0, 1) { 140 return errAlreadyFetching 141 } 142 p.headerStarted = time.Now() 143 144 // Issue the header retrieval request (absolut upwards without gaps) 145 go p.getAbsHeaders(from, count, 0, false) 146 147 return nil 148 } 149 150 // FetchBodies sends a block body retrieval request to the remote peer. 151 func (p *peer) FetchBodies(request *fetchRequest) error { 152 // Sanity check the protocol version 153 if p.version < 62 { 154 panic(fmt.Sprintf("body fetch [eth/62+] requested on eth/%d", p.version)) 155 } 156 // Short circuit if the peer is already fetching 157 if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) { 158 return errAlreadyFetching 159 } 160 p.blockStarted = time.Now() 161 162 // Convert the header set to a retrievable slice 163 hashes := make([]common.Hash, 0, len(request.Headers)) 164 for _, header := range request.Headers { 165 hashes = append(hashes, header.Hash()) 166 } 167 go p.getBlockBodies(hashes) 168 169 return nil 170 } 171 172 // FetchReceipts sends a receipt retrieval request to the remote peer. 
173 func (p *peer) FetchReceipts(request *fetchRequest) error { 174 // Sanity check the protocol version 175 if p.version < 63 { 176 panic(fmt.Sprintf("body fetch [eth/63+] requested on eth/%d", p.version)) 177 } 178 // Short circuit if the peer is already fetching 179 if !atomic.CompareAndSwapInt32(&p.receiptIdle, 0, 1) { 180 return errAlreadyFetching 181 } 182 p.receiptStarted = time.Now() 183 184 // Convert the header set to a retrievable slice 185 hashes := make([]common.Hash, 0, len(request.Headers)) 186 for _, header := range request.Headers { 187 hashes = append(hashes, header.Hash()) 188 } 189 go p.getReceipts(hashes) 190 191 return nil 192 } 193 194 // FetchNodeData sends a node state data retrieval request to the remote peer. 195 func (p *peer) FetchNodeData(request *fetchRequest) error { 196 // Sanity check the protocol version 197 if p.version < 63 { 198 panic(fmt.Sprintf("node data fetch [eth/63+] requested on eth/%d", p.version)) 199 } 200 // Short circuit if the peer is already fetching 201 if !atomic.CompareAndSwapInt32(&p.stateIdle, 0, 1) { 202 return errAlreadyFetching 203 } 204 p.stateStarted = time.Now() 205 206 // Convert the hash set to a retrievable slice 207 hashes := make([]common.Hash, 0, len(request.Hashes)) 208 for hash := range request.Hashes { 209 hashes = append(hashes, hash) 210 } 211 go p.getNodeData(hashes) 212 213 return nil 214 } 215 216 // SetHeadersIdle sets the peer to idle, allowing it to execute new header retrieval 217 // requests. Its estimated header retrieval throughput is updated with that measured 218 // just now. 219 func (p *peer) SetHeadersIdle(delivered int) { 220 p.setIdle(p.headerStarted, delivered, &p.headerThroughput, &p.headerIdle) 221 } 222 223 // SetBlocksIdle sets the peer to idle, allowing it to execute new block retrieval 224 // requests. Its estimated block retrieval throughput is updated with that measured 225 // just now. 
func (p *peer) SetBlocksIdle(delivered int) {
	// NOTE: identical to SetBodiesIdle below — both feed the shared block/body
	// throughput and idleness trackers, since bodies are what eth/62 blocks
	// are fetched as.
	p.setIdle(p.blockStarted, delivered, &p.blockThroughput, &p.blockIdle)
}

// SetBodiesIdle sets the peer to idle, allowing it to execute block body retrieval
// requests. Its estimated body retrieval throughput is updated with that measured
// just now.
func (p *peer) SetBodiesIdle(delivered int) {
	p.setIdle(p.blockStarted, delivered, &p.blockThroughput, &p.blockIdle)
}

// SetReceiptsIdle sets the peer to idle, allowing it to execute new receipt
// retrieval requests. Its estimated receipt retrieval throughput is updated
// with that measured just now.
func (p *peer) SetReceiptsIdle(delivered int) {
	p.setIdle(p.receiptStarted, delivered, &p.receiptThroughput, &p.receiptIdle)
}

// SetNodeDataIdle sets the peer to idle, allowing it to execute new state trie
// data retrieval requests. Its estimated state retrieval throughput is updated
// with that measured just now.
func (p *peer) SetNodeDataIdle(delivered int) {
	p.setIdle(p.stateStarted, delivered, &p.stateThroughput, &p.stateIdle)
}

// setIdle sets the peer to idle, allowing it to execute new retrieval requests.
// Its estimated retrieval throughput is updated with that measured just now.
253 func (p *peer) setIdle(started time.Time, delivered int, throughput *float64, idle *int32) { 254 // Irrelevant of the scaling, make sure the peer ends up idle 255 defer atomic.StoreInt32(idle, 0) 256 257 p.lock.Lock() 258 defer p.lock.Unlock() 259 260 // If nothing was delivered (hard timeout / unavailable data), reduce throughput to minimum 261 if delivered == 0 { 262 *throughput = 0 263 return 264 } 265 // Otherwise update the throughput with a new measurement 266 elapsed := time.Since(started) + 1 // +1 (ns) to ensure non-zero divisor 267 measured := float64(delivered) / (float64(elapsed) / float64(time.Second)) 268 269 *throughput = (1-measurementImpact)*(*throughput) + measurementImpact*measured 270 p.rtt = time.Duration((1-measurementImpact)*float64(p.rtt) + measurementImpact*float64(elapsed)) 271 } 272 273 // HeaderCapacity retrieves the peers header download allowance based on its 274 // previously discovered throughput. 275 func (p *peer) HeaderCapacity(targetRTT time.Duration) int { 276 p.lock.RLock() 277 defer p.lock.RUnlock() 278 279 return int(math.Min(1+math.Max(1, p.headerThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxHeaderFetch))) 280 } 281 282 // BlockCapacity retrieves the peers block download allowance based on its 283 // previously discovered throughput. 284 func (p *peer) BlockCapacity(targetRTT time.Duration) int { 285 p.lock.RLock() 286 defer p.lock.RUnlock() 287 288 return int(math.Min(1+math.Max(1, p.blockThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxBlockFetch))) 289 } 290 291 // ReceiptCapacity retrieves the peers receipt download allowance based on its 292 // previously discovered throughput. 
293 func (p *peer) ReceiptCapacity(targetRTT time.Duration) int { 294 p.lock.RLock() 295 defer p.lock.RUnlock() 296 297 return int(math.Min(1+math.Max(1, p.receiptThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxReceiptFetch))) 298 } 299 300 // NodeDataCapacity retrieves the peers state download allowance based on its 301 // previously discovered throughput. 302 func (p *peer) NodeDataCapacity(targetRTT time.Duration) int { 303 p.lock.RLock() 304 defer p.lock.RUnlock() 305 306 return int(math.Min(1+math.Max(1, p.stateThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxStateFetch))) 307 } 308 309 // MarkLacking appends a new entity to the set of items (blocks, receipts, states) 310 // that a peer is known not to have (i.e. have been requested before). If the 311 // set reaches its maximum allowed capacity, items are randomly dropped off. 312 func (p *peer) MarkLacking(hash common.Hash) { 313 p.lock.Lock() 314 defer p.lock.Unlock() 315 316 for len(p.lacking) >= maxLackingHashes { 317 for drop := range p.lacking { 318 delete(p.lacking, drop) 319 break 320 } 321 } 322 p.lacking[hash] = struct{}{} 323 } 324 325 // Lacks retrieves whether the hash of a blockchain item is on the peers lacking 326 // list (i.e. whether we know that the peer does not have it). 327 func (p *peer) Lacks(hash common.Hash) bool { 328 p.lock.RLock() 329 defer p.lock.RUnlock() 330 331 _, ok := p.lacking[hash] 332 return ok 333 } 334 335 // String implements fmt.Stringer. 
336 func (p *peer) String() string { 337 p.lock.RLock() 338 defer p.lock.RUnlock() 339 340 return fmt.Sprintf("Peer %s [%s]", p.id, strings.Join([]string{ 341 fmt.Sprintf("hs %3.2f/s", p.headerThroughput), 342 fmt.Sprintf("bs %3.2f/s", p.blockThroughput), 343 fmt.Sprintf("rs %3.2f/s", p.receiptThroughput), 344 fmt.Sprintf("ss %3.2f/s", p.stateThroughput), 345 fmt.Sprintf("miss %4d", len(p.lacking)), 346 fmt.Sprintf("rtt %v", p.rtt), 347 }, ", ")) 348 } 349 350 // peerSet represents the collection of active peer participating in the chain 351 // download procedure. 352 type peerSet struct { 353 peers map[string]*peer 354 lock sync.RWMutex 355 } 356 357 // newPeerSet creates a new peer set top track the active download sources. 358 func newPeerSet() *peerSet { 359 return &peerSet{ 360 peers: make(map[string]*peer), 361 } 362 } 363 364 // Reset iterates over the current peer set, and resets each of the known peers 365 // to prepare for a next batch of block retrieval. 366 func (ps *peerSet) Reset() { 367 ps.lock.RLock() 368 defer ps.lock.RUnlock() 369 370 for _, peer := range ps.peers { 371 peer.Reset() 372 } 373 } 374 375 // Register injects a new peer into the working set, or returns an error if the 376 // peer is already known. 377 // 378 // The method also sets the starting throughput values of the new peer to the 379 // average of all existing peers, to give it a realistic chance of being used 380 // for data retrievals. 
381 func (ps *peerSet) Register(p *peer) error { 382 // Retrieve the current median RTT as a sane default 383 p.rtt = ps.medianRTT() 384 385 // Register the new peer with some meaningful defaults 386 ps.lock.Lock() 387 defer ps.lock.Unlock() 388 389 if _, ok := ps.peers[p.id]; ok { 390 return errAlreadyRegistered 391 } 392 if len(ps.peers) > 0 { 393 p.headerThroughput, p.blockThroughput, p.receiptThroughput, p.stateThroughput = 0, 0, 0, 0 394 395 for _, peer := range ps.peers { 396 peer.lock.RLock() 397 p.headerThroughput += peer.headerThroughput 398 p.blockThroughput += peer.blockThroughput 399 p.receiptThroughput += peer.receiptThroughput 400 p.stateThroughput += peer.stateThroughput 401 peer.lock.RUnlock() 402 } 403 p.headerThroughput /= float64(len(ps.peers)) 404 p.blockThroughput /= float64(len(ps.peers)) 405 p.receiptThroughput /= float64(len(ps.peers)) 406 p.stateThroughput /= float64(len(ps.peers)) 407 } 408 ps.peers[p.id] = p 409 return nil 410 } 411 412 // Unregister removes a remote peer from the active set, disabling any further 413 // actions to/from that particular entity. 414 func (ps *peerSet) Unregister(id string) error { 415 ps.lock.Lock() 416 defer ps.lock.Unlock() 417 418 if _, ok := ps.peers[id]; !ok { 419 return errNotRegistered 420 } 421 delete(ps.peers, id) 422 return nil 423 } 424 425 // Peer retrieves the registered peer with the given id. 426 func (ps *peerSet) Peer(id string) *peer { 427 ps.lock.RLock() 428 defer ps.lock.RUnlock() 429 430 return ps.peers[id] 431 } 432 433 // Len returns if the current number of peers in the set. 434 func (ps *peerSet) Len() int { 435 ps.lock.RLock() 436 defer ps.lock.RUnlock() 437 438 return len(ps.peers) 439 } 440 441 // AllPeers retrieves a flat list of all the peers within the set. 
442 func (ps *peerSet) AllPeers() []*peer { 443 ps.lock.RLock() 444 defer ps.lock.RUnlock() 445 446 list := make([]*peer, 0, len(ps.peers)) 447 for _, p := range ps.peers { 448 list = append(list, p) 449 } 450 return list 451 } 452 453 // HeaderIdlePeers retrieves a flat list of all the currently header-idle peers 454 // within the active peer set, ordered by their reputation. 455 func (ps *peerSet) HeaderIdlePeers() ([]*peer, int) { 456 idle := func(p *peer) bool { 457 return atomic.LoadInt32(&p.headerIdle) == 0 458 } 459 throughput := func(p *peer) float64 { 460 p.lock.RLock() 461 defer p.lock.RUnlock() 462 return p.headerThroughput 463 } 464 return ps.idlePeers(62, 64, idle, throughput) 465 } 466 467 // BodyIdlePeers retrieves a flat list of all the currently body-idle peers within 468 // the active peer set, ordered by their reputation. 469 func (ps *peerSet) BodyIdlePeers() ([]*peer, int) { 470 idle := func(p *peer) bool { 471 return atomic.LoadInt32(&p.blockIdle) == 0 472 } 473 throughput := func(p *peer) float64 { 474 p.lock.RLock() 475 defer p.lock.RUnlock() 476 return p.blockThroughput 477 } 478 return ps.idlePeers(62, 64, idle, throughput) 479 } 480 481 // ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers 482 // within the active peer set, ordered by their reputation. 483 func (ps *peerSet) ReceiptIdlePeers() ([]*peer, int) { 484 idle := func(p *peer) bool { 485 return atomic.LoadInt32(&p.receiptIdle) == 0 486 } 487 throughput := func(p *peer) float64 { 488 p.lock.RLock() 489 defer p.lock.RUnlock() 490 return p.receiptThroughput 491 } 492 return ps.idlePeers(63, 64, idle, throughput) 493 } 494 495 // NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle 496 // peers within the active peer set, ordered by their reputation. 
497 func (ps *peerSet) NodeDataIdlePeers() ([]*peer, int) { 498 idle := func(p *peer) bool { 499 return atomic.LoadInt32(&p.stateIdle) == 0 500 } 501 throughput := func(p *peer) float64 { 502 p.lock.RLock() 503 defer p.lock.RUnlock() 504 return p.stateThroughput 505 } 506 return ps.idlePeers(63, 64, idle, throughput) 507 } 508 509 // idlePeers retrieves a flat list of all currently idle peers satisfying the 510 // protocol version constraints, using the provided function to check idleness. 511 // The resulting set of peers are sorted by their measure throughput. 512 func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peer) bool, throughput func(*peer) float64) ([]*peer, int) { 513 ps.lock.RLock() 514 defer ps.lock.RUnlock() 515 516 idle, total := make([]*peer, 0, len(ps.peers)), 0 517 for _, p := range ps.peers { 518 if p.version >= minProtocol && p.version <= maxProtocol { 519 if idleCheck(p) { 520 idle = append(idle, p) 521 } 522 total++ 523 } 524 } 525 for i := 0; i < len(idle); i++ { 526 for j := i + 1; j < len(idle); j++ { 527 if throughput(idle[i]) < throughput(idle[j]) { 528 idle[i], idle[j] = idle[j], idle[i] 529 } 530 } 531 } 532 return idle, total 533 } 534 535 // medianRTT returns the median RTT of te peerset, considering only the tuning 536 // peers if there are more peers available. 
537 func (ps *peerSet) medianRTT() time.Duration { 538 // Gather all the currnetly measured round trip times 539 ps.lock.RLock() 540 defer ps.lock.RUnlock() 541 542 rtts := make([]float64, 0, len(ps.peers)) 543 for _, p := range ps.peers { 544 p.lock.RLock() 545 rtts = append(rtts, float64(p.rtt)) 546 p.lock.RUnlock() 547 } 548 sort.Float64s(rtts) 549 550 median := rttMaxEstimate 551 if qosTuningPeers <= len(rtts) { 552 median = time.Duration(rtts[qosTuningPeers/2]) // Median of our tuning peers 553 } else if len(rtts) > 0 { 554 median = time.Duration(rtts[len(rtts)/2]) // Median of our connected peers (maintain even like this some baseline qos) 555 } 556 // Restrict the RTT into some QoS defaults, irrelevant of true RTT 557 if median < rttMinEstimate { 558 median = rttMinEstimate 559 } 560 if median > rttMaxEstimate { 561 median = rttMaxEstimate 562 } 563 return median 564 }