// This file is part of the go-sberex library. The go-sberex library is
// free software: you can redistribute it and/or modify it under the terms
// of the GNU Lesser General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// The go-sberex library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
// General Public License <http://www.gnu.org/licenses/> for more details.

// Contains the active peer-set of the downloader, maintaining both failures
// as well as reputation metrics to prioritize the block retrievals.

package downloader

import (
	"errors"
	"fmt"
	"math"
	"math/big"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"github.com/Sberex/go-sberex/common"
	"github.com/Sberex/go-sberex/event"
	"github.com/Sberex/go-sberex/log"
)

const (
	maxLackingHashes  = 4096 // Maximum number of entries allowed on the list of lacking items
	measurementImpact = 0.1  // The impact a single measurement has on a peer's final throughput value.
)

var (
	errAlreadyFetching   = errors.New("already fetching blocks from peer")
	errAlreadyRegistered = errors.New("peer is already registered")
	errNotRegistered     = errors.New("peer is not registered")
)

// peerConnection represents an active peer from which hashes and blocks are retrieved.
type peerConnection struct {
	id string // Unique identifier of the peer

	headerIdle  int32 // Current header activity state of the peer (idle = 0, active = 1)
	blockIdle   int32 // Current block activity state of the peer (idle = 0, active = 1)
	receiptIdle int32 // Current receipt activity state of the peer (idle = 0, active = 1)
	stateIdle   int32 // Current node data activity state of the peer (idle = 0, active = 1)

	headerThroughput  float64 // Number of headers measured to be retrievable per second
	blockThroughput   float64 // Number of blocks (bodies) measured to be retrievable per second
	receiptThroughput float64 // Number of receipts measured to be retrievable per second
	stateThroughput   float64 // Number of node data pieces measured to be retrievable per second

	rtt time.Duration // Request round trip time to track responsiveness (QoS)

	headerStarted  time.Time // Time instance when the last header fetch was started
	blockStarted   time.Time // Time instance when the last block (body) fetch was started
	receiptStarted time.Time // Time instance when the last receipt fetch was started
	stateStarted   time.Time // Time instance when the last node data fetch was started

	lacking map[common.Hash]struct{} // Set of hashes not to request (didn't have previously)

	peer Peer // Network handle through which the actual retrieval requests are issued

	version int          // Eth protocol version number to switch strategies
	log     log.Logger   // Contextual logger to add extra infos to peer logs
	lock    sync.RWMutex // Guards the throughput estimates, rtt and lacking set
}

// LightPeer encapsulates the methods required to synchronise with a remote light peer.
type LightPeer interface {
	Head() (common.Hash, *big.Int)
	RequestHeadersByHash(common.Hash, int, int, bool) error
	RequestHeadersByNumber(uint64, int, int, bool) error
}

// Peer encapsulates the methods required to synchronise with a remote full peer.
81 type Peer interface { 82 LightPeer 83 RequestBodies([]common.Hash) error 84 RequestReceipts([]common.Hash) error 85 RequestNodeData([]common.Hash) error 86 } 87 88 // lightPeerWrapper wraps a LightPeer struct, stubbing out the Peer-only methods. 89 type lightPeerWrapper struct { 90 peer LightPeer 91 } 92 93 func (w *lightPeerWrapper) Head() (common.Hash, *big.Int) { return w.peer.Head() } 94 func (w *lightPeerWrapper) RequestHeadersByHash(h common.Hash, amount int, skip int, reverse bool) error { 95 return w.peer.RequestHeadersByHash(h, amount, skip, reverse) 96 } 97 func (w *lightPeerWrapper) RequestHeadersByNumber(i uint64, amount int, skip int, reverse bool) error { 98 return w.peer.RequestHeadersByNumber(i, amount, skip, reverse) 99 } 100 func (w *lightPeerWrapper) RequestBodies([]common.Hash) error { 101 panic("RequestBodies not supported in light client mode sync") 102 } 103 func (w *lightPeerWrapper) RequestReceipts([]common.Hash) error { 104 panic("RequestReceipts not supported in light client mode sync") 105 } 106 func (w *lightPeerWrapper) RequestNodeData([]common.Hash) error { 107 panic("RequestNodeData not supported in light client mode sync") 108 } 109 110 // newPeerConnection creates a new downloader peer. 111 func newPeerConnection(id string, version int, peer Peer, logger log.Logger) *peerConnection { 112 return &peerConnection{ 113 id: id, 114 lacking: make(map[common.Hash]struct{}), 115 116 peer: peer, 117 118 version: version, 119 log: logger, 120 } 121 } 122 123 // Reset clears the internal state of a peer entity. 
124 func (p *peerConnection) Reset() { 125 p.lock.Lock() 126 defer p.lock.Unlock() 127 128 atomic.StoreInt32(&p.headerIdle, 0) 129 atomic.StoreInt32(&p.blockIdle, 0) 130 atomic.StoreInt32(&p.receiptIdle, 0) 131 atomic.StoreInt32(&p.stateIdle, 0) 132 133 p.headerThroughput = 0 134 p.blockThroughput = 0 135 p.receiptThroughput = 0 136 p.stateThroughput = 0 137 138 p.lacking = make(map[common.Hash]struct{}) 139 } 140 141 // FetchHeaders sends a header retrieval request to the remote peer. 142 func (p *peerConnection) FetchHeaders(from uint64, count int) error { 143 // Sanity check the protocol version 144 if p.version < 62 { 145 panic(fmt.Sprintf("header fetch [eth/62+] requested on eth/%d", p.version)) 146 } 147 // Short circuit if the peer is already fetching 148 if !atomic.CompareAndSwapInt32(&p.headerIdle, 0, 1) { 149 return errAlreadyFetching 150 } 151 p.headerStarted = time.Now() 152 153 // Issue the header retrieval request (absolut upwards without gaps) 154 go p.peer.RequestHeadersByNumber(from, count, 0, false) 155 156 return nil 157 } 158 159 // FetchBodies sends a block body retrieval request to the remote peer. 160 func (p *peerConnection) FetchBodies(request *fetchRequest) error { 161 // Sanity check the protocol version 162 if p.version < 62 { 163 panic(fmt.Sprintf("body fetch [eth/62+] requested on eth/%d", p.version)) 164 } 165 // Short circuit if the peer is already fetching 166 if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) { 167 return errAlreadyFetching 168 } 169 p.blockStarted = time.Now() 170 171 // Convert the header set to a retrievable slice 172 hashes := make([]common.Hash, 0, len(request.Headers)) 173 for _, header := range request.Headers { 174 hashes = append(hashes, header.Hash()) 175 } 176 go p.peer.RequestBodies(hashes) 177 178 return nil 179 } 180 181 // FetchReceipts sends a receipt retrieval request to the remote peer. 
182 func (p *peerConnection) FetchReceipts(request *fetchRequest) error { 183 // Sanity check the protocol version 184 if p.version < 63 { 185 panic(fmt.Sprintf("body fetch [eth/63+] requested on eth/%d", p.version)) 186 } 187 // Short circuit if the peer is already fetching 188 if !atomic.CompareAndSwapInt32(&p.receiptIdle, 0, 1) { 189 return errAlreadyFetching 190 } 191 p.receiptStarted = time.Now() 192 193 // Convert the header set to a retrievable slice 194 hashes := make([]common.Hash, 0, len(request.Headers)) 195 for _, header := range request.Headers { 196 hashes = append(hashes, header.Hash()) 197 } 198 go p.peer.RequestReceipts(hashes) 199 200 return nil 201 } 202 203 // FetchNodeData sends a node state data retrieval request to the remote peer. 204 func (p *peerConnection) FetchNodeData(hashes []common.Hash) error { 205 // Sanity check the protocol version 206 if p.version < 63 { 207 panic(fmt.Sprintf("node data fetch [eth/63+] requested on eth/%d", p.version)) 208 } 209 // Short circuit if the peer is already fetching 210 if !atomic.CompareAndSwapInt32(&p.stateIdle, 0, 1) { 211 return errAlreadyFetching 212 } 213 p.stateStarted = time.Now() 214 215 go p.peer.RequestNodeData(hashes) 216 217 return nil 218 } 219 220 // SetHeadersIdle sets the peer to idle, allowing it to execute new header retrieval 221 // requests. Its estimated header retrieval throughput is updated with that measured 222 // just now. 223 func (p *peerConnection) SetHeadersIdle(delivered int) { 224 p.setIdle(p.headerStarted, delivered, &p.headerThroughput, &p.headerIdle) 225 } 226 227 // SetBlocksIdle sets the peer to idle, allowing it to execute new block retrieval 228 // requests. Its estimated block retrieval throughput is updated with that measured 229 // just now. 
func (p *peerConnection) SetBlocksIdle(delivered int) {
	p.setIdle(p.blockStarted, delivered, &p.blockThroughput, &p.blockIdle)
}

// SetBodiesIdle sets the peer to idle, allowing it to execute block body retrieval
// requests. Its estimated body retrieval throughput is updated with that measured
// just now.
//
// NOTE(review): this is byte-identical to SetBlocksIdle above — blocks and
// bodies share the same idle flag and throughput estimate; presumably both
// names are kept for different call sites. Confirm before merging the two.
func (p *peerConnection) SetBodiesIdle(delivered int) {
	p.setIdle(p.blockStarted, delivered, &p.blockThroughput, &p.blockIdle)
}

// SetReceiptsIdle sets the peer to idle, allowing it to execute new receipt
// retrieval requests. Its estimated receipt retrieval throughput is updated
// with that measured just now.
func (p *peerConnection) SetReceiptsIdle(delivered int) {
	p.setIdle(p.receiptStarted, delivered, &p.receiptThroughput, &p.receiptIdle)
}

// SetNodeDataIdle sets the peer to idle, allowing it to execute new state trie
// data retrieval requests. Its estimated state retrieval throughput is updated
// with that measured just now.
func (p *peerConnection) SetNodeDataIdle(delivered int) {
	p.setIdle(p.stateStarted, delivered, &p.stateThroughput, &p.stateIdle)
}

// setIdle sets the peer to idle, allowing it to execute new retrieval requests.
// Its estimated retrieval throughput is updated with that measured just now.
func (p *peerConnection) setIdle(started time.Time, delivered int, throughput *float64, idle *int32) {
	// Irrelevant of the scaling, make sure the peer ends up idle
	defer atomic.StoreInt32(idle, 0)

	p.lock.Lock()
	defer p.lock.Unlock()

	// If nothing was delivered (hard timeout / unavailable data), reduce throughput to minimum
	if delivered == 0 {
		*throughput = 0
		return
	}
	// Otherwise update the throughput with a new measurement
	elapsed := time.Since(started) + 1 // +1 (ns) to ensure non-zero divisor
	measured := float64(delivered) / (float64(elapsed) / float64(time.Second))

	// Exponential moving average: each sample shifts the estimate by
	// measurementImpact (10%), smoothing out one-off spikes.
	*throughput = (1-measurementImpact)*(*throughput) + measurementImpact*measured
	// Track the request round-trip time with the same smoothing factor
	p.rtt = time.Duration((1-measurementImpact)*float64(p.rtt) + measurementImpact*float64(elapsed))

	p.log.Trace("Peer throughput measurements updated",
		"hps", p.headerThroughput, "bps", p.blockThroughput,
		"rps", p.receiptThroughput, "sps", p.stateThroughput,
		"miss", len(p.lacking), "rtt", p.rtt)
}

// HeaderCapacity retrieves the peers header download allowance based on its
// previously discovered throughput.
func (p *peerConnection) HeaderCapacity(targetRTT time.Duration) int {
	p.lock.RLock()
	defer p.lock.RUnlock()

	// Allowance is throughput scaled to the target RTT window, clamped to
	// at least 2 and at most MaxHeaderFetch items.
	return int(math.Min(1+math.Max(1, p.headerThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxHeaderFetch)))
}

// BlockCapacity retrieves the peers block download allowance based on its
// previously discovered throughput.
func (p *peerConnection) BlockCapacity(targetRTT time.Duration) int {
	p.lock.RLock()
	defer p.lock.RUnlock()

	// Same clamping scheme as HeaderCapacity, bounded by MaxBlockFetch.
	return int(math.Min(1+math.Max(1, p.blockThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxBlockFetch)))
}

// ReceiptCapacity retrieves the peers receipt download allowance based on its
// previously discovered throughput.
302 func (p *peerConnection) ReceiptCapacity(targetRTT time.Duration) int { 303 p.lock.RLock() 304 defer p.lock.RUnlock() 305 306 return int(math.Min(1+math.Max(1, p.receiptThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxReceiptFetch))) 307 } 308 309 // NodeDataCapacity retrieves the peers state download allowance based on its 310 // previously discovered throughput. 311 func (p *peerConnection) NodeDataCapacity(targetRTT time.Duration) int { 312 p.lock.RLock() 313 defer p.lock.RUnlock() 314 315 return int(math.Min(1+math.Max(1, p.stateThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxStateFetch))) 316 } 317 318 // MarkLacking appends a new entity to the set of items (blocks, receipts, states) 319 // that a peer is known not to have (i.e. have been requested before). If the 320 // set reaches its maximum allowed capacity, items are randomly dropped off. 321 func (p *peerConnection) MarkLacking(hash common.Hash) { 322 p.lock.Lock() 323 defer p.lock.Unlock() 324 325 for len(p.lacking) >= maxLackingHashes { 326 for drop := range p.lacking { 327 delete(p.lacking, drop) 328 break 329 } 330 } 331 p.lacking[hash] = struct{}{} 332 } 333 334 // Lacks retrieves whether the hash of a blockchain item is on the peers lacking 335 // list (i.e. whether we know that the peer does not have it). 336 func (p *peerConnection) Lacks(hash common.Hash) bool { 337 p.lock.RLock() 338 defer p.lock.RUnlock() 339 340 _, ok := p.lacking[hash] 341 return ok 342 } 343 344 // peerSet represents the collection of active peer participating in the chain 345 // download procedure. 346 type peerSet struct { 347 peers map[string]*peerConnection 348 newPeerFeed event.Feed 349 peerDropFeed event.Feed 350 lock sync.RWMutex 351 } 352 353 // newPeerSet creates a new peer set top track the active download sources. 
354 func newPeerSet() *peerSet { 355 return &peerSet{ 356 peers: make(map[string]*peerConnection), 357 } 358 } 359 360 // SubscribeNewPeers subscribes to peer arrival events. 361 func (ps *peerSet) SubscribeNewPeers(ch chan<- *peerConnection) event.Subscription { 362 return ps.newPeerFeed.Subscribe(ch) 363 } 364 365 // SubscribePeerDrops subscribes to peer departure events. 366 func (ps *peerSet) SubscribePeerDrops(ch chan<- *peerConnection) event.Subscription { 367 return ps.peerDropFeed.Subscribe(ch) 368 } 369 370 // Reset iterates over the current peer set, and resets each of the known peers 371 // to prepare for a next batch of block retrieval. 372 func (ps *peerSet) Reset() { 373 ps.lock.RLock() 374 defer ps.lock.RUnlock() 375 376 for _, peer := range ps.peers { 377 peer.Reset() 378 } 379 } 380 381 // Register injects a new peer into the working set, or returns an error if the 382 // peer is already known. 383 // 384 // The method also sets the starting throughput values of the new peer to the 385 // average of all existing peers, to give it a realistic chance of being used 386 // for data retrievals. 
func (ps *peerSet) Register(p *peerConnection) error {
	// Retrieve the current median RTT as a sane default.
	// NOTE(review): p.rtt is written without holding p.lock — safe only if p
	// is not yet shared with other goroutines at this point; confirm callers.
	p.rtt = ps.medianRTT()

	// Register the new peer with some meaningful defaults
	ps.lock.Lock()
	if _, ok := ps.peers[p.id]; ok {
		ps.lock.Unlock()
		return errAlreadyRegistered
	}
	if len(ps.peers) > 0 {
		// Seed the newcomer's throughput estimates with the average of all
		// existing peers, so it gets a realistic chance of being selected.
		p.headerThroughput, p.blockThroughput, p.receiptThroughput, p.stateThroughput = 0, 0, 0, 0

		for _, peer := range ps.peers {
			peer.lock.RLock()
			p.headerThroughput += peer.headerThroughput
			p.blockThroughput += peer.blockThroughput
			p.receiptThroughput += peer.receiptThroughput
			p.stateThroughput += peer.stateThroughput
			peer.lock.RUnlock()
		}
		p.headerThroughput /= float64(len(ps.peers))
		p.blockThroughput /= float64(len(ps.peers))
		p.receiptThroughput /= float64(len(ps.peers))
		p.stateThroughput /= float64(len(ps.peers))
	}
	ps.peers[p.id] = p
	// Release the set lock before publishing the event, so feed subscribers
	// can call back into the peer set without deadlocking.
	ps.lock.Unlock()

	ps.newPeerFeed.Send(p)
	return nil
}

// Unregister removes a remote peer from the active set, disabling any further
// actions to/from that particular entity.
func (ps *peerSet) Unregister(id string) error {
	ps.lock.Lock()
	p, ok := ps.peers[id]
	if !ok {
		defer ps.lock.Unlock()
		return errNotRegistered
	}
	delete(ps.peers, id)
	// Release the set lock before publishing the drop event (see Register)
	ps.lock.Unlock()

	ps.peerDropFeed.Send(p)
	return nil
}

// Peer retrieves the registered peer with the given id.
func (ps *peerSet) Peer(id string) *peerConnection {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	return ps.peers[id]
}

// Len returns if the current number of peers in the set.
func (ps *peerSet) Len() int {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	return len(ps.peers)
}

// AllPeers retrieves a flat list of all the peers within the set.
453 func (ps *peerSet) AllPeers() []*peerConnection { 454 ps.lock.RLock() 455 defer ps.lock.RUnlock() 456 457 list := make([]*peerConnection, 0, len(ps.peers)) 458 for _, p := range ps.peers { 459 list = append(list, p) 460 } 461 return list 462 } 463 464 // HeaderIdlePeers retrieves a flat list of all the currently header-idle peers 465 // within the active peer set, ordered by their reputation. 466 func (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) { 467 idle := func(p *peerConnection) bool { 468 return atomic.LoadInt32(&p.headerIdle) == 0 469 } 470 throughput := func(p *peerConnection) float64 { 471 p.lock.RLock() 472 defer p.lock.RUnlock() 473 return p.headerThroughput 474 } 475 return ps.idlePeers(62, 64, idle, throughput) 476 } 477 478 // BodyIdlePeers retrieves a flat list of all the currently body-idle peers within 479 // the active peer set, ordered by their reputation. 480 func (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) { 481 idle := func(p *peerConnection) bool { 482 return atomic.LoadInt32(&p.blockIdle) == 0 483 } 484 throughput := func(p *peerConnection) float64 { 485 p.lock.RLock() 486 defer p.lock.RUnlock() 487 return p.blockThroughput 488 } 489 return ps.idlePeers(62, 64, idle, throughput) 490 } 491 492 // ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers 493 // within the active peer set, ordered by their reputation. 494 func (ps *peerSet) ReceiptIdlePeers() ([]*peerConnection, int) { 495 idle := func(p *peerConnection) bool { 496 return atomic.LoadInt32(&p.receiptIdle) == 0 497 } 498 throughput := func(p *peerConnection) float64 { 499 p.lock.RLock() 500 defer p.lock.RUnlock() 501 return p.receiptThroughput 502 } 503 return ps.idlePeers(63, 64, idle, throughput) 504 } 505 506 // NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle 507 // peers within the active peer set, ordered by their reputation. 
508 func (ps *peerSet) NodeDataIdlePeers() ([]*peerConnection, int) { 509 idle := func(p *peerConnection) bool { 510 return atomic.LoadInt32(&p.stateIdle) == 0 511 } 512 throughput := func(p *peerConnection) float64 { 513 p.lock.RLock() 514 defer p.lock.RUnlock() 515 return p.stateThroughput 516 } 517 return ps.idlePeers(63, 64, idle, throughput) 518 } 519 520 // idlePeers retrieves a flat list of all currently idle peers satisfying the 521 // protocol version constraints, using the provided function to check idleness. 522 // The resulting set of peers are sorted by their measure throughput. 523 func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peerConnection) bool, throughput func(*peerConnection) float64) ([]*peerConnection, int) { 524 ps.lock.RLock() 525 defer ps.lock.RUnlock() 526 527 idle, total := make([]*peerConnection, 0, len(ps.peers)), 0 528 for _, p := range ps.peers { 529 if p.version >= minProtocol && p.version <= maxProtocol { 530 if idleCheck(p) { 531 idle = append(idle, p) 532 } 533 total++ 534 } 535 } 536 for i := 0; i < len(idle); i++ { 537 for j := i + 1; j < len(idle); j++ { 538 if throughput(idle[i]) < throughput(idle[j]) { 539 idle[i], idle[j] = idle[j], idle[i] 540 } 541 } 542 } 543 return idle, total 544 } 545 546 // medianRTT returns the median RTT of the peerset, considering only the tuning 547 // peers if there are more peers available. 
func (ps *peerSet) medianRTT() time.Duration {
	// Gather all the currently measured round trip times
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	rtts := make([]float64, 0, len(ps.peers))
	for _, p := range ps.peers {
		p.lock.RLock()
		rtts = append(rtts, float64(p.rtt))
		p.lock.RUnlock()
	}
	sort.Float64s(rtts)

	// Default to the maximum estimate when there are no peers at all.
	// rttMinEstimate / rttMaxEstimate / qosTuningPeers are defined elsewhere
	// in this package — presumably the downloader's QoS tuning constants.
	median := rttMaxEstimate
	if qosTuningPeers <= len(rtts) {
		median = time.Duration(rtts[qosTuningPeers/2]) // Median of our tuning peers
	} else if len(rtts) > 0 {
		median = time.Duration(rtts[len(rtts)/2]) // Median of our connected peers (maintain even like this some baseline qos)
	}
	// Restrict the RTT into some QoS defaults, irrelevant of true RTT
	if median < rttMinEstimate {
		median = rttMinEstimate
	}
	if median > rttMaxEstimate {
		median = rttMaxEstimate
	}
	return median
}