// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package discover implements the Node Discovery Protocol.
//
// The Node Discovery protocol provides a way to find RLPx nodes that
// can be connected to. It uses a Kademlia-like protocol to maintain a
// distributed database of the IDs and endpoints of all listening
// nodes.
package discover

import (
	"context"
	crand "crypto/rand"
	"encoding/binary"
	"fmt"
	mrand "math/rand"
	"net"
	"sort"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/netutil"
)

const (
	alpha           = 3  // Kademlia concurrency factor
	bucketSize      = 16 // Kademlia bucket size
	maxReplacements = 10 // Size of per-bucket replacement list

	// We keep buckets for the upper 1/15 of distances because
	// it's very unlikely we'll ever encounter a node that's closer.
	hashBits          = len(common.Hash{}) * 8
	nBuckets          = hashBits / 15       // Number of buckets
	bucketMinDistance = hashBits - nBuckets // Log distance of closest bucket

	// IP address limits.
	bucketIPLimit, bucketSubnet = 2, 24 // at most 2 addresses from the same /24
	tableIPLimit, tableSubnet   = 10, 24

	copyNodesInterval = 30 * time.Second
	seedMinTableTime  = 5 * time.Minute
	seedCount         = 30
	seedMaxAge        = 5 * 24 * time.Hour
)
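
// Working through the arithmetic above for the 32-byte hashes used as node
// IDs: hashBits = 32*8 = 256, nBuckets = 256/15 = 17, and
// bucketMinDistance = 256-17 = 239. The table therefore keeps one bucket for
// each log-distance in 241..256, plus a single catch-all bucket for every
// distance of 240 and below (see bucketAtDistance), matching the
// "upper 1/15" comment: 17/256 is roughly 1/15.
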
// Table is the 'node table', a Kademlia-like index of neighbor nodes. The table keeps
// itself up-to-date by verifying the liveness of neighbors and requesting their node
// records when announcements of a new record version are received.
type Table struct {
	mutex   sync.Mutex        // protects buckets, bucket content, nursery, rand
	buckets [nBuckets]*bucket // index of known nodes by distance
	nursery []*node           // bootstrap nodes
	rand    *mrand.Rand       // source of randomness, periodically reseeded
	ips     netutil.DistinctNetSet

	db  *enode.DB // database of known nodes
	net transport
	cfg Config
	log log.Logger

	// loop channels
	refreshReq chan chan struct{}
	initDone   chan struct{}
	closeReq   chan struct{}
	closed     chan struct{}

	nodeAddedHook   func(*bucket, *node)
	nodeRemovedHook func(*bucket, *node)
}

// transport is implemented by the UDP transports.
type transport interface {
	Self() *enode.Node
	RequestENR(*enode.Node) (*enode.Node, error)
	lookupRandom() []*enode.Node
	lookupSelf() []*enode.Node
	ping(*enode.Node) (seq uint64, err error)
}

// bucket contains nodes, ordered by their last activity. The entry
// that was most recently active is the first element in entries.
type bucket struct {
	entries      []*node // live entries, sorted by time of last contact
	replacements []*node // recently seen nodes to be used if revalidation fails
	ips          netutil.DistinctNetSet
	index        int
}

func newTable(t transport, db *enode.DB, cfg Config) (*Table, error) {
	cfg = cfg.withDefaults()
	tab := &Table{
		net:        t,
		db:         db,
		cfg:        cfg,
		log:        cfg.Log,
		refreshReq: make(chan chan struct{}),
		initDone:   make(chan struct{}),
		closeReq:   make(chan struct{}),
		closed:     make(chan struct{}),
		rand:       mrand.New(mrand.NewSource(0)),
		ips:        netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit},
	}
	if err := tab.setFallbackNodes(cfg.Bootnodes); err != nil {
		return nil, err
	}
	for i := range tab.buckets {
		tab.buckets[i] = &bucket{
			index: i,
			ips:   netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit},
		}
	}
	tab.seedRand()
	tab.loadSeedNodes()

	return tab, nil
}
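
// An illustrative sketch of how a table gets wired up. This is an assumption
// drawn from how the package's UDP transports use newTable, not code from
// this file; udpTransport and bootnodes are hypothetical placeholders, the
// Config values are arbitrary, and only Config fields referenced in this
// file are shown:
//
//	db, _ := enode.OpenDB("") // empty path selects an in-memory node database
//	cfg := Config{
//		Bootnodes:       bootnodes,        // initial points of contact
//		PingInterval:    10 * time.Second, // revalidation pacing
//		RefreshInterval: 30 * time.Minute, // refresh/lookup pacing
//	}
//	tab, err := newTable(udpTransport, db, cfg)
//	if err != nil {
//		return err
//	}
//	go tab.loop() // the transport starts the table's background loop
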
func newMeteredTable(t transport, db *enode.DB, cfg Config) (*Table, error) {
	tab, err := newTable(t, db, cfg)
	if err != nil {
		return nil, err
	}
	if metrics.Enabled {
		tab.nodeAddedHook = func(b *bucket, n *node) {
			bucketsCounter[b.index].Inc(1)
		}
		tab.nodeRemovedHook = func(b *bucket, n *node) {
			bucketsCounter[b.index].Dec(1)
		}
	}
	return tab, nil
}

// Nodes returns all nodes contained in the table.
func (tab *Table) Nodes() []*enode.Node {
	if !tab.isInitDone() {
		return nil
	}

	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	var nodes []*enode.Node
	for _, b := range &tab.buckets {
		for _, n := range b.entries {
			nodes = append(nodes, unwrapNode(n))
		}
	}
	return nodes
}

func (tab *Table) self() *enode.Node {
	return tab.net.Self()
}

func (tab *Table) seedRand() {
	var b [8]byte
	crand.Read(b[:])

	tab.mutex.Lock()
	tab.rand.Seed(int64(binary.BigEndian.Uint64(b[:])))
	tab.mutex.Unlock()
}

// getNode returns the node with the given ID or nil if it isn't in the table.
func (tab *Table) getNode(id enode.ID) *enode.Node {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	b := tab.bucket(id)
	for _, e := range b.entries {
		if e.ID() == id {
			return unwrapNode(e)
		}
	}
	return nil
}

// close terminates the network listener and flushes the node database.
func (tab *Table) close() {
	close(tab.closeReq)
	<-tab.closed
}

// setFallbackNodes sets the initial points of contact. These nodes
// are used to connect to the network if the table is empty and there
// are no known nodes in the database.
func (tab *Table) setFallbackNodes(nodes []*enode.Node) error {
	nursery := make([]*node, 0, len(nodes))
	for _, n := range nodes {
		if err := n.ValidateComplete(); err != nil {
			return fmt.Errorf("bad bootstrap node %q: %v", n, err)
		}
		if tab.cfg.NetRestrict != nil && !tab.cfg.NetRestrict.Contains(n.IP()) {
			tab.log.Error("Bootstrap node filtered by netrestrict", "id", n.ID(), "ip", n.IP())
			continue
		}
		nursery = append(nursery, wrapNode(n))
	}
	tab.nursery = nursery
	return nil
}

// isInitDone returns whether the table's initial seeding procedure has completed.
func (tab *Table) isInitDone() bool {
	select {
	case <-tab.initDone:
		return true
	default:
		return false
	}
}

func (tab *Table) refresh() <-chan struct{} {
	done := make(chan struct{})
	select {
	case tab.refreshReq <- done:
	case <-tab.closeReq:
		close(done)
	}
	return done
}

// loop schedules runs of doRefresh, doRevalidate and copyLiveNodes.
func (tab *Table) loop() {
	var (
		revalidate     = time.NewTimer(tab.nextRevalidateTime())
		refresh        = time.NewTimer(tab.nextRefreshTime())
		copyNodes      = time.NewTicker(copyNodesInterval)
		refreshDone    = make(chan struct{})           // where doRefresh reports completion
		revalidateDone chan struct{}                   // where doRevalidate reports completion
		waiting        = []chan struct{}{tab.initDone} // holds waiting callers while doRefresh runs
	)
	defer refresh.Stop()
	defer revalidate.Stop()
	defer copyNodes.Stop()

	// Start initial refresh.
	go tab.doRefresh(refreshDone)

loop:
	for {
		select {
		case <-refresh.C:
			tab.seedRand()
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}
		case req := <-tab.refreshReq:
			waiting = append(waiting, req)
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}
		case <-refreshDone:
			for _, ch := range waiting {
				close(ch)
			}
			waiting, refreshDone = nil, nil
			refresh.Reset(tab.nextRefreshTime())
		case <-revalidate.C:
			revalidateDone = make(chan struct{})
			go tab.doRevalidate(revalidateDone)
		case <-revalidateDone:
			revalidate.Reset(tab.nextRevalidateTime())
			revalidateDone = nil
		case <-copyNodes.C:
			go tab.copyLiveNodes()
		case <-tab.closeReq:
			break loop
		}
	}

	if refreshDone != nil {
		<-refreshDone
	}
	for _, ch := range waiting {
		close(ch)
	}
	if revalidateDone != nil {
		<-revalidateDone
	}
	close(tab.closed)
}
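
// The loop above is the only goroutine that touches the timers, so callers
// interact with it purely through channels. A caller that needs the table
// refreshed can block on the channel returned by refresh, which is closed
// once doRefresh completes (or immediately on shutdown). An illustrative
// sketch of that pattern:
//
//	done := tab.refresh()
//	<-done // table has been refreshed, or is shutting down
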
// doRefresh performs a lookup for a random target to keep buckets full. Seed nodes are
// inserted if the table is empty (initial bootstrap or discarded faulty peers).
func (tab *Table) doRefresh(done chan struct{}) {
	defer close(done)

	// Load nodes from the database and insert
	// them. This should yield a few previously seen nodes that are
	// (hopefully) still alive.
	tab.loadSeedNodes()

	// Run self lookup to discover new neighbor nodes.
	tab.net.lookupSelf()

	// The Kademlia paper specifies that the bucket refresh should
	// perform a lookup in the least recently used bucket. We cannot
	// adhere to this because the findnode target is a 512-bit value
	// (not hash-sized) and it is not easily possible to generate a
	// sha3 preimage that falls into a chosen bucket.
	// We perform a few lookups with a random target instead.
	for i := 0; i < 3; i++ {
		tab.net.lookupRandom()
	}
}

func (tab *Table) loadSeedNodes() {
	seeds := wrapNodes(tab.db.QuerySeeds(seedCount, seedMaxAge))
	seeds = append(seeds, tab.nursery...)
	for i := range seeds {
		seed := seeds[i]
		if tab.log.Enabled(context.Background(), log.LevelTrace) {
			age := time.Since(tab.db.LastPongReceived(seed.ID(), seed.IP()))
			tab.log.Trace("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age)
		}
		tab.addSeenNode(seed)
	}
}

// doRevalidate checks that the last node in a random bucket is still live and replaces or
// deletes the node if it isn't.
func (tab *Table) doRevalidate(done chan<- struct{}) {
	defer func() { done <- struct{}{} }()

	last, bi := tab.nodeToRevalidate()
	if last == nil {
		// No non-empty bucket found.
		return
	}

	// Ping the selected node and wait for a pong.
	remoteSeq, err := tab.net.ping(unwrapNode(last))

	// Also fetch record if the node replied and returned a higher sequence number.
	if last.Seq() < remoteSeq {
		n, err := tab.net.RequestENR(unwrapNode(last))
		if err != nil {
			tab.log.Debug("ENR request failed", "id", last.ID(), "addr", last.addr(), "err", err)
		} else {
			last = &node{Node: *n, addedAt: last.addedAt, livenessChecks: last.livenessChecks}
		}
	}

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.buckets[bi]
	if err == nil {
		// The node responded, move it to the front.
		last.livenessChecks++
		tab.log.Debug("Revalidated node", "b", bi, "id", last.ID(), "checks", last.livenessChecks)
		tab.bumpInBucket(b, last)
		return
	}
	// No reply received, pick a replacement or delete the node if there aren't
	// any replacements.
	if r := tab.replace(b, last); r != nil {
		tab.log.Debug("Replaced dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks, "r", r.ID(), "rip", r.IP())
	} else {
		tab.log.Debug("Removed dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks)
	}
}

// nodeToRevalidate returns the last node in a random, non-empty bucket.
func (tab *Table) nodeToRevalidate() (n *node, bi int) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	for _, bi = range tab.rand.Perm(len(tab.buckets)) {
		b := tab.buckets[bi]
		if len(b.entries) > 0 {
			last := b.entries[len(b.entries)-1]
			return last, bi
		}
	}
	return nil, 0
}

func (tab *Table) nextRevalidateTime() time.Duration {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	return time.Duration(tab.rand.Int63n(int64(tab.cfg.PingInterval)))
}

func (tab *Table) nextRefreshTime() time.Duration {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	half := tab.cfg.RefreshInterval / 2
	return half + time.Duration(tab.rand.Int63n(int64(half)))
}
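
// Both scheduling helpers above add random jitter so that nodes on the
// network do not revalidate or refresh in lockstep. As a worked example
// (the interval values are arbitrary, not defaults asserted by this file):
// with PingInterval = 10s, nextRevalidateTime is uniform in [0s, 10s);
// with RefreshInterval = 30m, nextRefreshTime is half + rand(half), i.e.
// uniform in [15m, 30m).
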
// copyLiveNodes adds nodes from the table to the database if they have been in the table
// longer than seedMinTableTime.
func (tab *Table) copyLiveNodes() {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	now := time.Now()
	for _, b := range &tab.buckets {
		for _, n := range b.entries {
			if n.livenessChecks > 0 && now.Sub(n.addedAt) >= seedMinTableTime {
				tab.db.UpdateNode(unwrapNode(n))
			}
		}
	}
}

// findnodeByID returns the n nodes in the table that are closest to the given id.
// This is used by the FINDNODE/v4 handler.
//
// The preferLive parameter says whether the caller wants liveness-checked results. If
// preferLive is true and the table contains any verified nodes, the result will not
// contain unverified nodes. However, if there are no verified nodes at all, the result
// will contain unverified nodes.
func (tab *Table) findnodeByID(target enode.ID, nresults int, preferLive bool) *nodesByDistance {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	// Scan all buckets. There might be a better way to do this, but there aren't that many
	// buckets, so this solution should be fine. The worst-case complexity of this loop
	// is O(tab.len() * nresults).
	nodes := &nodesByDistance{target: target}
	liveNodes := &nodesByDistance{target: target}
	for _, b := range &tab.buckets {
		for _, n := range b.entries {
			nodes.push(n, nresults)
			if preferLive && n.livenessChecks > 0 {
				liveNodes.push(n, nresults)
			}
		}
	}

	if preferLive && len(liveNodes.entries) > 0 {
		return liveNodes
	}
	return nodes
}

// appendLiveNodes adds nodes at the given distance to the result slice.
func (tab *Table) appendLiveNodes(dist uint, result []*enode.Node) []*enode.Node {
	if dist > 256 {
		return result
	}
	if dist == 0 {
		return append(result, tab.self())
	}

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	for _, n := range tab.bucketAtDistance(int(dist)).entries {
		if n.livenessChecks >= 1 {
			node := n.Node // avoid handing out pointer to struct field
			result = append(result, &node)
		}
	}
	return result
}

// len returns the number of nodes in the table.
func (tab *Table) len() (n int) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	for _, b := range &tab.buckets {
		n += len(b.entries)
	}
	return n
}

// bucketLen returns the number of nodes in the bucket for the given ID.
func (tab *Table) bucketLen(id enode.ID) int {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	return len(tab.bucket(id).entries)
}

// bucket returns the bucket for the given node ID hash.
func (tab *Table) bucket(id enode.ID) *bucket {
	d := enode.LogDist(tab.self().ID(), id)
	return tab.bucketAtDistance(d)
}

func (tab *Table) bucketAtDistance(d int) *bucket {
	if d <= bucketMinDistance {
		return tab.buckets[0]
	}
	return tab.buckets[d-bucketMinDistance-1]
}
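
// To make the bucketAtDistance mapping concrete, with bucketMinDistance =
// 239 as derived from the constants at the top of the file:
//
//	d <= 239 -> buckets[0]  (catch-all branch)
//	d == 240 -> buckets[0]  (240 - 239 - 1 = 0)
//	d == 241 -> buckets[1]
//	d == 256 -> buckets[16] (covers half the ID space, so it fills fastest)
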
// addSeenNode adds a node which may or may not be live to the end of a bucket. If the
// bucket has space available, adding the node succeeds immediately. Otherwise, the node is
// added to the replacements list.
//
// The caller must not hold tab.mutex.
func (tab *Table) addSeenNode(n *node) {
	if n.ID() == tab.self().ID() {
		return
	}

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.bucket(n.ID())
	if contains(b.entries, n.ID()) {
		// Already in bucket, don't add.
		return
	}
	if len(b.entries) >= bucketSize {
		// Bucket full, maybe add as replacement.
		tab.addReplacement(b, n)
		return
	}
	if !tab.addIP(b, n.IP()) {
		// Can't add: IP limit reached.
		return
	}

	// Add to end of bucket:
	b.entries = append(b.entries, n)
	b.replacements = deleteNode(b.replacements, n)
	n.addedAt = time.Now()

	if tab.nodeAddedHook != nil {
		tab.nodeAddedHook(b, n)
	}
}

// addVerifiedNode adds a node whose existence has been verified recently to the front of a
// bucket. If the node is already in the bucket, it is moved to the front. If the bucket
// has no space, the node is added to the replacements list.
//
// There is an additional safety measure: if the table is still initializing the node
// is not added. This prevents an attack where the table could be filled by just sending
// ping repeatedly.
//
// The caller must not hold tab.mutex.
func (tab *Table) addVerifiedNode(n *node) {
	if !tab.isInitDone() {
		return
	}
	if n.ID() == tab.self().ID() {
		return
	}

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.bucket(n.ID())
	if tab.bumpInBucket(b, n) {
		// Already in bucket, moved to front.
		return
	}
	if len(b.entries) >= bucketSize {
		// Bucket full, maybe add as replacement.
		tab.addReplacement(b, n)
		return
	}
	if !tab.addIP(b, n.IP()) {
		// Can't add: IP limit reached.
		return
	}

	// Add to front of bucket.
	b.entries, _ = pushNode(b.entries, n, bucketSize)
	b.replacements = deleteNode(b.replacements, n)
	n.addedAt = time.Now()

	if tab.nodeAddedHook != nil {
		tab.nodeAddedHook(b, n)
	}
}

// delete removes an entry from the node table. It is used to evict dead nodes.
func (tab *Table) delete(node *node) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	tab.deleteInBucket(tab.bucket(node.ID()), node)
}

func (tab *Table) addIP(b *bucket, ip net.IP) bool {
	if len(ip) == 0 {
		return false // Nodes without IP cannot be added.
	}
	if netutil.IsLAN(ip) {
		return true
	}
	if !tab.ips.Add(ip) {
		tab.log.Debug("IP exceeds table limit", "ip", ip)
		return false
	}
	if !b.ips.Add(ip) {
		tab.log.Debug("IP exceeds bucket limit", "ip", ip)
		tab.ips.Remove(ip)
		return false
	}
	return true
}

func (tab *Table) removeIP(b *bucket, ip net.IP) {
	if netutil.IsLAN(ip) {
		return
	}
	tab.ips.Remove(ip)
	b.ips.Remove(ip)
}

func (tab *Table) addReplacement(b *bucket, n *node) {
	for _, e := range b.replacements {
		if e.ID() == n.ID() {
			return // already in list
		}
	}
	if !tab.addIP(b, n.IP()) {
		return
	}
	var removed *node
	b.replacements, removed = pushNode(b.replacements, n, maxReplacements)
	if removed != nil {
		tab.removeIP(b, removed.IP())
	}
}

// replace removes n from the replacement list and replaces 'last' with it if it is the
// last entry in the bucket. If 'last' isn't the last entry, it has either been replaced
// with someone else or became active.
func (tab *Table) replace(b *bucket, last *node) *node {
	if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID() != last.ID() {
		// Entry has moved, don't replace it.
		return nil
	}
	// Still the last entry.
	if len(b.replacements) == 0 {
		tab.deleteInBucket(b, last)
		return nil
	}
	r := b.replacements[tab.rand.Intn(len(b.replacements))]
	b.replacements = deleteNode(b.replacements, r)
	b.entries[len(b.entries)-1] = r
	tab.removeIP(b, last.IP())
	return r
}
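
// The addIP checks used throughout the insertion paths above enforce both
// limits declared in the constants: at most bucketIPLimit (2) addresses per
// /24 within one bucket, and at most tableIPLimit (10) per /24 across the
// whole table, with LAN addresses exempt. As a worked example (the subnet
// is an arbitrary documentation range): once two nodes from 203.0.113.0/24
// occupy one bucket, a third node from that subnet is rejected for that
// bucket, and an eleventh node from the subnet is rejected table-wide.
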
// bumpInBucket moves the given node to the front of the bucket entry list
// if it is contained in that list.
func (tab *Table) bumpInBucket(b *bucket, n *node) bool {
	for i := range b.entries {
		if b.entries[i].ID() == n.ID() {
			if !n.IP().Equal(b.entries[i].IP()) {
				// Endpoint has changed, ensure that the new IP fits into table limits.
				tab.removeIP(b, b.entries[i].IP())
				if !tab.addIP(b, n.IP()) {
					// It doesn't, put the previous one back.
					tab.addIP(b, b.entries[i].IP())
					return false
				}
			}
			// Move it to the front.
			copy(b.entries[1:], b.entries[:i])
			b.entries[0] = n
			return true
		}
	}
	return false
}

func (tab *Table) deleteInBucket(b *bucket, n *node) {
	// Check if the node is actually in the bucket so the removed hook
	// isn't called multiple times for the same node.
	if !contains(b.entries, n.ID()) {
		return
	}
	b.entries = deleteNode(b.entries, n)
	tab.removeIP(b, n.IP())
	if tab.nodeRemovedHook != nil {
		tab.nodeRemovedHook(b, n)
	}
}

func contains(ns []*node, id enode.ID) bool {
	for _, n := range ns {
		if n.ID() == id {
			return true
		}
	}
	return false
}

// pushNode adds n to the front of list, keeping at most max items.
func pushNode(list []*node, n *node, max int) ([]*node, *node) {
	if len(list) < max {
		list = append(list, nil)
	}
	removed := list[len(list)-1]
	copy(list[1:], list)
	list[0] = n
	return list, removed
}

// deleteNode removes n from list.
func deleteNode(list []*node, n *node) []*node {
	for i := range list {
		if list[i].ID() == n.ID() {
			return append(list[:i], list[i+1:]...)
		}
	}
	return list
}

// nodesByDistance is a list of nodes, ordered by distance to target.
type nodesByDistance struct {
	entries []*node
	target  enode.ID
}

// push adds the given node to the list, keeping the total size at most maxElems.
func (h *nodesByDistance) push(n *node, maxElems int) {
	ix := sort.Search(len(h.entries), func(i int) bool {
		return enode.DistCmp(h.target, h.entries[i].ID(), n.ID()) > 0
	})

	end := len(h.entries)
	if len(h.entries) < maxElems {
		h.entries = append(h.entries, n)
	}
	if ix < end {
		// Slide existing entries down to make room.
		// This will overwrite the entry we just appended.
		copy(h.entries[ix+1:], h.entries[ix:])
		h.entries[ix] = n
	}
}
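
// A worked example of push (distances are XOR-metric distances to target,
// and the values are arbitrary): with maxElems = 3 and entries at distances
// [5, 9, 12], pushing a node at distance 7 yields [5, 7, 9] and the node at
// distance 12 falls off the end; pushing a node at distance 20 leaves a
// full list unchanged, because sort.Search lands past the last slot.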