github.com/annchain/OG@v0.0.9/p2p/discover/table.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package discover implements the Node Discovery Protocol.
//
// The Node Discovery protocol provides a way to find RLPx nodes that
// can be connected to. It uses a Kademlia-like protocol to maintain a
// distributed database of the IDs and endpoints of all listening
// nodes.
package discover

import (
	"crypto/ecdsa"
	crand "crypto/rand"
	"encoding/binary"
	"fmt"
	mrand "math/rand"
	"net"
	"sort"
	"sync"
	"time"

	"github.com/annchain/OG/arefactor/common/goroutine"
	"github.com/annchain/OG/arefactor/og/types"
	ogcrypto2 "github.com/annchain/OG/deprecated/ogcrypto"
	"github.com/annchain/OG/p2p/netutil"
	"github.com/annchain/OG/p2p/onode"
	"github.com/sirupsen/logrus"
)

const (
	alpha           = 3  // Kademlia concurrency factor
	bucketSize      = 16 // Kademlia bucket size
	maxReplacements = 10 // size of per-bucket replacement list

	// We keep buckets for the upper 1/15 of distances because
	// it's very unlikely we'll ever encounter a node that's closer.
	hashBits          = len(types.Hash{}.Bytes) * 8
	nBuckets          = hashBits / 15       // number of buckets
	bucketMinDistance = hashBits - nBuckets // log distance of the closest bucket

	// IP address limits.
	bucketIPLimit, bucketSubnet = 2, 24 // at most 2 addresses from the same /24
	tableIPLimit, tableSubnet   = 10, 24

	maxFindnodeFailures = 5 // nodes exceeding this limit are dropped
	refreshInterval     = 30 * time.Minute
	revalidateInterval  = 10 * time.Second
	copyNodesInterval   = 30 * time.Second
	seedMinTableTime    = 5 * time.Minute
	seedCount           = 30
	seedMaxAge          = 5 * 24 * time.Hour
)

type Table struct {
	mutex   sync.Mutex        // protects buckets, bucket content, nursery, rand
	buckets [nBuckets]*bucket // index of known nodes by distance
	nursery []*node           // bootstrap nodes
	rand    *mrand.Rand       // source of randomness, periodically reseeded
	ips     netutil.DistinctNetSet

	db         *onode.DB // database of known nodes
	net        transport
	refreshReq chan chan struct{}
	initDone   chan struct{}
	closeReq   chan struct{}
	closed     chan struct{}

	nodeAddedHook func(*node) // for testing
}

// transport is implemented by the UDP transport. It is an interface
// so we can test without opening lots of UDP sockets and without
// generating a private key.
type transport interface {
	self() *onode.Node
	ping(onode.ID, *net.UDPAddr) error
	findnode(toid onode.ID, addr *net.UDPAddr, target EncPubkey) ([]*node, error)
	close()
}
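// nullTransport is an illustrative sketch (not part of the original file) of
// how the transport interface above can be satisfied by a test double, so a
// Table can be exercised without opening UDP sockets or generating a private
// key. The self node is assumed to be constructed by the caller.
type nullTransport struct{ selfNode *onode.Node }

func (t *nullTransport) self() *onode.Node { return t.selfNode }

// ping always succeeds, so every node appears alive during revalidation.
func (t *nullTransport) ping(onode.ID, *net.UDPAddr) error { return nil }

// findnode returns no neighbors; a real test double would return canned results.
func (t *nullTransport) findnode(toid onode.ID, addr *net.UDPAddr, target EncPubkey) ([]*node, error) {
	return nil, nil
}

func (t *nullTransport) close() {}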
// bucket contains nodes, ordered by their last activity. The entry
// that was most recently active is the first element in entries.
type bucket struct {
	entries      []*node // live entries, sorted by time of last contact
	replacements []*node // recently seen nodes to be used if revalidation fails
	ips          netutil.DistinctNetSet
}

func newTable(t transport, db *onode.DB, bootnodes []*onode.Node) (*Table, error) {
	tab := &Table{
		net:        t,
		db:         db,
		refreshReq: make(chan chan struct{}),
		initDone:   make(chan struct{}),
		closeReq:   make(chan struct{}),
		closed:     make(chan struct{}),
		rand:       mrand.New(mrand.NewSource(0)),
		ips:        netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit},
	}
	if err := tab.setFallbackNodes(bootnodes); err != nil {
		return nil, err
	}
	for i := range tab.buckets {
		tab.buckets[i] = &bucket{
			ips: netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit},
		}
	}
	tab.seedRand()
	tab.loadSeedNodes()

	goroutine.New(tab.loop)
	return tab, nil
}

func (tab *Table) self() *onode.Node {
	return tab.net.self()
}

func (tab *Table) seedRand() {
	var b [8]byte
	crand.Read(b[:])

	tab.mutex.Lock()
	tab.rand.Seed(int64(binary.BigEndian.Uint64(b[:])))
	tab.mutex.Unlock()
}

// ReadRandomNodes fills the given slice with random nodes from the table. The
// results are guaranteed to be unique for a single invocation: no node will
// appear twice.
func (tab *Table) ReadRandomNodes(buf []*onode.Node) (n int) {
	if !tab.isInitDone() {
		return 0
	}
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	// Find all non-empty buckets and get a fresh slice of their entries.
	var buckets [][]*node
	for _, b := range &tab.buckets {
		if len(b.entries) > 0 {
			buckets = append(buckets, b.entries)
		}
	}
	if len(buckets) == 0 {
		return 0
	}
	// Shuffle the buckets (Fisher-Yates).
	for i := len(buckets) - 1; i > 0; i-- {
		j := tab.rand.Intn(i + 1)
		buckets[i], buckets[j] = buckets[j], buckets[i]
	}
	// Move the head of each bucket into buf, removing buckets that become empty.
	var i, j int
	for ; i < len(buf); i, j = i+1, (j+1)%len(buckets) {
		b := buckets[j]
		buf[i] = unwrapNode(b[0])
		buckets[j] = b[1:]
		if len(b) == 1 {
			buckets = append(buckets[:j], buckets[j+1:]...)
		}
		if len(buckets) == 0 {
			// Count the node written in this iteration; break skips the
			// loop's post statement, so i must be bumped by hand.
			i++
			break
		}
	}
	return i
}

// Close terminates the network listener and flushes the node database.
func (tab *Table) Close() {
	if tab.net != nil {
		tab.net.close()
	}

	select {
	case <-tab.closed:
		// already closed.
	case tab.closeReq <- struct{}{}:
		<-tab.closed // wait for the table loop to end.
	}
}

// setFallbackNodes sets the initial points of contact. These nodes
// are used to connect to the network if the table is empty and there
// are no known nodes in the database.
func (tab *Table) setFallbackNodes(nodes []*onode.Node) error {
	for _, n := range nodes {
		if err := n.ValidateComplete(); err != nil {
			return fmt.Errorf("bad bootstrap node %q: %v", n, err)
		}
	}
	tab.nursery = wrapNodes(nodes)
	return nil
}

// isInitDone reports whether the table's initial seeding procedure has completed.
func (tab *Table) isInitDone() bool {
	select {
	case <-tab.initDone:
		return true
	default:
		return false
	}
}
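// exampleReadRandomNodes is an illustrative sketch (not part of the original
// file) of the intended calling pattern for ReadRandomNodes: the method
// reports how many slots it filled, so only the first n entries of the buffer
// are valid after the call.
func exampleReadRandomNodes(tab *Table) []*onode.Node {
	buf := make([]*onode.Node, bucketSize)
	n := tab.ReadRandomNodes(buf)
	return buf[:n]
}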
// Resolve searches for a specific node with the given ID.
// It returns nil if the node could not be found.
func (tab *Table) Resolve(n *onode.Node) *onode.Node {
	// If the node is present in the local table, no
	// network interaction is required.
	hash := n.ID()
	tab.mutex.Lock()
	cl := tab.closest(hash, 1)
	tab.mutex.Unlock()
	if len(cl.entries) > 0 && cl.entries[0].ID() == hash {
		return unwrapNode(cl.entries[0])
	}
	// Otherwise, do a network lookup.
	result := tab.lookup(encodePubkey(n.Pubkey()), true)
	for _, n := range result {
		if n.ID() == hash {
			return unwrapNode(n)
		}
	}
	return nil
}

// LookupRandom finds random nodes in the network.
func (tab *Table) LookupRandom() []*onode.Node {
	var target EncPubkey
	crand.Read(target[:])
	return unwrapNodes(tab.lookup(target, true))
}

// lookup performs a network search for nodes close to the given target. It
// approaches the target by querying nodes that are closer to it on each
// iteration. The given target does not need to be an actual node identifier.
func (tab *Table) lookup(targetKey EncPubkey, refreshIfEmpty bool) []*node {
	var (
		targetHash     = ogcrypto2.Keccak256Hash(targetKey[:])
		target         = onode.ID(targetHash.Bytes)
		asked          = make(map[onode.ID]bool)
		seen           = make(map[onode.ID]bool)
		reply          = make(chan []*node, alpha)
		pendingQueries = 0
		result         *nodesByDistance
	)
	// Don't query further if we hit ourselves.
	// Unlikely to happen often in practice.
	asked[tab.self().ID()] = true

	for {
		tab.mutex.Lock()
		// Generate the initial result set.
		result = tab.closest(target, bucketSize)
		tab.mutex.Unlock()
		if len(result.entries) > 0 || !refreshIfEmpty {
			break
		}
		// The result set is empty, all nodes were dropped, refresh.
		// We actually wait for the refresh to complete here. The very
		// first query will hit this case and run the bootstrapping
		// logic.
		<-tab.refresh()
		refreshIfEmpty = false
	}

	for {
		// Ask the alpha closest nodes that we haven't asked yet.
		for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
			n := result.entries[i]
			if !asked[n.ID()] {
				asked[n.ID()] = true
				pendingQueries++
				goroutine.New(func() {
					tab.findnode(n, targetKey, reply)
				})
			}
		}
		if pendingQueries == 0 {
			// We have asked all closest nodes, stop the search.
			break
		}
		// Wait for the next reply.
		for _, n := range <-reply {
			if n != nil && !seen[n.ID()] {
				seen[n.ID()] = true
				result.push(n, bucketSize)
			}
		}
		pendingQueries--
	}
	return result.entries
}
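// exampleLookupTarget is an illustrative sketch (not part of the original
// file) showing how lookup derives the node ID it converges on: the target is
// the Keccak256 hash of the 512-bit encoded public key, mirroring the first
// lines of lookup above.
func exampleLookupTarget(key EncPubkey) onode.ID {
	return onode.ID(ogcrypto2.Keccak256Hash(key[:]).Bytes)
}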
func (tab *Table) findnode(n *node, targetKey EncPubkey, reply chan<- []*node) {
	fails := tab.db.FindFails(n.ID())
	r, err := tab.net.findnode(n.ID(), n.addr(), targetKey)
	if err != nil || len(r) == 0 {
		fails++
		tab.db.UpdateFindFails(n.ID(), fails)
		log.WithFields(logrus.Fields{
			"id":        n.ID(),
			"failCount": fails,
		}).WithError(err).Trace("Findnode failed")
		if fails >= maxFindnodeFailures {
			log.WithFields(logrus.Fields{"id": n.ID(), "failCount": fails}).Trace("Too many findnode failures, dropping")
			tab.delete(n)
		}
	} else if fails > 0 {
		tab.db.UpdateFindFails(n.ID(), fails-1)
	}

	// Grab as many nodes as possible. Some of them might not be alive anymore,
	// but we'll just remove those again during revalidation.
	for _, n := range r {
		tab.add(n)
	}
	reply <- r
}

func (tab *Table) refresh() <-chan struct{} {
	done := make(chan struct{})
	select {
	case tab.refreshReq <- done:
	case <-tab.closed:
		close(done)
	}
	return done
}

// loop schedules refresh, revalidate and copyLiveNodes runs, and coordinates
// shutdown.
func (tab *Table) loop() {
	var (
		revalidate     = time.NewTimer(tab.nextRevalidateTime())
		refresh        = time.NewTicker(refreshInterval)
		copyNodes      = time.NewTicker(copyNodesInterval)
		refreshDone    = make(chan struct{})           // where doRefresh reports completion
		revalidateDone chan struct{}                   // where doRevalidate reports completion
		waiting        = []chan struct{}{tab.initDone} // holds waiting callers while doRefresh runs
	)
	defer refresh.Stop()
	defer revalidate.Stop()
	defer copyNodes.Stop()

	// Start the initial refresh.
	goroutine.New(func() {
		tab.doRefresh(refreshDone)
	})

loop:
	for {
		select {
		case <-refresh.C:
			tab.seedRand()
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				goroutine.New(func() {
					tab.doRefresh(refreshDone)
				})
			}
		case req := <-tab.refreshReq:
			waiting = append(waiting, req)
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				goroutine.New(func() {
					tab.doRefresh(refreshDone)
				})
			}
		case <-refreshDone:
			for _, ch := range waiting {
				close(ch)
			}
			waiting, refreshDone = nil, nil
		case <-revalidate.C:
			revalidateDone = make(chan struct{})
			goroutine.New(func() {
				tab.doRevalidate(revalidateDone)
			})
		case <-revalidateDone:
			revalidate.Reset(tab.nextRevalidateTime())
			revalidateDone = nil
		case <-copyNodes.C:
			goroutine.New(tab.copyLiveNodes)
		case <-tab.closeReq:
			break loop
		}
	}

	if refreshDone != nil {
		<-refreshDone
	}
	for _, ch := range waiting {
		close(ch)
	}
	if revalidateDone != nil {
		<-revalidateDone
	}
	close(tab.closed)
}

// doRefresh performs a lookup for a random target to keep buckets full. Seed
// nodes are inserted if the table is empty (initial bootstrap or discarded
// faulty peers).
func (tab *Table) doRefresh(done chan struct{}) {
	defer close(done)

	// Load nodes from the database and insert them. This should yield a few
	// previously seen nodes that are (hopefully) still alive.
	tab.loadSeedNodes()

	// Run a self lookup to discover new neighbor nodes.
	// We can only do this if we have a secp256k1 identity.
	var key ecdsa.PublicKey
	if err := tab.self().Load((*onode.Secp256k1)(&key)); err == nil {
		tab.lookup(encodePubkey(&key), false)
	}

	// The Kademlia paper specifies that the bucket refresh should
	// perform a lookup in the least recently used bucket. We cannot
	// adhere to this because the findnode target is a 512-bit value
	// (not hash-sized) and it is not easily possible to generate a
	// SHA3 preimage that falls into a chosen bucket.
	// We perform a few lookups with a random target instead.
	for i := 0; i < 3; i++ {
		var target EncPubkey
		crand.Read(target[:])
		tab.lookup(target, false)
	}
}
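// exampleForceRefresh is an illustrative sketch (not part of the original
// file) of the refresh handshake: refresh hands loop a done channel, and loop
// closes it once doRefresh has finished, so a caller can block until the
// buckets have been refilled. This is what lookup's refreshIfEmpty path does.
func exampleForceRefresh(tab *Table) {
	<-tab.refresh()
}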
func (tab *Table) loadSeedNodes() {
	seeds := wrapNodes(tab.db.QuerySeeds(seedCount, seedMaxAge))
	seeds = append(seeds, tab.nursery...)
	for i := range seeds {
		seed := seeds[i]
		age := time.Since(tab.db.LastPongReceived(seed.ID()))
		log.WithFields(logrus.Fields{"id": seed.ID(), "addr": seed.addr(), "age": age.String()}).Debug("Found seed node in database")
		tab.add(seed)
	}
}

// doRevalidate checks that the last node in a random bucket is still live
// and replaces or deletes the node if it isn't.
func (tab *Table) doRevalidate(done chan<- struct{}) {
	defer func() { done <- struct{}{} }()

	last, bi := tab.nodeToRevalidate()
	if last == nil {
		// No non-empty bucket found.
		return
	}

	// Ping the selected node and wait for a pong.
	err := tab.net.ping(last.ID(), last.addr())

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.buckets[bi]
	if err == nil {
		// The node responded, move it to the front.
		log.WithFields(logrus.Fields{"b": bi, "id": last.ID()}).Debug("Revalidated node")
		b.bump(last)
		return
	}
	// No reply received, pick a replacement or delete the node if there
	// aren't any replacements.
	if r := tab.replace(b, last); r != nil {
		log.WithFields(logrus.Fields{"b": bi, "id": last.ID(), "ip": last.IP(), "r": r.ID(), "rip": r.IP()}).Debug("Replaced dead node")
	} else {
		log.WithFields(logrus.Fields{"b": bi, "id": last.ID(), "ip": last.IP()}).Debug("Removed dead node")
	}
}

// nodeToRevalidate returns the last node in a random, non-empty bucket.
func (tab *Table) nodeToRevalidate() (n *node, bi int) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	for _, bi = range tab.rand.Perm(len(tab.buckets)) {
		b := tab.buckets[bi]
		if len(b.entries) > 0 {
			last := b.entries[len(b.entries)-1]
			return last, bi
		}
	}
	return nil, 0
}

func (tab *Table) nextRevalidateTime() time.Duration {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	return time.Duration(tab.rand.Int63n(int64(revalidateInterval)))
}

// copyLiveNodes adds nodes from the table to the database if they have been
// in the table longer than seedMinTableTime.
func (tab *Table) copyLiveNodes() {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	now := time.Now()
	for _, b := range &tab.buckets {
		for _, n := range b.entries {
			if now.Sub(n.addedAt) >= seedMinTableTime {
				tab.db.UpdateNode(unwrapNode(n))
			}
		}
	}
}

// closest returns the n nodes in the table that are closest to the
// given id. The caller must hold tab.mutex.
func (tab *Table) closest(target onode.ID, nresults int) *nodesByDistance {
	// This is a very wasteful way to find the closest nodes but
	// obviously correct. I believe that tree-based buckets would make
	// this easier to implement efficiently.
	close := &nodesByDistance{target: target}
	for _, b := range &tab.buckets {
		for _, n := range b.entries {
			close.push(n, nresults)
		}
	}
	return close
}

func (tab *Table) len() (n int) {
	for _, b := range &tab.buckets {
		n += len(b.entries)
	}
	return n
}

// bucket returns the bucket for the given node ID hash.
func (tab *Table) bucket(id onode.ID) *bucket {
	d := onode.LogDist(tab.self().ID(), id)
	if d <= bucketMinDistance {
		return tab.buckets[0]
	}
	return tab.buckets[d-bucketMinDistance-1]
}
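// exampleBucketIndex is an illustrative sketch (not part of the original
// file) of the distance-to-index mapping used by bucket above: all IDs at log
// distance bucketMinDistance or less share bucket 0, and each farther
// distance gets a bucket of its own.
func exampleBucketIndex(self, id onode.ID) int {
	d := onode.LogDist(self, id)
	if d <= bucketMinDistance {
		return 0
	}
	return d - bucketMinDistance - 1
}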
// add attempts to add the given node to its corresponding bucket. If the
// bucket has space available, adding the node succeeds immediately.
// Otherwise, the node is added if the least recently active node in the
// bucket does not respond to a ping packet.
//
// The caller must not hold tab.mutex.
func (tab *Table) add(n *node) {
	if n.ID() == tab.self().ID() {
		return
	}

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.bucket(n.ID())
	if !tab.bumpOrAdd(b, n) {
		// Node is not in table. Add it to the replacement list.
		tab.addReplacement(b, n)
	}
}

// addThroughPing adds the given node to the table. Compared to plain
// 'add' there is an additional safety measure: if the table is still
// initializing the node is not added. This prevents an attack where the
// table could be filled by repeatedly sending pings.
//
// The caller must not hold tab.mutex.
func (tab *Table) addThroughPing(n *node) {
	if !tab.isInitDone() {
		return
	}
	tab.add(n)
}

// stuff adds the given nodes to the end of their corresponding bucket
// if the bucket is not full. The caller must not hold tab.mutex.
func (tab *Table) stuff(nodes []*node) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	for _, n := range nodes {
		if n.ID() == tab.self().ID() {
			continue // don't add self
		}
		b := tab.bucket(n.ID())
		if len(b.entries) < bucketSize {
			tab.bumpOrAdd(b, n)
		}
	}
}

// delete removes an entry from the node table. It is used to evict dead nodes.
func (tab *Table) delete(node *node) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	tab.deleteInBucket(tab.bucket(node.ID()), node)
}

func (tab *Table) addIP(b *bucket, ip net.IP) bool {
	if netutil.IsLAN(ip) {
		return true
	}
	if !tab.ips.Add(ip) {
		log.WithField("ip", ip).Debug("IP exceeds table limit")
		return false
	}
	if !b.ips.Add(ip) {
		log.WithField("ip", ip).Debug("IP exceeds bucket limit")
		tab.ips.Remove(ip)
		return false
	}
	return true
}

func (tab *Table) removeIP(b *bucket, ip net.IP) {
	if netutil.IsLAN(ip) {
		return
	}
	tab.ips.Remove(ip)
	b.ips.Remove(ip)
}

func (tab *Table) addReplacement(b *bucket, n *node) {
	for _, e := range b.replacements {
		if e.ID() == n.ID() {
			return // already in list
		}
	}
	if !tab.addIP(b, n.IP()) {
		return
	}
	var removed *node
	b.replacements, removed = pushNode(b.replacements, n, maxReplacements)
	if removed != nil {
		tab.removeIP(b, removed.IP())
	}
}

// replace removes n from the replacement list and replaces 'last' with it if
// it is the last entry in the bucket. If 'last' isn't the last entry, it has
// either been replaced with someone else or became active.
func (tab *Table) replace(b *bucket, last *node) *node {
	if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID() != last.ID() {
		// Entry has moved, don't replace it.
		return nil
	}
	// Still the last entry.
	if len(b.replacements) == 0 {
		tab.deleteInBucket(b, last)
		return nil
	}
	r := b.replacements[tab.rand.Intn(len(b.replacements))]
	b.replacements = deleteNode(b.replacements, r)
	b.entries[len(b.entries)-1] = r
	tab.removeIP(b, last.IP())
	return r
}
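// exampleIPLimits is an illustrative sketch (not part of the original file)
// of the two nested anti-eclipse budgets enforced by addIP above: an address
// must fit both the table-wide /24 limit and the per-bucket /24 limit, and a
// failed per-bucket add rolls back the table-wide reservation.
func exampleIPLimits() (tableSet, bucketSet netutil.DistinctNetSet) {
	tableSet = netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit}    // at most 10 addresses per /24 across the table
	bucketSet = netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit} // at most 2 addresses per /24 per bucket
	return tableSet, bucketSet
}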
// bump moves the given node to the front of the bucket entry list
// if it is contained in that list.
func (b *bucket) bump(n *node) bool {
	for i := range b.entries {
		if b.entries[i].ID() == n.ID() {
			// Move it to the front.
			copy(b.entries[1:], b.entries[:i])
			b.entries[0] = n
			return true
		}
	}
	return false
}

// bumpOrAdd moves n to the front of the bucket entry list or adds it if the
// list isn't full. The return value is true if n is in the bucket.
func (tab *Table) bumpOrAdd(b *bucket, n *node) bool {
	if b.bump(n) {
		return true
	}
	if len(b.entries) >= bucketSize || !tab.addIP(b, n.IP()) {
		return false
	}
	b.entries, _ = pushNode(b.entries, n, bucketSize)
	b.replacements = deleteNode(b.replacements, n)
	n.addedAt = time.Now()
	if tab.nodeAddedHook != nil {
		tab.nodeAddedHook(n)
	}
	return true
}

func (tab *Table) deleteInBucket(b *bucket, n *node) {
	b.entries = deleteNode(b.entries, n)
	tab.removeIP(b, n.IP())
}

// pushNode adds n to the front of list, keeping at most max items. It returns
// the updated list and the entry that was pushed out, if any.
func pushNode(list []*node, n *node, max int) ([]*node, *node) {
	if len(list) < max {
		list = append(list, nil)
	}
	removed := list[len(list)-1]
	copy(list[1:], list)
	list[0] = n
	return list, removed
}

// deleteNode removes n from list.
func deleteNode(list []*node, n *node) []*node {
	for i := range list {
		if list[i].ID() == n.ID() {
			return append(list[:i], list[i+1:]...)
		}
	}
	return list
}

// nodesByDistance is a list of nodes, ordered by distance to target.
type nodesByDistance struct {
	entries []*node
	target  onode.ID
}

// push adds the given node to the list, keeping the total size below maxElems.
func (h *nodesByDistance) push(n *node, maxElems int) {
	ix := sort.Search(len(h.entries), func(i int) bool {
		return onode.DistCmp(h.target, h.entries[i].ID(), n.ID()) > 0
	})
	if len(h.entries) < maxElems {
		h.entries = append(h.entries, n)
	}
	if ix == len(h.entries) {
		// The node is farther away than all entries we already have.
		// If there was room for it, it is now the last element.
	} else {
		// Slide existing entries down to make room.
		// This will overwrite the entry we just appended.
		copy(h.entries[ix+1:], h.entries[ix:])
		h.entries[ix] = n
	}
}
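// exampleClosestSet is an illustrative sketch (not part of the original file)
// of how nodesByDistance is used: pushing candidates with a cap keeps only
// the bucketSize nodes closest to target, which is how lookup and closest
// bound their result sets.
func exampleClosestSet(target onode.ID, candidates []*node) []*node {
	set := &nodesByDistance{target: target}
	for _, n := range candidates {
		set.push(n, bucketSize)
	}
	return set.entries
}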