github.com/Blockdaemon/celo-blockchain@v0.0.0-20200129231733-e667f6b08419/p2p/discover/table.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package discover implements the Node Discovery Protocol.
//
// The Node Discovery protocol provides a way to find RLPx nodes that
// can be connected to. It uses a Kademlia-like protocol to maintain a
// distributed database of the IDs and endpoints of all listening
// nodes.
package discover

import (
	"crypto/ecdsa"
	crand "crypto/rand"
	"encoding/binary"
	"fmt"
	mrand "math/rand"
	"net"
	"sort"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/netutil"
)

const (
	alpha           = 3  // Kademlia concurrency factor
	bucketSize      = 16 // Kademlia bucket size
	maxReplacements = 10 // Size of per-bucket replacement list

	// We keep buckets for the upper 1/15 of distances because
	// it's very unlikely we'll ever encounter a node that's closer.
	hashBits          = len(common.Hash{}) * 8
	nBuckets          = hashBits / 15       // Number of buckets
	bucketMinDistance = hashBits - nBuckets // Log distance of closest bucket

	// IP address limits.
	bucketIPLimit, bucketSubnet = 2, 24 // at most 2 addresses from the same /24
	tableIPLimit, tableSubnet   = 10, 24

	maxFindnodeFailures = 5 // Nodes exceeding this limit are dropped
	refreshInterval     = 30 * time.Minute
	revalidateInterval  = 10 * time.Second
	copyNodesInterval   = 30 * time.Second
	seedMinTableTime    = 5 * time.Minute
	seedCount           = 30
	seedMaxAge          = 5 * 24 * time.Hour
)

type Table struct {
	mutex   sync.Mutex        // protects buckets, bucket content, nursery, rand
	buckets [nBuckets]*bucket // index of known nodes by distance
	nursery []*node           // bootstrap nodes
	rand    *mrand.Rand       // source of randomness, periodically reseeded
	ips     netutil.DistinctNetSet

	db         *enode.DB // database of known nodes
	net        transport
	refreshReq chan chan struct{}
	initDone   chan struct{}

	closeOnce sync.Once
	closeReq  chan struct{}
	closed    chan struct{}

	nodeAddedHook func(*node) // for testing
}

type bucketInfo struct {
	Entries      []*node `json:"entries"`
	Replacements []*node `json:"replacements"`
	IPs          string  `json:"ips"`
}

// TableInfo provides information on the discovery table
type TableInfo struct {
	Buckets [nBuckets]*bucketInfo `json:"buckets"`
	IPs     string                `json:"ips"`
}

// transport is implemented by the UDP transport.
// it is an interface so we can test without opening lots of UDP
// sockets and without generating a private key.
type transport interface {
	self() *enode.Node
	ping(enode.ID, *net.UDPAddr) error
	findnode(toid enode.ID, addr *net.UDPAddr, target encPubkey) ([]*node, error)
	close()
}

// bucket contains nodes, ordered by their last activity. the entry
// that was most recently active is the first element in entries.
type bucket struct {
	entries      []*node // live entries, sorted by time of last contact
	replacements []*node // recently seen nodes to be used if revalidation fails
	ips          netutil.DistinctNetSet
}

func newTable(t transport, db *enode.DB, bootnodes []*enode.Node) (*Table, error) {
	tab := &Table{
		net:        t,
		db:         db,
		refreshReq: make(chan chan struct{}),
		initDone:   make(chan struct{}),
		closeReq:   make(chan struct{}),
		closed:     make(chan struct{}),
		rand:       mrand.New(mrand.NewSource(0)),
		ips:        netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit},
	}
	if err := tab.setFallbackNodes(bootnodes); err != nil {
		return nil, err
	}
	for i := range tab.buckets {
		tab.buckets[i] = &bucket{
			ips: netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit},
		}
	}
	tab.seedRand()
	tab.loadSeedNodes()

	go tab.loop()
	return tab, nil
}

func (tab *Table) self() *enode.Node {
	return tab.net.self()
}

func (tab *Table) seedRand() {
	var b [8]byte
	crand.Read(b[:])

	tab.mutex.Lock()
	tab.rand.Seed(int64(binary.BigEndian.Uint64(b[:])))
	tab.mutex.Unlock()
}

// ReadRandomNodes fills the given slice with random nodes from the table. The results
// are guaranteed to be unique for a single invocation, no node will appear twice.
func (tab *Table) ReadRandomNodes(buf []*enode.Node) (n int) {
	if !tab.isInitDone() {
		return 0
	}
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	// Find all non-empty buckets and get a fresh slice of their entries.
	var buckets [][]*node
	for _, b := range &tab.buckets {
		if len(b.entries) > 0 {
			buckets = append(buckets, b.entries)
		}
	}
	if len(buckets) == 0 {
		return 0
	}
	// Shuffle the buckets.
	for i := len(buckets) - 1; i > 0; i-- {
		j := tab.rand.Intn(len(buckets))
		buckets[i], buckets[j] = buckets[j], buckets[i]
	}
	// Move head of each bucket into buf, removing buckets that become empty.
	var i, j int
	for ; i < len(buf); i, j = i+1, (j+1)%len(buckets) {
		b := buckets[j]
		buf[i] = unwrapNode(b[0])
		buckets[j] = b[1:]
		if len(b) == 1 {
			buckets = append(buckets[:j], buckets[j+1:]...)
		}
		if len(buckets) == 0 {
			break
		}
	}
	return i + 1
}

// Close terminates the network listener and flushes the node database.
func (tab *Table) Close() {
	tab.closeOnce.Do(func() {
		if tab.net != nil {
			tab.net.close()
		}
		// Wait for loop to end.
		close(tab.closeReq)
		<-tab.closed
	})
}

// setFallbackNodes sets the initial points of contact. These nodes
// are used to connect to the network if the table is empty and there
// are no known nodes in the database.
func (tab *Table) setFallbackNodes(nodes []*enode.Node) error {
	for _, n := range nodes {
		if err := n.ValidateComplete(); err != nil {
			return fmt.Errorf("bad bootstrap node %q: %v", n, err)
		}
	}
	tab.nursery = wrapNodes(nodes)
	return nil
}

// isInitDone returns whether the table's initial seeding procedure has completed.
func (tab *Table) isInitDone() bool {
	select {
	case <-tab.initDone:
		return true
	default:
		return false
	}
}

// Resolve searches for a specific node with the given ID.
// It returns nil if the node could not be found.
func (tab *Table) Resolve(n *enode.Node) *enode.Node {
	// If the node is present in the local table, no
	// network interaction is required.
	hash := n.ID()
	tab.mutex.Lock()
	cl := tab.closest(hash, 1)
	tab.mutex.Unlock()
	if len(cl.entries) > 0 && cl.entries[0].ID() == hash {
		return unwrapNode(cl.entries[0])
	}
	// Otherwise, do a network lookup.
	result := tab.lookup(encodePubkey(n.Pubkey()), true)
	for _, n := range result {
		if n.ID() == hash {
			return unwrapNode(n)
		}
	}
	return nil
}

// LookupRandom finds random nodes in the network.
func (tab *Table) LookupRandom() []*enode.Node {
	var target encPubkey
	crand.Read(target[:])
	return unwrapNodes(tab.lookup(target, true))
}

// lookup performs a network search for nodes close to the given target. It approaches the
// target by querying nodes that are closer to it on each iteration. The given target does
// not need to be an actual node identifier.
func (tab *Table) lookup(targetKey encPubkey, refreshIfEmpty bool) []*node {
	var (
		target         = enode.ID(crypto.Keccak256Hash(targetKey[:]))
		asked          = make(map[enode.ID]bool)
		seen           = make(map[enode.ID]bool)
		reply          = make(chan []*node, alpha)
		pendingQueries = 0
		result         *nodesByDistance
	)
	// don't query further if we hit ourself.
	// unlikely to happen often in practice.
	asked[tab.self().ID()] = true

	for {
		tab.mutex.Lock()
		// generate initial result set
		result = tab.closest(target, bucketSize)
		tab.mutex.Unlock()
		if len(result.entries) > 0 || !refreshIfEmpty {
			break
		}
		// The result set is empty, all nodes were dropped, refresh.
		// We actually wait for the refresh to complete here. The very
		// first query will hit this case and run the bootstrapping
		// logic.
		<-tab.refresh()
		refreshIfEmpty = false
	}

	for {
		// ask the alpha closest nodes that we haven't asked yet
		for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
			n := result.entries[i]
			if !asked[n.ID()] {
				asked[n.ID()] = true
				pendingQueries++
				go tab.findnode(n, targetKey, reply)
			}
		}
		if pendingQueries == 0 {
			// we have asked all closest nodes, stop the search
			break
		}
		select {
		case nodes := <-reply:
			for _, n := range nodes {
				if n != nil && !seen[n.ID()] {
					seen[n.ID()] = true
					result.push(n, bucketSize)
				}
			}
		case <-tab.closeReq:
			return nil // shutdown, no need to continue.
		}
		pendingQueries--
	}
	return result.entries
}

func (tab *Table) findnode(n *node, targetKey encPubkey, reply chan<- []*node) {
	fails := tab.db.FindFails(n.ID(), n.IP())
	r, err := tab.net.findnode(n.ID(), n.addr(), targetKey)
	if err == errClosed {
		// Avoid recording failures on shutdown.
		reply <- nil
		return
	} else if err != nil || len(r) == 0 {
		fails++
		tab.db.UpdateFindFails(n.ID(), n.IP(), fails)
		log.Trace("Findnode failed", "id", n.ID(), "failcount", fails, "err", err)
		if fails >= maxFindnodeFailures {
			log.Trace("Too many findnode failures, dropping", "id", n.ID(), "failcount", fails)
			tab.delete(n)
		}
	} else if fails > 0 {
		tab.db.UpdateFindFails(n.ID(), n.IP(), fails-1)
	}

	// Grab as many nodes as possible. Some of them might not be alive anymore, but we'll
	// just remove those again during revalidation.
	for _, n := range r {
		tab.addSeenNode(n)
	}
	reply <- r
}

func (tab *Table) refresh() <-chan struct{} {
	done := make(chan struct{})
	select {
	case tab.refreshReq <- done:
	case <-tab.closeReq:
		close(done)
	}
	return done
}

// loop schedules refresh, revalidate runs and coordinates shutdown.
func (tab *Table) loop() {
	var (
		revalidate     = time.NewTimer(tab.nextRevalidateTime())
		refresh        = time.NewTicker(refreshInterval)
		copyNodes      = time.NewTicker(copyNodesInterval)
		refreshDone    = make(chan struct{})           // where doRefresh reports completion
		revalidateDone chan struct{}                   // where doRevalidate reports completion
		waiting        = []chan struct{}{tab.initDone} // holds waiting callers while doRefresh runs
	)
	defer refresh.Stop()
	defer revalidate.Stop()
	defer copyNodes.Stop()

	// Start initial refresh.
	go tab.doRefresh(refreshDone)

loop:
	for {
		select {
		case <-refresh.C:
			tab.seedRand()
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}
		case req := <-tab.refreshReq:
			waiting = append(waiting, req)
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}
		case <-refreshDone:
			for _, ch := range waiting {
				close(ch)
			}
			waiting, refreshDone = nil, nil
		case <-revalidate.C:
			revalidateDone = make(chan struct{})
			go tab.doRevalidate(revalidateDone)
		case <-revalidateDone:
			revalidate.Reset(tab.nextRevalidateTime())
			revalidateDone = nil
		case <-copyNodes.C:
			go tab.copyLiveNodes()
		case <-tab.closeReq:
			break loop
		}
	}

	if refreshDone != nil {
		<-refreshDone
	}
	for _, ch := range waiting {
		close(ch)
	}
	if revalidateDone != nil {
		<-revalidateDone
	}
	close(tab.closed)
}

// doRefresh performs a lookup for a random target to keep buckets
// full. seed nodes are inserted if the table is empty (initial
// bootstrap or discarded faulty peers).
func (tab *Table) doRefresh(done chan struct{}) {
	defer close(done)

	// Load nodes from the database and insert
	// them. This should yield a few previously seen nodes that are
	// (hopefully) still alive.
	tab.loadSeedNodes()

	// Run self lookup to discover new neighbor nodes.
	// We can only do this if we have a secp256k1 identity.
	var key ecdsa.PublicKey
	if err := tab.self().Load((*enode.Secp256k1)(&key)); err == nil {
		tab.lookup(encodePubkey(&key), false)
	}

	// The Kademlia paper specifies that the bucket refresh should
	// perform a lookup in the least recently used bucket. We cannot
	// adhere to this because the findnode target is a 512bit value
	// (not hash-sized) and it is not easily possible to generate a
	// sha3 preimage that falls into a chosen bucket.
	// We perform a few lookups with a random target instead.
	for i := 0; i < 3; i++ {
		var target encPubkey
		crand.Read(target[:])
		tab.lookup(target, false)
	}
}

func (tab *Table) loadSeedNodes() {
	seeds := wrapNodes(tab.db.QuerySeeds(seedCount, seedMaxAge))
	seeds = append(seeds, tab.nursery...)
	for i := range seeds {
		seed := seeds[i]
		age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.LastPongReceived(seed.ID(), seed.IP())) }}
		log.Trace("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age)
		tab.addSeenNode(seed)
	}
}

// doRevalidate checks that the last node in a random bucket is still live
// and replaces or deletes the node if it isn't.
func (tab *Table) doRevalidate(done chan<- struct{}) {
	defer func() { done <- struct{}{} }()

	last, bi := tab.nodeToRevalidate()
	if last == nil {
		// No non-empty bucket found.
		return
	}

	// Ping the selected node and wait for a pong.
	err := tab.net.ping(last.ID(), last.addr())

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.buckets[bi]
	if err == nil {
		// The node responded, move it to the front.
		last.livenessChecks++
		log.Debug("Revalidated node", "b", bi, "id", last.ID(), "checks", last.livenessChecks)
		tab.bumpInBucket(b, last)
		return
	}
	// No reply received, pick a replacement or delete the node if there aren't
	// any replacements.
	if r := tab.replace(b, last); r != nil {
		log.Debug("Replaced dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks, "r", r.ID(), "rip", r.IP())
	} else {
		log.Debug("Removed dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks)
	}
}

// nodeToRevalidate returns the last node in a random, non-empty bucket.
func (tab *Table) nodeToRevalidate() (n *node, bi int) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	for _, bi = range tab.rand.Perm(len(tab.buckets)) {
		b := tab.buckets[bi]
		if len(b.entries) > 0 {
			last := b.entries[len(b.entries)-1]
			return last, bi
		}
	}
	return nil, 0
}

func (tab *Table) nextRevalidateTime() time.Duration {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	return time.Duration(tab.rand.Int63n(int64(revalidateInterval)))
}

// copyLiveNodes adds nodes from the table to the database if they have been in the table
// longer than seedMinTableTime.
func (tab *Table) copyLiveNodes() {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	now := time.Now()
	for _, b := range &tab.buckets {
		for _, n := range b.entries {
			if n.livenessChecks > 0 && now.Sub(n.addedAt) >= seedMinTableTime {
				tab.db.UpdateNode(unwrapNode(n))
			}
		}
	}
}

// closest returns the n nodes in the table that are closest to the
// given id. The caller must hold tab.mutex.
func (tab *Table) closest(target enode.ID, nresults int) *nodesByDistance {
	// This is a very wasteful way to find the closest nodes but
	// obviously correct. I believe that tree-based buckets would make
	// this easier to implement efficiently.
	close := &nodesByDistance{target: target}
	for _, b := range &tab.buckets {
		for _, n := range b.entries {
			if n.livenessChecks > 0 {
				close.push(n, nresults)
			}
		}
	}
	return close
}

func (tab *Table) len() (n int) {
	for _, b := range &tab.buckets {
		n += len(b.entries)
	}
	return n
}

// bucket returns the bucket for the given node ID hash.
func (tab *Table) bucket(id enode.ID) *bucket {
	d := enode.LogDist(tab.self().ID(), id)
	if d <= bucketMinDistance {
		return tab.buckets[0]
	}
	return tab.buckets[d-bucketMinDistance-1]
}

// addSeenNode adds a node which may or may not be live to the end of a bucket. If the
// bucket has space available, adding the node succeeds immediately. Otherwise, the node is
// added to the replacements list.
//
// The caller must not hold tab.mutex.
func (tab *Table) addSeenNode(n *node) {
	if n.ID() == tab.self().ID() {
		return
	}

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.bucket(n.ID())
	if contains(b.entries, n.ID()) {
		// Already in bucket, don't add.
		return
	}
	if len(b.entries) >= bucketSize {
		// Bucket full, maybe add as replacement.
		tab.addReplacement(b, n)
		return
	}
	if !tab.addIP(b, n.IP()) {
		// Can't add: IP limit reached.
		return
	}
	// Add to end of bucket:
	b.entries = append(b.entries, n)
	b.replacements = deleteNode(b.replacements, n)
	n.addedAt = time.Now()
	if tab.nodeAddedHook != nil {
		tab.nodeAddedHook(n)
	}
}

// addVerifiedNode adds a node whose existence has been verified recently to the front of a
// bucket. If the node is already in the bucket, it is moved to the front. If the bucket
// has no space, the node is added to the replacements list.
//
// There is an additional safety measure: if the table is still initializing the node
// is not added. This prevents an attack where the table could be filled by just sending
// ping repeatedly.
//
// The caller must not hold tab.mutex.
func (tab *Table) addVerifiedNode(n *node) {
	if !tab.isInitDone() {
		return
	}
	if n.ID() == tab.self().ID() {
		return
	}

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.bucket(n.ID())
	if tab.bumpInBucket(b, n) {
		// Already in bucket, moved to front.
		return
	}
	if len(b.entries) >= bucketSize {
		// Bucket full, maybe add as replacement.
		tab.addReplacement(b, n)
		return
	}
	if !tab.addIP(b, n.IP()) {
		// Can't add: IP limit reached.
		return
	}
	// Add to front of bucket.
	b.entries, _ = pushNode(b.entries, n, bucketSize)
	b.replacements = deleteNode(b.replacements, n)
	n.addedAt = time.Now()
	if tab.nodeAddedHook != nil {
		tab.nodeAddedHook(n)
	}
}

// delete removes an entry from the node table. It is used to evacuate dead nodes.
func (tab *Table) delete(node *node) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	tab.deleteInBucket(tab.bucket(node.ID()), node)
}

func (tab *Table) addIP(b *bucket, ip net.IP) bool {
	if netutil.IsLAN(ip) {
		return true
	}
	if !tab.ips.Add(ip) {
		log.Debug("IP exceeds table limit", "ip", ip)
		return false
	}
	if !b.ips.Add(ip) {
		log.Debug("IP exceeds bucket limit", "ip", ip)
		tab.ips.Remove(ip)
		return false
	}
	return true
}

func (tab *Table) removeIP(b *bucket, ip net.IP) {
	if netutil.IsLAN(ip) {
		return
	}
	tab.ips.Remove(ip)
	b.ips.Remove(ip)
}

func (tab *Table) addReplacement(b *bucket, n *node) {
	for _, e := range b.replacements {
		if e.ID() == n.ID() {
			return // already in list
		}
	}
	if !tab.addIP(b, n.IP()) {
		return
	}
	var removed *node
	b.replacements, removed = pushNode(b.replacements, n, maxReplacements)
	if removed != nil {
		tab.removeIP(b, removed.IP())
	}
}

// replace removes n from the replacement list and replaces 'last' with it if it is the
// last entry in the bucket. If 'last' isn't the last entry, it has either been replaced
// with someone else or became active.
func (tab *Table) replace(b *bucket, last *node) *node {
	if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID() != last.ID() {
		// Entry has moved, don't replace it.
		return nil
	}
	// Still the last entry.
	if len(b.replacements) == 0 {
		tab.deleteInBucket(b, last)
		return nil
	}
	r := b.replacements[tab.rand.Intn(len(b.replacements))]
	b.replacements = deleteNode(b.replacements, r)
	b.entries[len(b.entries)-1] = r
	tab.removeIP(b, last.IP())
	return r
}

// bumpInBucket moves the given node to the front of the bucket entry list
// if it is contained in that list.
func (tab *Table) bumpInBucket(b *bucket, n *node) bool {
	for i := range b.entries {
		if b.entries[i].ID() == n.ID() {
			if !n.IP().Equal(b.entries[i].IP()) {
				// Endpoint has changed, ensure that the new IP fits into table limits.
				tab.removeIP(b, b.entries[i].IP())
				if !tab.addIP(b, n.IP()) {
					// It doesn't, put the previous one back.
					tab.addIP(b, b.entries[i].IP())
					return false
				}
			}
			// Move it to the front.
			copy(b.entries[1:], b.entries[:i])
			b.entries[0] = n
			return true
		}
	}
	return false
}

func (tab *Table) deleteInBucket(b *bucket, n *node) {
	b.entries = deleteNode(b.entries, n)
	tab.removeIP(b, n.IP())
}

// Info gives information on all the buckets and IPs in the Table
func (tab *Table) Info() *TableInfo {
	var buckets [nBuckets]*bucketInfo
	for i := 0; i < nBuckets; i++ {
		buckets[i] = &bucketInfo{
			Entries:      tab.buckets[i].entries,
			Replacements: tab.buckets[i].replacements,
			IPs:          tab.buckets[i].ips.String(),
		}
	}
	return &TableInfo{
		Buckets: buckets,
		IPs:     tab.ips.String(),
	}
}

func contains(ns []*node, id enode.ID) bool {
	for _, n := range ns {
		if n.ID() == id {
			return true
		}
	}
	return false
}

// pushNode adds n to the front of list, keeping at most max items.
func pushNode(list []*node, n *node, max int) ([]*node, *node) {
	if len(list) < max {
		list = append(list, nil)
	}
	removed := list[len(list)-1]
	copy(list[1:], list)
	list[0] = n
	return list, removed
}

// deleteNode removes n from list.
func deleteNode(list []*node, n *node) []*node {
	for i := range list {
		if list[i].ID() == n.ID() {
			return append(list[:i], list[i+1:]...)
		}
	}
	return list
}

// nodesByDistance is a list of nodes, ordered by
// distance to target.
type nodesByDistance struct {
	entries []*node
	target  enode.ID
}

// push adds the given node to the list, keeping the total size at or below maxElems.
func (h *nodesByDistance) push(n *node, maxElems int) {
	ix := sort.Search(len(h.entries), func(i int) bool {
		return enode.DistCmp(h.target, h.entries[i].ID(), n.ID()) > 0
	})
	if len(h.entries) < maxElems {
		h.entries = append(h.entries, n)
	}
	if ix == len(h.entries) {
		// farther away than all nodes we already have.
		// if there was room for it, the node is now the last element.
	} else {
		// slide existing entries down to make room
		// this will overwrite the entry we just appended.
		copy(h.entries[ix+1:], h.entries[ix:])
		h.entries[ix] = n
	}
}
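
// pushExample is an editor's illustrative sketch, not part of the upstream
// celo-blockchain file; the name and signature are invented for illustration
// only. It shows how nodesByDistance.push is typically driven (compare
// Table.closest and Table.lookup above): feeding candidates one by one yields
// at most maxElems nodes, sorted by increasing distance to target, with a
// closer candidate displacing the farthest entry once the list is full.
func pushExample(target enode.ID, candidates []*node, maxElems int) []*node {
	list := &nodesByDistance{target: target}
	for _, n := range candidates {
		list.push(n, maxElems) // keeps the slice distance-sorted and capped at maxElems
	}
	return list.entries // closest-first, length <= maxElems
}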