github.com/intfoundation/intchain@v0.0.0-20220727031208-4316ad31ca73/p2p/discover/table.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package discover implements the Node Discovery Protocol.
//
// The Node Discovery protocol provides a way to find RLPx nodes that
// can be connected to. It uses a Kademlia-like protocol to maintain a
// distributed database of the IDs and endpoints of all listening
// nodes.
package discover

import (
	crand "crypto/rand"
	"encoding/binary"
	"errors"
	"fmt"
	mrand "math/rand"
	"net"
	"sort"
	"sync"
	"time"

	"github.com/intfoundation/intchain/common"
	"github.com/intfoundation/intchain/crypto"
	"github.com/intfoundation/intchain/log"
	"github.com/intfoundation/intchain/p2p/netutil"
)

const (
	alpha           = 3  // Kademlia concurrency factor
	bucketSize      = 16 // Kademlia bucket size
	maxReplacements = 10 // Size of per-bucket replacement list

	// We keep buckets for the upper 1/15 of distances because
	// it's very unlikely we'll ever encounter a node that's closer.
	hashBits          = len(common.Hash{}) * 8
	nBuckets          = hashBits / 15       // Number of buckets
	bucketMinDistance = hashBits - nBuckets // Log distance of closest bucket

	// IP address limits.
	bucketIPLimit, bucketSubnet = 2, 24 // at most 2 addresses from the same /24
	tableIPLimit, tableSubnet   = 10, 24

	maxBondingPingPongs = 16 // Limit on the number of concurrent ping/pong interactions
	maxFindnodeFailures = 5  // Nodes exceeding this limit are dropped

	refreshInterval    = 30 * time.Minute
	revalidateInterval = 10 * time.Second
	copyNodesInterval  = 30 * time.Second
	seedMinTableTime   = 5 * time.Minute
	seedCount          = 30
	seedMaxAge         = 5 * 24 * time.Hour
)

type Table struct {
	mutex   sync.Mutex        // protects buckets, bucket content, nursery, rand
	buckets [nBuckets]*bucket // index of known nodes by distance
	nursery []*Node           // bootstrap nodes
	rand    *mrand.Rand       // source of randomness, periodically reseeded
	ips     netutil.DistinctNetSet

	db         *nodeDB // database of known nodes
	refreshReq chan chan struct{}
	initDone   chan struct{}
	closeReq   chan struct{}
	closed     chan struct{}

	bondmu    sync.Mutex
	bonding   map[NodeID]*bondproc
	bondslots chan struct{} // limits total number of active bonding processes

	nodeAddedHook func(*Node) // for testing

	net  transport
	self *Node // metadata of the local node
}

type bondproc struct {
	err  error
	n    *Node
	done chan struct{}
}

// transport is implemented by the UDP transport.
// It is an interface so we can test without opening lots of UDP
// sockets and without generating a private key.
type transport interface {
	ping(NodeID, *net.UDPAddr) error
	waitping(NodeID) error
	findnode(toid NodeID, addr *net.UDPAddr, target NodeID) ([]*Node, error)
	close()
}
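// Editorial sketch, not part of the original file: one way the transport
// interface can be satisfied in tests without opening real UDP sockets, as
// the comment above suggests. The name nullTransport is hypothetical.
type nullTransport struct{}

var _ transport = nullTransport{} // compile-time interface check

func (nullTransport) ping(NodeID, *net.UDPAddr) error { return nil }
func (nullTransport) waitping(NodeID) error           { return nil }
func (nullTransport) findnode(toid NodeID, addr *net.UDPAddr, target NodeID) ([]*Node, error) {
	// Pretend the remote node knows no neighbors.
	return nil, nil
}
func (nullTransport) close() {}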
// bucket contains nodes, ordered by their last activity. The entry
// that was most recently active is the first element in entries.
type bucket struct {
	entries      []*Node // live entries, sorted by time of last contact
	replacements []*Node // recently seen nodes to be used if revalidation fails
	ips          netutil.DistinctNetSet
}

func newTable(t transport, ourID NodeID, ourAddr *net.UDPAddr, nodeDBPath string, bootnodes []*Node) (*Table, error) {
	// If no node database was given, use an in-memory one.
	db, err := newNodeDB(nodeDBPath, Version, ourID)
	if err != nil {
		return nil, err
	}
	tab := &Table{
		net:        t,
		db:         db,
		self:       NewNode(ourID, ourAddr.IP, uint16(ourAddr.Port), uint16(ourAddr.Port)),
		bonding:    make(map[NodeID]*bondproc),
		bondslots:  make(chan struct{}, maxBondingPingPongs),
		refreshReq: make(chan chan struct{}),
		initDone:   make(chan struct{}),
		closeReq:   make(chan struct{}),
		closed:     make(chan struct{}),
		rand:       mrand.New(mrand.NewSource(0)),
		ips:        netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit},
	}
	if err := tab.setFallbackNodes(bootnodes); err != nil {
		return nil, err
	}
	for i := 0; i < cap(tab.bondslots); i++ {
		tab.bondslots <- struct{}{}
	}
	for i := range tab.buckets {
		tab.buckets[i] = &bucket{
			ips: netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit},
		}
	}
	tab.seedRand()
	tab.loadSeedNodes(false)
	// Start the background expiration goroutine after loading seeds so that the search for
	// seed nodes also considers older nodes that would otherwise be removed by the
	// expiration.
	tab.db.ensureExpirer()
	go tab.loop()
	return tab, nil
}

func (tab *Table) seedRand() {
	var b [8]byte
	crand.Read(b[:])

	tab.mutex.Lock()
	tab.rand.Seed(int64(binary.BigEndian.Uint64(b[:])))
	tab.mutex.Unlock()
}

// Self returns the local node.
// The returned node should not be modified by the caller.
func (tab *Table) Self() *Node {
	return tab.self
}

// ReadRandomNodes fills the given slice with random nodes from the
// table. It will not write the same node more than once. The nodes in
// the slice are copies and can be modified by the caller.
func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
	if !tab.isInitDone() {
		return 0
	}
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	// Find all non-empty buckets and get a fresh slice of their entries.
	var buckets [][]*Node
	for _, b := range tab.buckets {
		if len(b.entries) > 0 {
			buckets = append(buckets, b.entries[:])
		}
	}
	if len(buckets) == 0 {
		return 0
	}
	// Shuffle the buckets.
	for i := len(buckets) - 1; i > 0; i-- {
		j := tab.rand.Intn(len(buckets))
		buckets[i], buckets[j] = buckets[j], buckets[i]
	}
	// Move head of each bucket into buf, removing buckets that become empty.
	var i, j int
	for ; i < len(buf); i, j = i+1, (j+1)%len(buckets) {
		b := buckets[j]
		node := *b[0] // copy the entry so callers can modify it safely
		buf[i] = &node
		buckets[j] = b[1:]
		if len(b) == 1 {
			buckets = append(buckets[:j], buckets[j+1:]...)
		}
		if len(buckets) == 0 {
			i++
			break
		}
	}
	return i
}
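// Editorial usage sketch (not part of the original file). ReadRandomNodes
// reports how many slots of buf it actually filled, so only buf[:n] holds
// valid entries:
//
//	buf := make([]*Node, bucketSize)
//	n := tab.ReadRandomNodes(buf)
//	for _, node := range buf[:n] {
//		fmt.Println(node.ID)
//	}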
// Close terminates the network listener and flushes the node database.
func (tab *Table) Close() {
	select {
	case <-tab.closed:
		// already closed.
	case tab.closeReq <- struct{}{}:
		<-tab.closed // wait for loop to end.
	}
}

// setFallbackNodes sets the initial points of contact. These nodes
// are used to connect to the network if the table is empty and there
// are no known nodes in the database.
func (tab *Table) setFallbackNodes(nodes []*Node) error {
	for _, n := range nodes {
		if err := n.validateComplete(); err != nil {
			return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err)
		}
	}
	tab.nursery = make([]*Node, 0, len(nodes))
	for _, n := range nodes {
		cpy := *n
		// Recompute cpy.sha because the node might not have been
		// created by NewNode or ParseNode.
		cpy.sha = crypto.Keccak256Hash(n.ID[:])
		tab.nursery = append(tab.nursery, &cpy)
	}
	return nil
}

// isInitDone returns whether the table's initial seeding procedure has completed.
func (tab *Table) isInitDone() bool {
	select {
	case <-tab.initDone:
		return true
	default:
		return false
	}
}

// Resolve searches for a specific node with the given ID.
// It returns nil if the node could not be found.
func (tab *Table) Resolve(targetID NodeID) *Node {
	// If the node is present in the local table, no
	// network interaction is required.
	hash := crypto.Keccak256Hash(targetID[:])
	tab.mutex.Lock()
	cl := tab.closest(hash, 1)
	tab.mutex.Unlock()
	if len(cl.entries) > 0 && cl.entries[0].ID == targetID {
		return cl.entries[0]
	}
	// Otherwise, do a network lookup.
	result := tab.Lookup(targetID)
	for _, n := range result {
		if n.ID == targetID {
			return n
		}
	}
	return nil
}

// Lookup performs a network search for nodes close
// to the given target. It approaches the target by querying
// nodes that are closer to it on each iteration.
// The given target does not need to be an actual node
// identifier.
func (tab *Table) Lookup(targetID NodeID) []*Node {
	return tab.lookup(targetID, true)
}
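// Editorial sketch (not from the original file): a lookup toward a random
// target, as doRefresh below does to fill buckets. The target does not need
// to identify a real node; crand is the crypto/rand import above.
//
//	var target NodeID
//	crand.Read(target[:])
//	neighbors := tab.Lookup(target)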
func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node {
	var (
		target         = crypto.Keccak256Hash(targetID[:])
		asked          = make(map[NodeID]bool)
		seen           = make(map[NodeID]bool)
		reply          = make(chan []*Node, alpha)
		pendingQueries = 0
		result         *nodesByDistance
	)
	// Don't query further if we hit ourself.
	// Unlikely to happen often in practice.
	asked[tab.self.ID] = true

	for {
		tab.mutex.Lock()
		// Generate the initial result set.
		result = tab.closest(target, bucketSize)
		tab.mutex.Unlock()
		if len(result.entries) > 0 || !refreshIfEmpty {
			break
		}
		// The result set is empty, all nodes were dropped, refresh.
		// We actually wait for the refresh to complete here. The very
		// first query will hit this case and run the bootstrapping
		// logic.
		<-tab.refresh()
		refreshIfEmpty = false
	}

	for {
		// Ask the alpha closest nodes that we haven't asked yet.
		for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
			n := result.entries[i]
			if !asked[n.ID] {
				asked[n.ID] = true
				pendingQueries++
				go func() {
					// Find potential neighbors to bond with.
					r, err := tab.net.findnode(n.ID, n.addr(), targetID)
					if err != nil {
						// Bump the failure counter to detect and evacuate non-bonded entries.
						fails := tab.db.findFails(n.ID) + 1
						tab.db.updateFindFails(n.ID, fails)
						log.Trace("Bumping findnode failure counter", "id", n.ID, "failcount", fails)

						if fails >= maxFindnodeFailures {
							log.Trace("Too many findnode failures, dropping", "id", n.ID, "failcount", fails)
							tab.delete(n)
						}
					}
					reply <- tab.bondall(r)
				}()
			}
		}
		if pendingQueries == 0 {
			// We have asked all closest nodes, stop the search.
			break
		}
		// Wait for the next reply.
		for _, n := range <-reply {
			if n != nil && !seen[n.ID] {
				seen[n.ID] = true
				result.push(n, bucketSize)
			}
		}
		pendingQueries--
	}
	return result.entries
}

// refresh requests a table refresh and returns a channel that is closed
// when the refresh has completed (or the table has been closed).
func (tab *Table) refresh() <-chan struct{} {
	done := make(chan struct{})
	select {
	case tab.refreshReq <- done:
	case <-tab.closed:
		close(done)
	}
	return done
}

// loop schedules refresh and revalidate runs, and coordinates shutdown.
func (tab *Table) loop() {
	var (
		revalidate     = time.NewTimer(tab.nextRevalidateTime())
		refresh        = time.NewTicker(refreshInterval)
		copyNodes      = time.NewTicker(copyNodesInterval)
		revalidateDone = make(chan struct{})
		refreshDone    = make(chan struct{})           // where doRefresh reports completion
		waiting        = []chan struct{}{tab.initDone} // holds waiting callers while doRefresh runs
	)
	defer refresh.Stop()
	defer revalidate.Stop()
	defer copyNodes.Stop()

	// Start initial refresh.
	go tab.doRefresh(refreshDone)

loop:
	for {
		select {
		case <-refresh.C:
			tab.seedRand()
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}
		case req := <-tab.refreshReq:
			waiting = append(waiting, req)
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}
		case <-refreshDone:
			for _, ch := range waiting {
				close(ch)
			}
			waiting, refreshDone = nil, nil
		case <-revalidate.C:
			go tab.doRevalidate(revalidateDone)
		case <-revalidateDone:
			revalidate.Reset(tab.nextRevalidateTime())
		case <-copyNodes.C:
			go tab.copyBondedNodes()
		case <-tab.closeReq:
			break loop
		}
	}

	if tab.net != nil {
		tab.net.close()
	}
	if refreshDone != nil {
		<-refreshDone
	}
	for _, ch := range waiting {
		close(ch)
	}
	tab.db.close()
	close(tab.closed)
}
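// Editorial sketch (not from the original file): lookup above blocks on the
// done channel returned by refresh, and external callers can do the same:
//
//	<-tab.refresh() // returns once loop() has run doRefresh to completion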
// doRefresh performs a lookup for a random target to keep buckets
// full. Seed nodes are inserted if the table is empty (initial
// bootstrap or discarded faulty peers).
func (tab *Table) doRefresh(done chan struct{}) {
	defer close(done)

	// Load nodes from the database and insert
	// them. This should yield a few previously seen nodes that are
	// (hopefully) still alive.
	tab.loadSeedNodes(true)

	// Run self lookup to discover new neighbor nodes.
	tab.lookup(tab.self.ID, false)

	// The Kademlia paper specifies that the bucket refresh should
	// perform a lookup in the least recently used bucket. We cannot
	// adhere to this because the findnode target is a 512-bit value
	// (not hash-sized) and it is not easily possible to generate a
	// sha3 preimage that falls into a chosen bucket.
	// We perform a few lookups with a random target instead.
	for i := 0; i < 3; i++ {
		var target NodeID
		crand.Read(target[:])
		tab.lookup(target, false)
	}
}

func (tab *Table) loadSeedNodes(bond bool) {
	seeds := tab.db.querySeeds(seedCount, seedMaxAge)
	seeds = append(seeds, tab.nursery...)
	if bond {
		seeds = tab.bondall(seeds)
	}
	for i := range seeds {
		seed := seeds[i]
		age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.bondTime(seed.ID)) }}
		log.Debug("Found seed node in database", "id", seed.ID, "addr", seed.addr(), "age", age)
		tab.add(seed)
	}
}

// doRevalidate checks that the last node in a random bucket is still live
// and replaces or deletes the node if it isn't.
func (tab *Table) doRevalidate(done chan<- struct{}) {
	defer func() { done <- struct{}{} }()

	last, bi := tab.nodeToRevalidate()
	if last == nil {
		// No non-empty bucket found.
		return
	}

	// Ping the selected node and wait for a pong.
	err := tab.ping(last.ID, last.addr())

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.buckets[bi]
	if err == nil {
		// The node responded, move it to the front.
		log.Debug("Revalidated node", "b", bi, "id", last.ID)
		b.bump(last)
		return
	}
	// No reply received, pick a replacement or delete the node if there aren't
	// any replacements.
	if r := tab.replace(b, last); r != nil {
		log.Debug("Replaced dead node", "b", bi, "id", last.ID, "ip", last.IP, "r", r.ID, "rip", r.IP)
	} else {
		log.Debug("Removed dead node", "b", bi, "id", last.ID, "ip", last.IP)
	}
}

// nodeToRevalidate returns the last node in a random, non-empty bucket.
func (tab *Table) nodeToRevalidate() (n *Node, bi int) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	for _, bi = range tab.rand.Perm(len(tab.buckets)) {
		b := tab.buckets[bi]
		if len(b.entries) > 0 {
			last := b.entries[len(b.entries)-1]
			return last, bi
		}
	}
	return nil, 0
}

// nextRevalidateTime returns a uniformly random delay in [0, revalidateInterval).
func (tab *Table) nextRevalidateTime() time.Duration {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	return time.Duration(tab.rand.Int63n(int64(revalidateInterval)))
}

// copyBondedNodes adds nodes from the table to the database if they have been in the table
// longer than seedMinTableTime.
func (tab *Table) copyBondedNodes() {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	now := time.Now()
	for _, b := range tab.buckets {
		for _, n := range b.entries {
			if now.Sub(n.addedAt) >= seedMinTableTime {
				tab.db.updateNode(n)
			}
		}
	}
}
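// Editorial note (sketch, not from the original file): copyBondedNodes and
// loadSeedNodes form a persistence round trip across restarts. Nodes that
// survive in the table for at least seedMinTableTime are written out, and on
// the next start querySeeds can hand them back:
//
//	tab.db.updateNode(n)                              // persisted after 5 minutes in the table
//	seeds := tab.db.querySeeds(seedCount, seedMaxAge) // candidates up to 5 days old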
// closest returns the n nodes in the table that are closest to the
// given id. The caller must hold tab.mutex.
func (tab *Table) closest(target common.Hash, nresults int) *nodesByDistance {
	// This is a very wasteful way to find the closest nodes but
	// obviously correct. I believe that tree-based buckets would make
	// this easier to implement efficiently.
	closeSet := &nodesByDistance{target: target}
	for _, b := range tab.buckets {
		for _, n := range b.entries {
			closeSet.push(n, nresults)
		}
	}
	return closeSet
}

func (tab *Table) len() (n int) {
	for _, b := range tab.buckets {
		n += len(b.entries)
	}
	return n
}

// bondall bonds with all given nodes concurrently and returns
// those nodes for which bonding has probably succeeded.
func (tab *Table) bondall(nodes []*Node) (result []*Node) {
	rc := make(chan *Node, len(nodes))
	for i := range nodes {
		go func(n *Node) {
			nn, _ := tab.bond(false, n.ID, n.addr(), n.TCP)
			rc <- nn
		}(nodes[i])
	}
	for range nodes {
		if n := <-rc; n != nil {
			result = append(result, n)
		}
	}
	return result
}

// bond ensures the local node has a bond with the given remote node.
// It also attempts to insert the node into the table if bonding succeeds.
// The caller must not hold tab.mutex.
//
// A bond must be established before sending findnode requests.
// Both sides must have completed a ping/pong exchange for a bond to
// exist. The total number of active bonding processes is limited in
// order to restrain network use.
//
// bond is meant to operate idempotently in that bonding with a remote
// node which still remembers a previously established bond will work.
// The remote node will simply not send a ping back, causing waitping
// to time out.
//
// If pinged is true, the remote node has just pinged us and one half
// of the process can be skipped.
func (tab *Table) bond(pinged bool, id NodeID, addr *net.UDPAddr, tcpPort uint16) (*Node, error) {
	if id == tab.self.ID {
		return nil, errors.New("is self")
	}
	if pinged && !tab.isInitDone() {
		return nil, errors.New("still initializing")
	}
	// Start bonding if we haven't seen this node for a while or if it failed findnode too often.
	node, fails := tab.db.node(id), tab.db.findFails(id)
	age := time.Since(tab.db.bondTime(id))
	var result error
	if fails > 0 || age > nodeDBNodeExpiration {
		log.Trace("Starting bonding ping/pong", "id", id, "known", node != nil, "failcount", fails, "age", age)

		tab.bondmu.Lock()
		w := tab.bonding[id]
		if w != nil {
			// Wait for an existing bonding process to complete.
			tab.bondmu.Unlock()
			<-w.done
		} else {
			// Register a new bonding process.
			w = &bondproc{done: make(chan struct{})}
			tab.bonding[id] = w
			tab.bondmu.Unlock()
			// Do the ping/pong. The result goes into w.
			tab.pingpong(w, pinged, id, addr, tcpPort)
			// Unregister the process after it's done.
			tab.bondmu.Lock()
			delete(tab.bonding, id)
			tab.bondmu.Unlock()
		}
		// Retrieve the bonding results.
		result = w.err
		if result == nil {
			node = w.n
		}
	}
	// Add the node to the table even if the bonding ping/pong
	// fails. It will be replaced quickly if it continues to be
	// unresponsive.
	if node != nil {
		tab.add(node)
		tab.db.updateFindFails(id, 0)
	}
	return node, result
}
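// Editorial sketch (assumed usage, not from the original file): when a remote
// node has just pinged us, the incoming-packet path can skip our half of the
// exchange by passing pinged=true. The variable names are illustrative.
//
//	if n, err := tab.bond(true, fromID, fromAddr, tcpPort); err == nil {
//		_ = n // bonded; the node is now eligible for findnode traffic
//	}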
func (tab *Table) pingpong(w *bondproc, pinged bool, id NodeID, addr *net.UDPAddr, tcpPort uint16) {
	// Request a bonding slot to limit network usage.
	<-tab.bondslots
	defer func() { tab.bondslots <- struct{}{} }()

	// Ping the remote side and wait for a pong.
	if w.err = tab.ping(id, addr); w.err != nil {
		close(w.done)
		return
	}
	if !pinged {
		// Give the remote node a chance to ping us before we start
		// sending findnode requests. If they still remember us,
		// waitping will simply time out.
		tab.net.waitping(id)
	}
	// Bonding succeeded, update the node database.
	w.n = NewNode(id, addr.IP, uint16(addr.Port), tcpPort)
	close(w.done)
}

// ping a remote endpoint and wait for a reply, also updating the node
// database accordingly.
func (tab *Table) ping(id NodeID, addr *net.UDPAddr) error {
	tab.db.updateLastPing(id, time.Now())
	if err := tab.net.ping(id, addr); err != nil {
		return err
	}
	tab.db.updateBondTime(id, time.Now())
	return nil
}

// bucket returns the bucket for the given node ID hash.
func (tab *Table) bucket(sha common.Hash) *bucket {
	d := logdist(tab.self.sha, sha)
	if d <= bucketMinDistance {
		return tab.buckets[0]
	}
	return tab.buckets[d-bucketMinDistance-1]
}

// add attempts to add the given node to its corresponding bucket. If the
// bucket has space available, adding the node succeeds immediately.
// Otherwise, the node is put on the replacement list and may be promoted
// later if an existing entry fails revalidation.
//
// The caller must not hold tab.mutex.
func (tab *Table) add(new *Node) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	b := tab.bucket(new.sha)
	if !tab.bumpOrAdd(b, new) {
		// Node is not in table. Add it to the replacement list.
		tab.addReplacement(b, new)
	}
}

// stuff adds nodes to the end of their corresponding bucket
// if the bucket is not full. The caller must not hold tab.mutex.
func (tab *Table) stuff(nodes []*Node) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	for _, n := range nodes {
		if n.ID == tab.self.ID {
			continue // don't add self
		}
		b := tab.bucket(n.sha)
		if len(b.entries) < bucketSize {
			tab.bumpOrAdd(b, n)
		}
	}
}

// delete removes an entry from the node table (used to evacuate
// failed/non-bonded discovery peers).
func (tab *Table) delete(node *Node) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	tab.deleteInBucket(tab.bucket(node.sha), node)
}

func (tab *Table) addIP(b *bucket, ip net.IP) bool {
	if netutil.IsLAN(ip) {
		return true
	}
	if !tab.ips.Add(ip) {
		log.Debug("IP exceeds table limit", "ip", ip)
		return false
	}
	if !b.ips.Add(ip) {
		log.Debug("IP exceeds bucket limit", "ip", ip)
		tab.ips.Remove(ip)
		return false
	}
	return true
}

func (tab *Table) removeIP(b *bucket, ip net.IP) {
	if netutil.IsLAN(ip) {
		return
	}
	tab.ips.Remove(ip)
	b.ips.Remove(ip)
}

func (tab *Table) addReplacement(b *bucket, n *Node) {
	for _, e := range b.replacements {
		if e.ID == n.ID {
			return // already in list
		}
	}
	if !tab.addIP(b, n.IP) {
		return
	}
	var removed *Node
	b.replacements, removed = pushNode(b.replacements, n, maxReplacements)
	if removed != nil {
		tab.removeIP(b, removed.IP)
	}
}
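// Editorial sketch (not from the original file) of the IP limits enforced by
// addIP, using netutil.DistinctNetSet exactly as configured above for buckets
// (at most bucketIPLimit addresses per /bucketSubnet):
//
//	set := netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit}
//	set.Add(net.ParseIP("203.0.113.1")) // true
//	set.Add(net.ParseIP("203.0.113.2")) // true
//	set.Add(net.ParseIP("203.0.113.3")) // false: third address in 203.0.113.0/24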
// replace picks a random node from the replacement list and swaps it in for
// 'last' if 'last' is still the last entry in the bucket. If 'last' isn't the
// last entry anymore, it has either been replaced with someone else or became
// active.
func (tab *Table) replace(b *bucket, last *Node) *Node {
	if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID != last.ID {
		// Entry has moved, don't replace it.
		return nil
	}
	// Still the last entry.
	if len(b.replacements) == 0 {
		tab.deleteInBucket(b, last)
		return nil
	}
	r := b.replacements[tab.rand.Intn(len(b.replacements))]
	b.replacements = deleteNode(b.replacements, r)
	b.entries[len(b.entries)-1] = r
	tab.removeIP(b, last.IP)
	return r
}

// bump moves the given node to the front of the bucket entry list
// if it is contained in that list.
func (b *bucket) bump(n *Node) bool {
	for i := range b.entries {
		if b.entries[i].ID == n.ID {
			// Move it to the front.
			copy(b.entries[1:], b.entries[:i])
			b.entries[0] = n
			return true
		}
	}
	return false
}

// bumpOrAdd moves n to the front of the bucket entry list or adds it if the list isn't
// full. The return value is true if n is in the bucket.
func (tab *Table) bumpOrAdd(b *bucket, n *Node) bool {
	if b.bump(n) {
		return true
	}
	if len(b.entries) >= bucketSize || !tab.addIP(b, n.IP) {
		return false
	}
	b.entries, _ = pushNode(b.entries, n, bucketSize)
	b.replacements = deleteNode(b.replacements, n)
	n.addedAt = time.Now()
	if tab.nodeAddedHook != nil {
		tab.nodeAddedHook(n)
	}
	return true
}

func (tab *Table) deleteInBucket(b *bucket, n *Node) {
	b.entries = deleteNode(b.entries, n)
	tab.removeIP(b, n.IP)
}

// pushNode adds n to the front of list, keeping at most max items. It returns
// the updated list and the entry that was evicted to make room, if any.
func pushNode(list []*Node, n *Node, max int) ([]*Node, *Node) {
	if len(list) < max {
		list = append(list, nil)
	}
	removed := list[len(list)-1]
	copy(list[1:], list)
	list[0] = n
	return list, removed
}

// deleteNode removes n from list.
func deleteNode(list []*Node, n *Node) []*Node {
	for i := range list {
		if list[i].ID == n.ID {
			return append(list[:i], list[i+1:]...)
		}
	}
	return list
}

// nodesByDistance is a list of nodes, ordered by
// distance to target.
type nodesByDistance struct {
	entries []*Node
	target  common.Hash
}

// push adds the given node to the list, keeping the total size below maxElems.
func (h *nodesByDistance) push(n *Node, maxElems int) {
	ix := sort.Search(len(h.entries), func(i int) bool {
		return distcmp(h.target, h.entries[i].sha, n.sha) > 0
	})
	if len(h.entries) < maxElems {
		h.entries = append(h.entries, n)
	}
	if ix < len(h.entries) {
		// Slide existing entries down to make room.
		// This will overwrite the entry we just appended.
		copy(h.entries[ix+1:], h.entries[ix:])
		h.entries[ix] = n
	}
	// Otherwise n is farther away than all existing entries; if there was
	// room for it, it is already in place as the last element.
}
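// Editorial sketch (not from the original file): collecting the closest
// candidates to a target hash, mirroring how closest() uses this type. The
// head of entries is always the nearest node seen so far.
//
//	nd := &nodesByDistance{target: targetHash}
//	for _, n := range candidates {
//		nd.push(n, bucketSize)
//	}
//	best := nd.entries[0]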