github.com/aquanetwork/aquachain@v1.7.8/p2p/discover/table.go

// Copyright 2015 The aquachain Authors
// This file is part of the aquachain library.
//
// The aquachain library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The aquachain library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the aquachain library. If not, see <http://www.gnu.org/licenses/>.

// Package discover implements the Node Discovery Protocol.
//
// The Node Discovery protocol provides a way to find RLPx nodes that
// can be connected to. It uses a Kademlia-like protocol to maintain a
// distributed database of the IDs and endpoints of all listening
// nodes.
package discover

import (
	crand "crypto/rand"
	"encoding/binary"
	"errors"
	"fmt"
	mrand "math/rand"
	"net"
	"sort"
	"sync"
	"time"

	"gitlab.com/aquachain/aquachain/common"
	"gitlab.com/aquachain/aquachain/common/log"
	"gitlab.com/aquachain/aquachain/crypto"
	"gitlab.com/aquachain/aquachain/p2p/netutil"
)

const (
	alpha           = 3  // Kademlia concurrency factor
	bucketSize      = 16 // Kademlia bucket size
	maxReplacements = 10 // Size of per-bucket replacement list

	// We keep buckets for the upper 1/15 of distances because
	// it's very unlikely we'll ever encounter a node that's closer.
	hashBits          = len(common.Hash{}) * 8
	nBuckets          = hashBits / 15       // Number of buckets
	bucketMinDistance = hashBits - nBuckets // Log distance of closest bucket

	// IP address limits.
	bucketIPLimit, bucketSubnet = 2, 24 // at most 2 addresses from the same /24
	tableIPLimit, tableSubnet   = 10, 24

	maxBondingPingPongs = 16 // Limit on the number of concurrent ping/pong interactions
	maxFindnodeFailures = 3  // Nodes exceeding this limit are dropped

	refreshInterval    = 10 * time.Minute
	revalidateInterval = 10 * time.Second
	copyNodesInterval  = 30 * time.Second
	seedMinTableTime   = 5 * time.Minute
	seedCount          = 30
	seedMaxAge         = 24 * time.Hour
)

type Table struct {
	mutex   sync.Mutex        // protects buckets, bucket content, nursery, rand
	buckets [nBuckets]*bucket // index of known nodes by distance
	nursery []*Node           // bootstrap nodes
	rand    *mrand.Rand       // source of randomness, periodically reseeded
	ips     netutil.DistinctNetSet

	db         *nodeDB // database of known nodes
	refreshReq chan chan struct{}
	initDone   chan struct{}
	closeReq   chan struct{}
	closed     chan struct{}

	bondmu    sync.Mutex
	bonding   map[NodeID]*bondproc
	bondslots chan struct{} // limits total number of active bonding processes

	nodeAddedHook func(*Node) // for testing

	net  transport
	self *Node // metadata of the local node
}

type bondproc struct {
	err  error
	n    *Node
	done chan struct{}
}

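// bucketIndexSketch is a hypothetical helper (not part of the original
// file) illustrating the constant arithmetic above. With 256-bit hashes,
// nBuckets = 256/15 = 17 and bucketMinDistance = 239, so buckets[0]
// collects every log distance up to 240 and each of the 16 remaining
// buckets holds exactly one of the distances 241..256. This mirrors
// Table.bucket further below.
func bucketIndexSketch(d int) int {
	if d <= bucketMinDistance {
		return 0
	}
	return d - bucketMinDistance - 1
}
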
// transport is implemented by the UDP transport.
// it is an interface so we can test without opening lots of UDP
// sockets and without generating a private key.
type transport interface {
	ping(NodeID, *net.UDPAddr) error
	waitping(NodeID) error
	findnode(toid NodeID, addr *net.UDPAddr, target NodeID) ([]*Node, error)
	close()
}

// bucket contains nodes, ordered by their last activity. the entry
// that was most recently active is the first element in entries.
type bucket struct {
	entries      []*Node // live entries, sorted by time of last contact
	replacements []*Node // recently seen nodes to be used if revalidation fails
	ips          netutil.DistinctNetSet
}

func newTable(t transport, ourID NodeID, ourAddr *net.UDPAddr, nodeDBPath string, bootnodes []*Node) (*Table, error) {
	// If no node database was given, use an in-memory one
	db, err := newNodeDB(nodeDBPath, Version, ourID)
	if err != nil {
		return nil, err
	}
	tab := &Table{
		net:        t,
		db:         db,
		self:       NewNode(ourID, ourAddr.IP, uint16(ourAddr.Port), uint16(ourAddr.Port)),
		bonding:    make(map[NodeID]*bondproc),
		bondslots:  make(chan struct{}, maxBondingPingPongs),
		refreshReq: make(chan chan struct{}),
		initDone:   make(chan struct{}),
		closeReq:   make(chan struct{}),
		closed:     make(chan struct{}),
		rand:       mrand.New(mrand.NewSource(0)),
		ips:        netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit},
	}
	if err := tab.setFallbackNodes(bootnodes); err != nil {
		return nil, err
	}
	for i := 0; i < cap(tab.bondslots); i++ {
		tab.bondslots <- struct{}{}
	}
	for i := range tab.buckets {
		tab.buckets[i] = &bucket{
			ips: netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit},
		}
	}
	tab.seedRand()
	tab.loadSeedNodes(false)
	// Start the background expiration goroutine after loading seeds so that the search for
	// seed nodes also considers older nodes that would otherwise be removed by the
	// expiration.
	tab.db.ensureExpirer()
	go tab.loop()
	return tab, nil
}

func (tab *Table) seedRand() {
	var b [8]byte
	crand.Read(b[:])

	tab.mutex.Lock()
	tab.rand.Seed(int64(binary.BigEndian.Uint64(b[:])))
	tab.mutex.Unlock()
}

// Self returns the local node.
// The returned node should not be modified by the caller.
func (tab *Table) Self() *Node {
	return tab.self
}

// ReadRandomNodes fills the given slice with random nodes from the
// table. It will not write the same node more than once. The nodes in
// the slice are copies and can be modified by the caller.
func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
	if !tab.isInitDone() {
		return 0
	}
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	// Find all non-empty buckets and get a fresh slice of their entries.
	var buckets [][]*Node
	for _, b := range tab.buckets {
		if len(b.entries) > 0 {
			buckets = append(buckets, b.entries[:])
		}
	}
	if len(buckets) == 0 {
		return 0
	}
	// Shuffle the buckets.
	for i := len(buckets) - 1; i > 0; i-- {
		j := tab.rand.Intn(len(buckets))
		buckets[i], buckets[j] = buckets[j], buckets[i]
	}
	// Move head of each bucket into buf, removing buckets that become empty.
	var i, j int
	for ; i < len(buf); i, j = i+1, (j+1)%len(buckets) {
		b := buckets[j]
		buf[i] = &(*b[0])
		buckets[j] = b[1:]
		if len(b) == 1 {
			buckets = append(buckets[:j], buckets[j+1:]...)
		}
		if len(buckets) == 0 {
			break
		}
	}
	return i + 1
}

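// readRandomSketch is a hypothetical usage example (not part of the
// original source): callers typically hand ReadRandomNodes a fixed-size
// buffer and keep only the prefix that was actually filled. Reusing
// bucketSize as the buffer length is an arbitrary choice for illustration.
func readRandomSketch(tab *Table) []*Node {
	buf := make([]*Node, bucketSize)
	n := tab.ReadRandomNodes(buf)
	return buf[:n]
}
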
// Close terminates the network listener and flushes the node database.
func (tab *Table) Close() {
	select {
	case <-tab.closed:
		// already closed.
	case tab.closeReq <- struct{}{}:
		<-tab.closed // wait for refreshLoop to end.
	}
}

// setFallbackNodes sets the initial points of contact. These nodes
// are used to connect to the network if the table is empty and there
// are no known nodes in the database.
func (tab *Table) setFallbackNodes(nodes []*Node) error {
	for _, n := range nodes {
		if err := n.validateComplete(); err != nil {
			return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err)
		}
	}
	tab.nursery = make([]*Node, 0, len(nodes))
	for _, n := range nodes {
		cpy := *n
		// Recompute cpy.sha because the node might not have been
		// created by NewNode or ParseNode.
		cpy.sha = crypto.Keccak256Hash(n.ID[:])
		tab.nursery = append(tab.nursery, &cpy)
	}
	return nil
}

// isInitDone returns whether the table's initial seeding procedure has completed.
func (tab *Table) isInitDone() bool {
	select {
	case <-tab.initDone:
		return true
	default:
		return false
	}
}

// Resolve searches for a specific node with the given ID.
// It returns nil if the node could not be found.
func (tab *Table) Resolve(targetID NodeID) *Node {
	// If the node is present in the local table, no
	// network interaction is required.
	hash := crypto.Keccak256Hash(targetID[:])
	tab.mutex.Lock()
	cl := tab.closest(hash, 1)
	tab.mutex.Unlock()
	if len(cl.entries) > 0 && cl.entries[0].ID == targetID {
		return cl.entries[0]
	}
	// Otherwise, do a network lookup.
	result := tab.Lookup(targetID)
	for _, n := range result {
		if n.ID == targetID {
			return n
		}
	}
	return nil
}

// Lookup performs a network search for nodes close
// to the given target. It approaches the target by querying
// nodes that are closer to it on each iteration.
// The given target does not need to be an actual node
// identifier.
func (tab *Table) Lookup(targetID NodeID) []*Node {
	return tab.lookup(targetID, true)
}

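// resolveSketch is a hypothetical usage example (not in the original
// source) contrasting the two entry points above: Resolve targets one
// concrete node ID, while Lookup returns the whole neighborhood of a
// target, which may still be useful as peer candidates on a miss.
func resolveSketch(tab *Table, id NodeID) {
	if n := tab.Resolve(id); n != nil {
		log.Trace("Found node", "id", n.ID, "addr", n.addr())
		return
	}
	for _, n := range tab.Lookup(id) {
		log.Trace("Neighbor of missing node", "id", n.ID, "addr", n.addr())
	}
}
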
func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node {
	var (
		target         = crypto.Keccak256Hash(targetID[:])
		asked          = make(map[NodeID]bool)
		seen           = make(map[NodeID]bool)
		reply          = make(chan []*Node, alpha)
		pendingQueries = 0
		result         *nodesByDistance
	)
	// don't query further if we hit ourselves.
	// unlikely to happen often in practice.
	asked[tab.self.ID] = true

	for {
		tab.mutex.Lock()
		// generate initial result set
		result = tab.closest(target, bucketSize)
		tab.mutex.Unlock()
		if len(result.entries) > 0 || !refreshIfEmpty {
			break
		}
		// The result set is empty, all nodes were dropped, refresh.
		// We actually wait for the refresh to complete here. The very
		// first query will hit this case and run the bootstrapping
		// logic.
		<-tab.refresh()
		refreshIfEmpty = false
	}

	for {
		// ask the alpha closest nodes that we haven't asked yet
		for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
			n := result.entries[i]
			if !asked[n.ID] {
				asked[n.ID] = true
				pendingQueries++
				go func() {
					// Find potential neighbors to bond with
					r, err := tab.net.findnode(n.ID, n.addr(), targetID)
					if err != nil {
						// Bump the failure counter to detect and evacuate non-bonded entries
						fails := tab.db.findFails(n.ID) + 1
						tab.db.updateFindFails(n.ID, fails)
						log.Trace("Bumping findnode failure counter", "id", n.ID, "failcount", fails)

						if fails >= maxFindnodeFailures {
							log.Trace("Too many findnode failures, dropping", "id", n.ID, "failcount", fails)
							tab.delete(n)
						}
					}
					reply <- tab.bondall(r)
				}()
			}
		}
		if pendingQueries == 0 {
			// we have asked all closest nodes, stop the search
			break
		}
		// wait for the next reply
		for _, n := range <-reply {
			if n != nil && !seen[n.ID] {
				seen[n.ID] = true
				result.push(n, bucketSize)
			}
		}
		pendingQueries--
	}
	return result.entries
}

func (tab *Table) refresh() <-chan struct{} {
	done := make(chan struct{})
	select {
	case tab.refreshReq <- done:
	case <-tab.closed:
		close(done)
	}
	return done
}

// loop schedules refresh, revalidate runs and coordinates shutdown.
func (tab *Table) loop() {
	var (
		revalidate     = time.NewTimer(tab.nextRevalidateTime())
		refresh        = time.NewTicker(refreshInterval)
		copyNodes      = time.NewTicker(copyNodesInterval)
		revalidateDone = make(chan struct{})
		refreshDone    = make(chan struct{})           // where doRefresh reports completion
		waiting        = []chan struct{}{tab.initDone} // holds waiting callers while doRefresh runs
	)
	defer refresh.Stop()
	defer revalidate.Stop()
	defer copyNodes.Stop()

	// Start initial refresh.
	go tab.doRefresh(refreshDone)

loop:
	for {
		select {
		case <-refresh.C:
			tab.seedRand()
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}
		case req := <-tab.refreshReq:
			waiting = append(waiting, req)
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}
		case <-refreshDone:
			for _, ch := range waiting {
				close(ch)
			}
			waiting, refreshDone = nil, nil
		case <-revalidate.C:
			go tab.doRevalidate(revalidateDone)
		case <-revalidateDone:
			revalidate.Reset(tab.nextRevalidateTime())
		case <-copyNodes.C:
			go tab.copyBondedNodes()
		case <-tab.closeReq:
			break loop
		}
	}

	if tab.net != nil {
		tab.net.close()
	}
	if refreshDone != nil {
		<-refreshDone
	}
	for _, ch := range waiting {
		close(ch)
	}
	tab.db.close()
	close(tab.closed)
}

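// refreshWaitSketch is a hypothetical illustration (not in the original
// file) of the channel protocol loop() implements: refresh() hands a done
// channel to the loop, which closes it once the in-flight doRefresh run
// completes, so a caller can block until the table has been refreshed.
// The timeout here is an arbitrary choice for illustration.
func refreshWaitSketch(tab *Table) {
	select {
	case <-tab.refresh():
		log.Trace("Table refresh complete")
	case <-time.After(refreshInterval):
		log.Trace("Gave up waiting for table refresh")
	}
}
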
// doRefresh performs a lookup for a random target to keep buckets
// full. seed nodes are inserted if the table is empty (initial
// bootstrap or discarded faulty peers).
func (tab *Table) doRefresh(done chan struct{}) {
	defer close(done)

	// Load nodes from the database and insert
	// them. This should yield a few previously seen nodes that are
	// (hopefully) still alive.
	tab.loadSeedNodes(true)

	// Run self lookup to discover new neighbor nodes.
	tab.lookup(tab.self.ID, false)

	// The Kademlia paper specifies that the bucket refresh should
	// perform a lookup in the least recently used bucket. We cannot
	// adhere to this because the findnode target is a 512-bit value
	// (not hash-sized) and it is not easily possible to generate a
	// sha3 preimage that falls into a chosen bucket.
	// We perform a few lookups with a random target instead.
	for i := 0; i < 3; i++ {
		var target NodeID
		crand.Read(target[:])
		tab.lookup(target, false)
	}
}

func (tab *Table) loadSeedNodes(bond bool) {
	seeds := tab.db.querySeeds(seedCount, seedMaxAge)
	seeds = append(seeds, tab.nursery...)
	if bond {
		seeds = tab.bondall(seeds)
	}
	for i := range seeds {
		seed := seeds[i]
		if 13000 <= seed.UDP && seed.UDP <= 13999 {
			log.Debug("Skipping bad seed node in database", "id", seed.ID, "addr", seed.addr(), "port", seed.UDP)
			continue
		}
		age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.bondTime(seed.ID)) }}
		log.Debug("Found seed node in database", "id", seed.ID, "addr", seed.addr(), "age", age)
		tab.add(seed)
	}
}

// doRevalidate checks that the last node in a random bucket is still live
// and replaces or deletes the node if it isn't.
func (tab *Table) doRevalidate(done chan<- struct{}) {
	defer func() { done <- struct{}{} }()

	last, bi := tab.nodeToRevalidate()
	if last == nil {
		// No non-empty bucket found.
		return
	}

	// Ping the selected node and wait for a pong.
	err := tab.ping(last.ID, last.addr())

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.buckets[bi]
	if err == nil {
		// The node responded, move it to the front.
		log.Debug("Revalidated node", "b", bi, "id", last.ID)
		b.bump(last)
		return
	}
	// No reply received, pick a replacement or delete the node if there aren't
	// any replacements.
	if r := tab.replace(b, last); r != nil {
		log.Debug("Replaced dead node", "b", bi, "id", last.ID, "ip", last.IP, "r", r.ID, "rip", r.IP)
	} else {
		log.Debug("Removed dead node", "b", bi, "id", last.ID, "ip", last.IP)
	}
}

// nodeToRevalidate returns the last node in a random, non-empty bucket.
func (tab *Table) nodeToRevalidate() (n *Node, bi int) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	for _, bi = range tab.rand.Perm(len(tab.buckets)) {
		b := tab.buckets[bi]
		if len(b.entries) > 0 {
			last := b.entries[len(b.entries)-1]
			return last, bi
		}
	}
	return nil, 0
}

func (tab *Table) nextRevalidateTime() time.Duration {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	return time.Duration(tab.rand.Int63n(int64(revalidateInterval)))
}

// copyBondedNodes adds nodes from the table to the database if they have been in the table
// longer than seedMinTableTime.
func (tab *Table) copyBondedNodes() {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	now := time.Now()
	for _, b := range tab.buckets {
		for _, n := range b.entries {
			if now.Sub(n.addedAt) >= seedMinTableTime {
				tab.db.updateNode(n)
			}
		}
	}
}

// closest returns the n nodes in the table that are closest to the
// given id. The caller must hold tab.mutex.
func (tab *Table) closest(target common.Hash, nresults int) *nodesByDistance {
	// This is a very wasteful way to find the closest nodes but
	// obviously correct. I believe that tree-based buckets would make
	// this easier to implement efficiently.
	close := &nodesByDistance{target: target}
	for _, b := range tab.buckets {
		for _, n := range b.entries {
			close.push(n, nresults)
		}
	}
	return close
}

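// closestSketch is a hypothetical example (not part of the original file)
// of the locking contract documented on closest: the caller takes
// tab.mutex and receives the nresults table entries nearest the target
// hash. Reusing bucketSize as the result count mirrors what lookup does.
func closestSketch(tab *Table, target common.Hash) []*Node {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	return tab.closest(target, bucketSize).entries
}
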
func (tab *Table) len() (n int) {
	for _, b := range tab.buckets {
		n += len(b.entries)
	}
	return n
}

// bondall bonds with all given nodes concurrently and returns
// those nodes for which bonding has probably succeeded.
func (tab *Table) bondall(nodes []*Node) (result []*Node) {
	rc := make(chan *Node, len(nodes))
	for i := range nodes {
		go func(n *Node) {
			nn, _ := tab.bond(false, n.ID, n.addr(), n.TCP)
			rc <- nn
		}(nodes[i])
	}
	for range nodes {
		if n := <-rc; n != nil {
			result = append(result, n)
		}
	}
	return result
}

// bond ensures the local node has a bond with the given remote node.
// It also attempts to insert the node into the table if bonding succeeds.
// The caller must not hold tab.mutex.
//
// A bond must be established before sending findnode requests.
// Both sides must have completed a ping/pong exchange for a bond to
// exist. The total number of active bonding processes is limited in
// order to restrain network use.
//
// bond is meant to operate idempotently in that bonding with a remote
// node which still remembers a previously established bond will work.
// The remote node will simply not send a ping back, causing waitping
// to time out.
//
// If pinged is true, the remote node has just pinged us and one half
// of the process can be skipped.
func (tab *Table) bond(pinged bool, id NodeID, addr *net.UDPAddr, tcpPort uint16) (*Node, error) {
	if id == tab.self.ID {
		return nil, errors.New("is self")
	}
	if pinged && !tab.isInitDone() {
		return nil, errors.New("still initializing")
	}
	// Start bonding if we haven't seen this node for a while or if it failed findnode too often.
	node, fails := tab.db.node(id), tab.db.findFails(id)
	age := time.Since(tab.db.bondTime(id))
	var result error
	if fails > 0 || age > nodeDBNodeExpiration {
		log.Trace("Starting bonding ping/pong", "id", id, "known", node != nil, "failcount", fails, "age", age)

		tab.bondmu.Lock()
		w := tab.bonding[id]
		if w != nil {
			// Wait for an existing bonding process to complete.
			tab.bondmu.Unlock()
			<-w.done
		} else {
			// Register a new bonding process.
			w = &bondproc{done: make(chan struct{})}
			tab.bonding[id] = w
			tab.bondmu.Unlock()
			// Do the ping/pong. The result goes into w.
			tab.pingpong(w, pinged, id, addr, tcpPort)
			// Unregister the process after it's done.
			tab.bondmu.Lock()
			delete(tab.bonding, id)
			tab.bondmu.Unlock()
		}
		// Retrieve the bonding results
		result = w.err
		if result == nil {
			node = w.n
		}
	}
	// Add the node to the table even if the bonding ping/pong
	// fails. It will be replaced quickly if it continues to be
	// unresponsive.
	if node != nil {
		tab.add(node)
		tab.db.updateFindFails(id, 0)
	}
	return node, result
}

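// inboundBondSketch is a hypothetical sketch (not part of the original
// file) of the pinged=true path described in bond's comment: when the
// remote node has just pinged us, our half of the ping/pong is already
// done, so pingpong can skip waitping. The real inbound-ping handling
// lives in the UDP transport; this helper exists only for illustration.
func inboundBondSketch(tab *Table, id NodeID, addr *net.UDPAddr, tcpPort uint16) {
	go func() {
		if _, err := tab.bond(true, id, addr, tcpPort); err != nil {
			log.Trace("Inbound bond failed", "id", id, "err", err)
		}
	}()
}
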
func (tab *Table) pingpong(w *bondproc, pinged bool, id NodeID, addr *net.UDPAddr, tcpPort uint16) {
	// Request a bonding slot to limit network usage
	<-tab.bondslots
	defer func() { tab.bondslots <- struct{}{} }()

	// Ping the remote side and wait for a pong.
	if w.err = tab.ping(id, addr); w.err != nil {
		close(w.done)
		return
	}
	if !pinged {
		// Give the remote node a chance to ping us before we start
		// sending findnode requests. If they still remember us,
		// waitping will simply time out.
		tab.net.waitping(id)
	}
	// Bonding succeeded, update the node database.
	w.n = NewNode(id, addr.IP, uint16(addr.Port), tcpPort)
	close(w.done)
}

// ping a remote endpoint and wait for a reply, also updating the node
// database accordingly.
func (tab *Table) ping(id NodeID, addr *net.UDPAddr) error {
	tab.db.updateLastPing(id, time.Now())
	if err := tab.net.ping(id, addr); err != nil {
		return err
	}
	tab.db.updateBondTime(id, time.Now())
	return nil
}

// bucket returns the bucket for the given node ID hash.
func (tab *Table) bucket(sha common.Hash) *bucket {
	d := logdist(tab.self.sha, sha)
	if d <= bucketMinDistance {
		return tab.buckets[0]
	}
	return tab.buckets[d-bucketMinDistance-1]
}

// add attempts to add the given node to its corresponding bucket. If the
// bucket has space available, adding the node succeeds immediately.
// Otherwise, the node is added if the least recently active node in
// the bucket does not respond to a ping packet.
//
// The caller must not hold tab.mutex.
func (tab *Table) add(new *Node) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	b := tab.bucket(new.sha)
	if !tab.bumpOrAdd(b, new) {
		// Node is not in table. Add it to the replacement list.
		tab.addReplacement(b, new)
	}
}

// stuff adds nodes to the end of their corresponding bucket
// if the bucket is not full. The caller must not hold tab.mutex.
func (tab *Table) stuff(nodes []*Node) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	for _, n := range nodes {
		if n.ID == tab.self.ID {
			continue // don't add self
		}
		b := tab.bucket(n.sha)
		if len(b.entries) < bucketSize {
			tab.bumpOrAdd(b, n)
		}
	}
}

// delete removes an entry from the node table (used to evacuate
// failed/non-bonded discovery peers).
func (tab *Table) delete(node *Node) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	tab.deleteInBucket(tab.bucket(node.sha), node)
}

func (tab *Table) addIP(b *bucket, ip net.IP) bool {
	if netutil.IsLAN(ip) {
		return true
	}
	if !tab.ips.Add(ip) {
		log.Debug("IP exceeds table limit", "ip", ip)
		return false
	}
	if !b.ips.Add(ip) {
		log.Debug("IP exceeds bucket limit", "ip", ip)
		tab.ips.Remove(ip)
		return false
	}
	return true
}

func (tab *Table) removeIP(b *bucket, ip net.IP) {
	if netutil.IsLAN(ip) {
		return
	}
	tab.ips.Remove(ip)
	b.ips.Remove(ip)
}

func (tab *Table) addReplacement(b *bucket, n *Node) {
	for _, e := range b.replacements {
		if e.ID == n.ID {
			return // already in list
		}
	}
	if !tab.addIP(b, n.IP) {
		return
	}
	var removed *Node
	b.replacements, removed = pushNode(b.replacements, n, maxReplacements)
	if removed != nil {
		tab.removeIP(b, removed.IP)
	}
}

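// ipLimitsSketch is a hypothetical demonstration (not in the original
// source) of the /24 limits enforced by addIP: with bucketIPLimit = 2,
// the third address from the same subnet is rejected at the bucket level.
// The addresses are documentation-range (not LAN, so the limits apply),
// and the lock is taken because addIP's callers hold tab.mutex.
func ipLimitsSketch(tab *Table) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	b := tab.buckets[0]
	fmt.Println(tab.addIP(b, net.ParseIP("192.0.2.1"))) // true: first address from 192.0.2.0/24
	fmt.Println(tab.addIP(b, net.ParseIP("192.0.2.2"))) // true: second, reaching bucketIPLimit
	fmt.Println(tab.addIP(b, net.ParseIP("192.0.2.3"))) // false: bucket /24 limit exceeded
}
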
// replace removes n from the replacement list and replaces 'last' with it if it is the
// last entry in the bucket. If 'last' isn't the last entry, it has either been replaced
// with someone else or became active.
func (tab *Table) replace(b *bucket, last *Node) *Node {
	if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID != last.ID {
		// Entry has moved, don't replace it.
		return nil
	}
	// Still the last entry.
	if len(b.replacements) == 0 {
		tab.deleteInBucket(b, last)
		return nil
	}
	r := b.replacements[tab.rand.Intn(len(b.replacements))]
	b.replacements = deleteNode(b.replacements, r)
	b.entries[len(b.entries)-1] = r
	tab.removeIP(b, last.IP)
	return r
}

// bump moves the given node to the front of the bucket entry list
// if it is contained in that list.
func (b *bucket) bump(n *Node) bool {
	for i := range b.entries {
		if b.entries[i].ID == n.ID {
			// move it to the front
			copy(b.entries[1:], b.entries[:i])
			b.entries[0] = n
			return true
		}
	}
	return false
}

// bumpOrAdd moves n to the front of the bucket entry list or adds it if the list isn't
// full. The return value is true if n is in the bucket.
func (tab *Table) bumpOrAdd(b *bucket, n *Node) bool {
	if b.bump(n) {
		return true
	}
	if len(b.entries) >= bucketSize || !tab.addIP(b, n.IP) {
		return false
	}
	b.entries, _ = pushNode(b.entries, n, bucketSize)
	b.replacements = deleteNode(b.replacements, n)
	n.addedAt = time.Now()
	if tab.nodeAddedHook != nil {
		tab.nodeAddedHook(n)
	}
	return true
}

func (tab *Table) deleteInBucket(b *bucket, n *Node) {
	b.entries = deleteNode(b.entries, n)
	tab.removeIP(b, n.IP)
}

// pushNode adds n to the front of list, keeping at most max items.
func pushNode(list []*Node, n *Node, max int) ([]*Node, *Node) {
	if len(list) < max {
		list = append(list, nil)
	}
	removed := list[len(list)-1]
	copy(list[1:], list)
	list[0] = n
	return list, removed
}

// deleteNode removes n from list.
func deleteNode(list []*Node, n *Node) []*Node {
	for i := range list {
		if list[i].ID == n.ID {
			return append(list[:i], list[i+1:]...)
		}
	}
	return list
}

// nodesByDistance is a list of nodes, ordered by
// distance to target.
type nodesByDistance struct {
	entries []*Node
	target  common.Hash
}

// push adds the given node to the list, keeping the total size below maxElems.
func (h *nodesByDistance) push(n *Node, maxElems int) {
	ix := sort.Search(len(h.entries), func(i int) bool {
		return distcmp(h.target, h.entries[i].sha, n.sha) > 0
	})
	if len(h.entries) < maxElems {
		h.entries = append(h.entries, n)
	}
	if ix == len(h.entries) {
		// farther away than all nodes we already have.
		// if there was room for it, the node is now the last element.
	} else {
		// slide existing entries down to make room
		// this will overwrite the entry we just appended.
		copy(h.entries[ix+1:], h.entries[ix:])
		h.entries[ix] = n
	}
}

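// pushSketch is a hypothetical example (not in the original source) of how
// lookup accumulates findnode replies: push keeps entries sorted by XOR
// distance to target (via distcmp) and caps the list at maxElems, so only
// the closest candidates survive once the list is full.
func pushSketch(target common.Hash, candidates []*Node) []*Node {
	byDist := &nodesByDistance{target: target}
	for _, n := range candidates {
		byDist.push(n, bucketSize) // keeps at most the bucketSize closest
	}
	return byDist.entries
}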