github.com/inphi/go-ethereum@v1.9.7/p2p/discover/table.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package discover implements the Node Discovery Protocol.
//
// The Node Discovery protocol provides a way to find RLPx nodes that
// can be connected to. It uses a Kademlia-like protocol to maintain a
// distributed database of the IDs and endpoints of all listening
// nodes.
package discover

import (
	crand "crypto/rand"
	"encoding/binary"
	"fmt"
	mrand "math/rand"
	"net"
	"sort"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/netutil"
)

const (
	alpha           = 3  // Kademlia concurrency factor
	bucketSize      = 16 // Kademlia bucket size
	maxReplacements = 10 // Size of per-bucket replacement list

	// We keep buckets for the upper 1/15 of distances because
	// it's very unlikely we'll ever encounter a node that's closer.
	hashBits          = len(common.Hash{}) * 8
	nBuckets          = hashBits / 15       // Number of buckets
	bucketMinDistance = hashBits - nBuckets // Log distance of closest bucket

	// IP address limits.
	bucketIPLimit, bucketSubnet = 2, 24 // at most 2 addresses from the same /24
	tableIPLimit, tableSubnet   = 10, 24

	refreshInterval    = 30 * time.Minute
	revalidateInterval = 10 * time.Second
	copyNodesInterval  = 30 * time.Second
	seedMinTableTime   = 5 * time.Minute
	seedCount          = 30
	seedMaxAge         = 5 * 24 * time.Hour
)

// Table is the 'node table', a Kademlia-like index of neighbor nodes. The table keeps
// itself up-to-date by verifying the liveness of neighbors and requesting their node
// records when announcements of a new record version are received.
type Table struct {
	mutex   sync.Mutex        // protects buckets, bucket content, nursery, rand
	buckets [nBuckets]*bucket // index of known nodes by distance
	nursery []*node           // bootstrap nodes
	rand    *mrand.Rand       // source of randomness, periodically reseeded
	ips     netutil.DistinctNetSet

	log        log.Logger
	db         *enode.DB // database of known nodes
	net        transport
	refreshReq chan chan struct{}
	initDone   chan struct{}
	closeReq   chan struct{}
	closed     chan struct{}

	nodeAddedHook func(*node) // for testing
}

// transport is implemented by the UDP transports.
type transport interface {
	Self() *enode.Node
	RequestENR(*enode.Node) (*enode.Node, error)
	lookupRandom() []*enode.Node
	lookupSelf() []*enode.Node
	ping(*enode.Node) (seq uint64, err error)
}
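// The sketch below is editorial and not part of the original file: a minimal
// stub satisfying the transport interface above, roughly what a test double
// could look like. Its no-op behaviour is an assumption for illustration only;
// the real implementations are the UDP transports elsewhere in this package.
type nullTransport struct{ self *enode.Node }

func (t *nullTransport) Self() *enode.Node                             { return t.self }
func (t *nullTransport) RequestENR(n *enode.Node) (*enode.Node, error) { return n, nil }
func (t *nullTransport) lookupRandom() []*enode.Node                   { return nil }
func (t *nullTransport) lookupSelf() []*enode.Node                     { return nil }
func (t *nullTransport) ping(*enode.Node) (seq uint64, err error)      { return 0, nil }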
// bucket contains nodes, ordered by their last activity. the entry
// that was most recently active is the first element in entries.
type bucket struct {
	entries      []*node // live entries, sorted by time of last contact
	replacements []*node // recently seen nodes to be used if revalidation fails
	ips          netutil.DistinctNetSet
}

func newTable(t transport, db *enode.DB, bootnodes []*enode.Node, log log.Logger) (*Table, error) {
	tab := &Table{
		net:        t,
		db:         db,
		refreshReq: make(chan chan struct{}),
		initDone:   make(chan struct{}),
		closeReq:   make(chan struct{}),
		closed:     make(chan struct{}),
		rand:       mrand.New(mrand.NewSource(0)),
		ips:        netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit},
		log:        log,
	}
	if err := tab.setFallbackNodes(bootnodes); err != nil {
		return nil, err
	}
	for i := range tab.buckets {
		tab.buckets[i] = &bucket{
			ips: netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit},
		}
	}
	tab.seedRand()
	tab.loadSeedNodes()

	return tab, nil
}

func (tab *Table) self() *enode.Node {
	return tab.net.Self()
}

func (tab *Table) seedRand() {
	var b [8]byte
	crand.Read(b[:])

	tab.mutex.Lock()
	tab.rand.Seed(int64(binary.BigEndian.Uint64(b[:])))
	tab.mutex.Unlock()
}

// ReadRandomNodes fills the given slice with random nodes from the table. The results
// are guaranteed to be unique for a single invocation, no node will appear twice.
func (tab *Table) ReadRandomNodes(buf []*enode.Node) (n int) {
	if !tab.isInitDone() {
		return 0
	}
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	var nodes []*enode.Node
	for _, b := range &tab.buckets {
		for _, n := range b.entries {
			nodes = append(nodes, unwrapNode(n))
		}
	}
	// Shuffle.
	for i := 0; i < len(nodes); i++ {
		j := tab.rand.Intn(len(nodes))
		nodes[i], nodes[j] = nodes[j], nodes[i]
	}
	return copy(buf, nodes)
}

// getNode returns the node with the given ID or nil if it isn't in the table.
func (tab *Table) getNode(id enode.ID) *enode.Node {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	b := tab.bucket(id)
	for _, e := range b.entries {
		if e.ID() == id {
			return unwrapNode(e)
		}
	}
	return nil
}

// close terminates the network listener and flushes the node database.
func (tab *Table) close() {
	close(tab.closeReq)
	<-tab.closed
}

// setFallbackNodes sets the initial points of contact. These nodes
// are used to connect to the network if the table is empty and there
// are no known nodes in the database.
func (tab *Table) setFallbackNodes(nodes []*enode.Node) error {
	for _, n := range nodes {
		if err := n.ValidateComplete(); err != nil {
			return fmt.Errorf("bad bootstrap node %q: %v", n, err)
		}
	}
	tab.nursery = wrapNodes(nodes)
	return nil
}

// isInitDone returns whether the table's initial seeding procedure has completed.
func (tab *Table) isInitDone() bool {
	select {
	case <-tab.initDone:
		return true
	default:
		return false
	}
}

func (tab *Table) refresh() <-chan struct{} {
	done := make(chan struct{})
	select {
	case tab.refreshReq <- done:
	case <-tab.closeReq:
		close(done)
	}
	return done
}
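// Editorial sketch, not part of the original file: one plausible way to wire up
// a Table, assuming a transport implementation such as the nullTransport stub
// above. enode.OpenDB and log.Root come from the imported packages; the loop
// method it starts is defined below.
func newTableExample(t transport, bootnodes []*enode.Node) (*Table, error) {
	db, err := enode.OpenDB("") // empty path selects an in-memory node database
	if err != nil {
		return nil, err
	}
	tab, err := newTable(t, db, bootnodes, log.Root())
	if err != nil {
		return nil, err
	}
	go tab.loop() // schedules refresh, revalidation and live-node copying
	return tab, nil
}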
// loop schedules runs of doRefresh, doRevalidate and copyLiveNodes.
func (tab *Table) loop() {
	var (
		revalidate     = time.NewTimer(tab.nextRevalidateTime())
		refresh        = time.NewTicker(refreshInterval)
		copyNodes      = time.NewTicker(copyNodesInterval)
		refreshDone    = make(chan struct{})           // where doRefresh reports completion
		revalidateDone chan struct{}                   // where doRevalidate reports completion
		waiting        = []chan struct{}{tab.initDone} // holds waiting callers while doRefresh runs
	)
	defer refresh.Stop()
	defer revalidate.Stop()
	defer copyNodes.Stop()

	// Start initial refresh.
	go tab.doRefresh(refreshDone)

loop:
	for {
		select {
		case <-refresh.C:
			tab.seedRand()
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}
		case req := <-tab.refreshReq:
			waiting = append(waiting, req)
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}
		case <-refreshDone:
			for _, ch := range waiting {
				close(ch)
			}
			waiting, refreshDone = nil, nil
		case <-revalidate.C:
			revalidateDone = make(chan struct{})
			go tab.doRevalidate(revalidateDone)
		case <-revalidateDone:
			revalidate.Reset(tab.nextRevalidateTime())
			revalidateDone = nil
		case <-copyNodes.C:
			go tab.copyLiveNodes()
		case <-tab.closeReq:
			break loop
		}
	}

	if refreshDone != nil {
		<-refreshDone
	}
	for _, ch := range waiting {
		close(ch)
	}
	if revalidateDone != nil {
		<-revalidateDone
	}
	close(tab.closed)
}

// doRefresh performs a lookup for a random target to keep buckets full. seed nodes are
// inserted if the table is empty (initial bootstrap or discarded faulty peers).
func (tab *Table) doRefresh(done chan struct{}) {
	defer close(done)

	// Load nodes from the database and insert
	// them. This should yield a few previously seen nodes that are
	// (hopefully) still alive.
	tab.loadSeedNodes()

	// Run self lookup to discover new neighbor nodes.
	tab.net.lookupSelf()

	// The Kademlia paper specifies that the bucket refresh should
	// perform a lookup in the least recently used bucket. We cannot
	// adhere to this because the findnode target is a 512bit value
	// (not hash-sized) and it is not easily possible to generate a
	// sha3 preimage that falls into a chosen bucket.
	// We perform a few lookups with a random target instead.
	for i := 0; i < 3; i++ {
		tab.net.lookupRandom()
	}
}

func (tab *Table) loadSeedNodes() {
	seeds := wrapNodes(tab.db.QuerySeeds(seedCount, seedMaxAge))
	seeds = append(seeds, tab.nursery...)
	for i := range seeds {
		seed := seeds[i]
		age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.LastPongReceived(seed.ID(), seed.IP())) }}
		tab.log.Trace("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age)
		tab.addSeenNode(seed)
	}
}
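// Editorial sketch, not part of the original file: how a caller can force a
// refresh and block until loop (above) reports that doRefresh has finished.
// The channel returned by refresh is also closed when the table shuts down.
func refreshAndWaitExample(tab *Table) {
	<-tab.refresh()
}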
// doRevalidate checks that the last node in a random bucket is still live and replaces or
// deletes the node if it isn't.
func (tab *Table) doRevalidate(done chan<- struct{}) {
	defer func() { done <- struct{}{} }()

	last, bi := tab.nodeToRevalidate()
	if last == nil {
		// No non-empty bucket found.
		return
	}

	// Ping the selected node and wait for a pong.
	remoteSeq, err := tab.net.ping(unwrapNode(last))

	// Also fetch record if the node replied and returned a higher sequence number.
	if last.Seq() < remoteSeq {
		n, err := tab.net.RequestENR(unwrapNode(last))
		if err != nil {
			tab.log.Debug("ENR request failed", "id", last.ID(), "addr", last.addr(), "err", err)
		} else {
			last = &node{Node: *n, addedAt: last.addedAt, livenessChecks: last.livenessChecks}
		}
	}

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.buckets[bi]
	if err == nil {
		// The node responded, move it to the front.
		last.livenessChecks++
		tab.log.Debug("Revalidated node", "b", bi, "id", last.ID(), "checks", last.livenessChecks)
		tab.bumpInBucket(b, last)
		return
	}
	// No reply received, pick a replacement or delete the node if there aren't
	// any replacements.
	if r := tab.replace(b, last); r != nil {
		tab.log.Debug("Replaced dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks, "r", r.ID(), "rip", r.IP())
	} else {
		tab.log.Debug("Removed dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks)
	}
}

// nodeToRevalidate returns the last node in a random, non-empty bucket.
func (tab *Table) nodeToRevalidate() (n *node, bi int) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	for _, bi = range tab.rand.Perm(len(tab.buckets)) {
		b := tab.buckets[bi]
		if len(b.entries) > 0 {
			last := b.entries[len(b.entries)-1]
			return last, bi
		}
	}
	return nil, 0
}

func (tab *Table) nextRevalidateTime() time.Duration {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	return time.Duration(tab.rand.Int63n(int64(revalidateInterval)))
}

// copyLiveNodes adds nodes from the table to the database if they have been in the table
// longer than seedMinTableTime.
func (tab *Table) copyLiveNodes() {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	now := time.Now()
	for _, b := range &tab.buckets {
		for _, n := range b.entries {
			if n.livenessChecks > 0 && now.Sub(n.addedAt) >= seedMinTableTime {
				tab.db.UpdateNode(unwrapNode(n))
			}
		}
	}
}

// closest returns the n nodes in the table that are closest to the
// given id. The caller must hold tab.mutex.
func (tab *Table) closest(target enode.ID, nresults int, checklive bool) *nodesByDistance {
	// This is a very wasteful way to find the closest nodes but
	// obviously correct. I believe that tree-based buckets would make
	// this easier to implement efficiently.
	close := &nodesByDistance{target: target}
	for _, b := range &tab.buckets {
		for _, n := range b.entries {
			if checklive && n.livenessChecks == 0 {
				continue
			}
			close.push(n, nresults)
		}
	}
	return close
}

// len returns the number of nodes in the table.
func (tab *Table) len() (n int) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	for _, b := range &tab.buckets {
		n += len(b.entries)
	}
	return n
}

// bucket returns the bucket for the given node ID hash.
func (tab *Table) bucket(id enode.ID) *bucket {
	d := enode.LogDist(tab.self().ID(), id)
	if d <= bucketMinDistance {
		return tab.buckets[0]
	}
	return tab.buckets[d-bucketMinDistance-1]
}
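// Editorial sketch, not part of the original file: the distance-to-bucket
// mapping implemented by Table.bucket above, written out as plain arithmetic.
// With 256-bit IDs, nBuckets is 17 and bucketMinDistance is 239, so log
// distances 0..239 share bucket 0 and distances 240..256 map to buckets 0..16.
func bucketIndexExample(logdist int) int {
	if logdist <= bucketMinDistance {
		return 0
	}
	return logdist - bucketMinDistance - 1
}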
// addSeenNode adds a node which may or may not be live to the end of a bucket. If the
// bucket has space available, adding the node succeeds immediately. Otherwise, the node is
// added to the replacements list.
//
// The caller must not hold tab.mutex.
func (tab *Table) addSeenNode(n *node) {
	if n.ID() == tab.self().ID() {
		return
	}

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.bucket(n.ID())
	if contains(b.entries, n.ID()) {
		// Already in bucket, don't add.
		return
	}
	if len(b.entries) >= bucketSize {
		// Bucket full, maybe add as replacement.
		tab.addReplacement(b, n)
		return
	}
	if !tab.addIP(b, n.IP()) {
		// Can't add: IP limit reached.
		return
	}
	// Add to end of bucket:
	b.entries = append(b.entries, n)
	b.replacements = deleteNode(b.replacements, n)
	n.addedAt = time.Now()
	if tab.nodeAddedHook != nil {
		tab.nodeAddedHook(n)
	}
}

// addVerifiedNode adds a node whose existence has been verified recently to the front of a
// bucket. If the node is already in the bucket, it is moved to the front. If the bucket
// has no space, the node is added to the replacements list.
//
// There is an additional safety measure: if the table is still initializing the node
// is not added. This prevents an attack where the table could be filled by just sending
// ping repeatedly.
//
// The caller must not hold tab.mutex.
func (tab *Table) addVerifiedNode(n *node) {
	if !tab.isInitDone() {
		return
	}
	if n.ID() == tab.self().ID() {
		return
	}

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.bucket(n.ID())
	if tab.bumpInBucket(b, n) {
		// Already in bucket, moved to front.
		return
	}
	if len(b.entries) >= bucketSize {
		// Bucket full, maybe add as replacement.
		tab.addReplacement(b, n)
		return
	}
	if !tab.addIP(b, n.IP()) {
		// Can't add: IP limit reached.
		return
	}
	// Add to front of bucket.
	b.entries, _ = pushNode(b.entries, n, bucketSize)
	b.replacements = deleteNode(b.replacements, n)
	n.addedAt = time.Now()
	if tab.nodeAddedHook != nil {
		tab.nodeAddedHook(n)
	}
}

// delete removes an entry from the node table. It is used to evacuate dead nodes.
func (tab *Table) delete(node *node) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	tab.deleteInBucket(tab.bucket(node.ID()), node)
}

func (tab *Table) addIP(b *bucket, ip net.IP) bool {
	if netutil.IsLAN(ip) {
		return true
	}
	if !tab.ips.Add(ip) {
		tab.log.Debug("IP exceeds table limit", "ip", ip)
		return false
	}
	if !b.ips.Add(ip) {
		tab.log.Debug("IP exceeds bucket limit", "ip", ip)
		tab.ips.Remove(ip)
		return false
	}
	return true
}

func (tab *Table) removeIP(b *bucket, ip net.IP) {
	if netutil.IsLAN(ip) {
		return
	}
	tab.ips.Remove(ip)
	b.ips.Remove(ip)
}

func (tab *Table) addReplacement(b *bucket, n *node) {
	for _, e := range b.replacements {
		if e.ID() == n.ID() {
			return // already in list
		}
	}
	if !tab.addIP(b, n.IP()) {
		return
	}
	var removed *node
	b.replacements, removed = pushNode(b.replacements, n, maxReplacements)
	if removed != nil {
		tab.removeIP(b, removed.IP())
	}
}
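// Editorial sketch, not part of the original file: observing additions through
// nodeAddedHook, which addSeenNode and addVerifiedNode above invoke. Note that
// the hook runs with tab.mutex held, so it must not call back into the table.
func observeAddsExample(tab *Table, onAdd func(enode.ID)) {
	tab.nodeAddedHook = func(n *node) { onAdd(n.ID()) }
}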
// replace removes n from the replacement list and replaces 'last' with it if it is the
// last entry in the bucket. If 'last' isn't the last entry, it has either been replaced
// with someone else or became active.
func (tab *Table) replace(b *bucket, last *node) *node {
	if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID() != last.ID() {
		// Entry has moved, don't replace it.
		return nil
	}
	// Still the last entry.
	if len(b.replacements) == 0 {
		tab.deleteInBucket(b, last)
		return nil
	}
	r := b.replacements[tab.rand.Intn(len(b.replacements))]
	b.replacements = deleteNode(b.replacements, r)
	b.entries[len(b.entries)-1] = r
	tab.removeIP(b, last.IP())
	return r
}

// bumpInBucket moves the given node to the front of the bucket entry list
// if it is contained in that list.
func (tab *Table) bumpInBucket(b *bucket, n *node) bool {
	for i := range b.entries {
		if b.entries[i].ID() == n.ID() {
			if !n.IP().Equal(b.entries[i].IP()) {
				// Endpoint has changed, ensure that the new IP fits into table limits.
				tab.removeIP(b, b.entries[i].IP())
				if !tab.addIP(b, n.IP()) {
					// It doesn't, put the previous one back.
					tab.addIP(b, b.entries[i].IP())
					return false
				}
			}
			// Move it to the front.
			copy(b.entries[1:], b.entries[:i])
			b.entries[0] = n
			return true
		}
	}
	return false
}

func (tab *Table) deleteInBucket(b *bucket, n *node) {
	b.entries = deleteNode(b.entries, n)
	tab.removeIP(b, n.IP())
}

func contains(ns []*node, id enode.ID) bool {
	for _, n := range ns {
		if n.ID() == id {
			return true
		}
	}
	return false
}

// pushNode adds n to the front of list, keeping at most max items.
func pushNode(list []*node, n *node, max int) ([]*node, *node) {
	if len(list) < max {
		list = append(list, nil)
	}
	removed := list[len(list)-1]
	copy(list[1:], list)
	list[0] = n
	return list, removed
}

// deleteNode removes n from list.
func deleteNode(list []*node, n *node) []*node {
	for i := range list {
		if list[i].ID() == n.ID() {
			return append(list[:i], list[i+1:]...)
		}
	}
	return list
}

// nodesByDistance is a list of nodes, ordered by distance to target.
type nodesByDistance struct {
	entries []*node
	target  enode.ID
}

// push adds the given node to the list, keeping the total size below maxElems.
func (h *nodesByDistance) push(n *node, maxElems int) {
	ix := sort.Search(len(h.entries), func(i int) bool {
		return enode.DistCmp(h.target, h.entries[i].ID(), n.ID()) > 0
	})
	if len(h.entries) < maxElems {
		h.entries = append(h.entries, n)
	}
	if ix == len(h.entries) {
		// farther away than all nodes we already have.
		// if there was room for it, the node is now the last element.
	} else {
		// slide existing entries down to make room
		// this will overwrite the entry we just appended.
		copy(h.entries[ix+1:], h.entries[ix:])
		h.entries[ix] = n
	}
}
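// Editorial sketch, not part of the original file: gathering the nresults
// nodes nearest to a target with nodesByDistance, the same way Table.closest
// above feeds push.
func closestExample(candidates []*node, target enode.ID, nresults int) []*node {
	byDist := &nodesByDistance{target: target}
	for _, c := range candidates {
		byDist.push(c, nresults)
	}
	return byDist.entries
}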