github.com/luckypickle/go-ethereum-vet@v1.14.2/p2p/discover/table.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package discover implements the Node Discovery Protocol.
    18  //
    19  // The Node Discovery protocol provides a way to find RLPx nodes that
    20  // can be connected to. It uses a Kademlia-like protocol to maintain a
    21  // distributed database of the IDs and endpoints of all listening
    22  // nodes.
    23  package discover
    24  
    25  import (
    26  	crand "crypto/rand"
    27  	"encoding/binary"
    28  	"fmt"
    29  	mrand "math/rand"
    30  	"net"
    31  	"sort"
    32  	"sync"
    33  	"time"
    34  
    35  	"github.com/luckypickle/go-ethereum-vet/common"
    36  	"github.com/luckypickle/go-ethereum-vet/crypto"
    37  	"github.com/luckypickle/go-ethereum-vet/log"
    38  	"github.com/luckypickle/go-ethereum-vet/p2p/netutil"
    39  )
    40  
const (
	alpha           = 3  // Kademlia concurrency factor: findnode queries in flight per lookup
	bucketSize      = 16 // Kademlia bucket size (max live entries per bucket)
	maxReplacements = 10 // Size of per-bucket replacement list

	// We keep buckets for the upper 1/15 of distances because
	// it's very unlikely we'll ever encounter a node that's closer.
	hashBits          = len(common.Hash{}) * 8
	nBuckets          = hashBits / 15       // Number of buckets
	bucketMinDistance = hashBits - nBuckets // Log distance of closest bucket

	// IP address limits. These bound how many nodes from the same
	// subnet may occupy a bucket / the whole table.
	bucketIPLimit, bucketSubnet = 2, 24 // at most 2 addresses from the same /24
	tableIPLimit, tableSubnet   = 10, 24

	maxFindnodeFailures = 5 // Nodes exceeding this limit are dropped
	refreshInterval     = 30 * time.Minute
	revalidateInterval  = 10 * time.Second // upper bound; actual interval is randomized below this
	copyNodesInterval   = 30 * time.Second
	seedMinTableTime    = 5 * time.Minute // minimum table residency before a node is persisted as a seed
	seedCount           = 30
	seedMaxAge          = 5 * 24 * time.Hour
)
    64  
// Table is the Kademlia-like index of known nodes, organized into
// distance buckets around the local node's ID hash. All bucket state
// is guarded by mutex; the background loop() goroutine coordinates
// refresh, revalidation and shutdown.
type Table struct {
	mutex   sync.Mutex        // protects buckets, bucket content, nursery, rand
	buckets [nBuckets]*bucket // index of known nodes by distance
	nursery []*Node           // bootstrap nodes
	rand    *mrand.Rand       // source of randomness, periodically reseeded
	ips     netutil.DistinctNetSet // table-wide per-subnet IP counters (tableIPLimit)

	db         *nodeDB            // database of known nodes
	refreshReq chan chan struct{} // refresh requests; the inner channel is closed when the refresh is done
	initDone   chan struct{}      // closed once the initial refresh has completed
	closeReq   chan struct{}      // signals loop() to shut down
	closed     chan struct{}      // closed by loop() when shutdown has finished

	nodeAddedHook func(*Node) // for testing

	net  transport
	self *Node // metadata of the local node
}
    83  
// transport is implemented by the UDP transport.
// it is an interface so we can test without opening lots of UDP
// sockets and without generating a private key.
type transport interface {
	// ping sends a ping packet and waits for the pong.
	ping(NodeID, *net.UDPAddr) error
	// findnode queries toid for nodes close to target.
	findnode(toid NodeID, addr *net.UDPAddr, target NodeID) ([]*Node, error)
	// close shuts the transport down.
	close()
}
    92  
// bucket contains nodes, ordered by their last activity. the entry
// that was most recently active is the first element in entries.
type bucket struct {
	entries      []*Node // live entries, sorted by time of last contact
	replacements []*Node // recently seen nodes to be used if revalidation fails
	ips          netutil.DistinctNetSet // per-subnet IP counters for this bucket (bucketIPLimit)
}
   100  
// newTable creates the node table, seeds it from the database and the
// given bootnodes, and starts the background maintenance loop.
// nodeDBPath may be empty, in which case an in-memory database is used.
func newTable(t transport, ourID NodeID, ourAddr *net.UDPAddr, nodeDBPath string, bootnodes []*Node) (*Table, error) {
	// If no node database was given, use an in-memory one
	db, err := newNodeDB(nodeDBPath, nodeDBVersion, ourID)
	if err != nil {
		return nil, err
	}
	tab := &Table{
		net:        t,
		db:         db,
		self:       NewNode(ourID, ourAddr.IP, uint16(ourAddr.Port), uint16(ourAddr.Port)),
		refreshReq: make(chan chan struct{}),
		initDone:   make(chan struct{}),
		closeReq:   make(chan struct{}),
		closed:     make(chan struct{}),
		rand:       mrand.New(mrand.NewSource(0)), // reseeded from crypto/rand by seedRand below
		ips:        netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit},
	}
	if err := tab.setFallbackNodes(bootnodes); err != nil {
		return nil, err
	}
	// Buckets must exist before loadSeedNodes can insert anything.
	for i := range tab.buckets {
		tab.buckets[i] = &bucket{
			ips: netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit},
		}
	}
	tab.seedRand()
	tab.loadSeedNodes()
	// Start the background expiration goroutine after loading seeds so that the search for
	// seed nodes also considers older nodes that would otherwise be removed by the
	// expiration.
	tab.db.ensureExpirer()
	go tab.loop()
	return tab, nil
}
   135  
   136  func (tab *Table) seedRand() {
   137  	var b [8]byte
   138  	crand.Read(b[:])
   139  
   140  	tab.mutex.Lock()
   141  	tab.rand.Seed(int64(binary.BigEndian.Uint64(b[:])))
   142  	tab.mutex.Unlock()
   143  }
   144  
// Self returns the local node.
// The returned node should not be modified by the caller.
func (tab *Table) Self() *Node {
	return tab.self
}
   150  
   151  // ReadRandomNodes fills the given slice with random nodes from the
   152  // table. It will not write the same node more than once. The nodes in
   153  // the slice are copies and can be modified by the caller.
   154  func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
   155  	if !tab.isInitDone() {
   156  		return 0
   157  	}
   158  	tab.mutex.Lock()
   159  	defer tab.mutex.Unlock()
   160  
   161  	// Find all non-empty buckets and get a fresh slice of their entries.
   162  	var buckets [][]*Node
   163  	for _, b := range &tab.buckets {
   164  		if len(b.entries) > 0 {
   165  			buckets = append(buckets, b.entries[:])
   166  		}
   167  	}
   168  	if len(buckets) == 0 {
   169  		return 0
   170  	}
   171  	// Shuffle the buckets.
   172  	for i := len(buckets) - 1; i > 0; i-- {
   173  		j := tab.rand.Intn(len(buckets))
   174  		buckets[i], buckets[j] = buckets[j], buckets[i]
   175  	}
   176  	// Move head of each bucket into buf, removing buckets that become empty.
   177  	var i, j int
   178  	for ; i < len(buf); i, j = i+1, (j+1)%len(buckets) {
   179  		b := buckets[j]
   180  		buf[i] = &(*b[0])
   181  		buckets[j] = b[1:]
   182  		if len(b) == 1 {
   183  			buckets = append(buckets[:j], buckets[j+1:]...)
   184  		}
   185  		if len(buckets) == 0 {
   186  			break
   187  		}
   188  	}
   189  	return i + 1
   190  }
   191  
// Close terminates the network listener and flushes the node database.
// It blocks until the background loop has fully shut down and is safe
// to call multiple times.
func (tab *Table) Close() {
	select {
	case <-tab.closed:
		// already closed.
	case tab.closeReq <- struct{}{}:
		<-tab.closed // wait for refreshLoop to end.
	}
}
   201  
   202  // setFallbackNodes sets the initial points of contact. These nodes
   203  // are used to connect to the network if the table is empty and there
   204  // are no known nodes in the database.
   205  func (tab *Table) setFallbackNodes(nodes []*Node) error {
   206  	for _, n := range nodes {
   207  		if err := n.validateComplete(); err != nil {
   208  			return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err)
   209  		}
   210  	}
   211  	tab.nursery = make([]*Node, 0, len(nodes))
   212  	for _, n := range nodes {
   213  		cpy := *n
   214  		// Recompute cpy.sha because the node might not have been
   215  		// created by NewNode or ParseNode.
   216  		cpy.sha = crypto.Keccak256Hash(n.ID[:])
   217  		tab.nursery = append(tab.nursery, &cpy)
   218  	}
   219  	return nil
   220  }
   221  
   222  // isInitDone returns whether the table's initial seeding procedure has completed.
   223  func (tab *Table) isInitDone() bool {
   224  	select {
   225  	case <-tab.initDone:
   226  		return true
   227  	default:
   228  		return false
   229  	}
   230  }
   231  
   232  // Resolve searches for a specific node with the given ID.
   233  // It returns nil if the node could not be found.
   234  func (tab *Table) Resolve(targetID NodeID) *Node {
   235  	// If the node is present in the local table, no
   236  	// network interaction is required.
   237  	hash := crypto.Keccak256Hash(targetID[:])
   238  	tab.mutex.Lock()
   239  	cl := tab.closest(hash, 1)
   240  	tab.mutex.Unlock()
   241  	if len(cl.entries) > 0 && cl.entries[0].ID == targetID {
   242  		return cl.entries[0]
   243  	}
   244  	// Otherwise, do a network lookup.
   245  	result := tab.Lookup(targetID)
   246  	for _, n := range result {
   247  		if n.ID == targetID {
   248  			return n
   249  		}
   250  	}
   251  	return nil
   252  }
   253  
// Lookup performs a network search for nodes close
// to the given target. It approaches the target by querying
// nodes that are closer to it on each iteration.
// The given target does not need to be an actual node
// identifier.
func (tab *Table) Lookup(targetID NodeID) []*Node {
	// refreshIfEmpty=true: an empty table triggers (and waits for)
	// a refresh before the lookup gives up.
	return tab.lookup(targetID, true)
}
   262  
// lookup is the iterative Kademlia search backing Lookup. It repeatedly
// queries the alpha closest unasked nodes until no closer nodes are
// found. When refreshIfEmpty is true and the initial candidate set is
// empty, it triggers a table refresh and waits for it before retrying.
func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node {
	var (
		target         = crypto.Keccak256Hash(targetID[:])
		asked          = make(map[NodeID]bool) // nodes we have already sent findnode to
		seen           = make(map[NodeID]bool) // nodes already added to the result set
		reply          = make(chan []*Node, alpha)
		pendingQueries = 0 // findnode calls currently in flight
		result         *nodesByDistance
	)
	// don't query further if we hit ourself.
	// unlikely to happen often in practice.
	asked[tab.self.ID] = true

	for {
		tab.mutex.Lock()
		// generate initial result set
		result = tab.closest(target, bucketSize)
		tab.mutex.Unlock()
		if len(result.entries) > 0 || !refreshIfEmpty {
			break
		}
		// The result set is empty, all nodes were dropped, refresh.
		// We actually wait for the refresh to complete here. The very
		// first query will hit this case and run the bootstrapping
		// logic.
		<-tab.refresh()
		refreshIfEmpty = false
	}

	for {
		// ask the alpha closest nodes that we haven't asked yet
		for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
			n := result.entries[i]
			if !asked[n.ID] {
				asked[n.ID] = true
				pendingQueries++
				go tab.findnode(n, targetID, reply)
			}
		}
		if pendingQueries == 0 {
			// we have asked all closest nodes, stop the search
			break
		}
		// wait for the next reply and merge unseen nodes into the
		// (distance-sorted, capped) result set.
		for _, n := range <-reply {
			if n != nil && !seen[n.ID] {
				seen[n.ID] = true
				result.push(n, bucketSize)
			}
		}
		pendingQueries--
	}
	return result.entries
}
   317  
   318  func (tab *Table) findnode(n *Node, targetID NodeID, reply chan<- []*Node) {
   319  	fails := tab.db.findFails(n.ID)
   320  	r, err := tab.net.findnode(n.ID, n.addr(), targetID)
   321  	if err != nil || len(r) == 0 {
   322  		fails++
   323  		tab.db.updateFindFails(n.ID, fails)
   324  		log.Trace("Findnode failed", "id", n.ID, "failcount", fails, "err", err)
   325  		if fails >= maxFindnodeFailures {
   326  			log.Trace("Too many findnode failures, dropping", "id", n.ID, "failcount", fails)
   327  			tab.delete(n)
   328  		}
   329  	} else if fails > 0 {
   330  		tab.db.updateFindFails(n.ID, fails-1)
   331  	}
   332  
   333  	// Grab as many nodes as possible. Some of them might not be alive anymore, but we'll
   334  	// just remove those again during revalidation.
   335  	for _, n := range r {
   336  		tab.add(n)
   337  	}
   338  	reply <- r
   339  }
   340  
   341  func (tab *Table) refresh() <-chan struct{} {
   342  	done := make(chan struct{})
   343  	select {
   344  	case tab.refreshReq <- done:
   345  	case <-tab.closed:
   346  		close(done)
   347  	}
   348  	return done
   349  }
   350  
// loop schedules refresh, revalidate runs and coordinates shutdown.
// It is the only goroutine that starts doRefresh/doRevalidate runs,
// which keeps at most one refresh in flight at a time.
func (tab *Table) loop() {
	var (
		revalidate     = time.NewTimer(tab.nextRevalidateTime())
		refresh        = time.NewTicker(refreshInterval)
		copyNodes      = time.NewTicker(copyNodesInterval)
		revalidateDone = make(chan struct{})
		refreshDone    = make(chan struct{})           // where doRefresh reports completion
		waiting        = []chan struct{}{tab.initDone} // holds waiting callers while doRefresh runs
	)
	defer refresh.Stop()
	defer revalidate.Stop()
	defer copyNodes.Stop()

	// Start initial refresh.
	go tab.doRefresh(refreshDone)

loop:
	for {
		select {
		case <-refresh.C:
			tab.seedRand()
			// refreshDone == nil means no refresh is running; start one.
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}
		case req := <-tab.refreshReq:
			// Remember the caller so it can be notified when the
			// (possibly already running) refresh completes.
			waiting = append(waiting, req)
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}
		case <-refreshDone:
			// Notify all waiters; on the first run this also closes
			// tab.initDone, unblocking isInitDone.
			for _, ch := range waiting {
				close(ch)
			}
			waiting, refreshDone = nil, nil
		case <-revalidate.C:
			go tab.doRevalidate(revalidateDone)
		case <-revalidateDone:
			// Schedule the next revalidation at a fresh random interval.
			revalidate.Reset(tab.nextRevalidateTime())
		case <-copyNodes.C:
			go tab.copyLiveNodes()
		case <-tab.closeReq:
			break loop
		}
	}

	// Shutdown: close the transport, wait for any in-flight refresh,
	// release waiters, flush the database, then signal Close().
	if tab.net != nil {
		tab.net.close()
	}
	if refreshDone != nil {
		<-refreshDone
	}
	for _, ch := range waiting {
		close(ch)
	}
	tab.db.close()
	close(tab.closed)
}
   411  
   412  // doRefresh performs a lookup for a random target to keep buckets
   413  // full. seed nodes are inserted if the table is empty (initial
   414  // bootstrap or discarded faulty peers).
   415  func (tab *Table) doRefresh(done chan struct{}) {
   416  	defer close(done)
   417  
   418  	// Load nodes from the database and insert
   419  	// them. This should yield a few previously seen nodes that are
   420  	// (hopefully) still alive.
   421  	tab.loadSeedNodes()
   422  
   423  	// Run self lookup to discover new neighbor nodes.
   424  	tab.lookup(tab.self.ID, false)
   425  
   426  	// The Kademlia paper specifies that the bucket refresh should
   427  	// perform a lookup in the least recently used bucket. We cannot
   428  	// adhere to this because the findnode target is a 512bit value
   429  	// (not hash-sized) and it is not easily possible to generate a
   430  	// sha3 preimage that falls into a chosen bucket.
   431  	// We perform a few lookups with a random target instead.
   432  	for i := 0; i < 3; i++ {
   433  		var target NodeID
   434  		crand.Read(target[:])
   435  		tab.lookup(target, false)
   436  	}
   437  }
   438  
   439  func (tab *Table) loadSeedNodes() {
   440  	seeds := tab.db.querySeeds(seedCount, seedMaxAge)
   441  	seeds = append(seeds, tab.nursery...)
   442  	for i := range seeds {
   443  		seed := seeds[i]
   444  		age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.lastPongReceived(seed.ID)) }}
   445  		log.Debug("Found seed node in database", "id", seed.ID, "addr", seed.addr(), "age", age)
   446  		tab.add(seed)
   447  	}
   448  }
   449  
// doRevalidate checks that the last node in a random bucket is still live
// and replaces or deletes the node if it isn't. Completion is signaled
// on done so loop() can schedule the next run.
func (tab *Table) doRevalidate(done chan<- struct{}) {
	defer func() { done <- struct{}{} }()

	last, bi := tab.nodeToRevalidate()
	if last == nil {
		// No non-empty bucket found.
		return
	}

	// Ping the selected node and wait for a pong.
	// Note: the table lock is deliberately not held during the ping.
	err := tab.net.ping(last.ID, last.addr())

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.buckets[bi]
	if err == nil {
		// The node responded, move it to the front.
		log.Trace("Revalidated node", "b", bi, "id", last.ID)
		b.bump(last)
		return
	}
	// No reply received, pick a replacement or delete the node if there aren't
	// any replacements.
	if r := tab.replace(b, last); r != nil {
		log.Trace("Replaced dead node", "b", bi, "id", last.ID, "ip", last.IP, "r", r.ID, "rip", r.IP)
	} else {
		log.Trace("Removed dead node", "b", bi, "id", last.ID, "ip", last.IP)
	}
}
   481  
   482  // nodeToRevalidate returns the last node in a random, non-empty bucket.
   483  func (tab *Table) nodeToRevalidate() (n *Node, bi int) {
   484  	tab.mutex.Lock()
   485  	defer tab.mutex.Unlock()
   486  
   487  	for _, bi = range tab.rand.Perm(len(tab.buckets)) {
   488  		b := tab.buckets[bi]
   489  		if len(b.entries) > 0 {
   490  			last := b.entries[len(b.entries)-1]
   491  			return last, bi
   492  		}
   493  	}
   494  	return nil, 0
   495  }
   496  
// nextRevalidateTime returns a random delay in [0, revalidateInterval)
// until the next revalidation run. Randomizing the interval makes the
// revalidation schedule unpredictable.
func (tab *Table) nextRevalidateTime() time.Duration {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	return time.Duration(tab.rand.Int63n(int64(revalidateInterval)))
}
   503  
   504  // copyLiveNodes adds nodes from the table to the database if they have been in the table
   505  // longer then minTableTime.
   506  func (tab *Table) copyLiveNodes() {
   507  	tab.mutex.Lock()
   508  	defer tab.mutex.Unlock()
   509  
   510  	now := time.Now()
   511  	for _, b := range &tab.buckets {
   512  		for _, n := range b.entries {
   513  			if now.Sub(n.addedAt) >= seedMinTableTime {
   514  				tab.db.updateNode(n)
   515  			}
   516  		}
   517  	}
   518  }
   519  
   520  // closest returns the n nodes in the table that are closest to the
   521  // given id. The caller must hold tab.mutex.
   522  func (tab *Table) closest(target common.Hash, nresults int) *nodesByDistance {
   523  	// This is a very wasteful way to find the closest nodes but
   524  	// obviously correct. I believe that tree-based buckets would make
   525  	// this easier to implement efficiently.
   526  	close := &nodesByDistance{target: target}
   527  	for _, b := range &tab.buckets {
   528  		for _, n := range b.entries {
   529  			close.push(n, nresults)
   530  		}
   531  	}
   532  	return close
   533  }
   534  
   535  func (tab *Table) len() (n int) {
   536  	for _, b := range &tab.buckets {
   537  		n += len(b.entries)
   538  	}
   539  	return n
   540  }
   541  
// bucket returns the bucket for the given node ID hash.
func (tab *Table) bucket(sha common.Hash) *bucket {
	d := logdist(tab.self.sha, sha)
	// All distances up to bucketMinDistance share the first bucket,
	// since nodes that close are very unlikely (see nBuckets comment).
	if d <= bucketMinDistance {
		return tab.buckets[0]
	}
	return tab.buckets[d-bucketMinDistance-1]
}
   550  
   551  // add attempts to add the given node to its corresponding bucket. If the bucket has space
   552  // available, adding the node succeeds immediately. Otherwise, the node is added if the
   553  // least recently active node in the bucket does not respond to a ping packet.
   554  //
   555  // The caller must not hold tab.mutex.
   556  func (tab *Table) add(n *Node) {
   557  	tab.mutex.Lock()
   558  	defer tab.mutex.Unlock()
   559  
   560  	b := tab.bucket(n.sha)
   561  	if !tab.bumpOrAdd(b, n) {
   562  		// Node is not in table. Add it to the replacement list.
   563  		tab.addReplacement(b, n)
   564  	}
   565  }
   566  
   567  // addThroughPing adds the given node to the table. Compared to plain
   568  // 'add' there is an additional safety measure: if the table is still
   569  // initializing the node is not added. This prevents an attack where the
   570  // table could be filled by just sending ping repeatedly.
   571  //
   572  // The caller must not hold tab.mutex.
   573  func (tab *Table) addThroughPing(n *Node) {
   574  	if !tab.isInitDone() {
   575  		return
   576  	}
   577  	tab.add(n)
   578  }
   579  
   580  // stuff adds nodes the table to the end of their corresponding bucket
   581  // if the bucket is not full. The caller must not hold tab.mutex.
   582  func (tab *Table) stuff(nodes []*Node) {
   583  	tab.mutex.Lock()
   584  	defer tab.mutex.Unlock()
   585  
   586  	for _, n := range nodes {
   587  		if n.ID == tab.self.ID {
   588  			continue // don't add self
   589  		}
   590  		b := tab.bucket(n.sha)
   591  		if len(b.entries) < bucketSize {
   592  			tab.bumpOrAdd(b, n)
   593  		}
   594  	}
   595  }
   596  
   597  // delete removes an entry from the node table. It is used to evacuate dead nodes.
   598  func (tab *Table) delete(node *Node) {
   599  	tab.mutex.Lock()
   600  	defer tab.mutex.Unlock()
   601  
   602  	tab.deleteInBucket(tab.bucket(node.sha), node)
   603  }
   604  
   605  func (tab *Table) addIP(b *bucket, ip net.IP) bool {
   606  	if netutil.IsLAN(ip) {
   607  		return true
   608  	}
   609  	if !tab.ips.Add(ip) {
   610  		log.Debug("IP exceeds table limit", "ip", ip)
   611  		return false
   612  	}
   613  	if !b.ips.Add(ip) {
   614  		log.Debug("IP exceeds bucket limit", "ip", ip)
   615  		tab.ips.Remove(ip)
   616  		return false
   617  	}
   618  	return true
   619  }
   620  
// removeIP releases ip from both the table-wide and per-bucket subnet
// counters. LAN addresses were never counted (see addIP), so they are
// skipped here as well.
func (tab *Table) removeIP(b *bucket, ip net.IP) {
	if netutil.IsLAN(ip) {
		return
	}
	tab.ips.Remove(ip)
	b.ips.Remove(ip)
}
   628  
   629  func (tab *Table) addReplacement(b *bucket, n *Node) {
   630  	for _, e := range b.replacements {
   631  		if e.ID == n.ID {
   632  			return // already in list
   633  		}
   634  	}
   635  	if !tab.addIP(b, n.IP) {
   636  		return
   637  	}
   638  	var removed *Node
   639  	b.replacements, removed = pushNode(b.replacements, n, maxReplacements)
   640  	if removed != nil {
   641  		tab.removeIP(b, removed.IP)
   642  	}
   643  }
   644  
// replace removes n from the replacement list and replaces 'last' with it if it is the
// last entry in the bucket. If 'last' isn't the last entry, it has either been replaced
// with someone else or became active. Returns the chosen replacement, or
// nil when 'last' was simply deleted or left untouched.
func (tab *Table) replace(b *bucket, last *Node) *Node {
	if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID != last.ID {
		// Entry has moved, don't replace it.
		return nil
	}
	// Still the last entry.
	if len(b.replacements) == 0 {
		tab.deleteInBucket(b, last)
		return nil
	}
	// Pick a random replacement and swap it into last's slot.
	r := b.replacements[tab.rand.Intn(len(b.replacements))]
	b.replacements = deleteNode(b.replacements, r)
	b.entries[len(b.entries)-1] = r
	tab.removeIP(b, last.IP)
	return r
}
   664  
// bump moves the given node to the front of the bucket entry list
// if it is contained in that list. It returns whether the node was found.
func (b *bucket) bump(n *Node) bool {
	for i := range b.entries {
		if b.entries[i].ID == n.ID {
			// move it to the front: shift entries[0:i] right by one
			// (overwriting the old slot at i), then place n at the head.
			copy(b.entries[1:], b.entries[:i])
			b.entries[0] = n
			return true
		}
	}
	return false
}
   678  
   679  // bumpOrAdd moves n to the front of the bucket entry list or adds it if the list isn't
   680  // full. The return value is true if n is in the bucket.
   681  func (tab *Table) bumpOrAdd(b *bucket, n *Node) bool {
   682  	if b.bump(n) {
   683  		return true
   684  	}
   685  	if len(b.entries) >= bucketSize || !tab.addIP(b, n.IP) {
   686  		return false
   687  	}
   688  	b.entries, _ = pushNode(b.entries, n, bucketSize)
   689  	b.replacements = deleteNode(b.replacements, n)
   690  	n.addedAt = time.Now()
   691  	if tab.nodeAddedHook != nil {
   692  		tab.nodeAddedHook(n)
   693  	}
   694  	return true
   695  }
   696  
// deleteInBucket removes n from b's live entries and releases its IP
// from the subnet counters. The caller must hold tab.mutex.
func (tab *Table) deleteInBucket(b *bucket, n *Node) {
	b.entries = deleteNode(b.entries, n)
	tab.removeIP(b, n.IP)
}
   701  
// pushNode adds n to the front of list, keeping at most max items.
// It returns the updated list and the entry evicted to make room, or
// nil when the list still had spare capacity.
func pushNode(list []*Node, n *Node, max int) ([]*Node, *Node) {
	if len(list) < max {
		// Grow by one; the new trailing slot is nil, so 'removed'
		// below is nil in this case.
		list = append(list, nil)
	}
	removed := list[len(list)-1]
	// Shift everything right by one and put n at the head.
	copy(list[1:], list)
	list[0] = n
	return list, removed
}
   712  
   713  // deleteNode removes n from list.
   714  func deleteNode(list []*Node, n *Node) []*Node {
   715  	for i := range list {
   716  		if list[i].ID == n.ID {
   717  			return append(list[:i], list[i+1:]...)
   718  		}
   719  	}
   720  	return list
   721  }
   722  
// nodesByDistance is a list of nodes, ordered by
// distance to target (closest first, maintained by push).
type nodesByDistance struct {
	entries []*Node
	target  common.Hash
}
   729  
   730  // push adds the given node to the list, keeping the total size below maxElems.
   731  func (h *nodesByDistance) push(n *Node, maxElems int) {
   732  	ix := sort.Search(len(h.entries), func(i int) bool {
   733  		return distcmp(h.target, h.entries[i].sha, n.sha) > 0
   734  	})
   735  	if len(h.entries) < maxElems {
   736  		h.entries = append(h.entries, n)
   737  	}
   738  	if ix == len(h.entries) {
   739  		// farther away than all nodes we already have.
   740  		// if there was room for it, the node is now the last element.
   741  	} else {
   742  		// slide existing entries down to make room
   743  		// this will overwrite the entry we just appended.
   744  		copy(h.entries[ix+1:], h.entries[ix:])
   745  		h.entries[ix] = n
   746  	}
   747  }