github.com/oskarth/go-ethereum@v1.6.8-0.20191013093314-dac24a9d3494/p2p/discover/table.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package discover implements the Node Discovery Protocol.
//
// The Node Discovery protocol provides a way to find RLPx nodes that
// can be connected to. It uses a Kademlia-like protocol to maintain a
// distributed database of the IDs and endpoints of all listening
// nodes.
package discover

import (
	"crypto/ecdsa"
	crand "crypto/rand"
	"encoding/binary"
	"fmt"
	mrand "math/rand"
	"net"
	"sort"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/netutil"
)

const (
	alpha           = 3  // Kademlia concurrency factor
	bucketSize      = 16 // Kademlia bucket size
	maxReplacements = 10 // Size of per-bucket replacement list

	// We keep buckets for the upper 1/15 of distances because
	// it's very unlikely we'll ever encounter a node that's closer.
	hashBits          = len(common.Hash{}) * 8
	nBuckets          = hashBits / 15       // Number of buckets
	bucketMinDistance = hashBits - nBuckets // Log distance of closest bucket

	// IP address limits.
	bucketIPLimit, bucketSubnet = 2, 24 // at most 2 addresses from the same /24
	tableIPLimit, tableSubnet   = 10, 24

	maxFindnodeFailures = 5 // Nodes exceeding this limit are dropped
	refreshInterval     = 30 * time.Minute
	revalidateInterval  = 10 * time.Second
	copyNodesInterval   = 30 * time.Second
	seedMinTableTime    = 5 * time.Minute
	seedCount           = 30
	seedMaxAge          = 5 * 24 * time.Hour
)
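
// For concreteness: common.Hash is 32 bytes, so hashBits = 256, nBuckets =
// 256/15 = 17 and bucketMinDistance = 256 - 17 = 239. Table.bucket below
// therefore gives log distances 240..256 their own buckets (indices 0..16)
// and lumps every node at distance 239 or less into buckets[0].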

type Table struct {
	mutex   sync.Mutex        // protects buckets, bucket content, nursery, rand
	buckets [nBuckets]*bucket // index of known nodes by distance
	nursery []*node           // bootstrap nodes
	rand    *mrand.Rand       // source of randomness, periodically reseeded
	ips     netutil.DistinctNetSet

	db         *enode.DB // database of known nodes
	net        transport
	refreshReq chan chan struct{}
	initDone   chan struct{}
	closeReq   chan struct{}
	closed     chan struct{}

	nodeAddedHook func(*node) // for testing
}

// transport is implemented by the UDP transport.
// it is an interface so we can test without opening lots of UDP
// sockets and without generating a private key.
type transport interface {
	self() *enode.Node
	ping(enode.ID, *net.UDPAddr) error
	findnode(toid enode.ID, addr *net.UDPAddr, target encPubkey) ([]*node, error)
	close()
}
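
// nullTransport is a minimal sketch of a transport implementation of the kind a
// test might supply: it answers every ping and returns no neighbors, so a Table
// can be exercised without real UDP sockets. Illustrative only; this package's
// own code does not use it.
type nullTransport struct{ n *enode.Node }

func (t *nullTransport) self() *enode.Node                 { return t.n }
func (t *nullTransport) ping(enode.ID, *net.UDPAddr) error { return nil }
func (t *nullTransport) close()                            {}
func (t *nullTransport) findnode(toid enode.ID, addr *net.UDPAddr, target encPubkey) ([]*node, error) {
	return nil, nil
}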

// bucket contains nodes, ordered by their last activity. the entry
// that was most recently active is the first element in entries.
type bucket struct {
	entries      []*node // live entries, sorted by time of last contact
	replacements []*node // recently seen nodes to be used if revalidation fails
	ips          netutil.DistinctNetSet
}

func newTable(t transport, db *enode.DB, bootnodes []*enode.Node) (*Table, error) {
	tab := &Table{
		net:        t,
		db:         db,
		refreshReq: make(chan chan struct{}),
		initDone:   make(chan struct{}),
		closeReq:   make(chan struct{}),
		closed:     make(chan struct{}),
		rand:       mrand.New(mrand.NewSource(0)),
		ips:        netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit},
	}
	if err := tab.setFallbackNodes(bootnodes); err != nil {
		return nil, err
	}
	for i := range tab.buckets {
		tab.buckets[i] = &bucket{
			ips: netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit},
		}
	}
	tab.seedRand()
	tab.loadSeedNodes()

	go tab.loop()
	return tab, nil
}
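
// newTableExample sketches how a caller could assemble a Table. It assumes a
// ready transport t (in practice the UDP transport in this package, or a stub
// such as nullTransport above) and relies on enode.OpenDB, which returns an
// in-memory node database when given an empty path. Illustrative only.
func newTableExample(t transport, bootnodes []*enode.Node) (*Table, error) {
	db, err := enode.OpenDB("") // empty path: in-memory; a file path would persist the DB
	if err != nil {
		return nil, err
	}
	return newTable(t, db, bootnodes)
}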

func (tab *Table) self() *enode.Node {
	return tab.net.self()
}

func (tab *Table) seedRand() {
	var b [8]byte
	crand.Read(b[:])

	tab.mutex.Lock()
	tab.rand.Seed(int64(binary.BigEndian.Uint64(b[:])))
	tab.mutex.Unlock()
}

// ReadRandomNodes fills the given slice with random nodes from the table. The results
// are guaranteed to be unique for a single invocation: no node will appear twice.
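//
// A typical use (sketch; tab stands for any *Table) is to fill a fixed-size
// slice and then iterate only the prefix that was actually written:
//
//	buf := make([]*enode.Node, 16)
//	n := tab.ReadRandomNodes(buf)
//	for _, node := range buf[:n] {
//		// dial or otherwise inspect node
//	}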
func (tab *Table) ReadRandomNodes(buf []*enode.Node) (n int) {
	if !tab.isInitDone() {
		return 0
	}
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	// Find all non-empty buckets and get a fresh slice of their entries.
	var buckets [][]*node
	for _, b := range &tab.buckets {
		if len(b.entries) > 0 {
			buckets = append(buckets, b.entries)
		}
	}
	if len(buckets) == 0 {
		return 0
	}
	// Shuffle the buckets (Fisher-Yates).
	for i := len(buckets) - 1; i > 0; i-- {
		j := tab.rand.Intn(i + 1)
		buckets[i], buckets[j] = buckets[j], buckets[i]
	}
	// Move head of each bucket into buf, removing buckets that become empty.
	var i, j int
	for ; i < len(buf); i, j = i+1, (j+1)%len(buckets) {
		b := buckets[j]
		buf[i] = unwrapNode(b[0])
		buckets[j] = b[1:]
		if len(b) == 1 {
			buckets = append(buckets[:j], buckets[j+1:]...)
		}
		if len(buckets) == 0 {
			// Count the entry written in this iteration before stopping early.
			i++
			break
		}
	}
	return i
}

// Close terminates the network listener and flushes the node database.
func (tab *Table) Close() {
	if tab.net != nil {
		tab.net.close()
	}

	select {
	case <-tab.closed:
		// already closed.
	case tab.closeReq <- struct{}{}:
		<-tab.closed // wait for the table loop to end.
	}
}

// setFallbackNodes sets the initial points of contact. These nodes
// are used to connect to the network if the table is empty and there
// are no known nodes in the database.
func (tab *Table) setFallbackNodes(nodes []*enode.Node) error {
	for _, n := range nodes {
		if err := n.ValidateComplete(); err != nil {
			return fmt.Errorf("bad bootstrap node %q: %v", n, err)
		}
	}
	tab.nursery = wrapNodes(nodes)
	return nil
}

// isInitDone returns whether the table's initial seeding procedure has completed.
func (tab *Table) isInitDone() bool {
	select {
	case <-tab.initDone:
		return true
	default:
		return false
	}
}

// Resolve searches for a specific node with the given ID.
// It returns nil if the node could not be found.
func (tab *Table) Resolve(n *enode.Node) *enode.Node {
	// If the node is present in the local table, no
	// network interaction is required.
	hash := n.ID()
	tab.mutex.Lock()
	cl := tab.closest(hash, 1)
	tab.mutex.Unlock()
	if len(cl.entries) > 0 && cl.entries[0].ID() == hash {
		return unwrapNode(cl.entries[0])
	}
	// Otherwise, do a network lookup.
	result := tab.lookup(encodePubkey(n.Pubkey()), true)
	for _, n := range result {
		if n.ID() == hash {
			return unwrapNode(n)
		}
	}
	return nil
}

// LookupRandom finds random nodes in the network.
func (tab *Table) LookupRandom() []*enode.Node {
	var target encPubkey
	crand.Read(target[:])
	return unwrapNodes(tab.lookup(target, true))
}

// lookup performs a network search for nodes close to the given target. It approaches the
// target by querying nodes that are closer to it on each iteration. The given target does
// not need to be an actual node identifier.
func (tab *Table) lookup(targetKey encPubkey, refreshIfEmpty bool) []*node {
	var (
		target         = enode.ID(crypto.Keccak256Hash(targetKey[:]))
		asked          = make(map[enode.ID]bool)
		seen           = make(map[enode.ID]bool)
		reply          = make(chan []*node, alpha)
		pendingQueries = 0
		result         *nodesByDistance
	)
	// don't query further if we hit ourself.
	// unlikely to happen often in practice.
	asked[tab.self().ID()] = true

	for {
		tab.mutex.Lock()
		// generate initial result set
		result = tab.closest(target, bucketSize)
		tab.mutex.Unlock()
		if len(result.entries) > 0 || !refreshIfEmpty {
			break
		}
		// The result set is empty, all nodes were dropped, refresh.
		// We actually wait for the refresh to complete here. The very
		// first query will hit this case and run the bootstrapping
		// logic.
		<-tab.refresh()
		refreshIfEmpty = false
	}

	for {
		// ask the alpha closest nodes that we haven't asked yet
		for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
			n := result.entries[i]
			if !asked[n.ID()] {
				asked[n.ID()] = true
				pendingQueries++
				go tab.findnode(n, targetKey, reply)
			}
		}
		if pendingQueries == 0 {
			// we have asked all closest nodes, stop the search
			break
		}
		// wait for the next reply
		for _, n := range <-reply {
			if n != nil && !seen[n.ID()] {
				seen[n.ID()] = true
				result.push(n, bucketSize)
			}
		}
		pendingQueries--
	}
	return result.entries
}

func (tab *Table) findnode(n *node, targetKey encPubkey, reply chan<- []*node) {
	fails := tab.db.FindFails(n.ID())
	r, err := tab.net.findnode(n.ID(), n.addr(), targetKey)
	if err != nil || len(r) == 0 {
		fails++
		tab.db.UpdateFindFails(n.ID(), fails)
		log.Trace("Findnode failed", "id", n.ID(), "failcount", fails, "err", err)
		if fails >= maxFindnodeFailures {
			log.Trace("Too many findnode failures, dropping", "id", n.ID(), "failcount", fails)
			tab.delete(n)
		}
	} else if fails > 0 {
		tab.db.UpdateFindFails(n.ID(), fails-1)
	}

	// Grab as many nodes as possible. Some of them might not be alive anymore, but we'll
	// just remove those again during revalidation.
	for _, n := range r {
		tab.add(n)
	}
	reply <- r
}

func (tab *Table) refresh() <-chan struct{} {
	done := make(chan struct{})
	select {
	case tab.refreshReq <- done:
	case <-tab.closed:
		close(done)
	}
	return done
}

// loop schedules refresh and revalidate runs, and coordinates shutdown.
func (tab *Table) loop() {
	var (
		revalidate     = time.NewTimer(tab.nextRevalidateTime())
		refresh        = time.NewTicker(refreshInterval)
		copyNodes      = time.NewTicker(copyNodesInterval)
		refreshDone    = make(chan struct{})           // where doRefresh reports completion
		revalidateDone chan struct{}                   // where doRevalidate reports completion
		waiting        = []chan struct{}{tab.initDone} // holds waiting callers while doRefresh runs
	)
	defer refresh.Stop()
	defer revalidate.Stop()
	defer copyNodes.Stop()

	// Start initial refresh.
	go tab.doRefresh(refreshDone)

loop:
	for {
		select {
		case <-refresh.C:
			tab.seedRand()
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}
		case req := <-tab.refreshReq:
			waiting = append(waiting, req)
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}
		case <-refreshDone:
			for _, ch := range waiting {
				close(ch)
			}
			waiting, refreshDone = nil, nil
		case <-revalidate.C:
			revalidateDone = make(chan struct{})
			go tab.doRevalidate(revalidateDone)
		case <-revalidateDone:
			revalidate.Reset(tab.nextRevalidateTime())
			revalidateDone = nil
		case <-copyNodes.C:
			go tab.copyLiveNodes()
		case <-tab.closeReq:
			break loop
		}
	}

	if refreshDone != nil {
		<-refreshDone
	}
	for _, ch := range waiting {
		close(ch)
	}
	if revalidateDone != nil {
		<-revalidateDone
	}
	close(tab.closed)
}

// doRefresh performs a lookup for a random target to keep buckets
// full. Seed nodes are inserted if the table is empty (initial
// bootstrap or discarded faulty peers).
func (tab *Table) doRefresh(done chan struct{}) {
	defer close(done)

	// Load nodes from the database and insert
	// them. This should yield a few previously seen nodes that are
	// (hopefully) still alive.
	tab.loadSeedNodes()

	// Run self lookup to discover new neighbor nodes.
	// We can only do this if we have a secp256k1 identity.
	var key ecdsa.PublicKey
	if err := tab.self().Load((*enode.Secp256k1)(&key)); err == nil {
		tab.lookup(encodePubkey(&key), false)
	}

	// The Kademlia paper specifies that the bucket refresh should
	// perform a lookup in the least recently used bucket. We cannot
	// adhere to this because the findnode target is a 512-bit value
	// (not hash-sized) and it is not easily possible to generate a
	// sha3 preimage that falls into a chosen bucket.
	// We perform a few lookups with a random target instead.
	for i := 0; i < 3; i++ {
		var target encPubkey
		crand.Read(target[:])
		tab.lookup(target, false)
	}
}

func (tab *Table) loadSeedNodes() {
	seeds := wrapNodes(tab.db.QuerySeeds(seedCount, seedMaxAge))
	seeds = append(seeds, tab.nursery...)
	for i := range seeds {
		seed := seeds[i]
		age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.LastPongReceived(seed.ID())) }}
		log.Debug("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age)
		tab.add(seed)
	}
}

// doRevalidate checks that the last node in a random bucket is still live
// and replaces or deletes the node if it isn't.
func (tab *Table) doRevalidate(done chan<- struct{}) {
	defer func() { done <- struct{}{} }()

	last, bi := tab.nodeToRevalidate()
	if last == nil {
		// No non-empty bucket found.
		return
	}

	// Ping the selected node and wait for a pong.
	err := tab.net.ping(last.ID(), last.addr())

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.buckets[bi]
	if err == nil {
		// The node responded, move it to the front.
		log.Debug("Revalidated node", "b", bi, "id", last.ID())
		b.bump(last)
		return
	}
	// No reply received, pick a replacement or delete the node if there aren't
	// any replacements.
	if r := tab.replace(b, last); r != nil {
		log.Debug("Replaced dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "r", r.ID(), "rip", r.IP())
	} else {
		log.Debug("Removed dead node", "b", bi, "id", last.ID(), "ip", last.IP())
	}
}

// nodeToRevalidate returns the last node in a random, non-empty bucket.
func (tab *Table) nodeToRevalidate() (n *node, bi int) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	for _, bi = range tab.rand.Perm(len(tab.buckets)) {
		b := tab.buckets[bi]
		if len(b.entries) > 0 {
			last := b.entries[len(b.entries)-1]
			return last, bi
		}
	}
	return nil, 0
}

func (tab *Table) nextRevalidateTime() time.Duration {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	return time.Duration(tab.rand.Int63n(int64(revalidateInterval)))
}

// copyLiveNodes adds nodes from the table to the database if they have been in the table
// longer than seedMinTableTime.
func (tab *Table) copyLiveNodes() {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	now := time.Now()
	for _, b := range &tab.buckets {
		for _, n := range b.entries {
			if now.Sub(n.addedAt) >= seedMinTableTime {
				tab.db.UpdateNode(unwrapNode(n))
			}
		}
	}
}

// closest returns the nresults nodes in the table that are closest to the
// given target. The caller must hold tab.mutex.
func (tab *Table) closest(target enode.ID, nresults int) *nodesByDistance {
	// This is a very wasteful way to find the closest nodes but
	// obviously correct. I believe that tree-based buckets would make
	// this easier to implement efficiently.
	close := &nodesByDistance{target: target}
	for _, b := range &tab.buckets {
		for _, n := range b.entries {
			close.push(n, nresults)
		}
	}
	return close
}

func (tab *Table) len() (n int) {
	for _, b := range &tab.buckets {
		n += len(b.entries)
	}
	return n
}

// bucket returns the bucket for the given node ID hash.
func (tab *Table) bucket(id enode.ID) *bucket {
	d := enode.LogDist(tab.self().ID(), id)
	if d <= bucketMinDistance {
		return tab.buckets[0]
	}
	return tab.buckets[d-bucketMinDistance-1]
}

// add attempts to add the given node to its corresponding bucket. If the bucket has space
// available, adding the node succeeds immediately. Otherwise, the node is put on the
// bucket's replacement list and may enter the bucket later, when revalidation evicts a
// dead entry.
//
// The caller must not hold tab.mutex.
func (tab *Table) add(n *node) {
	if n.ID() == tab.self().ID() {
		return
	}

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.bucket(n.ID())
	if !tab.bumpOrAdd(b, n) {
		// Node is not in table. Add it to the replacement list.
		tab.addReplacement(b, n)
	}
}

// addThroughPing adds the given node to the table. Compared to plain
// 'add' there is an additional safety measure: if the table is still
// initializing the node is not added. This prevents an attack where the
// table could be filled by just sending ping repeatedly.
//
// The caller must not hold tab.mutex.
func (tab *Table) addThroughPing(n *node) {
	if !tab.isInitDone() {
		return
	}
	tab.add(n)
}

// stuff adds the given nodes to their corresponding buckets if those buckets
// are not already full. The caller must not hold tab.mutex.
func (tab *Table) stuff(nodes []*node) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	for _, n := range nodes {
		if n.ID() == tab.self().ID() {
			continue // don't add self
		}
		b := tab.bucket(n.ID())
		if len(b.entries) < bucketSize {
			tab.bumpOrAdd(b, n)
		}
	}
}

// delete removes an entry from the node table. It is used to evict dead nodes.
func (tab *Table) delete(node *node) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	tab.deleteInBucket(tab.bucket(node.ID()), node)
}

func (tab *Table) addIP(b *bucket, ip net.IP) bool {
	if netutil.IsLAN(ip) {
		return true
	}
	if !tab.ips.Add(ip) {
		log.Debug("IP exceeds table limit", "ip", ip)
		return false
	}
	if !b.ips.Add(ip) {
		log.Debug("IP exceeds bucket limit", "ip", ip)
		tab.ips.Remove(ip)
		return false
	}
	return true
}

func (tab *Table) removeIP(b *bucket, ip net.IP) {
	if netutil.IsLAN(ip) {
		return
	}
	tab.ips.Remove(ip)
	b.ips.Remove(ip)
}

func (tab *Table) addReplacement(b *bucket, n *node) {
	for _, e := range b.replacements {
		if e.ID() == n.ID() {
			return // already in list
		}
	}
	if !tab.addIP(b, n.IP()) {
		return
	}
	var removed *node
	b.replacements, removed = pushNode(b.replacements, n, maxReplacements)
	if removed != nil {
		tab.removeIP(b, removed.IP())
	}
}

// replace picks a random node from the bucket's replacement list and substitutes it for
// 'last', provided 'last' is still the last entry in the bucket. If it is not, 'last'
// has either already been replaced or became active again, and nothing is changed. If
// the replacement list is empty, 'last' is simply removed from the bucket.
func (tab *Table) replace(b *bucket, last *node) *node {
	if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID() != last.ID() {
		// Entry has moved, don't replace it.
		return nil
	}
	// Still the last entry.
	if len(b.replacements) == 0 {
		tab.deleteInBucket(b, last)
		return nil
	}
	r := b.replacements[tab.rand.Intn(len(b.replacements))]
	b.replacements = deleteNode(b.replacements, r)
	b.entries[len(b.entries)-1] = r
	tab.removeIP(b, last.IP())
	return r
}

// bump moves the given node to the front of the bucket entry list
// if it is contained in that list.
func (b *bucket) bump(n *node) bool {
	for i := range b.entries {
		if b.entries[i].ID() == n.ID() {
			// move it to the front
			copy(b.entries[1:], b.entries[:i])
			b.entries[0] = n
			return true
		}
	}
	return false
}

// bumpOrAdd moves n to the front of the bucket entry list or adds it if the list isn't
// full. The return value is true if n is in the bucket.
func (tab *Table) bumpOrAdd(b *bucket, n *node) bool {
	if b.bump(n) {
		return true
	}
	if len(b.entries) >= bucketSize || !tab.addIP(b, n.IP()) {
		return false
	}
	b.entries, _ = pushNode(b.entries, n, bucketSize)
	b.replacements = deleteNode(b.replacements, n)
	n.addedAt = time.Now()
	if tab.nodeAddedHook != nil {
		tab.nodeAddedHook(n)
	}
	return true
}

func (tab *Table) deleteInBucket(b *bucket, n *node) {
	b.entries = deleteNode(b.entries, n)
	tab.removeIP(b, n.IP())
}

// pushNode adds n to the front of list, keeping at most max items.
func pushNode(list []*node, n *node, max int) ([]*node, *node) {
	if len(list) < max {
		list = append(list, nil)
	}
	removed := list[len(list)-1]
	copy(list[1:], list)
	list[0] = n
	return list, removed
}

// deleteNode removes n from list.
func deleteNode(list []*node, n *node) []*node {
	for i := range list {
		if list[i].ID() == n.ID() {
			return append(list[:i], list[i+1:]...)
		}
	}
	return list
}

// nodesByDistance is a list of nodes, ordered by
// distance to target.
type nodesByDistance struct {
	entries []*node
	target  enode.ID
}

// push adds the given node to the list, keeping the total size at most maxElems.
func (h *nodesByDistance) push(n *node, maxElems int) {
	ix := sort.Search(len(h.entries), func(i int) bool {
		return enode.DistCmp(h.target, h.entries[i].ID(), n.ID()) > 0
	})
	if len(h.entries) < maxElems {
		h.entries = append(h.entries, n)
	}
	if ix == len(h.entries) {
		// farther away than all nodes we already have.
		// if there was room for it, the node is now the last element.
	} else {
		// slide existing entries down to make room
		// this will overwrite the entry we just appended.
		copy(h.entries[ix+1:], h.entries[ix:])
		h.entries[ix] = n
	}
}
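
// closestOfExample is a small sketch of how nodesByDistance is used: push every
// candidate and at most maxElems entries remain, ordered closest-first to the
// target. It mirrors what Table.closest does over the buckets and is not used
// by the package itself.
func closestOfExample(target enode.ID, candidates []*node, maxElems int) []*node {
	byDist := &nodesByDistance{target: target}
	for _, n := range candidates {
		byDist.push(n, maxElems)
	}
	return byDist.entries
}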