github.com/pslzym/go-ethereum@v1.8.17-0.20180926104442-4b6824e07b1b/p2p/discover/table.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package discover implements the Node Discovery Protocol.
//
// The Node Discovery protocol provides a way to find RLPx nodes that
// can be connected to. It uses a Kademlia-like protocol to maintain a
// distributed database of the IDs and endpoints of all listening
// nodes.
package discover

import (
	"crypto/ecdsa"
	crand "crypto/rand"
	"encoding/binary"
	"fmt"
	mrand "math/rand"
	"net"
	"sort"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/netutil"
)

const (
	alpha           = 3  // Kademlia concurrency factor
	bucketSize      = 16 // Kademlia bucket size
	maxReplacements = 10 // Size of per-bucket replacement list

	// We keep buckets for the upper 1/15 of distances because
	// it's very unlikely we'll ever encounter a node that's closer.
	hashBits          = len(common.Hash{}) * 8
	nBuckets          = hashBits / 15       // Number of buckets
	bucketMinDistance = hashBits - nBuckets // Log distance of closest bucket

	// IP address limits.
	bucketIPLimit, bucketSubnet = 2, 24 // at most 2 addresses from the same /24
	tableIPLimit, tableSubnet   = 10, 24

	maxFindnodeFailures = 5 // Nodes exceeding this limit are dropped
	refreshInterval     = 30 * time.Minute
	revalidateInterval  = 10 * time.Second
	copyNodesInterval   = 30 * time.Second
	seedMinTableTime    = 5 * time.Minute
	seedCount           = 30
	seedMaxAge          = 5 * 24 * time.Hour
)
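
// With a 256-bit hash, the constants above work out to nBuckets = 17 and
// bucketMinDistance = 239: buckets[0] collects every node at log-distance
// 240 or less, while buckets[1] through buckets[16] each cover a single
// distance from 241 to 256 (see Table.bucket below).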

type Table struct {
	mutex   sync.Mutex        // protects buckets, bucket content, nursery, rand
	buckets [nBuckets]*bucket // index of known nodes by distance
	nursery []*node           // bootstrap nodes
	rand    *mrand.Rand       // source of randomness, periodically reseeded
	ips     netutil.DistinctNetSet

	db         *enode.DB // database of known nodes
	refreshReq chan chan struct{}
	initDone   chan struct{}
	closeReq   chan struct{}
	closed     chan struct{}

	nodeAddedHook func(*node) // for testing

	net  transport
	self *node // metadata of the local node
}

// transport is implemented by the UDP transport.
// it is an interface so we can test without opening lots of UDP
// sockets and without generating a private key.
type transport interface {
	ping(enode.ID, *net.UDPAddr) error
	findnode(toid enode.ID, addr *net.UDPAddr, target encPubkey) ([]*node, error)
	close()
}

// bucket contains nodes, ordered by their last activity. the entry
// that was most recently active is the first element in entries.
type bucket struct {
	entries      []*node // live entries, sorted by time of last contact
	replacements []*node // recently seen nodes to be used if revalidation fails
	ips          netutil.DistinctNetSet
}

func newTable(t transport, self *enode.Node, db *enode.DB, bootnodes []*enode.Node) (*Table, error) {
	tab := &Table{
		net:        t,
		db:         db,
		self:       wrapNode(self),
		refreshReq: make(chan chan struct{}),
		initDone:   make(chan struct{}),
		closeReq:   make(chan struct{}),
		closed:     make(chan struct{}),
		rand:       mrand.New(mrand.NewSource(0)),
		ips:        netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit},
	}
	if err := tab.setFallbackNodes(bootnodes); err != nil {
		return nil, err
	}
	for i := range tab.buckets {
		tab.buckets[i] = &bucket{
			ips: netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit},
		}
	}
	tab.seedRand()
	tab.loadSeedNodes()

	go tab.loop()
	return tab, nil
}

func (tab *Table) seedRand() {
	var b [8]byte
	crand.Read(b[:])

	tab.mutex.Lock()
	tab.rand.Seed(int64(binary.BigEndian.Uint64(b[:])))
	tab.mutex.Unlock()
}

// Self returns the local node.
func (tab *Table) Self() *enode.Node {
	return unwrapNode(tab.self)
}

// ReadRandomNodes fills the given slice with random nodes from the table. The results
// are guaranteed to be unique for a single invocation: no node will appear twice.
func (tab *Table) ReadRandomNodes(buf []*enode.Node) (n int) {
	if !tab.isInitDone() {
		return 0
	}
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	// Find all non-empty buckets and get a fresh slice of their entries.
	var buckets [][]*node
	for _, b := range &tab.buckets {
		if len(b.entries) > 0 {
			buckets = append(buckets, b.entries)
		}
	}
	if len(buckets) == 0 {
		return 0
	}
	// Shuffle the buckets (Fisher-Yates).
	for i := len(buckets) - 1; i > 0; i-- {
		j := tab.rand.Intn(i + 1)
		buckets[i], buckets[j] = buckets[j], buckets[i]
	}
	// Move head of each bucket into buf, removing buckets that become empty.
	var i, j int
	for ; i < len(buf); i, j = i+1, (j+1)%len(buckets) {
		b := buckets[j]
		buf[i] = unwrapNode(b[0])
		buckets[j] = b[1:]
		if len(b) == 1 {
			buckets = append(buckets[:j], buckets[j+1:]...)
		}
		if len(buckets) == 0 {
			// buf[i] was written this iteration but the post statement
			// won't run; count it before breaking.
			i++
			break
		}
	}
	return i
}
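
// exampleReadRandom is a hypothetical usage sketch, not part of the original
// file: it samples up to bucketSize random table entries into a fresh slice,
// truncated to the number of nodes actually returned.
func exampleReadRandom(tab *Table) []*enode.Node {
	buf := make([]*enode.Node, bucketSize)
	n := tab.ReadRandomNodes(buf)
	return buf[:n]
}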

// Close terminates the network listener and flushes the node database.
func (tab *Table) Close() {
	select {
	case <-tab.closed:
		// already closed.
	case tab.closeReq <- struct{}{}:
		<-tab.closed // wait for the table loop to end.
	}
}

// setFallbackNodes sets the initial points of contact. These nodes
// are used to connect to the network if the table is empty and there
// are no known nodes in the database.
func (tab *Table) setFallbackNodes(nodes []*enode.Node) error {
	for _, n := range nodes {
		if err := n.ValidateComplete(); err != nil {
			return fmt.Errorf("bad bootstrap node %q: %v", n, err)
		}
	}
	tab.nursery = wrapNodes(nodes)
	return nil
}

// isInitDone returns whether the table's initial seeding procedure has completed.
func (tab *Table) isInitDone() bool {
	select {
	case <-tab.initDone:
		return true
	default:
		return false
	}
}

// Resolve searches for a specific node with the given ID.
// It returns nil if the node could not be found.
func (tab *Table) Resolve(n *enode.Node) *enode.Node {
	// If the node is present in the local table, no
	// network interaction is required.
	hash := n.ID()
	tab.mutex.Lock()
	cl := tab.closest(hash, 1)
	tab.mutex.Unlock()
	if len(cl.entries) > 0 && cl.entries[0].ID() == hash {
		return unwrapNode(cl.entries[0])
	}
	// Otherwise, do a network lookup.
	result := tab.lookup(encodePubkey(n.Pubkey()), true)
	for _, n := range result {
		if n.ID() == hash {
			return unwrapNode(n)
		}
	}
	return nil
}

// LookupRandom finds random nodes in the network.
func (tab *Table) LookupRandom() []*enode.Node {
	var target encPubkey
	crand.Read(target[:])
	return unwrapNodes(tab.lookup(target, true))
}

// lookup performs a network search for nodes close to the given target. It approaches the
// target by querying nodes that are closer to it on each iteration. The given target does
// not need to be an actual node identifier.
func (tab *Table) lookup(targetKey encPubkey, refreshIfEmpty bool) []*node {
	var (
		target         = enode.ID(crypto.Keccak256Hash(targetKey[:]))
		asked          = make(map[enode.ID]bool)
		seen           = make(map[enode.ID]bool)
		reply          = make(chan []*node, alpha)
		pendingQueries = 0
		result         *nodesByDistance
	)
	// don't query further if we hit ourselves.
	// unlikely to happen often in practice.
	asked[tab.self.ID()] = true

	for {
		tab.mutex.Lock()
		// generate initial result set
		result = tab.closest(target, bucketSize)
		tab.mutex.Unlock()
		if len(result.entries) > 0 || !refreshIfEmpty {
			break
		}
		// The result set is empty, all nodes were dropped, refresh.
		// We actually wait for the refresh to complete here. The very
		// first query will hit this case and run the bootstrapping
		// logic.
		<-tab.refresh()
		refreshIfEmpty = false
	}

	for {
		// ask the alpha closest nodes that we haven't asked yet
		for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
			n := result.entries[i]
			if !asked[n.ID()] {
				asked[n.ID()] = true
				pendingQueries++
				go tab.findnode(n, targetKey, reply)
			}
		}
		if pendingQueries == 0 {
			// we have asked all closest nodes, stop the search
			break
		}
		// wait for the next reply
		for _, n := range <-reply {
			if n != nil && !seen[n.ID()] {
				seen[n.ID()] = true
				result.push(n, bucketSize)
			}
		}
		pendingQueries--
	}
	return result.entries
}

func (tab *Table) findnode(n *node, targetKey encPubkey, reply chan<- []*node) {
	fails := tab.db.FindFails(n.ID())
	r, err := tab.net.findnode(n.ID(), n.addr(), targetKey)
	if err != nil || len(r) == 0 {
		fails++
		tab.db.UpdateFindFails(n.ID(), fails)
		log.Trace("Findnode failed", "id", n.ID(), "failcount", fails, "err", err)
		if fails >= maxFindnodeFailures {
			log.Trace("Too many findnode failures, dropping", "id", n.ID(), "failcount", fails)
			tab.delete(n)
		}
	} else if fails > 0 {
		tab.db.UpdateFindFails(n.ID(), fails-1)
	}

	// Grab as many nodes as possible. Some of them might not be alive anymore, but we'll
	// just remove those again during revalidation.
	for _, n := range r {
		tab.add(n)
	}
	reply <- r
}
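
// Note that the fail counter recorded above decays by one on each successful
// findnode and grows by one on each failure; a node is only dropped once the
// stored count reaches maxFindnodeFailures.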

func (tab *Table) refresh() <-chan struct{} {
	done := make(chan struct{})
	select {
	case tab.refreshReq <- done:
	case <-tab.closed:
		close(done)
	}
	return done
}

// loop schedules runs of doRefresh, doRevalidate and copyLiveNodes, and coordinates shutdown.
func (tab *Table) loop() {
	var (
		revalidate     = time.NewTimer(tab.nextRevalidateTime())
		refresh        = time.NewTicker(refreshInterval)
		copyNodes      = time.NewTicker(copyNodesInterval)
		revalidateDone = make(chan struct{})
		refreshDone    = make(chan struct{})           // where doRefresh reports completion
		waiting        = []chan struct{}{tab.initDone} // holds waiting callers while doRefresh runs
	)
	defer refresh.Stop()
	defer revalidate.Stop()
	defer copyNodes.Stop()

	// Start initial refresh.
	go tab.doRefresh(refreshDone)

loop:
	for {
		select {
		case <-refresh.C:
			tab.seedRand()
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}
		case req := <-tab.refreshReq:
			waiting = append(waiting, req)
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}
		case <-refreshDone:
			for _, ch := range waiting {
				close(ch)
			}
			waiting, refreshDone = nil, nil
		case <-revalidate.C:
			go tab.doRevalidate(revalidateDone)
		case <-revalidateDone:
			revalidate.Reset(tab.nextRevalidateTime())
		case <-copyNodes.C:
			go tab.copyLiveNodes()
		case <-tab.closeReq:
			break loop
		}
	}

	if tab.net != nil {
		tab.net.close()
	}
	if refreshDone != nil {
		<-refreshDone
	}
	for _, ch := range waiting {
		close(ch)
	}
	close(tab.closed)
}

// doRefresh performs a lookup for a random target to keep buckets full. Seed nodes
// are inserted if the table is empty (initial bootstrap or discarded faulty peers).
func (tab *Table) doRefresh(done chan struct{}) {
	defer close(done)

	// Load nodes from the database and insert
	// them. This should yield a few previously seen nodes that are
	// (hopefully) still alive.
	tab.loadSeedNodes()

	// Run self lookup to discover new neighbor nodes.
	// We can only do this if we have a secp256k1 identity.
	var key ecdsa.PublicKey
	if err := tab.self.Load((*enode.Secp256k1)(&key)); err == nil {
		tab.lookup(encodePubkey(&key), false)
	}

	// The Kademlia paper specifies that the bucket refresh should
	// perform a lookup in the least recently used bucket. We cannot
	// adhere to this because the findnode target is a 512-bit value
	// (not hash-sized) and it is not easily possible to generate a
	// sha3 preimage that falls into a chosen bucket.
	// We perform a few lookups with a random target instead.
	for i := 0; i < 3; i++ {
		var target encPubkey
		crand.Read(target[:])
		tab.lookup(target, false)
	}
}

func (tab *Table) loadSeedNodes() {
	seeds := wrapNodes(tab.db.QuerySeeds(seedCount, seedMaxAge))
	seeds = append(seeds, tab.nursery...)
	for i := range seeds {
		seed := seeds[i]
		age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.LastPongReceived(seed.ID())) }}
		log.Debug("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age)
		tab.add(seed)
	}
}

// doRevalidate checks that the last node in a random bucket is still live
// and replaces or deletes the node if it isn't.
func (tab *Table) doRevalidate(done chan<- struct{}) {
	defer func() { done <- struct{}{} }()

	last, bi := tab.nodeToRevalidate()
	if last == nil {
		// No non-empty bucket found.
		return
	}

	// Ping the selected node and wait for a pong.
	err := tab.net.ping(last.ID(), last.addr())

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.buckets[bi]
	if err == nil {
		// The node responded, move it to the front.
		log.Debug("Revalidated node", "b", bi, "id", last.ID())
		b.bump(last)
		return
	}
	// No reply received, pick a replacement or delete the node if there aren't
	// any replacements.
	if r := tab.replace(b, last); r != nil {
		log.Debug("Replaced dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "r", r.ID(), "rip", r.IP())
	} else {
		log.Debug("Removed dead node", "b", bi, "id", last.ID(), "ip", last.IP())
	}
}

// nodeToRevalidate returns the last node in a random, non-empty bucket.
func (tab *Table) nodeToRevalidate() (n *node, bi int) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	for _, bi = range tab.rand.Perm(len(tab.buckets)) {
		b := tab.buckets[bi]
		if len(b.entries) > 0 {
			last := b.entries[len(b.entries)-1]
			return last, bi
		}
	}
	return nil, 0
}

func (tab *Table) nextRevalidateTime() time.Duration {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	return time.Duration(tab.rand.Int63n(int64(revalidateInterval)))
}

// copyLiveNodes adds nodes from the table to the database if they have been in the table
// longer than seedMinTableTime.
func (tab *Table) copyLiveNodes() {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	now := time.Now()
	for _, b := range &tab.buckets {
		for _, n := range b.entries {
			if now.Sub(n.addedAt) >= seedMinTableTime {
				tab.db.UpdateNode(unwrapNode(n))
			}
		}
	}
}

// closest returns the n nodes in the table that are closest to the
// given id. The caller must hold tab.mutex.
func (tab *Table) closest(target enode.ID, nresults int) *nodesByDistance {
	// This is a very wasteful way to find the closest nodes but
	// obviously correct. I believe that tree-based buckets would make
	// this easier to implement efficiently.
	close := &nodesByDistance{target: target}
	for _, b := range &tab.buckets {
		for _, n := range b.entries {
			close.push(n, nresults)
		}
	}
	return close
}

func (tab *Table) len() (n int) {
	for _, b := range &tab.buckets {
		n += len(b.entries)
	}
	return n
}

// bucket returns the bucket for the given node ID hash.
func (tab *Table) bucket(id enode.ID) *bucket {
	d := enode.LogDist(tab.self.ID(), id)
	if d <= bucketMinDistance {
		return tab.buckets[0]
	}
	return tab.buckets[d-bucketMinDistance-1]
}

// add attempts to add the given node to its corresponding bucket. If the bucket has space
// available, adding the node succeeds immediately. Otherwise, the node is put on the
// bucket's replacement list and may enter the table later, when revalidation finds that
// the least recently active node in the bucket is no longer responding.
//
// The caller must not hold tab.mutex.
func (tab *Table) add(n *node) {
	if n.ID() == tab.self.ID() {
		return
	}

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.bucket(n.ID())
	if !tab.bumpOrAdd(b, n) {
		// Node is not in table. Add it to the replacement list.
		tab.addReplacement(b, n)
	}
}

// addThroughPing adds the given node to the table. Compared to plain
// 'add' there is an additional safety measure: if the table is still
// initializing the node is not added. This prevents an attack where the
// table could be filled by just sending ping repeatedly.
//
// The caller must not hold tab.mutex.
func (tab *Table) addThroughPing(n *node) {
	if !tab.isInitDone() {
		return
	}
	tab.add(n)
}

// stuff adds the given nodes to the table, appending each to the end of its
// corresponding bucket if that bucket is not full. The caller must not hold tab.mutex.
func (tab *Table) stuff(nodes []*node) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	for _, n := range nodes {
		if n.ID() == tab.self.ID() {
			continue // don't add self
		}
		b := tab.bucket(n.ID())
		if len(b.entries) < bucketSize {
			tab.bumpOrAdd(b, n)
		}
	}
}

// delete removes an entry from the node table. It is used to evict dead nodes.
func (tab *Table) delete(node *node) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	tab.deleteInBucket(tab.bucket(node.ID()), node)
}

func (tab *Table) addIP(b *bucket, ip net.IP) bool {
	if netutil.IsLAN(ip) {
		return true
	}
	if !tab.ips.Add(ip) {
		log.Debug("IP exceeds table limit", "ip", ip)
		return false
	}
	if !b.ips.Add(ip) {
		log.Debug("IP exceeds bucket limit", "ip", ip)
		tab.ips.Remove(ip)
		return false
	}
	return true
}

func (tab *Table) removeIP(b *bucket, ip net.IP) {
	if netutil.IsLAN(ip) {
		return
	}
	tab.ips.Remove(ip)
	b.ips.Remove(ip)
}

func (tab *Table) addReplacement(b *bucket, n *node) {
	for _, e := range b.replacements {
		if e.ID() == n.ID() {
			return // already in list
		}
	}
	if !tab.addIP(b, n.IP()) {
		return
	}
	var removed *node
	b.replacements, removed = pushNode(b.replacements, n, maxReplacements)
	if removed != nil {
		tab.removeIP(b, removed.IP())
	}
}

// replace picks a random node from the replacement list and replaces 'last' with it
// if 'last' is still the last entry in the bucket. If 'last' isn't the last entry,
// it has either been replaced with someone else or became active.
func (tab *Table) replace(b *bucket, last *node) *node {
	if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID() != last.ID() {
		// Entry has moved, don't replace it.
		return nil
	}
	// Still the last entry.
	if len(b.replacements) == 0 {
		tab.deleteInBucket(b, last)
		return nil
	}
	r := b.replacements[tab.rand.Intn(len(b.replacements))]
	b.replacements = deleteNode(b.replacements, r)
	b.entries[len(b.entries)-1] = r
	tab.removeIP(b, last.IP())
	return r
}

// bump moves the given node to the front of the bucket entry list
// if it is contained in that list.
func (b *bucket) bump(n *node) bool {
	for i := range b.entries {
		if b.entries[i].ID() == n.ID() {
			// move it to the front
			copy(b.entries[1:], b.entries[:i])
			b.entries[0] = n
			return true
		}
	}
	return false
}

// bumpOrAdd moves n to the front of the bucket entry list or adds it if the list isn't
// full. The return value is true if n is in the bucket.
func (tab *Table) bumpOrAdd(b *bucket, n *node) bool {
	if b.bump(n) {
		return true
	}
	if len(b.entries) >= bucketSize || !tab.addIP(b, n.IP()) {
		return false
	}
	b.entries, _ = pushNode(b.entries, n, bucketSize)
	b.replacements = deleteNode(b.replacements, n)
	n.addedAt = time.Now()
	if tab.nodeAddedHook != nil {
		tab.nodeAddedHook(n)
	}
	return true
}

func (tab *Table) deleteInBucket(b *bucket, n *node) {
	b.entries = deleteNode(b.entries, n)
	tab.removeIP(b, n.IP())
}

// pushNode adds n to the front of list, keeping at most max items. It returns the
// updated list and the entry that was pushed off the end (nil if the list had room).
func pushNode(list []*node, n *node, max int) ([]*node, *node) {
	if len(list) < max {
		list = append(list, nil)
	}
	removed := list[len(list)-1]
	copy(list[1:], list)
	list[0] = n
	return list, removed
}
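
// As a worked sketch of pushNode: with max = 3 and list = [a b c], pushing d
// yields list = [d a b] and returns c as the removed entry; with list = [a b]
// it yields [d a b] and returns nil.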

// deleteNode removes n from list.
func deleteNode(list []*node, n *node) []*node {
	for i := range list {
		if list[i].ID() == n.ID() {
			return append(list[:i], list[i+1:]...)
		}
	}
	return list
}

// nodesByDistance is a list of nodes, ordered by
// distance to target.
type nodesByDistance struct {
	entries []*node
	target  enode.ID
}

// push adds the given node to the list, keeping at most maxElems entries.
func (h *nodesByDistance) push(n *node, maxElems int) {
	ix := sort.Search(len(h.entries), func(i int) bool {
		return enode.DistCmp(h.target, h.entries[i].ID(), n.ID()) > 0
	})
	if len(h.entries) < maxElems {
		h.entries = append(h.entries, n)
	}
	if ix == len(h.entries) {
		// farther away than all nodes we already have.
		// if there was room for it, the node is now the last element.
	} else {
		// slide existing entries down to make room
		// this will overwrite the entry we just appended.
		copy(h.entries[ix+1:], h.entries[ix:])
		h.entries[ix] = n
	}
}
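
// exampleClosestSet is a hypothetical sketch, not part of the original file,
// showing how nodesByDistance is typically used: after pushing an arbitrary
// number of candidates, entries holds at most bucketSize nodes, sorted by
// ascending distance to target.
func exampleClosestSet(target enode.ID, candidates []*node) []*node {
	byDist := &nodesByDistance{target: target}
	for _, n := range candidates {
		byDist.push(n, bucketSize)
	}
	return byDist.entries
}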