github.com/cheng762/platon-go@v1.8.17-0.20190529111256-7deff2d7be26/p2p/discover/table.go (about)

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package discover implements the Node Discovery Protocol.
//
// The Node Discovery protocol provides a way to find RLPx nodes that
// can be connected to. It uses a Kademlia-like protocol to maintain a
// distributed database of the IDs and endpoints of all listening
// nodes.
package discover

import (
	crand "crypto/rand"
	"encoding/binary"
	"fmt"
	mrand "math/rand"
	"net"
	"sort"
	"sync"
	"time"

	"github.com/PlatONnetwork/PlatON-Go/common"
	"github.com/PlatONnetwork/PlatON-Go/crypto"
	"github.com/PlatONnetwork/PlatON-Go/log"
	"github.com/PlatONnetwork/PlatON-Go/p2p/netutil"
)

const (
	alpha           = 3  // Kademlia concurrency factor
	bucketSize      = 16 // Kademlia bucket size
	maxReplacements = 10 // Size of per-bucket replacement list

	// We keep buckets for the upper 1/15 of distances because
	// it's very unlikely we'll ever encounter a node that's closer.
	hashBits          = len(common.Hash{}) * 8
	nBuckets          = hashBits / 15       // Number of buckets
	bucketMinDistance = hashBits - nBuckets // Log distance of closest bucket

	// IP address limits.
	bucketIPLimit, bucketSubnet = 2, 24 // at most 2 addresses from the same /24
	tableIPLimit, tableSubnet   = 10, 24

	maxFindnodeFailures = 5 // Nodes exceeding this limit are dropped
	refreshInterval     = 30 * time.Minute
	revalidateInterval  = 10 * time.Second
	copyNodesInterval   = 30 * time.Second
	seedMinTableTime    = 5 * time.Minute
	seedCount           = 30
	seedMaxAge          = 5 * 24 * time.Hour
)

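// Table is the Kademlia-like index of known nodes. Nodes are kept in
// distance-ordered buckets and are refreshed, revalidated and persisted
// to the node database by a background maintenance loop.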
type Table struct {
	mutex   sync.Mutex        // protects buckets, bucket content, nursery, rand
	buckets [nBuckets]*bucket // index of known nodes by distance
	nursery []*Node           // bootstrap nodes
	rand    *mrand.Rand       // source of randomness, periodically reseeded
	ips     netutil.DistinctNetSet

	db         *nodeDB // database of known nodes
	refreshReq chan chan struct{}
	initDone   chan struct{}
	closeReq   chan struct{}
	closed     chan struct{}

	nodeAddedHook func(*Node) // for testing

	net  transport
	self *Node // metadata of the local node
}

// transport is implemented by the UDP transport.
// It is an interface so we can test without opening lots of UDP
// sockets and without generating a private key.
type transport interface {
	ping(NodeID, *net.UDPAddr) error
	findnode(toid NodeID, addr *net.UDPAddr, target NodeID) ([]*Node, error)
	close()
}

// bucket contains nodes, ordered by their last activity. The entry
// that was most recently active is the first element in entries.
type bucket struct {
	entries      []*Node // live entries, sorted by time of last contact
	replacements []*Node // recently seen nodes to be used if revalidation fails
	ips          netutil.DistinctNetSet
}

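// newTable creates the node table, backed by the node database at nodeDBPath
// (an in-memory database is used if the path is empty), loads the seed nodes
// and starts the background maintenance loop.
//
// A minimal usage sketch, assuming a transport t (for example the UDP
// transport) has already been set up elsewhere; ourID, ourAddr and targetID
// are placeholder values:
//
//	tab, err := newTable(t, ourID, ourAddr, "", nil)
//	if err != nil {
//		// handle the error
//	}
//	defer tab.Close()
//	closest := tab.Lookup(targetID) // nodes near targetID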
func newTable(t transport, ourID NodeID, ourAddr *net.UDPAddr, nodeDBPath string, bootnodes []*Node) (*Table, error) {
	// If no node database was given, use an in-memory one
	db, err := newNodeDB(nodeDBPath, nodeDBVersion, ourID)
	if err != nil {
		return nil, err
	}
	tab := &Table{
		net:        t,
		db:         db,
		self:       NewNode(ourID, ourAddr.IP, uint16(ourAddr.Port), uint16(ourAddr.Port)),
		refreshReq: make(chan chan struct{}),
		initDone:   make(chan struct{}),
		closeReq:   make(chan struct{}),
		closed:     make(chan struct{}),
		rand:       mrand.New(mrand.NewSource(0)),
		ips:        netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit},
	}
	if err := tab.setFallbackNodes(bootnodes); err != nil {
		return nil, err
	}
	for i := range tab.buckets {
		tab.buckets[i] = &bucket{
			ips: netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit},
		}
	}
	tab.seedRand()
	tab.loadSeedNodes()
	// Start the background expiration goroutine after loading seeds so that the search for
	// seed nodes also considers older nodes that would otherwise be removed by the
	// expiration.
	tab.db.ensureExpirer()
	go tab.loop()
	return tab, nil
}

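// seedRand reseeds the table's random source with entropy from crypto/rand.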
func (tab *Table) seedRand() {
	var b [8]byte
	crand.Read(b[:])

	tab.mutex.Lock()
	tab.rand.Seed(int64(binary.BigEndian.Uint64(b[:])))
	tab.mutex.Unlock()
}

// Self returns the local node.
// The returned node should not be modified by the caller.
func (tab *Table) Self() *Node {
	return tab.self
}

// ReadRandomNodes fills the given slice with random nodes from the
// table. It will not write the same node more than once. The nodes in
// the slice are copies and can be modified by the caller.
func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
	if !tab.isInitDone() {
		return 0
	}
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	// Find all non-empty buckets and get a fresh slice of their entries.
	var buckets [][]*Node
	for _, b := range &tab.buckets {
		if len(b.entries) > 0 {
			buckets = append(buckets, b.entries)
		}
	}
	if len(buckets) == 0 {
		return 0
	}
	// Shuffle the buckets (Fisher-Yates).
	for i := len(buckets) - 1; i > 0; i-- {
		j := tab.rand.Intn(i + 1)
		buckets[i], buckets[j] = buckets[j], buckets[i]
	}
	// Move the head of each bucket into buf, removing buckets that become empty.
	var i, j int
	for i < len(buf) {
		b := buckets[j]
		cpy := *b[0] // hand out a copy so callers cannot mutate table entries
		buf[i] = &cpy
		i++
		buckets[j] = b[1:]
		if len(b) == 1 {
			buckets = append(buckets[:j], buckets[j+1:]...)
		}
		if len(buckets) == 0 {
			break
		}
		j = (j + 1) % len(buckets)
	}
	return i // number of slots filled, never more than len(buf)
}

// Close terminates the network listener and flushes the node database.
func (tab *Table) Close() {
	select {
	case <-tab.closed:
		// already closed.
	case tab.closeReq <- struct{}{}:
		<-tab.closed // wait for the maintenance loop to end.
	}
}

// setFallbackNodes sets the initial points of contact. These nodes
// are used to connect to the network if the table is empty and there
// are no known nodes in the database.
func (tab *Table) setFallbackNodes(nodes []*Node) error {
	for _, n := range nodes {
		if err := n.validateComplete(); err != nil {
			return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err)
		}
	}
	tab.nursery = make([]*Node, 0, len(nodes))
	for _, n := range nodes {
		cpy := *n
		// Recompute cpy.sha because the node might not have been
		// created by NewNode or ParseNode.
		cpy.sha = crypto.Keccak256Hash(n.ID[:])
		tab.nursery = append(tab.nursery, &cpy)
	}
	return nil
}

// isInitDone returns whether the table's initial seeding procedure has completed.
func (tab *Table) isInitDone() bool {
	select {
	case <-tab.initDone:
		return true
	default:
		return false
	}
}

// Resolve searches for a specific node with the given ID.
// It returns nil if the node could not be found.
func (tab *Table) Resolve(targetID NodeID) *Node {
	// If the node is present in the local table, no
	// network interaction is required.
	hash := crypto.Keccak256Hash(targetID[:])
	tab.mutex.Lock()
	cl := tab.closest(hash, 1)
	tab.mutex.Unlock()
	if len(cl.entries) > 0 && cl.entries[0].ID == targetID {
		return cl.entries[0]
	}
	// Otherwise, do a network lookup.
	result := tab.Lookup(targetID)
	for _, n := range result {
		if n.ID == targetID {
			return n
		}
	}
	return nil
}

// Lookup performs a network search for nodes close
// to the given target. It approaches the target by querying
// nodes that are closer to it on each iteration.
// The given target does not need to be an actual node
// identifier.
func (tab *Table) Lookup(targetID NodeID) []*Node {
	return tab.lookup(targetID, true)
}

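// lookup performs the network search backing Lookup. It repeatedly queries the
// alpha closest nodes that have not been asked yet and merges their responses,
// stopping once every node in the current result set has been queried. If
// refreshIfEmpty is true and the table has no entries, a refresh is run (and
// waited for) before the search starts.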
func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node {
	var (
		target         = crypto.Keccak256Hash(targetID[:])
		asked          = make(map[NodeID]bool)
		seen           = make(map[NodeID]bool)
		reply          = make(chan []*Node, alpha)
		pendingQueries = 0
		result         *nodesByDistance
	)
	// Don't query further if we hit ourselves.
	// Unlikely to happen often in practice.
	asked[tab.self.ID] = true

	for {
		tab.mutex.Lock()
		// generate initial result set
		result = tab.closest(target, bucketSize)
		tab.mutex.Unlock()
		if len(result.entries) > 0 || !refreshIfEmpty {
			break
		}
		// The result set is empty, all nodes were dropped, refresh.
		// We actually wait for the refresh to complete here. The very
		// first query will hit this case and run the bootstrapping
		// logic.
		<-tab.refresh()
		refreshIfEmpty = false
	}

	for {
		// ask the alpha closest nodes that we haven't asked yet
		for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
			n := result.entries[i]
			if !asked[n.ID] {
				asked[n.ID] = true
				pendingQueries++
				go tab.findnode(n, targetID, reply)
			}
		}
		if pendingQueries == 0 {
			// we have asked all closest nodes, stop the search
			break
		}
		// wait for the next reply
		for _, n := range <-reply {
			if n != nil && !seen[n.ID] {
				seen[n.ID] = true
				result.push(n, bucketSize)
			}
		}
		pendingQueries--
	}
	return result.entries
}

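// findnode queries n for nodes close to targetID, adds the results to the
// table and delivers them on the reply channel. Failed or empty queries are
// counted in the node database; nodes exceeding maxFindnodeFailures are
// dropped from the table.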
func (tab *Table) findnode(n *Node, targetID NodeID, reply chan<- []*Node) {
	fails := tab.db.findFails(n.ID)
	r, err := tab.net.findnode(n.ID, n.addr(), targetID)
	if err != nil || len(r) == 0 {
		fails++
		tab.db.updateFindFails(n.ID, fails)
		log.Trace("Findnode failed", "id", n.ID, "failcount", fails, "err", err)
		if fails >= maxFindnodeFailures {
			log.Trace("Too many findnode failures, dropping", "id", n.ID, "failcount", fails)
			tab.delete(n)
		}
	} else if fails > 0 {
		tab.db.updateFindFails(n.ID, fails-1)
	}

	// Grab as many nodes as possible. Some of them might not be alive anymore, but we'll
	// just remove those again during revalidation.
	for _, n := range r {
		log.Debug("Findnode list", "nodeIP", n.addr(), "nodeID", n.ID)
		tab.add(n)
	}
	reply <- r
}

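// refresh requests a table refresh from the maintenance loop. The returned
// channel is closed when the refresh is done, or immediately if the table is
// shutting down.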
func (tab *Table) refresh() <-chan struct{} {
	done := make(chan struct{})
	select {
	case tab.refreshReq <- done:
	case <-tab.closed:
		close(done)
	}
	return done
}

// loop schedules runs of doRefresh, doRevalidate and copyLiveNodes, and
// coordinates shutdown.
func (tab *Table) loop() {
	var (
		revalidate     = time.NewTimer(tab.nextRevalidateTime())
		refresh        = time.NewTicker(refreshInterval)
		copyNodes      = time.NewTicker(copyNodesInterval)
		revalidateDone = make(chan struct{})
		refreshDone    = make(chan struct{})           // where doRefresh reports completion
		waiting        = []chan struct{}{tab.initDone} // holds waiting callers while doRefresh runs
	)
	defer refresh.Stop()
	defer revalidate.Stop()
	defer copyNodes.Stop()

	// Start initial refresh.
	go tab.doRefresh(refreshDone)

loop:
	for {
		select {
		case <-refresh.C:
			tab.seedRand()
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}
		case req := <-tab.refreshReq:
			waiting = append(waiting, req)
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}
		case <-refreshDone:
			for _, ch := range waiting {
				close(ch)
			}
			waiting, refreshDone = nil, nil
		case <-revalidate.C:
			go tab.doRevalidate(revalidateDone)
		case <-revalidateDone:
			revalidate.Reset(tab.nextRevalidateTime())
		case <-copyNodes.C:
			go tab.copyLiveNodes()
		case <-tab.closeReq:
			break loop
		}
	}

	if tab.net != nil {
		tab.net.close()
	}
	if refreshDone != nil {
		<-refreshDone
	}
	for _, ch := range waiting {
		close(ch)
	}
	tab.db.close()
	close(tab.closed)
}

// doRefresh performs a lookup for a random target to keep buckets
// full. Seed nodes are inserted if the table is empty (initial
// bootstrap, or all previous peers were dropped as faulty).
func (tab *Table) doRefresh(done chan struct{}) {
	defer close(done)

	// Load nodes from the database and insert
	// them. This should yield a few previously seen nodes that are
	// (hopefully) still alive.
	tab.loadSeedNodes()

	// Run self lookup to discover new neighbor nodes.
	tab.lookup(tab.self.ID, false)

	// The Kademlia paper specifies that the bucket refresh should
	// perform a lookup in the least recently used bucket. We cannot
	// adhere to this because the findnode target is a 512-bit value
	// (not hash-sized) and it is not easily possible to generate a
	// sha3 preimage that falls into a chosen bucket.
	// We perform a few lookups with a random target instead.
	for i := 0; i < 3; i++ {
		var target NodeID
		crand.Read(target[:])
		tab.lookup(target, false)
	}
}

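// loadSeedNodes adds previously seen nodes from the database and the
// configured bootstrap nodes (the nursery) to the table.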
func (tab *Table) loadSeedNodes() {
	seeds := tab.db.querySeeds(seedCount, seedMaxAge)
	seeds = append(seeds, tab.nursery...)
	for i := range seeds {
		seed := seeds[i]
		age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.lastPongReceived(seed.ID)) }}
		log.Debug("Found seed node in database", "id", seed.ID, "addr", seed.addr(), "age", age)
		tab.add(seed)
	}
}

// doRevalidate checks that the last node in a random bucket is still live
// and replaces or deletes the node if it isn't.
func (tab *Table) doRevalidate(done chan<- struct{}) {
	defer func() { done <- struct{}{} }()

	last, bi := tab.nodeToRevalidate()
	if last == nil {
		// No non-empty bucket found.
		return
	}

	// Ping the selected node and wait for a pong.
	err := tab.net.ping(last.ID, last.addr())

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.buckets[bi]
	if err == nil {
		// The node responded, move it to the front.
		log.Trace("Revalidated node", "b", bi, "id", last.ID)
		b.bump(last)
		return
	}
	// No reply received, pick a replacement or delete the node if there aren't
	// any replacements.
	if r := tab.replace(b, last); r != nil {
		log.Trace("Replaced dead node", "b", bi, "id", last.ID, "ip", last.IP, "r", r.ID, "rip", r.IP)
	} else {
		log.Trace("Removed dead node", "b", bi, "id", last.ID, "ip", last.IP)
	}
}

// nodeToRevalidate returns the last node in a random, non-empty bucket.
func (tab *Table) nodeToRevalidate() (n *Node, bi int) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	for _, bi = range tab.rand.Perm(len(tab.buckets)) {
		b := tab.buckets[bi]
		if len(b.entries) > 0 {
			last := b.entries[len(b.entries)-1]
			return last, bi
		}
	}
	return nil, 0
}

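// nextRevalidateTime returns a random duration below revalidateInterval so
// that revalidation checks are spread out over time.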
func (tab *Table) nextRevalidateTime() time.Duration {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	return time.Duration(tab.rand.Int63n(int64(revalidateInterval)))
}

// copyLiveNodes adds nodes from the table to the database if they have been in the table
// longer than seedMinTableTime.
func (tab *Table) copyLiveNodes() {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	now := time.Now()
	for _, b := range &tab.buckets {
		for _, n := range b.entries {
			if now.Sub(n.addedAt) >= seedMinTableTime {
				tab.db.updateNode(n)
			}
		}
	}
}

// closest returns the n nodes in the table that are closest to the
// given id. The caller must hold tab.mutex.
func (tab *Table) closest(target common.Hash, nresults int) *nodesByDistance {
	// This is a very wasteful way to find the closest nodes but
	// obviously correct. I believe that tree-based buckets would make
	// this easier to implement efficiently.
	close := &nodesByDistance{target: target}
	for _, b := range &tab.buckets {
		for _, n := range b.entries {
			close.push(n, nresults)
		}
	}
	return close
}

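// len returns the total number of live entries across all buckets.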
func (tab *Table) len() (n int) {
	for _, b := range &tab.buckets {
		n += len(b.entries)
	}
	return n
}

// bucket returns the bucket for the given node ID hash.
func (tab *Table) bucket(sha common.Hash) *bucket {
	d := logdist(tab.self.sha, sha)
	if d <= bucketMinDistance {
		return tab.buckets[0]
	}
	return tab.buckets[d-bucketMinDistance-1]
}

// add attempts to add the given node to its corresponding bucket. If the bucket has space
// available, adding the node succeeds immediately. Otherwise, the node is put on the
// bucket's replacement list and may enter the bucket later, when revalidation of the
// bucket's least recently active node fails.
//
// The caller must not hold tab.mutex.
func (tab *Table) add(n *Node) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	b := tab.bucket(n.sha)
	if !tab.bumpOrAdd(b, n) {
		// Node is not in table. Add it to the replacement list.
		tab.addReplacement(b, n)
	}
}

// addThroughPing adds the given node to the table. Compared to plain
// 'add' there is an additional safety measure: if the table is still
// initializing the node is not added. This prevents an attack where the
// table could be filled by just sending ping repeatedly.
//
// The caller must not hold tab.mutex.
func (tab *Table) addThroughPing(n *Node) {
	if !tab.isInitDone() {
		return
	}
	tab.add(n)
}

// stuff adds the given nodes to the end of their corresponding bucket
// if the bucket is not full. The caller must not hold tab.mutex.
func (tab *Table) stuff(nodes []*Node) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	for _, n := range nodes {
		if n.ID == tab.self.ID {
			continue // don't add self
		}
		b := tab.bucket(n.sha)
		if len(b.entries) < bucketSize {
			tab.bumpOrAdd(b, n)
		}
	}
}

// delete removes an entry from the node table. It is used to evict dead nodes.
func (tab *Table) delete(node *Node) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	tab.deleteInBucket(tab.bucket(node.sha), node)
}

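// addIP records ip in the table-wide and per-bucket IP limit sets. It returns
// false if either limit would be exceeded; LAN addresses are always accepted.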
func (tab *Table) addIP(b *bucket, ip net.IP) bool {
	if netutil.IsLAN(ip) {
		return true
	}
	if !tab.ips.Add(ip) {
		log.Debug("IP exceeds table limit", "ip", ip)
		return false
	}
	if !b.ips.Add(ip) {
		log.Debug("IP exceeds bucket limit", "ip", ip)
		tab.ips.Remove(ip)
		return false
	}
	return true
}

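// removeIP releases an IP address previously recorded by addIP.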
func (tab *Table) removeIP(b *bucket, ip net.IP) {
	if netutil.IsLAN(ip) {
		return
	}
	tab.ips.Remove(ip)
	b.ips.Remove(ip)
}

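// addReplacement adds n to the bucket's replacement list unless it is already
// present, keeping at most maxReplacements entries and releasing the IP of any
// replacement that is pushed out.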
func (tab *Table) addReplacement(b *bucket, n *Node) {
	for _, e := range b.replacements {
		if e.ID == n.ID {
			return // already in list
		}
	}
	if !tab.addIP(b, n.IP) {
		return
	}
	var removed *Node
	b.replacements, removed = pushNode(b.replacements, n, maxReplacements)
	if removed != nil {
		tab.removeIP(b, removed.IP)
	}
}

// replace picks a random node from the bucket's replacement list and replaces 'last' with
// it if 'last' is still the last entry in the bucket. If 'last' is no longer the last entry,
// it has either been replaced with someone else or became active, and nothing happens.
// If the replacement list is empty, 'last' is simply removed.
func (tab *Table) replace(b *bucket, last *Node) *Node {
	if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID != last.ID {
		// Entry has moved, don't replace it.
		return nil
	}
	// Still the last entry.
	if len(b.replacements) == 0 {
		tab.deleteInBucket(b, last)
		return nil
	}
	r := b.replacements[tab.rand.Intn(len(b.replacements))]
	b.replacements = deleteNode(b.replacements, r)
	b.entries[len(b.entries)-1] = r
	tab.removeIP(b, last.IP)
	return r
}

// bump moves the given node to the front of the bucket entry list
// if it is contained in that list.
func (b *bucket) bump(n *Node) bool {
	for i := range b.entries {
		if b.entries[i].ID == n.ID {
			// move it to the front
			copy(b.entries[1:], b.entries[:i])
			b.entries[0] = n
			return true
		}
	}
	return false
}

// bumpOrAdd moves n to the front of the bucket entry list or adds it if the list isn't
// full. The return value is true if n is in the bucket.
func (tab *Table) bumpOrAdd(b *bucket, n *Node) bool {
	if b.bump(n) {
		return true
	}
	if len(b.entries) >= bucketSize || !tab.addIP(b, n.IP) {
		return false
	}
	b.entries, _ = pushNode(b.entries, n, bucketSize)
	b.replacements = deleteNode(b.replacements, n)
	n.addedAt = time.Now()
	if tab.nodeAddedHook != nil {
		tab.nodeAddedHook(n)
	}
	return true
}

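// deleteInBucket removes n from the bucket's entry list and releases its IP.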
func (tab *Table) deleteInBucket(b *bucket, n *Node) {
	b.entries = deleteNode(b.entries, n)
	tab.removeIP(b, n.IP)
}

// pushNode adds n to the front of list, keeping at most max items.
func pushNode(list []*Node, n *Node, max int) ([]*Node, *Node) {
	if len(list) < max {
		list = append(list, nil)
	}
	removed := list[len(list)-1]
	copy(list[1:], list)
	list[0] = n
	return list, removed
}

// deleteNode removes n from list.
func deleteNode(list []*Node, n *Node) []*Node {
	for i := range list {
		if list[i].ID == n.ID {
			return append(list[:i], list[i+1:]...)
		}
	}
	return list
}

// nodesByDistance is a list of nodes, ordered by
// distance to target.
type nodesByDistance struct {
	entries []*Node
	target  common.Hash
}

// push adds the given node to the list, keeping the total size at most maxElems.
func (h *nodesByDistance) push(n *Node, maxElems int) {
	ix := sort.Search(len(h.entries), func(i int) bool {
		return distcmp(h.target, h.entries[i].sha, n.sha) > 0
	})
	if len(h.entries) < maxElems {
		h.entries = append(h.entries, n)
	}
	if ix == len(h.entries) {
		// farther away than all nodes we already have.
		// if there was room for it, the node is now the last element.
	} else {
		// slide existing entries down to make room
		// this will overwrite the entry we just appended.
		copy(h.entries[ix+1:], h.entries[ix:])
		h.entries[ix] = n
	}
}