github.com/core-coin/go-core/v2@v2.1.9/p2p/discover/table.go

// Copyright 2015 by the Authors
// This file is part of the go-core library.
//
// The go-core library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-core library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.

// Package discover implements the Node Discovery Protocol.
//
// The Node Discovery protocol provides a way to find RLPx nodes that
// can be connected to. It uses a Kademlia-like protocol to maintain a
// distributed database of the IDs and endpoints of all listening
// nodes.
package discover

import (
	crand "crypto/rand"
	"encoding/binary"
	"fmt"
	mrand "math/rand"
	"net"
	"sort"
	"sync"
	"time"

	"github.com/core-coin/go-core/v2/common"
	"github.com/core-coin/go-core/v2/log"
	"github.com/core-coin/go-core/v2/p2p/enode"
	"github.com/core-coin/go-core/v2/p2p/netutil"
)

const (
	alpha           = 3  // Kademlia concurrency factor
	bucketSize      = 16 // Kademlia bucket size
	maxReplacements = 10 // Size of per-bucket replacement list

	// We keep buckets for the upper 1/15 of distances because
	// it's very unlikely we'll ever encounter a node that's closer.
	hashBits          = len(common.Hash{}) * 8
	nBuckets          = hashBits / 15       // Number of buckets
	bucketMinDistance = hashBits - nBuckets // Log distance of closest bucket

	// IP address limits.
	bucketIPLimit, bucketSubnet = 2, 24 // at most 2 addresses from the same /24
	tableIPLimit, tableSubnet   = 10, 24

	refreshInterval    = 30 * time.Minute
	revalidateInterval = 10 * time.Second
	copyNodesInterval  = 30 * time.Second
	seedMinTableTime   = 5 * time.Minute
	seedCount          = 30
	seedMaxAge         = 5 * 24 * time.Hour
)
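
// For the 32-byte hashes used here, the derived constants work out as follows
// (a worked example, not additional configuration):
//
//	hashBits          = 32 * 8   // 256
//	nBuckets          = 256 / 15 // 17
//	bucketMinDistance = 256 - 17 // 239
//
// so log-distances of 240 and below all share buckets[0], while distances
// 241..256 each map to their own bucket (see bucketAtDistance).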

// Table is the 'node table', a Kademlia-like index of neighbor nodes. The table keeps
// itself up-to-date by verifying the liveness of neighbors and requesting their node
// records when announcements of a new record version are received.
type Table struct {
	mutex   sync.Mutex        // protects buckets, bucket content, nursery, rand
	buckets [nBuckets]*bucket // index of known nodes by distance
	nursery []*node           // bootstrap nodes
	rand    *mrand.Rand       // source of randomness, periodically reseeded
	ips     netutil.DistinctNetSet

	log        log.Logger
	db         *enode.DB // database of known nodes
	net        transport
	refreshReq chan chan struct{}
	initDone   chan struct{}
	closeReq   chan struct{}
	closed     chan struct{}

	nodeAddedHook func(*node) // for testing
}

// transport is implemented by the UDP transports.
type transport interface {
	Self() *enode.Node
	RequestENR(*enode.Node) (*enode.Node, error)
	lookupRandom() []*enode.Node
	lookupSelf() []*enode.Node
	ping(*enode.Node) (seq uint64, err error)
}
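
// A minimal stub satisfying transport, as one might write for tests (a sketch
// only, assuming no network round-trips are needed; the production
// implementations are the UDP transports):
//
//	type nullTransport struct{ self *enode.Node }
//
//	func (t *nullTransport) Self() *enode.Node                            { return t.self }
//	func (t *nullTransport) RequestENR(n *enode.Node) (*enode.Node, error) { return n, nil }
//	func (t *nullTransport) lookupRandom() []*enode.Node                  { return nil }
//	func (t *nullTransport) lookupSelf() []*enode.Node                    { return nil }
//	func (t *nullTransport) ping(*enode.Node) (uint64, error)             { return 0, nil }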

// bucket contains nodes, ordered by their last activity. The entry
// that was most recently active is the first element in entries.
type bucket struct {
	entries      []*node // live entries, sorted by time of last contact
	replacements []*node // recently seen nodes to be used if revalidation fails
	ips          netutil.DistinctNetSet
}

func newTable(t transport, db *enode.DB, bootnodes []*enode.Node, log log.Logger) (*Table, error) {
	tab := &Table{
		net:        t,
		db:         db,
		refreshReq: make(chan chan struct{}),
		initDone:   make(chan struct{}),
		closeReq:   make(chan struct{}),
		closed:     make(chan struct{}),
		rand:       mrand.New(mrand.NewSource(0)),
		ips:        netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit},
		log:        log,
	}
	if err := tab.setFallbackNodes(bootnodes); err != nil {
		return nil, err
	}
	for i := range tab.buckets {
		tab.buckets[i] = &bucket{
			ips: netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit},
		}
	}
	tab.seedRand()
	tab.loadSeedNodes()

	return tab, nil
}
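
// Construction is driven by the UDP transports; a hypothetical caller would
// look roughly like this (sketch only, error handling elided):
//
//	db, _ := enode.OpenDB("") // "" selects an in-memory node database
//	tab, err := newTable(tr, db, bootnodes, log.Root())
//	if err != nil {
//		return err
//	}
//	go tab.loop() // starts the refresh/revalidate scheduler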

func (tab *Table) self() *enode.Node {
	return tab.net.Self()
}

func (tab *Table) seedRand() {
	var b [8]byte
	crand.Read(b[:]) // a read error is ignored here; it would only weaken the reseed

	tab.mutex.Lock()
	tab.rand.Seed(int64(binary.BigEndian.Uint64(b[:])))
	tab.mutex.Unlock()
}

// ReadRandomNodes fills the given slice with random nodes from the table. The results
// are guaranteed to be unique for a single invocation: no node will appear twice.
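//
// A hypothetical caller (sketch only; the buffer size is the caller's choice):
//
//	buf := make([]*enode.Node, bucketSize)
//	n := tab.ReadRandomNodes(buf)
//	for _, node := range buf[:n] {
//		fmt.Println("random peer:", node.ID())
//	}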
func (tab *Table) ReadRandomNodes(buf []*enode.Node) (n int) {
	if !tab.isInitDone() {
		return 0
	}
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	var nodes []*enode.Node
	for _, b := range &tab.buckets {
		for _, n := range b.entries {
			nodes = append(nodes, unwrapNode(n))
		}
	}
	// Shuffle with an unbiased Fisher-Yates permutation.
	tab.rand.Shuffle(len(nodes), func(i, j int) {
		nodes[i], nodes[j] = nodes[j], nodes[i]
	})
	return copy(buf, nodes)
}

// getNode returns the node with the given ID or nil if it isn't in the table.
func (tab *Table) getNode(id enode.ID) *enode.Node {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	b := tab.bucket(id)
	for _, e := range b.entries {
		if e.ID() == id {
			return unwrapNode(e)
		}
	}
	return nil
}

// close terminates the network listener and flushes the node database.
func (tab *Table) close() {
	close(tab.closeReq)
	<-tab.closed
}

// setFallbackNodes sets the initial points of contact. These nodes
// are used to connect to the network if the table is empty and there
// are no known nodes in the database.
func (tab *Table) setFallbackNodes(nodes []*enode.Node) error {
	for _, n := range nodes {
		if err := n.ValidateComplete(); err != nil {
			return fmt.Errorf("bad bootstrap node %q: %v", n, err)
		}
	}
	tab.nursery = wrapNodes(nodes)
	return nil
}

// isInitDone returns whether the table's initial seeding procedure has completed.
func (tab *Table) isInitDone() bool {
	select {
	case <-tab.initDone:
		return true
	default:
		return false
	}
}

func (tab *Table) refresh() <-chan struct{} {
	done := make(chan struct{})
	select {
	case tab.refreshReq <- done:
	case <-tab.closeReq:
		close(done)
	}
	return done
}
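
// Callers that need a freshly refreshed table typically block on the returned
// channel (usage sketch):
//
//	<-tab.refresh() // returns once the refresh cycle has completed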

// loop schedules runs of doRefresh, doRevalidate and copyLiveNodes.
func (tab *Table) loop() {
	var (
		revalidate     = time.NewTimer(tab.nextRevalidateTime())
		refresh        = time.NewTicker(refreshInterval)
		copyNodes      = time.NewTicker(copyNodesInterval)
		refreshDone    = make(chan struct{})           // where doRefresh reports completion
		revalidateDone chan struct{}                   // where doRevalidate reports completion
		waiting        = []chan struct{}{tab.initDone} // holds waiting callers while doRefresh runs
	)
	defer refresh.Stop()
	defer revalidate.Stop()
	defer copyNodes.Stop()

	// Start initial refresh.
	go tab.doRefresh(refreshDone)

loop:
	for {
		select {
		case <-refresh.C:
			tab.seedRand()
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}
		case req := <-tab.refreshReq:
			waiting = append(waiting, req)
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}
		case <-refreshDone:
			for _, ch := range waiting {
				close(ch)
			}
			waiting, refreshDone = nil, nil
		case <-revalidate.C:
			revalidateDone = make(chan struct{})
			go tab.doRevalidate(revalidateDone)
		case <-revalidateDone:
			revalidate.Reset(tab.nextRevalidateTime())
			revalidateDone = nil
		case <-copyNodes.C:
			go tab.copyLiveNodes()
		case <-tab.closeReq:
			break loop
		}
	}

	if refreshDone != nil {
		<-refreshDone
	}
	for _, ch := range waiting {
		close(ch)
	}
	if revalidateDone != nil {
		<-revalidateDone
	}
	close(tab.closed)
}

// doRefresh performs a lookup for a random target to keep buckets full. Seed nodes are
// inserted if the table is empty (initial bootstrap or discarded faulty peers).
func (tab *Table) doRefresh(done chan struct{}) {
	defer close(done)

	// Load nodes from the database and insert
	// them. This should yield a few previously seen nodes that are
	// (hopefully) still alive.
	tab.loadSeedNodes()

	// Run self lookup to discover new neighbor nodes.
	tab.net.lookupSelf()

	// The Kademlia paper specifies that the bucket refresh should
	// perform a lookup in the least recently used bucket. We cannot
	// adhere to this because the findnode target is a 512-bit value
	// (not hash-sized) and it is not easily possible to generate a
	// sha3 preimage that falls into a chosen bucket.
	// We perform a few lookups with a random target instead.
	for i := 0; i < 3; i++ {
		tab.net.lookupRandom()
	}
}

func (tab *Table) loadSeedNodes() {
	seeds := wrapNodes(tab.db.QuerySeeds(seedCount, seedMaxAge))
	seeds = append(seeds, tab.nursery...)
	for i := range seeds {
		seed := seeds[i]
		age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.LastPongReceived(seed.ID(), seed.IP())) }}
		tab.log.Trace("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age)
		tab.addSeenNode(seed)
	}
}

// doRevalidate checks that the last node in a random bucket is still live and replaces or
// deletes the node if it isn't.
func (tab *Table) doRevalidate(done chan<- struct{}) {
	defer func() { done <- struct{}{} }()

	last, bi := tab.nodeToRevalidate()
	if last == nil {
		// No non-empty bucket found.
		return
	}

	// Ping the selected node and wait for a pong.
	remoteSeq, err := tab.net.ping(unwrapNode(last))

	// Also fetch record if the node replied and returned a higher sequence number.
	if last.Seq() < remoteSeq {
		// Note: err is intentionally shadowed here; liveness below is judged by
		// the ping error, not the ENR request error.
		n, err := tab.net.RequestENR(unwrapNode(last))
		if err != nil {
			tab.log.Debug("ENR request failed", "id", last.ID(), "addr", last.addr(), "err", err)
		} else {
			last = &node{Node: *n, addedAt: last.addedAt, livenessChecks: last.livenessChecks}
		}
	}

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.buckets[bi]
	if err == nil {
		// The node responded, move it to the front.
		last.livenessChecks++
		tab.log.Debug("Revalidated node", "b", bi, "id", last.ID(), "checks", last.livenessChecks)
		tab.bumpInBucket(b, last)
		return
	}
	// No reply received, pick a replacement or delete the node if there aren't
	// any replacements.
	if r := tab.replace(b, last); r != nil {
		tab.log.Debug("Replaced dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks, "r", r.ID(), "rip", r.IP())
	} else {
		tab.log.Debug("Removed dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks)
	}
}

// nodeToRevalidate returns the last node in a random, non-empty bucket.
func (tab *Table) nodeToRevalidate() (n *node, bi int) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	for _, bi = range tab.rand.Perm(len(tab.buckets)) {
		b := tab.buckets[bi]
		if len(b.entries) > 0 {
			last := b.entries[len(b.entries)-1]
			return last, bi
		}
	}
	return nil, 0
}

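// nextRevalidateTime returns a uniformly random delay in [0, revalidateInterval),
// jittering revalidation so probes of different nodes do not fall into lockstep.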
func (tab *Table) nextRevalidateTime() time.Duration {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	return time.Duration(tab.rand.Int63n(int64(revalidateInterval)))
}

// copyLiveNodes adds nodes from the table to the database if they have been in the table
// longer than seedMinTableTime.
func (tab *Table) copyLiveNodes() {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	now := time.Now()
	for _, b := range &tab.buckets {
		for _, n := range b.entries {
			if n.livenessChecks > 0 && now.Sub(n.addedAt) >= seedMinTableTime {
				tab.db.UpdateNode(unwrapNode(n))
			}
		}
	}
}

// findnodeByID returns the n nodes in the table that are closest to the given id.
// This is used by the FINDNODE/v4 handler.
//
// The preferLive parameter says whether the caller wants liveness-checked results. If
// preferLive is true and the table contains any verified nodes, the result will not
// contain unverified nodes. However, if there are no verified nodes at all, the result
// will contain unverified nodes.
func (tab *Table) findnodeByID(target enode.ID, nresults int, preferLive bool) *nodesByDistance {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	// Scan all buckets. There might be a better way to do this, but there aren't that many
	// buckets, so this solution should be fine. The worst-case complexity of this loop
	// is O(tab.len() * nresults).
	nodes := &nodesByDistance{target: target}
	liveNodes := &nodesByDistance{target: target}
	for _, b := range &tab.buckets {
		for _, n := range b.entries {
			nodes.push(n, nresults)
			if preferLive && n.livenessChecks > 0 {
				liveNodes.push(n, nresults)
			}
		}
	}

	if preferLive && len(liveNodes.entries) > 0 {
		return liveNodes
	}
	return nodes
}

// len returns the number of nodes in the table.
func (tab *Table) len() (n int) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	for _, b := range &tab.buckets {
		n += len(b.entries)
	}
	return n
}

// bucketLen returns the number of nodes in the bucket for the given ID.
func (tab *Table) bucketLen(id enode.ID) int {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	return len(tab.bucket(id).entries)
}

// bucket returns the bucket for the given node ID hash.
func (tab *Table) bucket(id enode.ID) *bucket {
	d := enode.LogDist(tab.self().ID(), id)
	return tab.bucketAtDistance(d)
}

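// bucketAtDistance returns the bucket covering the given log distance. Distances of
// bucketMinDistance+1 and below all fold into buckets[0]. For example, with the
// constants derived above:
//
//	tab.bucketAtDistance(100) // buckets[0]
//	tab.bucketAtDistance(240) // buckets[0]
//	tab.bucketAtDistance(241) // buckets[1]
//	tab.bucketAtDistance(256) // buckets[16]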
func (tab *Table) bucketAtDistance(d int) *bucket {
	if d <= bucketMinDistance {
		return tab.buckets[0]
	}
	return tab.buckets[d-bucketMinDistance-1]
}

// addSeenNode adds a node which may or may not be live to the end of a bucket. If the
// bucket has space available, adding the node succeeds immediately. Otherwise, the node is
// added to the replacements list.
//
// The caller must not hold tab.mutex.
func (tab *Table) addSeenNode(n *node) {
	if n.ID() == tab.self().ID() {
		return
	}

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.bucket(n.ID())
	if contains(b.entries, n.ID()) {
		// Already in bucket, don't add.
		return
	}
	if len(b.entries) >= bucketSize {
		// Bucket full, maybe add as replacement.
		tab.addReplacement(b, n)
		return
	}
	if !tab.addIP(b, n.IP()) {
		// Can't add: IP limit reached.
		return
	}
	// Add to end of bucket:
	b.entries = append(b.entries, n)
	b.replacements = deleteNode(b.replacements, n)
	n.addedAt = time.Now()
	if tab.nodeAddedHook != nil {
		tab.nodeAddedHook(n)
	}
}

// addVerifiedNode adds a node whose existence has been verified recently to the front of a
// bucket. If the node is already in the bucket, it is moved to the front. If the bucket
// has no space, the node is added to the replacements list.
//
// There is an additional safety measure: if the table is still initializing, the node
// is not added. This prevents an attack where the table could be filled by just sending
// ping repeatedly.
//
// The caller must not hold tab.mutex.
func (tab *Table) addVerifiedNode(n *node) {
	if !tab.isInitDone() {
		return
	}
	if n.ID() == tab.self().ID() {
		return
	}

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.bucket(n.ID())
	if tab.bumpInBucket(b, n) {
		// Already in bucket, moved to front.
		return
	}
	if len(b.entries) >= bucketSize {
		// Bucket full, maybe add as replacement.
		tab.addReplacement(b, n)
		return
	}
	if !tab.addIP(b, n.IP()) {
		// Can't add: IP limit reached.
		return
	}
	// Add to front of bucket.
	b.entries, _ = pushNode(b.entries, n, bucketSize)
	b.replacements = deleteNode(b.replacements, n)
	n.addedAt = time.Now()
	if tab.nodeAddedHook != nil {
		tab.nodeAddedHook(n)
	}
}

// delete removes an entry from the node table. It is used to evict dead nodes.
func (tab *Table) delete(node *node) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	tab.deleteInBucket(tab.bucket(node.ID()), node)
}

func (tab *Table) addIP(b *bucket, ip net.IP) bool {
	if len(ip) == 0 {
		return false // Nodes without IP cannot be added.
	}
	if netutil.IsLAN(ip) {
		return true
	}
	if !tab.ips.Add(ip) {
		tab.log.Debug("IP exceeds table limit", "ip", ip)
		return false
	}
	if !b.ips.Add(ip) {
		tab.log.Debug("IP exceeds bucket limit", "ip", ip)
		tab.ips.Remove(ip)
		return false
	}
	return true
}
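
// As an illustration of the limits: with bucketIPLimit = 2 and bucketSubnet = 24,
// a third address from the same public /24 is rejected even when the bucket still
// has room (a sketch; the addresses are hypothetical documentation-range IPs):
//
//	tab.addIP(b, net.ParseIP("203.0.113.1")) // true
//	tab.addIP(b, net.ParseIP("203.0.113.2")) // true
//	tab.addIP(b, net.ParseIP("203.0.113.3")) // false: bucket /24 limit reached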

func (tab *Table) removeIP(b *bucket, ip net.IP) {
	if netutil.IsLAN(ip) {
		return
	}
	tab.ips.Remove(ip)
	b.ips.Remove(ip)
}

func (tab *Table) addReplacement(b *bucket, n *node) {
	for _, e := range b.replacements {
		if e.ID() == n.ID() {
			return // already in list
		}
	}
	if !tab.addIP(b, n.IP()) {
		return
	}
	var removed *node
	b.replacements, removed = pushNode(b.replacements, n, maxReplacements)
	if removed != nil {
		tab.removeIP(b, removed.IP())
	}
}

// replace removes 'last' from the bucket and substitutes a randomly chosen node from the
// replacement list, provided 'last' is still the last entry in the bucket. If it isn't,
// the entry has either been replaced already or has become active, and nothing is done.
func (tab *Table) replace(b *bucket, last *node) *node {
	if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID() != last.ID() {
		// Entry has moved, don't replace it.
		return nil
	}
	// Still the last entry.
	if len(b.replacements) == 0 {
		tab.deleteInBucket(b, last)
		return nil
	}
	r := b.replacements[tab.rand.Intn(len(b.replacements))]
	b.replacements = deleteNode(b.replacements, r)
	b.entries[len(b.entries)-1] = r
	tab.removeIP(b, last.IP())
	return r
}

// bumpInBucket moves the given node to the front of the bucket entry list
// if it is contained in that list.
func (tab *Table) bumpInBucket(b *bucket, n *node) bool {
	for i := range b.entries {
		if b.entries[i].ID() == n.ID() {
			if !n.IP().Equal(b.entries[i].IP()) {
				// Endpoint has changed, ensure that the new IP fits into table limits.
				tab.removeIP(b, b.entries[i].IP())
				if !tab.addIP(b, n.IP()) {
					// It doesn't, put the previous one back.
					tab.addIP(b, b.entries[i].IP())
					return false
				}
			}
			// Move it to the front.
			copy(b.entries[1:], b.entries[:i])
			b.entries[0] = n
			return true
		}
	}
	return false
}

func (tab *Table) deleteInBucket(b *bucket, n *node) {
	b.entries = deleteNode(b.entries, n)
	tab.removeIP(b, n.IP())
}

func contains(ns []*node, id enode.ID) bool {
	for _, n := range ns {
		if n.ID() == id {
			return true
		}
	}
	return false
}

// pushNode adds n to the front of list, keeping at most max items.
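// For example, pushing d onto [a, b, c] with max 3 yields [d, a, b] and returns c
// as the evicted node; with max 4 it yields [d, a, b, c] and returns nil.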
func pushNode(list []*node, n *node, max int) ([]*node, *node) {
	if len(list) < max {
		list = append(list, nil)
	}
	removed := list[len(list)-1]
	copy(list[1:], list)
	list[0] = n
	return list, removed
}

// deleteNode removes n from list.
func deleteNode(list []*node, n *node) []*node {
	for i := range list {
		if list[i].ID() == n.ID() {
			return append(list[:i], list[i+1:]...)
		}
	}
	return list
}

// nodesByDistance is a list of nodes, ordered by distance to target.
type nodesByDistance struct {
	entries []*node
	target  enode.ID
}

// push adds the given node to the list, keeping the total size below maxElems.
func (h *nodesByDistance) push(n *node, maxElems int) {
	ix := sort.Search(len(h.entries), func(i int) bool {
		return enode.DistCmp(h.target, h.entries[i].ID(), n.ID()) > 0
	})

	end := len(h.entries)
	if len(h.entries) < maxElems {
		h.entries = append(h.entries, n)
	}
	if ix < end {
		// Slide existing entries down to make room.
		// This will overwrite the entry we just appended.
		copy(h.entries[ix+1:], h.entries[ix:end])
		h.entries[ix] = n
	}
	// If ix == end, the node is farther away than all existing entries; when
	// there was room for it, the append above already placed it last.
}