github.com/xinfinOrg/xdposchain@v1.1.0/p2p/discover/table.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package discover implements the Node Discovery Protocol.
//
// The Node Discovery protocol provides a way to find RLPx nodes that
// can be connected to. It uses a Kademlia-like protocol to maintain a
// distributed database of the IDs and endpoints of all listening
// nodes.
package discover

import (
	"crypto/ecdsa"
	crand "crypto/rand"
	"encoding/binary"
	"fmt"
	mrand "math/rand"
	"net"
	"sort"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/netutil"
)

const (
	alpha           = 3   // Kademlia concurrency factor
	bucketSize      = 200 // Kademlia bucket size
	maxReplacements = 10  // Size of per-bucket replacement list

	// We keep buckets for the upper 1/15 of distances because
	// it's very unlikely we'll ever encounter a node that's closer.
	hashBits          = len(common.Hash{}) * 8
	nBuckets          = hashBits / 15       // Number of buckets
	bucketMinDistance = hashBits - nBuckets // Log distance of closest bucket

	// IP address limits.
	bucketIPLimit, bucketSubnet = 2, 24 // at most 2 addresses from the same /24
	tableIPLimit, tableSubnet   = 10, 24

	maxFindnodeFailures = 5 // Nodes exceeding this limit are dropped
	refreshInterval     = 30 * time.Minute
	revalidateInterval  = 10 * time.Second
	copyNodesInterval   = 30 * time.Second
	seedMinTableTime    = 5 * time.Minute
	seedCount           = 30
	seedMaxAge          = 5 * 24 * time.Hour
)
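
// For the concrete values above: len(common.Hash{}) is 32 bytes, so hashBits
// is 256, nBuckets is 256/15 = 17 (integer division), and bucketMinDistance
// is 256-17 = 239. The 17 buckets therefore cover log distances 240 through
// 256; see (*Table).bucket below for the exact mapping.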

type Table struct {
	mutex   sync.Mutex        // protects buckets, bucket content, nursery, rand
	buckets [nBuckets]*bucket // index of known nodes by distance
	nursery []*node           // bootstrap nodes
	rand    *mrand.Rand       // source of randomness, periodically reseeded
	ips     netutil.DistinctNetSet

	db         *enode.DB // database of known nodes
	net        transport
	refreshReq chan chan struct{}
	initDone   chan struct{}

	closeOnce sync.Once
	closeReq  chan struct{}
	closed    chan struct{}

	nodeAddedHook func(*node) // for testing
}

// transport is implemented by the UDP transport. It is an interface
// so we can test without opening lots of UDP sockets and without
// generating a private key.
type transport interface {
	self() *enode.Node
	ping(enode.ID, *net.UDPAddr) error
	findnode(toid enode.ID, addr *net.UDPAddr, target encPubkey) ([]*node, error)
	close()
}
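
// nullTransport is a minimal no-op implementation of the transport interface,
// added here purely as an illustration of the testing seam described above;
// it is not part of the upstream file, and the package's own tests use their
// own mock. A test could hand this to newTable to exercise Table logic
// without opening UDP sockets.
type nullTransport struct{ node *enode.Node }

func (t *nullTransport) self() *enode.Node                 { return t.node }
func (t *nullTransport) ping(enode.ID, *net.UDPAddr) error { return nil }
func (t *nullTransport) findnode(enode.ID, *net.UDPAddr, encPubkey) ([]*node, error) {
	return nil, nil
}
func (t *nullTransport) close() {}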

// bucket contains nodes, ordered by their last activity. The entry
// that was most recently active is the first element in entries.
type bucket struct {
	entries      []*node // live entries, sorted by time of last contact
	replacements []*node // recently seen nodes to be used if revalidation fails
	ips          netutil.DistinctNetSet
}

func newTable(t transport, db *enode.DB, bootnodes []*enode.Node) (*Table, error) {
	tab := &Table{
		net:        t,
		db:         db,
		refreshReq: make(chan chan struct{}),
		initDone:   make(chan struct{}),
		closeReq:   make(chan struct{}),
		closed:     make(chan struct{}),
		rand:       mrand.New(mrand.NewSource(0)),
		ips:        netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit},
	}
	if err := tab.setFallbackNodes(bootnodes); err != nil {
		return nil, err
	}
	for i := range tab.buckets {
		tab.buckets[i] = &bucket{
			ips: netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit},
		}
	}
	tab.seedRand()
	tab.loadSeedNodes()

	go tab.loop()
	return tab, nil
}
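
// A construction sketch (illustrative only, assuming the nullTransport stub
// above and an in-memory node database; 'self' stands for some *enode.Node
// identity):
//
//	db, _ := enode.OpenDB("") // empty path selects an in-memory DB
//	tab, err := newTable(&nullTransport{node: self}, db, nil)
//	if err != nil {
//		// handle error
//	}
//	defer tab.Close()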

func (tab *Table) self() *enode.Node {
	return tab.net.self()
}

func (tab *Table) seedRand() {
	var b [8]byte
	crand.Read(b[:])

	tab.mutex.Lock()
	tab.rand.Seed(int64(binary.BigEndian.Uint64(b[:])))
	tab.mutex.Unlock()
}

// ReadRandomNodes fills the given slice with random nodes from the table. The
// results are guaranteed to be unique for a single invocation; no node will
// appear twice.
func (tab *Table) ReadRandomNodes(buf []*enode.Node) (n int) {
	if !tab.isInitDone() {
		return 0
	}
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	// Find all non-empty buckets and get a fresh slice of their entries.
	var buckets [][]*node
	for _, b := range &tab.buckets {
		if len(b.entries) > 0 {
			buckets = append(buckets, b.entries)
		}
	}
	if len(buckets) == 0 {
		return 0
	}
	// Shuffle the buckets (Fisher-Yates; the swap index must be drawn from
	// [0, i] for the permutation to be unbiased).
	for i := len(buckets) - 1; i > 0; i-- {
		j := tab.rand.Intn(i + 1)
		buckets[i], buckets[j] = buckets[j], buckets[i]
	}
	// Move the head of each bucket into buf, removing buckets that become
	// empty. i counts the entries actually written, which avoids an
	// off-by-one in the returned count when buf fills up before the
	// buckets run dry.
	var i, j int
	for ; i < len(buf); j = (j + 1) % len(buckets) {
		b := buckets[j]
		buf[i] = unwrapNode(b[0])
		i++
		buckets[j] = b[1:]
		if len(b) == 1 {
			buckets = append(buckets[:j], buckets[j+1:]...)
		}
		if len(buckets) == 0 {
			break
		}
	}
	return i
}
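
// Callers typically pass a preallocated slice and consume buf[:n], e.g.:
//
//	buf := make([]*enode.Node, 16)
//	n := tab.ReadRandomNodes(buf)
//	for _, node := range buf[:n] {
//		// dial or inspect node
//	}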

// Close terminates the network listener and flushes the node database.
func (tab *Table) Close() {
	tab.closeOnce.Do(func() {
		if tab.net != nil {
			tab.net.close()
		}
		// Wait for loop to end.
		close(tab.closeReq)
		<-tab.closed
	})
}

// setFallbackNodes sets the initial points of contact. These nodes
// are used to connect to the network if the table is empty and there
// are no known nodes in the database.
func (tab *Table) setFallbackNodes(nodes []*enode.Node) error {
	for _, n := range nodes {
		if err := n.ValidateComplete(); err != nil {
			return fmt.Errorf("bad bootstrap node %q: %v", n, err)
		}
	}
	tab.nursery = wrapNodes(nodes)
	return nil
}

// isInitDone returns whether the table's initial seeding procedure has completed.
func (tab *Table) isInitDone() bool {
	select {
	case <-tab.initDone:
		return true
	default:
		return false
	}
}

// Resolve searches for a specific node with the given ID.
// It returns nil if the node could not be found.
func (tab *Table) Resolve(n *enode.Node) *enode.Node {
	// If the node is present in the local table, no
	// network interaction is required.
	hash := n.ID()
	tab.mutex.Lock()
	cl := tab.closest(hash, 1)
	tab.mutex.Unlock()
	if len(cl.entries) > 0 && cl.entries[0].ID() == hash {
		return unwrapNode(cl.entries[0])
	}
	// Otherwise, do a network lookup.
	result := tab.lookup(encodePubkey(n.Pubkey()), true)
	for _, n := range result {
		if n.ID() == hash {
			return unwrapNode(n)
		}
	}
	return nil
}

// LookupRandom finds random nodes in the network.
func (tab *Table) LookupRandom() []*enode.Node {
	var target encPubkey
	crand.Read(target[:])
	return unwrapNodes(tab.lookup(target, true))
}

// lookup performs a network search for nodes close to the given target. It approaches the
// target by querying nodes that are closer to it on each iteration. The given target does
// not need to be an actual node identifier.
func (tab *Table) lookup(targetKey encPubkey, refreshIfEmpty bool) []*node {
	var (
		target         = enode.ID(crypto.Keccak256Hash(targetKey[:]))
		asked          = make(map[enode.ID]bool)
		seen           = make(map[enode.ID]bool)
		reply          = make(chan []*node, alpha)
		pendingQueries = 0
		result         *nodesByDistance
	)
	// Don't query further if we hit ourselves.
	// Unlikely to happen often in practice.
	asked[tab.self().ID()] = true

	for {
		tab.mutex.Lock()
		// Generate the initial result set.
		result = tab.closest(target, bucketSize)
		tab.mutex.Unlock()
		if len(result.entries) > 0 || !refreshIfEmpty {
			break
		}
		// The result set is empty, all nodes were dropped, refresh.
		// We actually wait for the refresh to complete here. The very
		// first query will hit this case and run the bootstrapping
		// logic.
		<-tab.refresh()
		refreshIfEmpty = false
	}

	for {
		// Ask the alpha closest nodes that we haven't asked yet.
		for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
			n := result.entries[i]
			if !asked[n.ID()] {
				asked[n.ID()] = true
				pendingQueries++
				go tab.findnode(n, targetKey, reply)
			}
		}
		if pendingQueries == 0 {
			// We have asked all closest nodes, stop the search.
			break
		}
		select {
		case nodes := <-reply:
			for _, n := range nodes {
				if n != nil && !seen[n.ID()] {
					seen[n.ID()] = true
					result.push(n, bucketSize)
				}
			}
		case <-tab.closeReq:
			return nil // shutdown, no need to continue.
		}
		pendingQueries--
	}
	return result.entries
}
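
// Termination note for lookup: pendingQueries never exceeds alpha, every node
// is marked in 'asked' before it is queried, and replies are deduplicated via
// 'seen'. The candidate set is therefore finite, so the loop always reaches
// the pendingQueries == 0 exit once all closest entries have been asked.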

func (tab *Table) findnode(n *node, targetKey encPubkey, reply chan<- []*node) {
	fails := tab.db.FindFails(n.ID(), n.IP())
	r, err := tab.net.findnode(n.ID(), n.addr(), targetKey)
	if err == errClosed {
		// Avoid recording failures on shutdown.
		reply <- nil
		return
	} else if len(r) == 0 {
		fails++
		tab.db.UpdateFindFails(n.ID(), n.IP(), fails)
		log.Trace("Findnode failed", "id", n.ID(), "failcount", fails, "err", err)
		if fails >= maxFindnodeFailures {
			log.Trace("Too many findnode failures, dropping", "id", n.ID(), "failcount", fails)
			tab.delete(n)
		}
	} else if fails > 0 {
		tab.db.UpdateFindFails(n.ID(), n.IP(), fails-1)
	}

	// Grab as many nodes as possible. Some of them might not be alive anymore, but we'll
	// just remove those again during revalidation.
	for _, n := range r {
		tab.addSeenNode(n)
	}
	reply <- r
}

func (tab *Table) refresh() <-chan struct{} {
	done := make(chan struct{})
	select {
	case tab.refreshReq <- done:
	case <-tab.closeReq:
		close(done)
	}
	return done
}

// loop schedules refresh and revalidate runs, and coordinates shutdown.
func (tab *Table) loop() {
	var (
		revalidate     = time.NewTimer(tab.nextRevalidateTime())
		refresh        = time.NewTicker(refreshInterval)
		copyNodes      = time.NewTicker(copyNodesInterval)
		refreshDone    = make(chan struct{})           // where doRefresh reports completion
		revalidateDone chan struct{}                   // where doRevalidate reports completion
		waiting        = []chan struct{}{tab.initDone} // holds waiting callers while doRefresh runs
	)
	defer refresh.Stop()
	defer revalidate.Stop()
	defer copyNodes.Stop()

	// Start initial refresh.
	go tab.doRefresh(refreshDone)

loop:
	for {
		select {
		case <-refresh.C:
			tab.seedRand()
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}
		case req := <-tab.refreshReq:
			waiting = append(waiting, req)
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}
		case <-refreshDone:
			for _, ch := range waiting {
				close(ch)
			}
			waiting, refreshDone = nil, nil
		case <-revalidate.C:
			revalidateDone = make(chan struct{})
			go tab.doRevalidate(revalidateDone)
		case <-revalidateDone:
			revalidate.Reset(tab.nextRevalidateTime())
			revalidateDone = nil
		case <-copyNodes.C:
			go tab.copyLiveNodes()
		case <-tab.closeReq:
			break loop
		}
	}

	if refreshDone != nil {
		<-refreshDone
	}
	for _, ch := range waiting {
		close(ch)
	}
	if revalidateDone != nil {
		<-revalidateDone
	}
	close(tab.closed)
}
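
// The select in loop relies on a standard Go idiom: refreshDone and
// revalidateDone are set to nil while no run is in flight, and receiving from
// a nil channel blocks forever, so those cases stay disabled until a new
// channel is created and a worker goroutine is started.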

// doRefresh performs a lookup for a random target to keep buckets full. Seed
// nodes are inserted if the table is empty (initial bootstrap or discarded
// faulty peers).
func (tab *Table) doRefresh(done chan struct{}) {
	defer close(done)

	// Load nodes from the database and insert
	// them. This should yield a few previously seen nodes that are
	// (hopefully) still alive.
	tab.loadSeedNodes()

	// Run self lookup to discover new neighbor nodes.
	// We can only do this if we have a secp256k1 identity.
	var key ecdsa.PublicKey
	if err := tab.self().Load((*enode.Secp256k1)(&key)); err == nil {
		tab.lookup(encodePubkey(&key), false)
	}

	// The Kademlia paper specifies that the bucket refresh should
	// perform a lookup in the least recently used bucket. We cannot
	// adhere to this because the findnode target is a 512-bit value
	// (not hash-sized) and it is not easily possible to generate a
	// sha3 preimage that falls into a chosen bucket.
	// We perform a few lookups with a random target instead.
	for i := 0; i < 3; i++ {
		var target encPubkey
		crand.Read(target[:])
		tab.lookup(target, false)
	}
}

func (tab *Table) loadSeedNodes() {
	seeds := wrapNodes(tab.db.QuerySeeds(seedCount, seedMaxAge))
	seeds = append(seeds, tab.nursery...)
	for i := range seeds {
		seed := seeds[i]
		age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.LastPongReceived(seed.ID(), seed.IP())) }}
		log.Trace("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age)
		tab.addSeenNode(seed)
	}
}

// doRevalidate checks that the last node in a random bucket is still live
// and replaces or deletes the node if it isn't.
func (tab *Table) doRevalidate(done chan<- struct{}) {
	defer func() { done <- struct{}{} }()

	last, bi := tab.nodeToRevalidate()
	if last == nil {
		// No non-empty bucket found.
		return
	}

	// Ping the selected node and wait for a pong.
	err := tab.net.ping(last.ID(), last.addr())

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.buckets[bi]
	if err == nil {
		// The node responded, move it to the front.
		last.livenessChecks++
		log.Debug("Revalidated node", "b", bi, "id", last.ID(), "checks", last.livenessChecks)
		tab.bumpInBucket(b, last)
		return
	}
	// No reply received, pick a replacement or delete the node if there aren't
	// any replacements.
	if r := tab.replace(b, last); r != nil {
		log.Debug("Replaced dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks, "r", r.ID(), "rip", r.IP())
	} else {
		log.Debug("Removed dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks)
	}
}

// nodeToRevalidate returns the last node in a random, non-empty bucket.
func (tab *Table) nodeToRevalidate() (n *node, bi int) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	for _, bi = range tab.rand.Perm(len(tab.buckets)) {
		b := tab.buckets[bi]
		if len(b.entries) > 0 {
			last := b.entries[len(b.entries)-1]
			return last, bi
		}
	}
	return nil, 0
}

func (tab *Table) nextRevalidateTime() time.Duration {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	return time.Duration(tab.rand.Int63n(int64(revalidateInterval)))
}
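
// The delay is drawn uniformly from [0, revalidateInterval), so revalidation
// happens at randomized rather than fixed points in time, with an average
// spacing of about revalidateInterval/2 between checks.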

// copyLiveNodes adds nodes from the table to the database if they have been in
// the table longer than seedMinTableTime.
func (tab *Table) copyLiveNodes() {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	now := time.Now()
	for _, b := range &tab.buckets {
		for _, n := range b.entries {
			if n.livenessChecks > 0 && now.Sub(n.addedAt) >= seedMinTableTime {
				tab.db.UpdateNode(unwrapNode(n))
			}
		}
	}
}

// closest returns the n nodes in the table that are closest to the
// given id. The caller must hold tab.mutex.
func (tab *Table) closest(target enode.ID, nresults int) *nodesByDistance {
	// This is a very wasteful way to find the closest nodes but
	// obviously correct. I believe that tree-based buckets would make
	// this easier to implement efficiently.
	close := &nodesByDistance{target: target}
	for _, b := range &tab.buckets {
		for _, n := range b.entries {
			if n.livenessChecks > 0 {
				close.push(n, nresults)
			}
		}
	}
	return close
}

func (tab *Table) len() (n int) {
	for _, b := range &tab.buckets {
		n += len(b.entries)
	}
	return n
}

// bucket returns the bucket for the given node ID hash.
func (tab *Table) bucket(id enode.ID) *bucket {
	d := enode.LogDist(tab.self().ID(), id)
	if d <= bucketMinDistance {
		return tab.buckets[0]
	}
	return tab.buckets[d-bucketMinDistance-1]
}
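
// Worked example with the constants above (hashBits = 256, nBuckets = 17,
// bucketMinDistance = 239): log distances 0 through 240 all map to
// buckets[0], distance 241 maps to buckets[1], and the maximum distance 256
// maps to buckets[16].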

// addSeenNode adds a node which may or may not be live to the end of a bucket. If the
// bucket has space available, adding the node succeeds immediately. Otherwise, the node is
// added to the replacements list.
//
// The caller must not hold tab.mutex.
func (tab *Table) addSeenNode(n *node) {
	if n.ID() == tab.self().ID() {
		return
	}

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.bucket(n.ID())
	if contains(b.entries, n.ID()) {
		// Already in bucket, don't add.
		return
	}
	if len(b.entries) >= bucketSize {
		// Bucket full, maybe add as replacement.
		tab.addReplacement(b, n)
		return
	}
	if !tab.addIP(b, n.IP()) {
		// Can't add: IP limit reached.
		return
	}
	// Add to end of bucket:
	b.entries = append(b.entries, n)
	b.replacements = deleteNode(b.replacements, n)
	n.addedAt = time.Now()
	if tab.nodeAddedHook != nil {
		tab.nodeAddedHook(n)
	}
}

// addVerifiedNode adds a node whose existence has been verified recently to the front of a
// bucket. If the node is already in the bucket, it is moved to the front. If the bucket
// has no space, the node is added to the replacements list.
//
// There is an additional safety measure: if the table is still initializing the node
// is not added. This prevents an attack where the table could be filled by just sending
// ping repeatedly.
//
// The caller must not hold tab.mutex.
func (tab *Table) addVerifiedNode(n *node) {
	if !tab.isInitDone() {
		return
	}
	if n.ID() == tab.self().ID() {
		return
	}

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.bucket(n.ID())
	if tab.bumpInBucket(b, n) {
		// Already in bucket, moved to front.
		return
	}
	if len(b.entries) >= bucketSize {
		// Bucket full, maybe add as replacement.
		tab.addReplacement(b, n)
		return
	}
	if !tab.addIP(b, n.IP()) {
		// Can't add: IP limit reached.
		return
	}
	// Add to front of bucket.
	b.entries, _ = pushNode(b.entries, n, bucketSize)
	b.replacements = deleteNode(b.replacements, n)
	n.addedAt = time.Now()
	if tab.nodeAddedHook != nil {
		tab.nodeAddedHook(n)
	}
}

// delete removes an entry from the node table. It is used to evacuate dead nodes.
func (tab *Table) delete(node *node) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	tab.deleteInBucket(tab.bucket(node.ID()), node)
}

func (tab *Table) addIP(b *bucket, ip net.IP) bool {
	if netutil.IsLAN(ip) {
		return true
	}
	if !tab.ips.Add(ip) {
		log.Debug("IP exceeds table limit", "ip", ip)
		return false
	}
	if !b.ips.Add(ip) {
		log.Debug("IP exceeds bucket limit", "ip", ip)
		tab.ips.Remove(ip)
		return false
	}
	return true
}

func (tab *Table) removeIP(b *bucket, ip net.IP) {
	if netutil.IsLAN(ip) {
		return
	}
	tab.ips.Remove(ip)
	b.ips.Remove(ip)
}

func (tab *Table) addReplacement(b *bucket, n *node) {
	for _, e := range b.replacements {
		if e.ID() == n.ID() {
			return // already in list
		}
	}
	if !tab.addIP(b, n.IP()) {
		return
	}
	var removed *node
	b.replacements, removed = pushNode(b.replacements, n, maxReplacements)
	if removed != nil {
		tab.removeIP(b, removed.IP())
	}
}

// replace picks a random node from the replacement list of bucket b and swaps it in
// for 'last', provided 'last' is still the last entry of the bucket. If 'last' is no
// longer the last entry, it has either been replaced already or become active again,
// and nothing is done.
func (tab *Table) replace(b *bucket, last *node) *node {
	if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID() != last.ID() {
		// Entry has moved, don't replace it.
		return nil
	}
	// Still the last entry.
	if len(b.replacements) == 0 {
		tab.deleteInBucket(b, last)
		return nil
	}
	r := b.replacements[tab.rand.Intn(len(b.replacements))]
	b.replacements = deleteNode(b.replacements, r)
	b.entries[len(b.entries)-1] = r
	tab.removeIP(b, last.IP())
	return r
}

// bumpInBucket moves the given node to the front of the bucket entry list
// if it is contained in that list.
func (tab *Table) bumpInBucket(b *bucket, n *node) bool {
	for i := range b.entries {
		if b.entries[i].ID() == n.ID() {
			if !n.IP().Equal(b.entries[i].IP()) {
				// Endpoint has changed, ensure that the new IP fits into table limits.
				tab.removeIP(b, b.entries[i].IP())
				if !tab.addIP(b, n.IP()) {
					// It doesn't, put the previous one back.
					tab.addIP(b, b.entries[i].IP())
					return false
				}
			}
			// Move it to the front.
			copy(b.entries[1:], b.entries[:i])
			b.entries[0] = n
			return true
		}
	}
	return false
}

func (tab *Table) deleteInBucket(b *bucket, n *node) {
	b.entries = deleteNode(b.entries, n)
	tab.removeIP(b, n.IP())
}

func contains(ns []*node, id enode.ID) bool {
	for _, n := range ns {
		if n.ID() == id {
			return true
		}
	}
	return false
}

// pushNode adds n to the front of list, keeping at most max items.
func pushNode(list []*node, n *node, max int) ([]*node, *node) {
	if len(list) < max {
		list = append(list, nil)
	}
	removed := list[len(list)-1]
	copy(list[1:], list)
	list[0] = n
	return list, removed
}
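
// Example: pushNode([a, b], n, 3) grows the list and returns ([n, a, b], nil);
// at capacity, pushNode([a, b, c], n, 3) drops the tail and returns
// ([n, a, b], c).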

// deleteNode removes n from list.
func deleteNode(list []*node, n *node) []*node {
	for i := range list {
		if list[i].ID() == n.ID() {
			return append(list[:i], list[i+1:]...)
		}
	}
	return list
}

// nodesByDistance is a list of nodes, ordered by
// distance to target.
type nodesByDistance struct {
	entries []*node
	target  enode.ID
}

// push adds the given node to the list, keeping at most maxElems entries.
func (h *nodesByDistance) push(n *node, maxElems int) {
	ix := sort.Search(len(h.entries), func(i int) bool {
		return enode.DistCmp(h.target, h.entries[i].ID(), n.ID()) > 0
	})
	if len(h.entries) < maxElems {
		h.entries = append(h.entries, n)
	}
	if ix == len(h.entries) {
		// The node is farther away than all others we already have;
		// if there was room for it, it is now the last element.
	} else {
		// Slide existing entries down to make room.
		// This will overwrite the entry we just appended.
		copy(h.entries[ix+1:], h.entries[ix:])
		h.entries[ix] = n
	}
}
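
// Example for push with maxElems = 3: given entries [a, b, c] sorted by
// distance to target and a new node n sorting between a and b, ix is 1. The
// list is full, so nothing is appended; the copy shifts [b, c] down one slot
// (discarding c) and n lands at index 1, giving [a, n, b]. A node sorting
// after every entry of a full list is dropped.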