github.com/ethereum/go-ethereum@v1.16.1/p2p/discover/table.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package discover implements the Node Discovery Protocol.
//
// The Node Discovery protocol provides a way to find RLPx nodes that
// can be connected to. It uses a Kademlia-like protocol to maintain a
// distributed database of the IDs and endpoints of all listening
// nodes.
package discover

import (
	"context"
	"fmt"
	"net/netip"
	"slices"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/netutil"
)

const (
	alpha           = 3  // Kademlia concurrency factor
	bucketSize      = 16 // Kademlia bucket size
	maxReplacements = 10 // Size of per-bucket replacement list

	// We keep buckets for the upper 1/15 of distances because
	// it's very unlikely we'll ever encounter a node that's closer.
	hashBits          = len(common.Hash{}) * 8
	nBuckets          = hashBits / 15       // Number of buckets
	bucketMinDistance = hashBits - nBuckets // Log distance of closest bucket

	// IP address limits.
	bucketIPLimit, bucketSubnet = 2, 24 // at most 2 addresses from the same /24
	tableIPLimit, tableSubnet   = 10, 24

	seedMinTableTime = 5 * time.Minute
	seedCount        = 30
	seedMaxAge       = 5 * 24 * time.Hour
)
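// For reference, the derived values above work out as follows: node ID hashes are
// 32 bytes, so hashBits = 256, nBuckets = 256/15 = 17, and bucketMinDistance = 239.
// Bucket 0 therefore covers all log distances up to and including 240, while buckets
// 1 through 16 cover distances 241 through 256 (see bucketAtDistance).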

// Table is the 'node table', a Kademlia-like index of neighbor nodes. The table keeps
// itself up-to-date by verifying the liveness of neighbors and requesting their node
// records when announcements of a new record version are received.
type Table struct {
	mutex        sync.Mutex        // protects buckets, bucket content, nursery, rand
	buckets      [nBuckets]*bucket // index of known nodes by distance
	nursery      []*enode.Node     // bootstrap nodes
	rand         reseedingRandom   // source of randomness, periodically reseeded
	ips          netutil.DistinctNetSet
	revalidation tableRevalidation

	db  *enode.DB // database of known nodes
	net transport
	cfg Config
	log log.Logger

	// loop channels
	refreshReq      chan chan struct{}
	revalResponseCh chan revalidationResponse
	addNodeCh       chan addNodeOp
	addNodeHandled  chan bool
	trackRequestCh  chan trackRequestOp
	initDone        chan struct{}
	closeReq        chan struct{}
	closed          chan struct{}

	nodeAddedHook   func(*bucket, *tableNode)
	nodeRemovedHook func(*bucket, *tableNode)
}

// transport is implemented by the UDP transports.
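// In this package, the discovery v4 and v5 UDP transports are the implementations.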
type transport interface {
	Self() *enode.Node
	RequestENR(*enode.Node) (*enode.Node, error)
	lookupRandom() []*enode.Node
	lookupSelf() []*enode.Node
	ping(*enode.Node) (seq uint64, err error)
}

// bucket contains nodes, ordered by their last activity. The entry
// that was most recently active is the first element in entries.
type bucket struct {
	entries      []*tableNode // live entries, sorted by time of last contact
	replacements []*tableNode // recently seen nodes to be used if revalidation fails
	ips          netutil.DistinctNetSet
	index        int
}

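// addNodeOp describes an 'add node' request sent to the table's main loop.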
type addNodeOp struct {
	node         *enode.Node
	isInbound    bool
	forceSetLive bool // for tests
}

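// trackRequestOp describes the outcome of a lookup-style request sent to the
// table's main loop.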
type trackRequestOp struct {
	node       *enode.Node
	foundNodes []*enode.Node
	success    bool
}

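// newTable creates the node table and loads its initial content: the configured
// bootstrap nodes and seed nodes from the node database. Note that the main loop is
// not started here; the caller is expected to run it, roughly as in this sketch:
//
//	tab, err := newTable(t, db, cfg)
//	if err != nil {
//		return nil, err
//	}
//	go tab.loop()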
func newTable(t transport, db *enode.DB, cfg Config) (*Table, error) {
	cfg = cfg.withDefaults()
	tab := &Table{
		net:             t,
		db:              db,
		cfg:             cfg,
		log:             cfg.Log,
		refreshReq:      make(chan chan struct{}),
		revalResponseCh: make(chan revalidationResponse),
		addNodeCh:       make(chan addNodeOp),
		addNodeHandled:  make(chan bool),
		trackRequestCh:  make(chan trackRequestOp),
		initDone:        make(chan struct{}),
		closeReq:        make(chan struct{}),
		closed:          make(chan struct{}),
		ips:             netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit},
	}
	for i := range tab.buckets {
		tab.buckets[i] = &bucket{
			index: i,
			ips:   netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit},
		}
	}
	tab.rand.seed()
	tab.revalidation.init(&cfg)

	// initial table content
	if err := tab.setFallbackNodes(cfg.Bootnodes); err != nil {
		return nil, err
	}
	tab.loadSeedNodes()

	return tab, nil
}

// Nodes returns all nodes contained in the table.
func (tab *Table) Nodes() [][]BucketNode {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	nodes := make([][]BucketNode, len(tab.buckets))
	for i, b := range &tab.buckets {
		nodes[i] = make([]BucketNode, len(b.entries))
		for j, n := range b.entries {
			nodes[i][j] = BucketNode{
				Node:          n.Node,
				Checks:        int(n.livenessChecks),
				Live:          n.isValidatedLive,
				AddedToTable:  n.addedToTable,
				AddedToBucket: n.addedToBucket,
			}
		}
	}
	return nodes
}

func (tab *Table) self() *enode.Node {
	return tab.net.Self()
}

// getNode returns the node with the given ID or nil if it isn't in the table.
func (tab *Table) getNode(id enode.ID) *enode.Node {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	b := tab.bucket(id)
	for _, e := range b.entries {
		if e.ID() == id {
			return e.Node
		}
	}
	return nil
}

// close terminates the network listener and flushes the node database.
func (tab *Table) close() {
	close(tab.closeReq)
	<-tab.closed
}

// setFallbackNodes sets the initial points of contact. These nodes
// are used to connect to the network if the table is empty and there
// are no known nodes in the database.
func (tab *Table) setFallbackNodes(nodes []*enode.Node) error {
	nursery := make([]*enode.Node, 0, len(nodes))
	for _, n := range nodes {
		if err := n.ValidateComplete(); err != nil {
			return fmt.Errorf("bad bootstrap node %q: %v", n, err)
		}
		if tab.cfg.NetRestrict != nil && !tab.cfg.NetRestrict.ContainsAddr(n.IPAddr()) {
			tab.log.Error("Bootstrap node filtered by netrestrict", "id", n.ID(), "ip", n.IPAddr())
			continue
		}
		nursery = append(nursery, n)
	}
	tab.nursery = nursery
	return nil
}

// isInitDone returns whether the table's initial seeding procedure has completed.
func (tab *Table) isInitDone() bool {
	select {
	case <-tab.initDone:
		return true
	default:
		return false
	}
}

// refresh schedules a table refresh and returns a channel that is closed once the
// refresh has completed.
func (tab *Table) refresh() <-chan struct{} {
	done := make(chan struct{})
	select {
	case tab.refreshReq <- done:
	case <-tab.closeReq:
		close(done)
	}
	return done
}

// findnodeByID returns the n nodes in the table that are closest to the given id.
// This is used by the FINDNODE/v4 handler.
//
// The preferLive parameter says whether the caller wants liveness-checked results. If
// preferLive is true and the table contains any verified nodes, the result will not
// contain unverified nodes. However, if there are no verified nodes at all, the result
// will contain unverified nodes.
func (tab *Table) findnodeByID(target enode.ID, nresults int, preferLive bool) *nodesByDistance {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	// Scan all buckets. There might be a better way to do this, but there aren't that many
	// buckets, so this solution should be fine. The worst-case complexity of this loop
	// is O(tab.len() * nresults).
	nodes := &nodesByDistance{target: target}
	liveNodes := &nodesByDistance{target: target}
	for _, b := range &tab.buckets {
		for _, n := range b.entries {
			nodes.push(n.Node, nresults)
			if preferLive && n.isValidatedLive {
				liveNodes.push(n.Node, nresults)
			}
		}
	}

	if preferLive && len(liveNodes.entries) > 0 {
		return liveNodes
	}
	return nodes
}

// appendBucketNodes adds nodes at the given distance to the result slice.
// This is used by the FINDNODE/v5 handler.
func (tab *Table) appendBucketNodes(dist uint, result []*enode.Node, checkLive bool) []*enode.Node {
	if dist > 256 {
		return result
	}
	if dist == 0 {
		return append(result, tab.self())
	}

	tab.mutex.Lock()
	for _, n := range tab.bucketAtDistance(int(dist)).entries {
		if !checkLive || n.isValidatedLive {
			result = append(result, n.Node)
		}
	}
	tab.mutex.Unlock()

	// Shuffle the result to avoid always returning the same nodes in FINDNODE/v5.
	tab.rand.Shuffle(len(result), func(i, j int) {
		result[i], result[j] = result[j], result[i]
	})
	return result
}

// len returns the number of nodes in the table.
func (tab *Table) len() (n int) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	for _, b := range &tab.buckets {
		n += len(b.entries)
	}
	return n
}

// addFoundNode adds a node which may not be live. If the bucket has space available,
// adding the node succeeds immediately. Otherwise, the node is added to the replacements
// list.
//
// The caller must not hold tab.mutex.
func (tab *Table) addFoundNode(n *enode.Node, forceSetLive bool) bool {
	op := addNodeOp{node: n, isInbound: false, forceSetLive: forceSetLive}
	select {
	case tab.addNodeCh <- op:
		return <-tab.addNodeHandled
	case <-tab.closeReq:
		return false
	}
}

// addInboundNode adds a node from an inbound contact. If the bucket has no space, the
// node is added to the replacements list.
//
// There is an additional safety measure: if the table is still initializing, the node
// is not added. This prevents an attack where the table could be filled by just sending
// ping repeatedly.
//
// The caller must not hold tab.mutex.
func (tab *Table) addInboundNode(n *enode.Node) bool {
	op := addNodeOp{node: n, isInbound: true}
	select {
	case tab.addNodeCh <- op:
		return <-tab.addNodeHandled
	case <-tab.closeReq:
		return false
	}
}

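// trackRequest reports the outcome of a request to the table's main loop. Successful
// requests reset the node's consecutive-failure counter; failed ones increment it and
// may lead to the node's removal. Nodes found in the response are added to the table.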
func (tab *Table) trackRequest(n *enode.Node, success bool, foundNodes []*enode.Node) {
	op := trackRequestOp{n, foundNodes, success}
	select {
	case tab.trackRequestCh <- op:
	case <-tab.closeReq:
	}
}

// loop is the main loop of Table.
func (tab *Table) loop() {
	var (
		refresh         = time.NewTimer(tab.nextRefreshTime())
		refreshDone     = make(chan struct{})           // where doRefresh reports completion
		waiting         = []chan struct{}{tab.initDone} // holds waiting callers while doRefresh runs
		revalTimer      = mclock.NewAlarm(tab.cfg.Clock)
		reseedRandTimer = time.NewTicker(10 * time.Minute)
	)
	defer refresh.Stop()
	defer revalTimer.Stop()
	defer reseedRandTimer.Stop()

	// Start initial refresh.
	go tab.doRefresh(refreshDone)

loop:
	for {
		nextTime := tab.revalidation.run(tab, tab.cfg.Clock.Now())
		revalTimer.Schedule(nextTime)

		select {
		case <-reseedRandTimer.C:
			tab.rand.seed()

		case <-revalTimer.C():
			// Nothing to do here: revalidation.run executes at the top of the loop.

		case r := <-tab.revalResponseCh:
			tab.revalidation.handleResponse(tab, r)

		case op := <-tab.addNodeCh:
			tab.mutex.Lock()
			ok := tab.handleAddNode(op)
			tab.mutex.Unlock()
			tab.addNodeHandled <- ok

		case op := <-tab.trackRequestCh:
			tab.handleTrackRequest(op)

		case <-refresh.C:
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}

		case req := <-tab.refreshReq:
			waiting = append(waiting, req)
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}

		case <-refreshDone:
			for _, ch := range waiting {
				close(ch)
			}
			waiting, refreshDone = nil, nil
			refresh.Reset(tab.nextRefreshTime())

		case <-tab.closeReq:
			break loop
		}
	}

	if refreshDone != nil {
		<-refreshDone
	}
	for _, ch := range waiting {
		close(ch)
	}
	close(tab.closed)
}

// doRefresh performs a lookup for a random target to keep buckets full. Seed nodes are
// inserted if the table is empty (initial bootstrap or discarded faulty peers).
func (tab *Table) doRefresh(done chan struct{}) {
	defer close(done)

	// Load nodes from the database and insert
	// them. This should yield a few previously seen nodes that are
	// (hopefully) still alive.
	tab.loadSeedNodes()

	// Run self lookup to discover new neighbor nodes.
	tab.net.lookupSelf()

	// The Kademlia paper specifies that the bucket refresh should
	// perform a lookup in the least recently used bucket. We cannot
	// adhere to this because the findnode target is a 512-bit value
	// (not hash-sized) and it is not easily possible to generate a
	// sha3 preimage that falls into a chosen bucket.
	// We perform a few lookups with a random target instead.
	for i := 0; i < 3; i++ {
		tab.net.lookupRandom()
	}
}

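// loadSeedNodes adds previously seen nodes from the database, along with the
// configured bootstrap nodes, to the table.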
func (tab *Table) loadSeedNodes() {
	seeds := tab.db.QuerySeeds(seedCount, seedMaxAge)
	seeds = append(seeds, tab.nursery...)
	for i := range seeds {
		seed := seeds[i]
		if tab.log.Enabled(context.Background(), log.LevelTrace) {
			age := time.Since(tab.db.LastPongReceived(seed.ID(), seed.IPAddr()))
			addr, _ := seed.UDPEndpoint()
			tab.log.Trace("Found seed node in database", "id", seed.ID(), "addr", addr, "age", age)
		}
		tab.mutex.Lock()
		tab.handleAddNode(addNodeOp{node: seed, isInbound: false})
		tab.mutex.Unlock()
	}
}

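// nextRefreshTime returns a random duration in [RefreshInterval/2, RefreshInterval),
// staggering refreshes over time. With a 30-minute interval, for example, the next
// refresh is scheduled 15 to 30 minutes out.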
func (tab *Table) nextRefreshTime() time.Duration {
	half := tab.cfg.RefreshInterval / 2
	return half + time.Duration(tab.rand.Int63n(int64(half)))
}

// bucket returns the bucket for the given node ID hash.
func (tab *Table) bucket(id enode.ID) *bucket {
	d := enode.LogDist(tab.self().ID(), id)
	return tab.bucketAtDistance(d)
}

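// bucketAtDistance returns the bucket for the given log distance. All distances up to
// and including bucketMinDistance+1 map to bucket 0; each greater distance has a
// bucket of its own, e.g. bucketAtDistance(hashBits) returns the last bucket.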
func (tab *Table) bucketAtDistance(d int) *bucket {
	if d <= bucketMinDistance {
		return tab.buckets[0]
	}
	return tab.buckets[d-bucketMinDistance-1]
}

// addIP registers ip in the table-wide and per-bucket IP limit sets, returning false
// if adding it would exceed either limit. LAN addresses are always accepted.
func (tab *Table) addIP(b *bucket, ip netip.Addr) bool {
	if !ip.IsValid() || ip.IsUnspecified() {
		return false // Nodes without IP cannot be added.
	}
	if netutil.AddrIsLAN(ip) {
		return true
	}
	if !tab.ips.AddAddr(ip) {
		tab.log.Debug("IP exceeds table limit", "ip", ip)
		return false
	}
	if !b.ips.AddAddr(ip) {
		tab.log.Debug("IP exceeds bucket limit", "ip", ip)
		tab.ips.RemoveAddr(ip)
		return false
	}
	return true
}

// removeIP unregisters ip from the limit sets.
func (tab *Table) removeIP(b *bucket, ip netip.Addr) {
	if netutil.AddrIsLAN(ip) {
		return
	}
	tab.ips.RemoveAddr(ip)
	b.ips.RemoveAddr(ip)
}

// handleAddNode adds the node in the request to the table, if there is space.
// The caller must hold tab.mutex.
func (tab *Table) handleAddNode(req addNodeOp) bool {
	if req.node.ID() == tab.self().ID() {
		return false
	}
	// For nodes from inbound contact, there is an additional safety measure: if the table
	// is still initializing, the node is not added.
	if req.isInbound && !tab.isInitDone() {
		return false
	}

	b := tab.bucket(req.node.ID())
	n, _ := tab.bumpInBucket(b, req.node, req.isInbound)
	if n != nil {
		// Already in bucket.
		return false
	}
	if len(b.entries) >= bucketSize {
		// Bucket full, maybe add as replacement.
		tab.addReplacement(b, req.node)
		return false
	}
	if !tab.addIP(b, req.node.IPAddr()) {
		// Can't add: IP limit reached.
		return false
	}

	// Add to bucket.
	wn := &tableNode{Node: req.node}
	if req.forceSetLive {
		wn.livenessChecks = 1
		wn.isValidatedLive = true
	}
	b.entries = append(b.entries, wn)
	b.replacements = deleteNode(b.replacements, wn.ID())
	tab.nodeAdded(b, wn)
	return true
}

// addReplacement adds n to the replacement cache of bucket b.
func (tab *Table) addReplacement(b *bucket, n *enode.Node) {
	if containsID(b.replacements, n.ID()) {
		// TODO: update ENR
		return
	}
	if !tab.addIP(b, n.IPAddr()) {
		return
	}

	wn := &tableNode{Node: n, addedToTable: time.Now()}
	var removed *tableNode
	b.replacements, removed = pushNode(b.replacements, wn, maxReplacements)
	if removed != nil {
		tab.removeIP(b, removed.IPAddr())
	}
}

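// nodeAdded performs bookkeeping after n has been added to bucket b: it stamps the
// addition times, registers the node for revalidation, and notifies the hook and
// metrics counters.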
func (tab *Table) nodeAdded(b *bucket, n *tableNode) {
	if n.addedToTable == (time.Time{}) {
		n.addedToTable = time.Now()
	}
	n.addedToBucket = time.Now()
	tab.revalidation.nodeAdded(tab, n)
	if tab.nodeAddedHook != nil {
		tab.nodeAddedHook(b, n)
	}
	if metrics.Enabled() {
		bucketsCounter[b.index].Inc(1)
	}
}

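// nodeRemoved undoes the bookkeeping of nodeAdded after n has been removed from
// bucket b.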
func (tab *Table) nodeRemoved(b *bucket, n *tableNode) {
	tab.revalidation.nodeRemoved(n)
	if tab.nodeRemovedHook != nil {
		tab.nodeRemovedHook(b, n)
	}
	if metrics.Enabled() {
		bucketsCounter[b.index].Dec(1)
	}
}

// deleteInBucket removes node n from the table.
// If there are replacement nodes in the bucket, the node is replaced.
func (tab *Table) deleteInBucket(b *bucket, id enode.ID) *tableNode {
	index := slices.IndexFunc(b.entries, func(e *tableNode) bool { return e.ID() == id })
	if index == -1 {
		// Entry has been removed already.
		return nil
	}

	// Remove the node.
	n := b.entries[index]
	b.entries = slices.Delete(b.entries, index, index+1)
	tab.removeIP(b, n.IPAddr())
	tab.nodeRemoved(b, n)

	// Add replacement.
	if len(b.replacements) == 0 {
		tab.log.Debug("Removed dead node", "b", b.index, "id", n.ID(), "ip", n.IPAddr())
		return nil
	}
	rindex := tab.rand.Intn(len(b.replacements))
	rep := b.replacements[rindex]
	b.replacements = slices.Delete(b.replacements, rindex, rindex+1)
	b.entries = append(b.entries, rep)
	tab.nodeAdded(b, rep)
	tab.log.Debug("Replaced dead node", "b", b.index, "id", n.ID(), "ip", n.IPAddr(), "r", rep.ID(), "rip", rep.IPAddr())
	return rep
}

// bumpInBucket updates a node record if it exists in the bucket.
// The second return value reports whether the node's endpoint (IP/port) was updated.
func (tab *Table) bumpInBucket(b *bucket, newRecord *enode.Node, isInbound bool) (n *tableNode, endpointChanged bool) {
	i := slices.IndexFunc(b.entries, func(elem *tableNode) bool {
		return elem.ID() == newRecord.ID()
	})
	if i == -1 {
		return nil, false // not in bucket
	}
	n = b.entries[i]

	// For inbound updates (from the node itself) we accept any change, even if it sets
	// back the sequence number. For found nodes (!isInbound), seq has to advance. Note
	// this check also ensures found discv4 nodes (which always have seq=0) can't be
	// updated.
	if newRecord.Seq() <= n.Seq() && !isInbound {
		return n, false
	}

	// Check endpoint update against IP limits.
	ipchanged := newRecord.IPAddr() != n.IPAddr()
	portchanged := newRecord.UDP() != n.UDP()
	if ipchanged {
		tab.removeIP(b, n.IPAddr())
		if !tab.addIP(b, newRecord.IPAddr()) {
			// The new IP doesn't fit within the limit; put the previous record back.
			tab.addIP(b, n.IPAddr())
			return n, false
		}
	}

	// Apply update.
	n.Node = newRecord
	if ipchanged || portchanged {
		// Ensure node is revalidated quickly for endpoint changes.
		tab.revalidation.nodeEndpointChanged(tab, n)
		return n, true
	}
	return n, false
}

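// handleTrackRequest processes a trackRequestOp on the main loop: it updates the
// node's consecutive-failure counter, evicts the node if it has failed too often, and
// adds any nodes found in the response.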
func (tab *Table) handleTrackRequest(op trackRequestOp) {
	var fails int
	if op.success {
		// Reset failure counter because it counts _consecutive_ failures.
		tab.db.UpdateFindFails(op.node.ID(), op.node.IPAddr(), 0)
	} else {
		fails = tab.db.FindFails(op.node.ID(), op.node.IPAddr())
		fails++
		tab.db.UpdateFindFails(op.node.ID(), op.node.IPAddr(), fails)
	}

	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	b := tab.bucket(op.node.ID())
	// Remove the node from the local table if it fails to return anything useful too
	// many times, but only if there are enough other nodes in the bucket. This latter
	// condition specifically exists to make bootstrapping in smaller test networks more
	// reliable.
	if fails >= maxFindnodeFailures && len(b.entries) >= bucketSize/4 {
		tab.deleteInBucket(b, op.node.ID())
	}

	// Add found nodes.
	for _, n := range op.foundNodes {
		tab.handleAddNode(addNodeOp{n, false, false})
	}
}

// pushNode adds n to the front of list, keeping at most max items.
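// If the list is already at max length, the last (oldest) entry is dropped and
// returned; e.g. pushNode([c,b,a], n, 3) yields ([n,c,b], a). Otherwise the returned
// node is nil.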
func pushNode(list []*tableNode, n *tableNode, max int) ([]*tableNode, *tableNode) {
	if len(list) < max {
		list = append(list, nil)
	}
	removed := list[len(list)-1]
	copy(list[1:], list)
	list[0] = n
	return list, removed
}

// deleteNode removes a node from the table.
func (tab *Table) deleteNode(n *enode.Node) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.bucket(n.ID())
	tab.deleteInBucket(b, n.ID())
}