github.com/sixexorg/magnetic-ring@v0.0.0-20191119090307-31705a21e419/p2pserver/discover/table.go (about)

     1  package discover
     2  
     3  import (
     4  	"crypto/rand"
     5  	"encoding/binary"
     6  	"errors"
     7  	"fmt"
     8  	"net"
     9  	"sort"
    10  	"sync"
    11  	"time"
    12  
    13  	"github.com/ethereum/go-ethereum/common"
    14  	"github.com/ethereum/go-ethereum/crypto"
    15  	comm "github.com/sixexorg/magnetic-ring/common"
    16  	"github.com/sixexorg/magnetic-ring/log"
    17  )
    18  
    19  const (
    20  	alpha      = 3  // Kademlia concurrency factor
    21  	bucketSize = 16 // Kademlia bucket size
    22  	hashBits   = len(common.Hash{}) * 8
    23  	nBuckets   = hashBits + 1 // Number of buckets
    24  
    25  	maxBondingPingPongs = 16
    26  	maxFindnodeFailures = 5
    27  
    28  	// autoRefreshInterval = 1 * time.Hour // for normal
    29  	//autoRefreshInterval = 1 * time.Minute // for test
    30  	autoRefreshInterval    = 5 * time.Second // for test
    31  	autoRefreshOrgInterval = 1 * time.Minute
    32  	seedCount              = 30
    33  	seedMaxAge             = 5 * 24 * time.Hour
    34  )
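
        // With common.Hash being 32 bytes, hashBits is 256 and nBuckets is 257:
        // a node is stored in bucket logdist(tab.self.sha, n.sha), which is 0 for
        // an identical hash and grows with the XOR distance between the two
        // Keccak256 hashes (see add and stuff below).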
    35  
    36  type Table struct {
    37  	mutex   sync.Mutex        // protects buckets, their content, and nursery
    38  	buckets [nBuckets]*bucket // index of known nodes by distance
    39  	nursery []*Node           // bootstrap nodes
    40  	db      *nodeDB           // database of known nodes
    41  
    42  	refreshReq chan chan struct{}
    43  	closeReq   chan struct{}
    44  	closed     chan struct{}
    45  
    46  	bondmu    sync.Mutex
    47  	bonding   map[NodeID]*bondproc
    48  	bondslots chan struct{} // limits total number of active bonding processes
    49  
    50  	nodeAddedHook func(*Node) // for testing
    51  
    52  	net  transport
    53  	self *Node // metadata of the local node
    54  	// circle / organization related state
    55  	orgData        *OrgInfo     // per-circle (organization) node bookkeeping
    56  	connectData    *ConnectInfo // peer-to-peer connection bookkeeping
    57  	stellarData    *StellarInfo // known stellar nodes, keyed by peer id
    58  	stellarDisChan chan uint64  // feeds the stellar connection-check loop
    59  }
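
        // Locking model: mutex guards buckets, their entries and nursery; bondmu
        // guards the bonding map; bondslots acts as a counting semaphore that
        // newTable fills with maxBondingPingPongs tokens, so at most that many
        // ping/pong bonding exchanges run at once.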
    60  
    61  type bondproc struct {
    62  	err  error
    63  	n    *Node
    64  	done chan struct{}
    65  }
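
        // A bondproc tracks one in-flight bonding attempt. bond registers it in
        // tab.bonding so that concurrent callers for the same NodeID wait on done
        // instead of starting a second ping/pong; pingpong fills in err and n and
        // closes done when the exchange finishes.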
    66  
    67  // transport is implemented by the UDP transport.
    68  // it is an interface so we can test without opening lots of UDP
    69  // sockets and without generating a private key.
    70  type transport interface {
    71  	ping(NodeID, *net.UDPAddr) error
    72  	waitping(NodeID) error
    73  	findnode(toid NodeID, addr *net.UDPAddr, target NodeID) ([]*Node, error)
    74  	findcircle(toid NodeID, addr *net.UDPAddr, target comm.Address) ([]*Node, error)
    75  	sendcircle(node, srcnode *Node, SrcCircleID comm.Address, ownid uint64, badd bool) error
    76  	sendConnectInfo(node, srcnode *Node, ownid, remoteid uint64, bconnect, bstellar bool) error
    77  	reqPeerOrg(toid NodeID, toaddr *net.UDPAddr, node *Node) (*rtnTabPeerOrg, error)
    78  	reqPeerConnect(toid NodeID, toaddr *net.UDPAddr, node *Node) (*rtnTabPeerConnect, error)
    79  	connectCircuOrg(node, connectnode *Node, orgid comm.Address) error
    80  	close()
    81  }
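
        // For tests, any stub that satisfies transport can stand in for the UDP
        // implementation. A minimal sketch (not part of the original source;
        // nullTransport is a hypothetical name):
        //
        //	type nullTransport struct{}
        //
        //	func (nullTransport) ping(NodeID, *net.UDPAddr) error                                { return nil }
        //	func (nullTransport) waitping(NodeID) error                                          { return nil }
        //	func (nullTransport) findnode(NodeID, *net.UDPAddr, NodeID) ([]*Node, error)         { return nil, nil }
        //	func (nullTransport) findcircle(NodeID, *net.UDPAddr, comm.Address) ([]*Node, error) { return nil, nil }
        //	func (nullTransport) sendcircle(*Node, *Node, comm.Address, uint64, bool) error      { return nil }
        //	func (nullTransport) sendConnectInfo(*Node, *Node, uint64, uint64, bool, bool) error { return nil }
        //	func (nullTransport) reqPeerOrg(NodeID, *net.UDPAddr, *Node) (*rtnTabPeerOrg, error) { return nil, nil }
        //	func (nullTransport) reqPeerConnect(NodeID, *net.UDPAddr, *Node) (*rtnTabPeerConnect, error) {
        //		return nil, nil
        //	}
        //	func (nullTransport) connectCircuOrg(*Node, *Node, comm.Address) error { return nil }
        //	func (nullTransport) close()                                            {}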
    82  
    83  // bucket contains nodes, ordered by their last activity. The entry
    84  // that was most recently active is the first element in entries.
    85  type bucket struct {
    86  	entries []*Node
    87  }
    88  
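        // newTable creates the node table backed by the node database at
        // nodeDBPath (an in-memory database is used if the path is empty),
        // starts the background refresh loop and, when bootnode is true, the
        // extra circle/stellar maintenance goroutines.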
    89  func newTable(t transport, ourID NodeID, ourAddr *net.UDPAddr, nodeDBPath string, bootnode bool) (*Table, error) {
    90  	// If no node database was given, use an in-memory one
    91  	db, err := newNodeDB(nodeDBPath, Version, ourID)
    92  	if err != nil {
    93  		return nil, err
    94  	}
    95  	orgData := new(OrgInfo)
    96  	orgData.orgSingleData = make(map[comm.Address]NodeMap) 
    97  	orgData.peerIDToOrgIDMap = make(map[uint64]PeerOrg)
    98  	orgData.peerIDToNode = make(map[uint64]*OrgNodeInfo)
    99  
   100  	connectData := new(ConnectInfo)
   101  	connectData.peerToPeerMap = make(map[uint64]ConnPeer)
   102  	connectData.peerIDToNode = make(map[uint64]*OrgNodeInfo)
   103  
   104  	stellarData := new(StellarInfo)
   105  	stellarData.stellarmap = make(map[uint64]*Node)
   106  
   107  	tab := &Table{
   108  		net:            t,
   109  		db:             db,
   110  		self:           NewNode(ourID, ourAddr.IP, uint16(ourAddr.Port), uint16(ourAddr.Port)),
   111  		bonding:        make(map[NodeID]*bondproc),
   112  		bondslots:      make(chan struct{}, maxBondingPingPongs),
   113  		refreshReq:     make(chan chan struct{}),
   114  		closeReq:       make(chan struct{}),
   115  		closed:         make(chan struct{}),
   116  		stellarDisChan: make(chan uint64, StellarDisChanLen),
   117  		orgData:        orgData,
   118  		connectData:    connectData,
   119  		stellarData:    stellarData,
   120  	}
   121  	for i := 0; i < cap(tab.bondslots); i++ {
   122  		tab.bondslots <- struct{}{}
   123  	}
   124  	for i := range tab.buckets {
   125  		tab.buckets[i] = new(bucket)
   126  	}
   127  	go tab.refreshLoop()
   128  	if bootnode { // bootstrap nodes run the extra maintenance loops
   129  		// go tab.refreshConnect()
   130  		go tab.refreshOrg()
   131  		go tab.refreshStellarDis() // Check connection information between stellar nodes
   132  		go tab.loopprintf()
   133  		//go tab.checkCirculation()
   134  	}
   135  
   137  	// go tab.circleprintf()
   138  	return tab, nil
   139  }
   140  
   141  // Self returns the local node.
   142  // The returned node should not be modified by the caller.
   143  func (tab *Table) Self() *Node {
   144  	return tab.self
   145  }
   146  
   147  // ReadRandomNodes fills the given slice with random nodes from the
   148  // table. It will not write the same node more than once. The nodes in
   149  // the slice are copies and can be modified by the caller.
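        //
        // Usage sketch (not in the original source), assuming tab is a *Table:
        //
        //	buf := make([]*Node, bucketSize)
        //	n := tab.ReadRandomNodes(buf)
        //	for _, node := range buf[:n] {
        //		// dial or inspect node
        //	}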
   150  func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
   151  	tab.mutex.Lock()
   152  	defer tab.mutex.Unlock()
   153  	// TODO: tree-based buckets would help here
   154  	// Find all non-empty buckets and get a fresh slice of their entries.
   155  	var buckets [][]*Node
   156  	for _, b := range tab.buckets {
   157  		if len(b.entries) > 0 {
   158  			buckets = append(buckets, b.entries[:])
   159  		}
   160  	}
   161  	if len(buckets) == 0 {
   162  		return 0
   163  	}
   164  	// Shuffle the buckets.
   165  	// 洗桶。
   166  	for i := uint32(len(buckets)) - 1; i > 0; i-- {
   167  		j := randUint(i)
   168  		buckets[i], buckets[j] = buckets[j], buckets[i]
   169  	}
   170  	// Move head of each bucket into buf, removing buckets that become empty.
   171  	var i, j int
   172  	for i < len(buf) {
   173  		b := buckets[j]
   174  		// Copy the head node so callers can modify it, and skip nodes that
   175  		// have recent findnode failures recorded in the database.
   176  		head := *b[0]
   177  		if tab.db.findFails(head.ID) == 0 {
   178  			buf[i] = &head
   179  			i++
   180  		}
   181  		buckets[j] = b[1:]
   182  		if len(b) == 1 {
   183  			buckets = append(buckets[:j], buckets[j+1:]...)
   184  		}
   185  		if len(buckets) == 0 {
   186  			break
   187  		}
   188  		j = (j + 1) % len(buckets)
   189  	}
   190  	// i is exactly the number of nodes written to buf; the previous
   191  	// "return i + 1" over-counted when the last head was skipped or when
   192  	// buf was filled completely.
   193  	return i
   195  }
   196  
   197  func randUint(max uint32) uint32 {
   198  	if max == 0 {
   199  		return 0
   200  	}
   201  	var b [4]byte
   202  	rand.Read(b[:])
   203  	return binary.BigEndian.Uint32(b[:]) % max
   204  }
   205  
   206  // Close terminates the network listener and flushes the node database.
   207  func (tab *Table) Close() {
   208  	select {
   209  	case <-tab.closed:
   210  		// already closed.
   211  	case tab.closeReq <- struct{}{}:
   212  		<-tab.closed // wait for refreshLoop to end.
   213  	}
   214  }
   215  
   216  // SetFallbackNodes sets the initial points of contact. These nodes
   217  // are used to connect to the network if the table is empty and there
   218  // are no known nodes in the database.
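        //
        // Sketch (assuming ParseNode accepts an enode-style URL, as the comment
        // below on cpy.sha suggests):
        //
        //	if n, err := ParseNode(rawURL); err == nil {
        //		tab.SetFallbackNodes([]*Node{n})
        //	}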
   219  func (tab *Table) SetFallbackNodes(nodes []*Node) error {
   220  	for _, n := range nodes {
   221  		if err := n.validateComplete(); err != nil {
   222  			return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err)
   223  		}
   224  	}
   225  	tab.mutex.Lock()
   226  	tab.nursery = make([]*Node, 0, len(nodes))
   227  	for _, n := range nodes {
   228  		cpy := *n
   229  		// Recompute cpy.sha because the node might not have been
   230  		// created by NewNode or ParseNode.
   231  		cpy.sha = crypto.Keccak256Hash(n.ID[:])
   232  		tab.nursery = append(tab.nursery, &cpy)
   233  	}
   234  	tab.mutex.Unlock()
   235  	fmt.Println(" ******* Table SetFallbackNodes nodes:", nodes)
   236  	tab.refresh()
   237  	return nil
   238  }
   239  
   240  // Resolve searches for a specific node with the given ID.
   241  // It returns nil if the node could not be found.
   242  func (tab *Table) Resolve(targetID NodeID) *Node {
   243  	// If the node is present in the local table, no
   244  	// network interaction is required.
   245  	hash := crypto.Keccak256Hash(targetID[:])
   246  	tab.mutex.Lock()
   247  	cl := tab.closest(hash, 1)
   248  	tab.mutex.Unlock()
   249  	if len(cl.entries) > 0 && cl.entries[0].ID == targetID {
   250  		return cl.entries[0]
   251  	}
   252  	// Otherwise, do a network lookup.
   253  	result := tab.Lookup(targetID)
   254  	for _, n := range result {
   255  		if n.ID == targetID {
   256  			return n
   257  		}
   258  	}
   259  	return nil
   260  }
   261  
   262  // Lookup performs a network search for nodes close
   263  // to the given target. It approaches the target by querying
   264  // nodes that are closer to it on each iteration.
   265  // The given target does not need to be an actual node
   266  // identifier.
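        //
        // Sketch: refreshing the neighborhood of a random target, as doRefresh
        // does (tab is a *Table, rand is crypto/rand):
        //
        //	var target NodeID
        //	rand.Read(target[:])
        //	nodes := tab.Lookup(target)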
   267  func (tab *Table) Lookup(targetID NodeID) []*Node {
   268  	return tab.lookup(targetID, true)
   269  }
   270  
   271  
   272  func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node {
   273  	var (
   274  		target         = crypto.Keccak256Hash(targetID[:])
   275  		asked          = make(map[NodeID]bool)
   276  		seen           = make(map[NodeID]bool)
   277  		reply          = make(chan []*Node, alpha)
   278  		pendingQueries = 0
   279  		result         *nodesByDistance
   280  	)
   281  	// don't query further if we hit ourself.
   282  	// unlikely to happen often in practice.
   283  	asked[tab.self.ID] = true
   284  
   285  	for {
   286  		tab.mutex.Lock()
   287  		// generate initial result set
   288  		result = tab.closest(target, bucketSize)
   289  		tab.mutex.Unlock()
   290  		if len(result.entries) > 0 || !refreshIfEmpty {
   291  			break
   292  		}
   293  		// The result set is empty, all nodes were dropped, refresh.
   294  		// We actually wait for the refresh to complete here. The very
   295  		// first query will hit this case and run the bootstrapping
   296  		// logic.
   297  		<-tab.refresh()
   298  		refreshIfEmpty = false
   299  	}
   300  
   301  	for {
   302  		// ask the alpha closest nodes that we haven't asked yet
   303  		for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
   304  			n := result.entries[i]
   305  			if !asked[n.ID] {
   306  				asked[n.ID] = true
   307  				pendingQueries++
   308  				go func() {
   309  					// Find potential neighbors to bond with
   310  					r, err := tab.net.findnode(n.ID, n.addr(), targetID)
   311  					if err != nil {
   312  						// Bump the failure counter to detect and evacuate non-bonded entries
   313  						fails := tab.db.findFails(n.ID) + 1
   314  						tab.db.updateFindFails(n.ID, fails)
   315  						log.Trace("Bumping findnode failure counter", "id", n.ID, "failcount", fails)
   316  
   317  						// fmt.Println(" ******* fails:",fails,",err:",err)
   318  						if fails >= maxFindnodeFailures {
   319  							log.Trace("Too many findnode failures, dropping", "id", n.ID, "failcount", fails)
   320  							tab.delete(n)
   321  						}
   322  					}
   323  					reply <- tab.bondall(r)
   324  				}()
   325  			}
   326  		}
   327  		if pendingQueries == 0 {
   328  			// we have asked all closest nodes, stop the search
   329  			break
   330  		}
   331  		// wait for the next reply
   332  		for _, n := range <-reply {
   333  			if n != nil && !seen[n.ID] {
   334  				seen[n.ID] = true
   335  				result.push(n, bucketSize)
   336  			}
   337  		}
   338  		pendingQueries--
   339  	}
   340  	return result.entries
   341  }
   342  
   343  func (tab *Table) refresh() <-chan struct{} {
   344  	// fmt.Println(" *******Table  refresh() ...... ")
   345  	done := make(chan struct{})
   346  	select {
   347  	case tab.refreshReq <- done:
   348  	case <-tab.closed:
   349  		close(done)
   350  	}
   351  	return done
   352  }
   353  
   354  // noloopprintf dumps the current bucket contents once (debug helper).
   355  func (tab *Table) noloopprintf() {
   356  	tab.mutex.Lock()
   357  	fmt.Println(" ***** noloopprintf Table loopprintf len:", len(tab.buckets))
   358  	for i, b := range tab.buckets {
   359  		if len(b.entries) > 0 {
   360  			fmt.Println(" ***** noloopprintf Table loopprintf i:", i, "len:", len(b.entries),
   361  				"data:", b.entries)
   362  		}
   363  	}
   364  	tab.mutex.Unlock()
   365  }
   367  
   368  // loopprintf periodically dumps the bucket contents (debug helper, never returns).
   369  func (tab *Table) loopprintf() {
   370  	for {
   371  		tab.mutex.Lock()
   372  		fmt.Println(" ***** Table loopprintf len:", len(tab.buckets))
   373  		for i, b := range tab.buckets {
   374  			if len(b.entries) > 0 {
   375  				fmt.Println(" ***** Table loopprintf i:", i, "len:", len(b.entries),
   376  					"data:", b.entries)
   377  			}
   378  		}
   379  		tab.mutex.Unlock()
   380  		time.Sleep(10 * time.Second)
   381  	}
   382  }
   384  
   385  // refreshLoop schedules doRefresh runs and coordinates shutdown. It
   386  // performs a refresh every autoRefreshInterval, runs an extra refresh
   387  // whenever a request arrives on refreshReq, and tears the table down
   388  // when a close request is received. The actual refresh work is done
   389  // by doRefresh.
   392  func (tab *Table) refreshLoop() {
   393  	var (
   394  		timer   = time.NewTicker(autoRefreshInterval)
   395  		waiting []chan struct{} // accumulates waiting callers while doRefresh runs
   396  		done    chan struct{}   // where doRefresh reports completion
   397  	)
   398  loop:
   399  	for {
   400  		select {
   401  		case <-timer.C:
   402  			if done == nil {
   403  				done = make(chan struct{})
   404  				//fmt.Println(" ********* refreshLoop doRefresh done ")
   405  				go tab.doRefresh(done)
   406  			}
   407  		case req := <-tab.refreshReq:
   408  			waiting = append(waiting, req)
   409  			if done == nil {
   410  				done = make(chan struct{})
   411  				// fmt.Println(" ******* req := <-tab.refreshReq: ")
   412  				go tab.doRefresh(done)
   413  			}
   414  		case <-done:
   415  			for _, ch := range waiting {
   416  				close(ch)
   417  			}
   418  			waiting = nil
   419  			done = nil
   420  		case <-tab.closeReq:
   421  			break loop
   422  		}
   423  	}
   424  
   425  	if tab.net != nil {
   426  		tab.net.close()
   427  	}
   428  	if done != nil {
   429  		<-done
   430  	}
   431  	for _, ch := range waiting {
   432  		close(ch)
   433  	}
   434  	tab.db.close()
   435  	close(tab.closed)
   436  }
   437  
   438  // doRefresh performs a lookup for a random target to keep buckets
   439  // full. seed nodes are inserted if the table is empty (initial
   440  // bootstrap or discarded faulty peers).
   441  func (tab *Table) doRefresh(done chan struct{}) {
   442  	defer close(done)
   443  
   444  	// The Kademlia paper specifies that the bucket refresh should
   445  	// perform a lookup in the least recently used bucket. We cannot
   446  	// adhere to this because the findnode target is a 512-bit value
   447  	// (not hash-sized) and it is not easily possible to generate a
   448  	// sha3 preimage that falls into a chosen bucket.
   449  	// We perform a lookup with a random target instead.
   450  	var target NodeID
   451  	rand.Read(target[:])
   452  	result := tab.lookup(target, false)
   453  	if len(result) > 0 {
   454  		return
   455  	}
   456  
   457  	// The table is empty. Bond with the nursery (bootstrap) nodes and
   458  	// insert them. Loading previously seen nodes from the database is
   459  	// currently disabled; re-enable querySeeds below to restore it.
   460  	// seeds := tab.db.querySeeds(seedCount, seedMaxAge)
   461  	seeds := make([]*Node, 0)
   462  	// fmt.Println(" ******* Table doRefresh ...... len(seeds):", len(seeds), "seeds:", seeds)
   463  	// fmt.Println(" ******* Table doRefresh ...... tab.nursery:", tab.nursery)
   464  	seeds = tab.bondall(append(seeds, tab.nursery...))
   465  
   466  	if len(seeds) == 0 {
   467  		log.Debug("No discv4 seed nodes found")
   468  	}
   469  	for _, n := range seeds {
   470  		age := log.Lazy{Fn: func() time.Duration { return time.Since(tab.db.lastPong(n.ID)) }}
   471  		log.Trace("Found seed node in database", "id", n.ID, "addr", n.addr(), "age", age)
   472  	}
   473  	tab.mutex.Lock()
   474  	tab.stuff(seeds)
   475  	tab.mutex.Unlock()
   476  
   477  	// Finally, do a self lookup to fill up the buckets.
   478  	// fmt.Println(" ********* Table doRefresh sleep over over ......")
   479  	tab.lookup(tab.self.ID, false)
   480  }
   481  
   482  // closest returns the n nodes in the table that are closest to the
   483  // given id. The caller must hold tab.mutex.
   484  func (tab *Table) closest(target common.Hash, nresults int) *nodesByDistance {
   485  	// This is a very wasteful way to find the closest nodes but
   486  	// obviously correct. I believe that tree-based buckets would make
   487  	// this easier to implement efficiently.
   488  	close := &nodesByDistance{target: target}
   489  	for _, b := range tab.buckets {
   490  		for _, n := range b.entries {
   491  			close.push(n, nresults)
   492  		}
   493  	}
   494  	return close
   495  }
   496  
   497  func (tab *Table) len() (n int) {
   498  	for _, b := range tab.buckets {
   499  		n += len(b.entries)
   500  	}
   501  	return n
   502  }
   503  
   504  // bondall bonds with all given nodes concurrently and returns
   505  // those nodes for which bonding has probably succeeded.
   506  func (tab *Table) bondall(nodes []*Node) (result []*Node) {
   507  	rc := make(chan *Node, len(nodes))
   508  	// fmt.Println(" ***** Table bondall ...... ")
   509  	for i := range nodes {
   510  		go func(n *Node) {
   511  			nn, _ := tab.bond(false, n.ID, n.addr(), n.TCP)
   512  			rc <- nn
   513  		}(nodes[i])
   514  	}
   515  	for range nodes {
   516  		if n := <-rc; n != nil {
   517  			result = append(result, n)
   518  		}
   519  	}
   520  	return result
   521  }
   522  
   523  // bond ensures the local node has a bond with the given remote node.
   524  // It also attempts to insert the node into the table if bonding succeeds.
   525  // The caller must not hold tab.mutex.
   526  //
   527  // A bond must be established before sending findnode requests.
   528  // Both sides must have completed a ping/pong exchange for a bond to
   529  // exist. The total number of active bonding processes is limited in
   530  // order to restrain network use.
   531  //
   532  // bond is meant to operate idempotently in that bonding with a remote
   533  // node which still remembers a previously established bond will work.
   534  // The remote node will simply not send a ping back, causing waitping
   535  // to time out.
   536  //
   537  // If pinged is true, the remote node has just pinged us and one half
   538  // of the process can be skipped.
   539  func (tab *Table) bond(pinged bool, id NodeID, addr *net.UDPAddr, tcpPort uint16) (*Node, error) {
   540  	// fmt.Println(" ***** Table bond id:",id)
   541  	if id == tab.self.ID {
   542  		return nil, errors.New("is self")
   543  	}
   544  	// Retrieve a previously known node and any recent findnode failures
   545  	node, fails := tab.db.node(id), 0
   546  	if node != nil {
   547  		fails = tab.db.findFails(id)
   548  	}
   549  	// If the node is unknown (non-bonded) or failed (remotely unknown), bond from scratch
   550  	var result error
   551  	age := time.Since(tab.db.lastPong(id))
   552  	if node == nil || fails > 0 || age > nodeDBNodeExpiration {
   553  		log.Trace("Starting bonding ping/pong", "id", id, "known", node != nil, "failcount", fails, "age", age)
   554  
   555  		tab.bondmu.Lock()
   556  		w := tab.bonding[id]
   557  		if w != nil {
   558  			// Wait for an existing bonding process to complete.
   559  			tab.bondmu.Unlock()
   560  			<-w.done
   561  		} else {
   562  			// Register a new bonding process.
   563  			w = &bondproc{done: make(chan struct{})}
   564  			tab.bonding[id] = w
   565  			tab.bondmu.Unlock()
   566  			// Do the ping/pong. The result goes into w.
   567  			tab.pingpong(w, pinged, id, addr, tcpPort)
   568  			// Unregister the process after it's done.
   569  			tab.bondmu.Lock()
   570  			delete(tab.bonding, id)
   571  			tab.bondmu.Unlock()
   572  		}
   573  		// Retrieve the bonding results
   574  		result = w.err
   575  		if result == nil {
   576  			node = w.n
   577  		}
   578  	}
   579  	if node != nil {
   580  		// Add the node to the table even if the bonding ping/pong
   581  		// fails. It will be replaced quickly if it continues to be
   582  		// unresponsive.
   583  		tab.add(node)
   584  		tab.db.updateFindFails(id, 0)
   585  	}
   586  	return node, result
   587  }
   588  
   589  func (tab *Table) pingpong(w *bondproc, pinged bool, id NodeID, addr *net.UDPAddr, tcpPort uint16) {
   590  	// Request a bonding slot to limit network usage
   591  	<-tab.bondslots
   592  	defer func() { tab.bondslots <- struct{}{} }()
   593  
   594  	// Ping the remote side and wait for a pong.
   595  	// fmt.Println(" ******* Table pingpong id:",id)
   596  	if w.err = tab.ping(id, addr); w.err != nil {
   597  		close(w.done)
   598  		return
   599  	}
   600  	if !pinged {
   601  		// Give the remote node a chance to ping us before we start
   602  		// sending findnode requests. If they still remember us,
   603  		// waitping will simply time out.
   604  		tab.net.waitping(id)
   605  	}
   606  	// Bonding succeeded, update the node database.
   607  	w.n = NewNode(id, addr.IP, uint16(addr.Port), tcpPort)
   608  	tab.db.updateNode(w.n)
   609  	close(w.done)
   610  }
   611  
   612  // ping a remote endpoint and wait for a reply, also updating the node
   613  // database accordingly.
   614  func (tab *Table) ping(id NodeID, addr *net.UDPAddr) error {
   615  	tab.db.updateLastPing(id, time.Now())
   616  	// fmt.Println(" ***** Table ping id:",id)
   617  	if err := tab.net.ping(id, addr); err != nil {
   618  		return err
   619  	}
   620  	tab.db.updateLastPong(id, time.Now())
   621  
   622  	// Start the background expiration goroutine after the first
   623  	// successful communication. Subsequent calls have no effect if it
   624  	// is already running. We do this here instead of somewhere else
   625  	// so that the search for seed nodes also considers older nodes
   626  	// that would otherwise be removed by the expiration.
   627  	tab.db.ensureExpirer()
   628  	return nil
   629  }
   630  
   631  // add attempts to add the given node to its corresponding bucket. If the
   632  // bucket has space available, adding the node succeeds immediately.
   633  // Otherwise, the node is added if the least recently active node in
   634  // the bucket does not respond to a ping packet.
   635  //
   636  // The caller must not hold tab.mutex.
   637  func (tab *Table) add(new *Node) {
   638  	b := tab.buckets[logdist(tab.self.sha, new.sha)]
   639  	tab.mutex.Lock()
   640  	defer tab.mutex.Unlock()
   641  	if b.bump(new) {
   642  		return
   643  	}
   644  	var oldest *Node
   645  	if len(b.entries) == bucketSize {
   646  		oldest = b.entries[bucketSize-1]
   647  		if oldest.contested {
   648  			// The node is already being replaced, don't attempt
   649  			// to replace it.
   650  			return
   651  		}
   652  		oldest.contested = true
   653  		// Let go of the mutex so other goroutines can access
   654  		// the table while we ping the least recently active node.
   655  		tab.mutex.Unlock()
   656  		fmt.Println(" ***** Table add tab.ping oldest.ID:", oldest.ID)
   657  		err := tab.ping(oldest.ID, oldest.addr())
   658  		tab.mutex.Lock()
   659  		oldest.contested = false
   660  		if err == nil {
   661  			// The node responded, don't replace it.
   662  			return
   663  		}
   664  	}
   665  	added := b.replace(new, oldest)
   666  	// nodeAddedHook for test...
   667  	if added && tab.nodeAddedHook != nil {
   668  		tab.nodeAddedHook(new)
   669  	}
   670  }
   671  
   672  // stuff adds the given nodes to the end of their corresponding bucket
   673  // if the bucket is not full. The caller must hold tab.mutex.
   674  func (tab *Table) stuff(nodes []*Node) {
   675  outer:
   676  	for _, n := range nodes {
   677  		if n.ID == tab.self.ID {
   678  			continue // don't add self
   679  		}
   680  		bucket := tab.buckets[logdist(tab.self.sha, n.sha)]
   681  		for i := range bucket.entries {
   682  			if bucket.entries[i].ID == n.ID {
   683  				continue outer // already in bucket
   684  			}
   685  		}
   686  		if len(bucket.entries) < bucketSize {
   687  			bucket.entries = append(bucket.entries, n)
   688  			// nodeAddedHook for test...
   689  			if tab.nodeAddedHook != nil {
   690  				tab.nodeAddedHook(n)
   691  			}
   692  		}
   693  	}
   694  }
   695  
   696  // delete removes an entry from the node table (used to evacuate
   697  // failed/non-bonded discovery peers).
   698  func (tab *Table) delete(node *Node) {
   699  	tab.mutex.Lock()
   700  	defer tab.mutex.Unlock()
   701  	bucket := tab.buckets[logdist(tab.self.sha, node.sha)]
   702  	for i := range bucket.entries {
   703  		if bucket.entries[i].ID == node.ID {
   704  			bucket.entries = append(bucket.entries[:i], bucket.entries[i+1:]...)
   705  			return
   706  		}
   707  	}
   708  }
   709  
   710  func (b *bucket) replace(n *Node, last *Node) bool {
   711  	// Don't add if b already contains n.
   712  	for i := range b.entries {
   713  		if b.entries[i].ID == n.ID {
   714  			return false
   715  		}
   716  	}
   717  	// Replace last if it is still the last entry or just add n if b
   718  	// isn't full. If last is no longer the last entry, it has either been
   719  	// replaced with someone else or became active.
   720  	if len(b.entries) == bucketSize && (last == nil || b.entries[bucketSize-1].ID != last.ID) {
   721  		return false
   722  	}
   723  	if len(b.entries) < bucketSize {
   724  		b.entries = append(b.entries, nil)
   725  	}
   726  	copy(b.entries[1:], b.entries)
   727  	b.entries[0] = n
   728  	return true
   729  }
   730  
   731  func (b *bucket) bump(n *Node) bool {
   732  	for i := range b.entries {
   733  		if b.entries[i].ID == n.ID {
   734  			// move it to the front
   735  			copy(b.entries[1:], b.entries[:i])
   736  			b.entries[0] = n
   737  			return true
   738  		}
   739  	}
   740  	return false
   741  }
   742  
   743  // nodesByDistance is a list of nodes, ordered by
   744  // distance to target.
   745  type nodesByDistance struct {
   746  	entries []*Node
   747  	target  common.Hash
   748  }
   749  
   750  // push adds the given node to the list, keeping the total size at most maxElems.
   751  func (h *nodesByDistance) push(n *Node, maxElems int) {
   752  	ix := sort.Search(len(h.entries), func(i int) bool {
   753  		return distcmp(h.target, h.entries[i].sha, n.sha) > 0
   754  	})
   755  	if len(h.entries) < maxElems {
   756  		h.entries = append(h.entries, n)
   757  	}
   758  	if ix == len(h.entries) {
   759  		// farther away than all nodes we already have.
   760  		// if there was room for it, the node is now the last element.
   761  	} else {
   762  		// slide existing entries down to make room
   763  		// this will overwrite the entry we just appended.
   764  		copy(h.entries[ix+1:], h.entries[ix:])
   765  		h.entries[ix] = n
   766  	}
   767  }