github.com/SmartMeshFoundation/Spectrum@v0.0.0-20220621030607-452a266fee1e/p2p/discv5/net.go (about)

     1  // Copyright 2016 The Spectrum Authors
     2  // This file is part of the Spectrum library.
     3  //
     4  // The Spectrum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The Spectrum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the Spectrum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package discv5
    18  
    19  import (
    20  	"bytes"
    21  	"crypto/ecdsa"
    22  	"errors"
    23  	"fmt"
    24  	"net"
    25  	"time"
    26  
    27  	"github.com/SmartMeshFoundation/Spectrum/common"
    28  	"github.com/SmartMeshFoundation/Spectrum/common/mclock"
    29  	"github.com/SmartMeshFoundation/Spectrum/crypto"
    30  	"github.com/SmartMeshFoundation/Spectrum/crypto/sha3"
    31  	"github.com/SmartMeshFoundation/Spectrum/log"
    32  	"github.com/SmartMeshFoundation/Spectrum/p2p/nat"
    33  	"github.com/SmartMeshFoundation/Spectrum/p2p/netutil"
    34  	"github.com/SmartMeshFoundation/Spectrum/rlp"
    35  )
    36  
// Errors returned by the per-node state machine and packet handlers.
var (
	errInvalidEvent = errors.New("invalid in current state") // event not accepted in the node's current state
	errNoQuery      = errors.New("no pending query")         // reply arrived with no outstanding query
	errWrongAddress = errors.New("unknown sender address")   // packet origin does not match expectations
)
    42  
const (
	autoRefreshInterval   = 1 * time.Hour       // period of the full table refresh
	bucketRefreshInterval = 1 * time.Minute     // period of single-bucket refresh lookups
	seedCount             = 30                  // max seed nodes loaded from the database per refresh
	seedMaxAge            = 5 * 24 * time.Hour  // database seeds older than this are ignored
	lowPort               = 1024                // neighbours advertising UDP port <= lowPort are rejected
)
    50  
// testTopic is a fixed topic name referenced by debug/statistics code.
const testTopic = "foo"

const (
	// printTestImgLogs enables the verbose radius/wait-period log lines
	// emitted from the stats dump; disabled in normal builds.
	printTestImgLogs = false
)
    56  
// Network manages the table and all protocol interaction.
// All fields in the "State of the main loop" section are owned by the
// loop goroutine and must not be touched from outside it; external
// callers communicate through the request channels below.
type Network struct {
	db          *nodeDB // database of known nodes
	conn        transport
	netrestrict *netutil.Netlist // optional IP whitelist for interned nodes

	closed           chan struct{}          // closed when loop is done
	closeReq         chan struct{}          // 'request to close'
	refreshReq       chan []*Node           // lookups ask for refresh on this channel
	refreshResp      chan (<-chan struct{}) // ...and get the channel to block on from this one
	read             chan ingressPacket     // ingress packets arrive here
	timeout          chan timeoutEvent      // expired state-machine timers
	queryReq         chan *findnodeQuery // lookups submit findnode queries on this channel
	tableOpReq       chan func()         // closures to run on the loop goroutine (table access)
	tableOpResp      chan struct{}       // signals completion of a tableOpReq closure
	topicRegisterReq chan topicRegisterReq
	topicSearchReq   chan topicSearchReq

	// State of the main loop.
	tab           *Table
	topictab      *topicTable
	ticketStore   *ticketStore
	nursery       []*Node // bootstrap fallback nodes
	nodes         map[NodeID]*Node // tracks active nodes with state != known
	timeoutTimers map[timeoutEvent]*time.Timer

	// Revalidation queues.
	// Nodes put on these queues will be pinged eventually.
	slowRevalidateQueue []*Node
	fastRevalidateQueue []*Node

	// Buffers for state transition.
	sendBuf []*ingressPacket
}
    91  
// transport is implemented by the UDP transport.
// it is an interface so we can test without opening lots of UDP
// sockets and without generating a private key.
type transport interface {
	// sendPing returns the hash of the ping packet, which the pong must echo.
	sendPing(remote *Node, remoteAddr *net.UDPAddr, topics []Topic) (hash []byte)
	sendNeighbours(remote *Node, nodes []*Node)
	sendFindnodeHash(remote *Node, target common.Hash)
	sendTopicRegister(remote *Node, topics []Topic, topicIdx int, pong []byte)
	sendTopicNodes(remote *Node, queryHash common.Hash, nodes []*Node)

	// send transmits an arbitrary packet and returns its hash.
	send(remote *Node, ptype nodeEvent, p interface{}) (hash []byte)

	localAddr() *net.UDPAddr
	Close()
}
   107  
// findnodeQuery is a pending findnode request against one remote node.
type findnodeQuery struct {
	remote   *Node
	target   common.Hash
	reply    chan<- []*Node // receives the result (nil on failure/abort)
	nresults int // counter for received nodes
}
   114  
// topicRegisterReq asks the main loop to start (add=true) or stop
// (add=false) advertising a topic.
type topicRegisterReq struct {
	add   bool
	topic Topic
}

// topicSearchReq asks the main loop to start, retune or stop a topic
// search. A zero delay requests removal of the search.
type topicSearchReq struct {
	topic  Topic
	found  chan<- *Node // receives discovered nodes
	lookup chan<- bool  // receives the radius-convergence flag per lookup
	delay  time.Duration
}
   126  
// topicSearchResult carries the outcome of one asynchronous search lookup
// back to the main loop.
type topicSearchResult struct {
	target lookupInfo
	nodes  []*Node
}

// timeoutEvent identifies one pending state-machine timer: the event to
// deliver and the node it fires for.
type timeoutEvent struct {
	ev   nodeEvent
	node *Node
}
   136  
   137  func newNetwork(conn transport, ourPubkey ecdsa.PublicKey, natm nat.Interface, dbPath string, netrestrict *netutil.Netlist) (*Network, error) {
   138  	ourID := PubkeyID(&ourPubkey)
   139  
   140  	var db *nodeDB
   141  	if dbPath != "<no database>" {
   142  		var err error
   143  		if db, err = newNodeDB(dbPath, Version, ourID); err != nil {
   144  			return nil, err
   145  		}
   146  	}
   147  
   148  	tab := newTable(ourID, conn.localAddr())
   149  	net := &Network{
   150  		db:               db,
   151  		conn:             conn,
   152  		netrestrict:      netrestrict,
   153  		tab:              tab,
   154  		topictab:         newTopicTable(db, tab.self),
   155  		ticketStore:      newTicketStore(),
   156  		refreshReq:       make(chan []*Node),
   157  		refreshResp:      make(chan (<-chan struct{})),
   158  		closed:           make(chan struct{}),
   159  		closeReq:         make(chan struct{}),
   160  		read:             make(chan ingressPacket, 100),
   161  		timeout:          make(chan timeoutEvent),
   162  		timeoutTimers:    make(map[timeoutEvent]*time.Timer),
   163  		tableOpReq:       make(chan func()),
   164  		tableOpResp:      make(chan struct{}),
   165  		queryReq:         make(chan *findnodeQuery),
   166  		topicRegisterReq: make(chan topicRegisterReq),
   167  		topicSearchReq:   make(chan topicSearchReq),
   168  		nodes:            make(map[NodeID]*Node),
   169  	}
   170  	go net.loop()
   171  	return net, nil
   172  }
   173  
   174  // Close terminates the network listener and flushes the node database.
   175  func (net *Network) Close() {
   176  	net.conn.Close()
   177  	select {
   178  	case <-net.closed:
   179  	case net.closeReq <- struct{}{}:
   180  		<-net.closed
   181  	}
   182  }
   183  
// Self returns the local node.
// The returned node should not be modified by the caller.
func (net *Network) Self() *Node {
	return net.tab.self
}

// ReadRandomNodes fills the given slice with random nodes from the
// table. It will not write the same node more than once. The nodes in
// the slice are copies and can be modified by the caller.
func (net *Network) ReadRandomNodes(buf []*Node) (n int) {
	// The closure runs on the loop goroutine; reqTableOp blocks until it
	// has finished, so reading n afterwards is safe.
	net.reqTableOp(func() { n = net.tab.readRandomNodes(buf) })
	return n
}
   197  
   198  // SetFallbackNodes sets the initial points of contact. These nodes
   199  // are used to connect to the network if the table is empty and there
   200  // are no known nodes in the database.
   201  func (net *Network) SetFallbackNodes(nodes []*Node) error {
   202  	nursery := make([]*Node, 0, len(nodes))
   203  	for _, n := range nodes {
   204  		if err := n.validateComplete(); err != nil {
   205  			return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err)
   206  		}
   207  		// Recompute cpy.sha because the node might not have been
   208  		// created by NewNode or ParseNode.
   209  		cpy := *n
   210  		cpy.sha = crypto.Keccak256Hash(n.ID[:])
   211  		nursery = append(nursery, &cpy)
   212  	}
   213  	net.reqRefresh(nursery)
   214  	return nil
   215  }
   216  
// Resolve searches for a specific node with the given ID.
// It returns nil if the node could not be found.
func (net *Network) Resolve(targetID NodeID) *Node {
	// Search for the keccak256 hash of the ID, stopping early on an
	// exact match, then pick the node with the matching ID out of the
	// result set.
	result := net.lookup(crypto.Keccak256Hash(targetID[:]), true)
	for _, n := range result {
		if n.ID == targetID {
			return n
		}
	}
	return nil
}

// Lookup performs a network search for nodes close
// to the given target. It approaches the target by querying
// nodes that are closer to it on each iteration.
// The given target does not need to be an actual node
// identifier.
//
// The local node may be included in the result.
func (net *Network) Lookup(targetID NodeID) []*Node {
	return net.lookup(crypto.Keccak256Hash(targetID[:]), false)
}
   239  
// lookup runs a Kademlia-style iterative search toward target. Each
// round asks up to alpha of the closest not-yet-asked nodes and folds
// their replies into the distance-sorted result set. If stopOnMatch is
// true, the search returns as soon as a node whose sha equals target is
// seen. The search ends when every node in the result set has been
// asked, or keeps going after a timeout with a fresh reply channel.
func (net *Network) lookup(target common.Hash, stopOnMatch bool) []*Node {
	var (
		asked          = make(map[NodeID]bool) // nodes a findnode was sent to
		seen           = make(map[NodeID]bool) // nodes that appeared in any reply
		reply          = make(chan []*Node, alpha)
		result         = nodesByDistance{target: target}
		pendingQueries = 0
	)
	// Get initial answers from the local node.
	result.push(net.tab.self, bucketSize)
	for {
		// Ask the α closest nodes that we haven't asked yet.
		for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
			n := result.entries[i]
			if !asked[n.ID] {
				asked[n.ID] = true
				pendingQueries++
				net.reqQueryFindnode(n, target, reply)
			}
		}
		if pendingQueries == 0 {
			// We have asked all closest nodes, stop the search.
			break
		}
		// Wait for the next reply.
		select {
		case nodes := <-reply:
			for _, n := range nodes {
				if n != nil && !seen[n.ID] {
					seen[n.ID] = true
					result.push(n, bucketSize)
					if stopOnMatch && n.sha == target {
						return result.entries
					}
				}
			}
			pendingQueries--
		case <-time.After(respTimeout):
			// forget all pending requests, start new ones
			// (replacing the channel drops any stale late replies)
			pendingQueries = 0
			reply = make(chan []*Node, alpha)
		}
	}
	return result.entries
}
   285  
// RegisterTopic advertises the local node under the given topic until
// stop is closed or the network shuts down. On stop it submits a
// deregistration request to the main loop, unless the network is
// already closed.
func (net *Network) RegisterTopic(topic Topic, stop <-chan struct{}) {
	select {
	case net.topicRegisterReq <- topicRegisterReq{true, topic}:
	case <-net.closed:
		return
	}
	select {
	case <-net.closed:
	case <-stop:
		select {
		case net.topicRegisterReq <- topicRegisterReq{false, topic}:
		case <-net.closed:
		}
	}
}
   301  
// SearchTopic searches for nodes advertising the given topic. Search
// periods arrive on setPeriod and are forwarded to the main loop as
// topicSearchReq messages; discovered nodes are delivered on found.
// When setPeriod is closed, the zero-valued delay received from the
// closed channel is still forwarded — it tells the loop to remove the
// search — and this function then returns.
func (net *Network) SearchTopic(topic Topic, setPeriod <-chan time.Duration, found chan<- *Node, lookup chan<- bool) {
	for {
		select {
		case <-net.closed:
			return
		case delay, ok := <-setPeriod:
			// Forward before checking ok: the final zero delay is the
			// removal signal for the main loop.
			select {
			case net.topicSearchReq <- topicSearchReq{topic: topic, found: found, lookup: lookup, delay: delay}:
			case <-net.closed:
				return
			}
			if !ok {
				return
			}
		}
	}
}
   319  
   320  func (net *Network) reqRefresh(nursery []*Node) <-chan struct{} {
   321  	select {
   322  	case net.refreshReq <- nursery:
   323  		return <-net.refreshResp
   324  	case <-net.closed:
   325  		return net.closed
   326  	}
   327  }
   328  
   329  func (net *Network) reqQueryFindnode(n *Node, target common.Hash, reply chan []*Node) bool {
   330  	q := &findnodeQuery{remote: n, target: target, reply: reply}
   331  	select {
   332  	case net.queryReq <- q:
   333  		return true
   334  	case <-net.closed:
   335  		return false
   336  	}
   337  }
   338  
   339  func (net *Network) reqReadPacket(pkt ingressPacket) {
   340  	select {
   341  	case net.read <- pkt:
   342  	case <-net.closed:
   343  	}
   344  }
   345  
   346  func (net *Network) reqTableOp(f func()) (called bool) {
   347  	select {
   348  	case net.tableOpReq <- f:
   349  		<-net.tableOpResp
   350  		return true
   351  	case <-net.closed:
   352  		return false
   353  	}
   354  }
   355  
// TODO: external address handling.

// topicSearchInfo tracks one active topic search registered via SearchTopic.
type topicSearchInfo struct {
	lookupChn chan<- bool   // receives the radius-convergence flag after each lookup
	period    time.Duration // delay between successive search lookups; 0 removes the search
}

// maxSearchCount bounds the number of concurrently running search lookups.
const maxSearchCount = 5
   364  
   365  func (net *Network) loop() {
   366  	var (
   367  		refreshTimer       = time.NewTicker(autoRefreshInterval)
   368  		bucketRefreshTimer = time.NewTimer(bucketRefreshInterval)
   369  		refreshDone        chan struct{} // closed when the 'refresh' lookup has ended
   370  	)
   371  
   372  	// Tracking the next ticket to register.
   373  	var (
   374  		nextTicket        *ticketRef
   375  		nextRegisterTimer *time.Timer
   376  		nextRegisterTime  <-chan time.Time
   377  	)
   378  	defer func() {
   379  		if nextRegisterTimer != nil {
   380  			nextRegisterTimer.Stop()
   381  		}
   382  	}()
   383  	resetNextTicket := func() {
   384  		ticket, timeout := net.ticketStore.nextFilteredTicket()
   385  		if nextTicket != ticket {
   386  			nextTicket = ticket
   387  			if nextRegisterTimer != nil {
   388  				nextRegisterTimer.Stop()
   389  				nextRegisterTime = nil
   390  			}
   391  			if ticket != nil {
   392  				nextRegisterTimer = time.NewTimer(timeout)
   393  				nextRegisterTime = nextRegisterTimer.C
   394  			}
   395  		}
   396  	}
   397  
   398  	// Tracking registration and search lookups.
   399  	var (
   400  		topicRegisterLookupTarget lookupInfo
   401  		topicRegisterLookupDone   chan []*Node
   402  		topicRegisterLookupTick   = time.NewTimer(0)
   403  		searchReqWhenRefreshDone  []topicSearchReq
   404  		searchInfo                = make(map[Topic]topicSearchInfo)
   405  		activeSearchCount         int
   406  	)
   407  	topicSearchLookupDone := make(chan topicSearchResult, 100)
   408  	topicSearch := make(chan Topic, 100)
   409  	<-topicRegisterLookupTick.C
   410  
   411  	statsDump := time.NewTicker(10 * time.Second)
   412  
   413  loop:
   414  	for {
   415  		resetNextTicket()
   416  
   417  		select {
   418  		case <-net.closeReq:
   419  			log.Trace("<-net.closeReq")
   420  			break loop
   421  
   422  		// Ingress packet handling.
   423  		case pkt := <-net.read:
   424  			//fmt.Println("read", pkt.ev)
   425  			log.Trace("<-net.read")
   426  			n := net.internNode(&pkt)
   427  			prestate := n.state
   428  			status := "ok"
   429  			if err := net.handle(n, pkt.ev, &pkt); err != nil {
   430  				status = err.Error()
   431  			}
   432  			log.Trace("", "msg", log.Lazy{Fn: func() string {
   433  				return fmt.Sprintf("<<< (%d) %v from %x@%v: %v -> %v (%v)",
   434  					net.tab.count, pkt.ev, pkt.remoteID[:8], pkt.remoteAddr, prestate, n.state, status)
   435  			}})
   436  			// TODO: persist state if n.state goes >= known, delete if it goes <= known
   437  
   438  		// State transition timeouts.
   439  		case timeout := <-net.timeout:
   440  			log.Trace("<-net.timeout")
   441  			if net.timeoutTimers[timeout] == nil {
   442  				// Stale timer (was aborted).
   443  				continue
   444  			}
   445  			delete(net.timeoutTimers, timeout)
   446  			prestate := timeout.node.state
   447  			status := "ok"
   448  			if err := net.handle(timeout.node, timeout.ev, nil); err != nil {
   449  				status = err.Error()
   450  			}
   451  			log.Trace("", "msg", log.Lazy{Fn: func() string {
   452  				return fmt.Sprintf("--- (%d) %v for %x@%v: %v -> %v (%v)",
   453  					net.tab.count, timeout.ev, timeout.node.ID[:8], timeout.node.addr(), prestate, timeout.node.state, status)
   454  			}})
   455  
   456  		// Querying.
   457  		case q := <-net.queryReq:
   458  			log.Trace("<-net.queryReq")
   459  			if !q.start(net) {
   460  				q.remote.deferQuery(q)
   461  			}
   462  
   463  		// Interacting with the table.
   464  		case f := <-net.tableOpReq:
   465  			log.Trace("<-net.tableOpReq")
   466  			f()
   467  			net.tableOpResp <- struct{}{}
   468  
   469  		// Topic registration stuff.
   470  		case req := <-net.topicRegisterReq:
   471  			log.Trace("<-net.topicRegisterReq")
   472  			if !req.add {
   473  				net.ticketStore.removeRegisterTopic(req.topic)
   474  				continue
   475  			}
   476  			net.ticketStore.addTopic(req.topic, true)
   477  			// If we're currently waiting idle (nothing to look up), give the ticket store a
   478  			// chance to start it sooner. This should speed up convergence of the radius
   479  			// determination for new topics.
   480  			// if topicRegisterLookupDone == nil {
   481  			if topicRegisterLookupTarget.target == (common.Hash{}) {
   482  				log.Trace("topicRegisterLookupTarget == null")
   483  				if topicRegisterLookupTick.Stop() {
   484  					<-topicRegisterLookupTick.C
   485  				}
   486  				target, delay := net.ticketStore.nextRegisterLookup()
   487  				topicRegisterLookupTarget = target
   488  				topicRegisterLookupTick.Reset(delay)
   489  			}
   490  
   491  		case nodes := <-topicRegisterLookupDone:
   492  			log.Trace("<-topicRegisterLookupDone")
   493  			net.ticketStore.registerLookupDone(topicRegisterLookupTarget, nodes, func(n *Node) []byte {
   494  				net.ping(n, n.addr())
   495  				return n.pingEcho
   496  			})
   497  			target, delay := net.ticketStore.nextRegisterLookup()
   498  			topicRegisterLookupTarget = target
   499  			topicRegisterLookupTick.Reset(delay)
   500  			topicRegisterLookupDone = nil
   501  
   502  		case <-topicRegisterLookupTick.C:
   503  			log.Trace("<-topicRegisterLookupTick")
   504  			if (topicRegisterLookupTarget.target == common.Hash{}) {
   505  				target, delay := net.ticketStore.nextRegisterLookup()
   506  				topicRegisterLookupTarget = target
   507  				topicRegisterLookupTick.Reset(delay)
   508  				topicRegisterLookupDone = nil
   509  			} else {
   510  				topicRegisterLookupDone = make(chan []*Node)
   511  				target := topicRegisterLookupTarget.target
   512  				go func() { topicRegisterLookupDone <- net.lookup(target, false) }()
   513  			}
   514  
   515  		case <-nextRegisterTime:
   516  			log.Trace("<-nextRegisterTime")
   517  			net.ticketStore.ticketRegistered(*nextTicket)
   518  			//fmt.Println("sendTopicRegister", nextTicket.t.node.addr().String(), nextTicket.t.topics, nextTicket.idx, nextTicket.t.pong)
   519  			net.conn.sendTopicRegister(nextTicket.t.node, nextTicket.t.topics, nextTicket.idx, nextTicket.t.pong)
   520  
   521  		case req := <-net.topicSearchReq:
   522  			if refreshDone == nil {
   523  				log.Trace("<-net.topicSearchReq")
   524  				info, ok := searchInfo[req.topic]
   525  				if ok {
   526  					if req.delay == time.Duration(0) {
   527  						delete(searchInfo, req.topic)
   528  						net.ticketStore.removeSearchTopic(req.topic)
   529  					} else {
   530  						info.period = req.delay
   531  						searchInfo[req.topic] = info
   532  					}
   533  					continue
   534  				}
   535  				if req.delay != time.Duration(0) {
   536  					var info topicSearchInfo
   537  					info.period = req.delay
   538  					info.lookupChn = req.lookup
   539  					searchInfo[req.topic] = info
   540  					net.ticketStore.addSearchTopic(req.topic, req.found)
   541  					topicSearch <- req.topic
   542  				}
   543  			} else {
   544  				searchReqWhenRefreshDone = append(searchReqWhenRefreshDone, req)
   545  			}
   546  
   547  		case topic := <-topicSearch:
   548  			if activeSearchCount < maxSearchCount {
   549  				activeSearchCount++
   550  				target := net.ticketStore.nextSearchLookup(topic)
   551  				go func() {
   552  					nodes := net.lookup(target.target, false)
   553  					topicSearchLookupDone <- topicSearchResult{target: target, nodes: nodes}
   554  				}()
   555  			}
   556  			period := searchInfo[topic].period
   557  			if period != time.Duration(0) {
   558  				go func() {
   559  					time.Sleep(period)
   560  					topicSearch <- topic
   561  				}()
   562  			}
   563  
   564  		case res := <-topicSearchLookupDone:
   565  			activeSearchCount--
   566  			if lookupChn := searchInfo[res.target.topic].lookupChn; lookupChn != nil {
   567  				lookupChn <- net.ticketStore.radius[res.target.topic].converged
   568  			}
   569  			net.ticketStore.searchLookupDone(res.target, res.nodes, func(n *Node) []byte {
   570  				net.ping(n, n.addr())
   571  				return n.pingEcho
   572  			}, func(n *Node, topic Topic) []byte {
   573  				if n.state == known {
   574  					return net.conn.send(n, topicQueryPacket, topicQuery{Topic: topic}) // TODO: set expiration
   575  				} else {
   576  					if n.state == unknown {
   577  						net.ping(n, n.addr())
   578  					}
   579  					return nil
   580  				}
   581  			})
   582  
   583  		case <-statsDump.C:
   584  			log.Trace("<-statsDump.C")
   585  			/*r, ok := net.ticketStore.radius[testTopic]
   586  			if !ok {
   587  				fmt.Printf("(%x) no radius @ %v\n", net.tab.self.ID[:8], time.Now())
   588  			} else {
   589  				topics := len(net.ticketStore.tickets)
   590  				tickets := len(net.ticketStore.nodes)
   591  				rad := r.radius / (maxRadius/10000+1)
   592  				fmt.Printf("(%x) topics:%d radius:%d tickets:%d @ %v\n", net.tab.self.ID[:8], topics, rad, tickets, time.Now())
   593  			}*/
   594  
   595  			tm := mclock.Now()
   596  			for topic, r := range net.ticketStore.radius {
   597  				if printTestImgLogs {
   598  					rad := r.radius / (maxRadius/1000000 + 1)
   599  					minrad := r.minRadius / (maxRadius/1000000 + 1)
   600  					fmt.Printf("*R %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha[:8], rad)
   601  					fmt.Printf("*MR %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha[:8], minrad)
   602  				}
   603  			}
   604  			for topic, t := range net.topictab.topics {
   605  				wp := t.wcl.nextWaitPeriod(tm)
   606  				if printTestImgLogs {
   607  					fmt.Printf("*W %d %v %016x %d\n", tm/1000000, topic, net.tab.self.sha[:8], wp/1000000)
   608  				}
   609  			}
   610  
   611  		// Periodic / lookup-initiated bucket refresh.
   612  		case <-refreshTimer.C:
   613  			log.Trace("<-refreshTimer.C")
   614  			// TODO: ideally we would start the refresh timer after
   615  			// fallback nodes have been set for the first time.
   616  			if refreshDone == nil {
   617  				refreshDone = make(chan struct{})
   618  				net.refresh(refreshDone)
   619  			}
   620  		case <-bucketRefreshTimer.C:
   621  			target := net.tab.chooseBucketRefreshTarget()
   622  			go func() {
   623  				net.lookup(target, false)
   624  				bucketRefreshTimer.Reset(bucketRefreshInterval)
   625  			}()
   626  		case newNursery := <-net.refreshReq:
   627  			log.Trace("<-net.refreshReq")
   628  			if newNursery != nil {
   629  				net.nursery = newNursery
   630  			}
   631  			if refreshDone == nil {
   632  				refreshDone = make(chan struct{})
   633  				net.refresh(refreshDone)
   634  			}
   635  			net.refreshResp <- refreshDone
   636  		case <-refreshDone:
   637  			log.Trace("<-net.refreshDone")
   638  			refreshDone = nil
   639  			list := searchReqWhenRefreshDone
   640  			searchReqWhenRefreshDone = nil
   641  			go func() {
   642  				for _, req := range list {
   643  					net.topicSearchReq <- req
   644  				}
   645  			}()
   646  		}
   647  	}
   648  	log.Trace("loop stopped")
   649  
   650  	log.Debug(fmt.Sprintf("shutting down"))
   651  	if net.conn != nil {
   652  		net.conn.Close()
   653  	}
   654  	if refreshDone != nil {
   655  		// TODO: wait for pending refresh.
   656  		//<-refreshResults
   657  	}
   658  	// Cancel all pending timeouts.
   659  	for _, timer := range net.timeoutTimers {
   660  		timer.Stop()
   661  	}
   662  	if net.db != nil {
   663  		net.db.close()
   664  	}
   665  	close(net.closed)
   666  }
   667  
   668  // Everything below runs on the Network.loop goroutine
   669  // and can modify Node, Table and Network at any time without locking.
   670  
// refresh (re)populates the table. It pings seed nodes taken from the
// database (or, failing that, the configured nursery) and then starts a
// self-lookup to fill the buckets. done is closed when the lookup has
// finished, or immediately if no seeds are available at all.
func (net *Network) refresh(done chan<- struct{}) {
	var seeds []*Node
	if net.db != nil {
		seeds = net.db.querySeeds(seedCount, seedMaxAge)
	}
	if len(seeds) == 0 {
		seeds = net.nursery
	}
	if len(seeds) == 0 {
		log.Trace("no seed nodes found")
		close(done)
		return
	}
	for _, n := range seeds {
		log.Debug("", "msg", log.Lazy{Fn: func() string {
			var age string
			if net.db != nil {
				age = time.Since(net.db.lastPong(n.ID)).String()
			} else {
				age = "unknown"
			}
			return fmt.Sprintf("seed node (age %s): %v", age, n)
		}})
		n = net.internNodeFromDB(n)
		if n.state == unknown {
			net.transition(n, verifyinit)
		}
		// Force-add the seed node so Lookup does something.
		// It will be deleted again if verification fails.
		net.tab.add(n)
	}
	// Start self lookup to fill up the buckets.
	go func() {
		net.Lookup(net.tab.self.ID)
		close(done)
	}()
}
   708  
   709  // Node Interning.
   710  
   711  func (net *Network) internNode(pkt *ingressPacket) *Node {
   712  	if n := net.nodes[pkt.remoteID]; n != nil {
   713  		n.IP = pkt.remoteAddr.IP
   714  		n.UDP = uint16(pkt.remoteAddr.Port)
   715  		n.TCP = uint16(pkt.remoteAddr.Port)
   716  		return n
   717  	}
   718  	n := NewNode(pkt.remoteID, pkt.remoteAddr.IP, uint16(pkt.remoteAddr.Port), uint16(pkt.remoteAddr.Port))
   719  	n.state = unknown
   720  	net.nodes[pkt.remoteID] = n
   721  	return n
   722  }
   723  
   724  func (net *Network) internNodeFromDB(dbn *Node) *Node {
   725  	if n := net.nodes[dbn.ID]; n != nil {
   726  		return n
   727  	}
   728  	n := NewNode(dbn.ID, dbn.IP, dbn.UDP, dbn.TCP)
   729  	n.state = unknown
   730  	net.nodes[n.ID] = n
   731  	return n
   732  }
   733  
   734  func (net *Network) internNodeFromNeighbours(sender *net.UDPAddr, rn rpcNode) (n *Node, err error) {
   735  	if rn.ID == net.tab.self.ID {
   736  		return nil, errors.New("is self")
   737  	}
   738  	if rn.UDP <= lowPort {
   739  		return nil, errors.New("low port")
   740  	}
   741  	n = net.nodes[rn.ID]
   742  	if n == nil {
   743  		// We haven't seen this node before.
   744  		n, err = nodeFromRPC(sender, rn)
   745  		if net.netrestrict != nil && !net.netrestrict.Contains(n.IP) {
   746  			return n, errors.New("not contained in netrestrict whitelist")
   747  		}
   748  		if err == nil {
   749  			n.state = unknown
   750  			net.nodes[n.ID] = n
   751  		}
   752  		return n, err
   753  	}
   754  	if !n.IP.Equal(rn.IP) || n.UDP != rn.UDP || n.TCP != rn.TCP {
   755  		err = fmt.Errorf("metadata mismatch: got %v, want %v", rn, n)
   756  	}
   757  	return n, err
   758  }
   759  
// nodeNetGuts is embedded in Node and contains fields.
type nodeNetGuts struct {
	// This is a cached copy of sha3(ID) which is used for node
	// distance calculations. This is part of Node in order to make it
	// possible to write tests that need a node at a certain distance.
	// In those tests, the content of sha will not actually correspond
	// with ID.
	sha common.Hash

	// State machine fields. Access to these fields
	// is restricted to the Network.loop goroutine.
	state             *nodeState
	pingEcho          []byte           // hash of last ping sent by us
	pingTopics        []Topic          // topic set sent by us in last ping
	deferredQueries   []*findnodeQuery // queries that can't be sent yet
	pendingNeighbours *findnodeQuery   // current query, waiting for reply
	queryTimeouts     int              // consecutive findnode timeouts observed
}
   778  
// deferQuery queues a findnode query that cannot be sent yet; it is
// retried by startNextQuery once the node can accept queries.
func (n *nodeNetGuts) deferQuery(q *findnodeQuery) {
	n.deferredQueries = append(n.deferredQueries, q)
}
   782  
   783  func (n *nodeNetGuts) startNextQuery(net *Network) {
   784  	if len(n.deferredQueries) == 0 {
   785  		return
   786  	}
   787  	nextq := n.deferredQueries[0]
   788  	if nextq.start(net) {
   789  		n.deferredQueries = append(n.deferredQueries[:0], n.deferredQueries[1:]...)
   790  	}
   791  }
   792  
// start attempts to issue the query. It returns true if the query was
// answered locally or sent on the wire, and false if it must be
// deferred until the remote node is able to accept queries.
func (q *findnodeQuery) start(net *Network) bool {
	// Satisfy queries against the local node directly.
	if q.remote == net.tab.self {
		// NOTE(review): the target is hashed again here before the
		// closest-node lookup — verify this double hashing is intended.
		closest := net.tab.closest(crypto.Keccak256Hash(q.target[:]), bucketSize)
		q.reply <- closest.entries
		return true
	}
	if q.remote.state.canQuery && q.remote.pendingNeighbours == nil {
		net.conn.sendFindnodeHash(q.remote, q.target)
		net.timedEvent(respTimeout, q.remote, neighboursTimeout)
		q.remote.pendingNeighbours = q
		return true
	}
	// If the node is not known yet, it won't accept queries.
	// Initiate the transition to known.
	// The request will be sent later when the node reaches known state.
	if q.remote.state == unknown {
		net.transition(q.remote, verifyinit)
	}
	return false
}
   814  
// Node Events (the input to the state machine).

// nodeEvent identifies an input to the per-node state machine: either a
// received packet type or an internally generated timeout.
type nodeEvent uint

//go:generate stringer -type=nodeEvent

const (
	invalidEvent nodeEvent = iota // zero is reserved

	// Packet type events.
	// These correspond to packet types in the UDP protocol.
	pingPacket
	pongPacket
	findnodePacket
	neighborsPacket
	findnodeHashPacket
	topicRegisterPacket
	topicQueryPacket
	topicNodesPacket

	// Non-packet events.
	// Event values in this category are allocated outside
	// the packet type range (packet types are encoded as a single byte).
	pongTimeout nodeEvent = iota + 256
	pingTimeout
	neighboursTimeout
)
   842  
// Node State Machine.

// nodeState describes one state of the per-node state machine.
type nodeState struct {
	name     string // human-readable state name (returned by String)
	handle   func(*Network, *Node, nodeEvent, *ingressPacket) (next *nodeState, err error) // processes an event, yields the next state
	enter    func(*Network, *Node) // optional hook run when the state is entered
	canQuery bool // whether findnode queries may be sent to a node in this state
}
   851  
// String returns the state's name, making nodeState usable in log output.
func (s *nodeState) String() string {
	return s.name
}
   855  
// The states of the per-node state machine. They are wired up with
// their handlers in the init function below.
var (
	unknown          *nodeState
	verifyinit       *nodeState
	verifywait       *nodeState
	remoteverifywait *nodeState
	known            *nodeState
	contested        *nodeState
	unresponsive     *nodeState
)
   865  
// The states are wired up in init rather than declared directly because
// their handlers reference each other, which would otherwise form an
// initialization cycle.
func init() {
	// unknown is the initial state. Entering it removes the node from
	// the table and aborts all pending queries so waiters are unblocked.
	unknown = &nodeState{
		name: "unknown",
		enter: func(net *Network, n *Node) {
			net.tab.delete(n)
			n.pingEcho = nil
			// Abort active queries.
			for _, q := range n.deferredQueries {
				q.reply <- nil
			}
			n.deferredQueries = nil
			if n.pendingNeighbours != nil {
				n.pendingNeighbours.reply <- nil
				n.pendingNeighbours = nil
			}
			n.queryTimeouts = 0
		},
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pingPacket:
				// Answer the ping and ping back to verify the sender's
				// endpoint before trusting it.
				net.handlePing(n, pkt)
				net.ping(n, pkt.remoteAddr)
				return verifywait, nil
			default:
				return unknown, errInvalidEvent
			}
		},
	}

	// verifyinit: we initiate verification by pinging the node on entry
	// and wait for the matching pong.
	verifyinit = &nodeState{
		name: "verifyinit",
		enter: func(net *Network, n *Node) {
			net.ping(n, n.addr())
		},
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pingPacket:
				net.handlePing(n, pkt)
				return verifywait, nil
			case pongPacket:
				// Our ping was answered; give the remote side a chance
				// to finish verifying us.
				err := net.handleKnownPong(n, pkt)
				return remoteverifywait, err
			case pongTimeout:
				return unknown, nil
			default:
				return verifyinit, errInvalidEvent
			}
		},
	}

	// verifywait: we are waiting for the node's pong to our ping
	// (sent when it first contacted us).
	verifywait = &nodeState{
		name: "verifywait",
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pingPacket:
				net.handlePing(n, pkt)
				return verifywait, nil
			case pongPacket:
				err := net.handleKnownPong(n, pkt)
				return known, err
			case pongTimeout:
				return unknown, nil
			default:
				return verifywait, errInvalidEvent
			}
		},
	}

	// remoteverifywait: we verified the node but wait respTimeout so the
	// remote side can finish verifying us; then promote to known.
	remoteverifywait = &nodeState{
		name: "remoteverifywait",
		enter: func(net *Network, n *Node) {
			net.timedEvent(respTimeout, n, pingTimeout)
		},
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pingPacket:
				net.handlePing(n, pkt)
				return remoteverifywait, nil
			case pingTimeout:
				return known, nil
			default:
				return remoteverifywait, errInvalidEvent
			}
		},
	}

	// known: the node is fully verified; it may be queried and lives in
	// the table.
	known = &nodeState{
		name:     "known",
		canQuery: true,
		enter: func(net *Network, n *Node) {
			n.queryTimeouts = 0
			n.startNextQuery(net)
			// Insert into the table and start revalidation of the last node
			// in the bucket if it is full.
			last := net.tab.add(n)
			if last != nil && last.state == known {
				// TODO: do this asynchronously
				net.transition(last, contested)
			}
		},
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pingPacket:
				net.handlePing(n, pkt)
				return known, nil
			case pongPacket:
				err := net.handleKnownPong(n, pkt)
				return known, err
			default:
				return net.handleQueryEvent(n, ev, pkt)
			}
		},
	}

	// contested: the node's bucket is full; it is re-pinged on entry and
	// dropped from the table if the pong times out.
	contested = &nodeState{
		name:     "contested",
		canQuery: true,
		enter: func(net *Network, n *Node) {
			net.ping(n, n.addr())
		},
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pongPacket:
				// Node is still alive.
				err := net.handleKnownPong(n, pkt)
				return known, err
			case pongTimeout:
				// No answer: remove from the table (deleteReplace
				// presumably promotes a replacement entry — see table).
				net.tab.deleteReplace(n)
				return unresponsive, nil
			case pingPacket:
				net.handlePing(n, pkt)
				return contested, nil
			default:
				return net.handleQueryEvent(n, ev, pkt)
			}
		},
	}

	// unresponsive: the node failed revalidation; any valid ping or pong
	// promotes it straight back to known.
	unresponsive = &nodeState{
		name:     "unresponsive",
		canQuery: true,
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pingPacket:
				net.handlePing(n, pkt)
				return known, nil
			case pongPacket:
				err := net.handleKnownPong(n, pkt)
				return known, err
			default:
				return net.handleQueryEvent(n, ev, pkt)
			}
		},
	}
}
  1021  
  1022  // handle processes packets sent by n and events related to n.
  1023  func (net *Network) handle(n *Node, ev nodeEvent, pkt *ingressPacket) error {
  1024  	//fmt.Println("handle", n.addr().String(), n.state, ev)
  1025  	if pkt != nil {
  1026  		if err := net.checkPacket(n, ev, pkt); err != nil {
  1027  			//fmt.Println("check err:", err)
  1028  			return err
  1029  		}
  1030  		// Start the background expiration goroutine after the first
  1031  		// successful communication. Subsequent calls have no effect if it
  1032  		// is already running. We do this here instead of somewhere else
  1033  		// so that the search for seed nodes also considers older nodes
  1034  		// that would otherwise be removed by the expirer.
  1035  		if net.db != nil {
  1036  			net.db.ensureExpirer()
  1037  		}
  1038  	}
  1039  	if n.state == nil {
  1040  		n.state = unknown //???
  1041  	}
  1042  	next, err := n.state.handle(net, n, ev, pkt)
  1043  	net.transition(n, next)
  1044  	//fmt.Println("new state:", n.state)
  1045  	return err
  1046  }
  1047  
  1048  func (net *Network) checkPacket(n *Node, ev nodeEvent, pkt *ingressPacket) error {
  1049  	// Replay prevention checks.
  1050  	switch ev {
  1051  	case pingPacket, findnodeHashPacket, neighborsPacket:
  1052  		// TODO: check date is > last date seen
  1053  		// TODO: check ping version
  1054  	case pongPacket:
  1055  		if !bytes.Equal(pkt.data.(*pong).ReplyTok, n.pingEcho) {
  1056  			// fmt.Println("pong reply token mismatch")
  1057  			return fmt.Errorf("pong reply token mismatch")
  1058  		}
  1059  		n.pingEcho = nil
  1060  	}
  1061  	// Address validation.
  1062  	// TODO: Ideally we would do the following:
  1063  	//  - reject all packets with wrong address except ping.
  1064  	//  - for ping with new address, transition to verifywait but keep the
  1065  	//    previous node (with old address) around. if the new one reaches known,
  1066  	//    swap it out.
  1067  	return nil
  1068  }
  1069  
  1070  func (net *Network) transition(n *Node, next *nodeState) {
  1071  	if n.state != next {
  1072  		n.state = next
  1073  		if next.enter != nil {
  1074  			next.enter(net, n)
  1075  		}
  1076  	}
  1077  
  1078  	// TODO: persist/unpersist node
  1079  }
  1080  
  1081  func (net *Network) timedEvent(d time.Duration, n *Node, ev nodeEvent) {
  1082  	timeout := timeoutEvent{ev, n}
  1083  	net.timeoutTimers[timeout] = time.AfterFunc(d, func() {
  1084  		select {
  1085  		case net.timeout <- timeout:
  1086  		case <-net.closed:
  1087  		}
  1088  	})
  1089  }
  1090  
  1091  func (net *Network) abortTimedEvent(n *Node, ev nodeEvent) {
  1092  	timer := net.timeoutTimers[timeoutEvent{ev, n}]
  1093  	if timer != nil {
  1094  		timer.Stop()
  1095  		delete(net.timeoutTimers, timeoutEvent{ev, n})
  1096  	}
  1097  }
  1098  
  1099  func (net *Network) ping(n *Node, addr *net.UDPAddr) {
  1100  	//fmt.Println("ping", n.addr().String(), n.ID.String(), n.sha.Hex())
  1101  	if n.pingEcho != nil || n.ID == net.tab.self.ID {
  1102  		//fmt.Println(" not sent")
  1103  		return
  1104  	}
  1105  	log.Trace("Pinging remote node", "node", n.ID)
  1106  	n.pingTopics = net.ticketStore.regTopicSet()
  1107  	n.pingEcho = net.conn.sendPing(n, addr, n.pingTopics)
  1108  	net.timedEvent(respTimeout, n, pongTimeout)
  1109  }
  1110  
  1111  func (net *Network) handlePing(n *Node, pkt *ingressPacket) {
  1112  	log.Trace("Handling remote ping", "node", n.ID)
  1113  	ping := pkt.data.(*ping)
  1114  	n.TCP = ping.From.TCP
  1115  	t := net.topictab.getTicket(n, ping.Topics)
  1116  
  1117  	pong := &pong{
  1118  		To:         makeEndpoint(n.addr(), n.TCP), // TODO: maybe use known TCP port from DB
  1119  		ReplyTok:   pkt.hash,
  1120  		Expiration: uint64(time.Now().Add(expiration).Unix()),
  1121  	}
  1122  	ticketToPong(t, pong)
  1123  	net.conn.send(n, pongPacket, pong)
  1124  }
  1125  
  1126  func (net *Network) handleKnownPong(n *Node, pkt *ingressPacket) error {
  1127  	log.Trace("Handling known pong", "node", n.ID)
  1128  	net.abortTimedEvent(n, pongTimeout)
  1129  	now := mclock.Now()
  1130  	ticket, err := pongToTicket(now, n.pingTopics, n, pkt)
  1131  	if err == nil {
  1132  		// fmt.Printf("(%x) ticket: %+v\n", net.tab.self.ID[:8], pkt.data)
  1133  		net.ticketStore.addTicket(now, pkt.data.(*pong).ReplyTok, ticket)
  1134  	} else {
  1135  		log.Trace("Failed to convert pong to ticket", "err", err)
  1136  	}
  1137  	n.pingEcho = nil
  1138  	n.pingTopics = nil
  1139  	return err
  1140  }
  1141  
// handleQueryEvent processes query-related packets and timeouts shared
// by all states with canQuery set (known, contested, unresponsive).
// It returns the next state: usually the current one, unless repeated
// query timeouts demote a known node to contested.
func (net *Network) handleQueryEvent(n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
	switch ev {
	case findnodePacket:
		// The target is a public key; hash it to obtain the distance
		// metric used by the table, then answer with the closest nodes.
		target := crypto.Keccak256Hash(pkt.data.(*findnode).Target[:])
		results := net.tab.closest(target, bucketSize).entries
		net.conn.sendNeighbours(n, results)
		return n.state, nil
	case neighborsPacket:
		// Answer to one of our findnode queries.
		err := net.handleNeighboursPacket(n, pkt)
		return n.state, err
	case neighboursTimeout:
		// Our findnode query went unanswered: unblock the waiter and
		// count the failure.
		if n.pendingNeighbours != nil {
			n.pendingNeighbours.reply <- nil
			n.pendingNeighbours = nil
		}
		n.queryTimeouts++
		if n.queryTimeouts > maxFindnodeFailures && n.state == known {
			return contested, errors.New("too many timeouts")
		}
		return n.state, nil

	// v5

	case findnodeHashPacket:
		// v5 variant: the target is already a hash, no Keccak needed.
		results := net.tab.closest(pkt.data.(*findnodeHash).Target, bucketSize).entries
		net.conn.sendNeighbours(n, results)
		return n.state, nil
	case topicRegisterPacket:
		regdata := pkt.data.(*topicRegister)
		// Verify the attached ticket pong before accepting registration.
		pong, err := net.checkTopicRegister(regdata)
		if err != nil {
			return n.state, fmt.Errorf("bad waiting ticket: %v", err)
		}
		net.topictab.useTicket(n, pong.TicketSerial, regdata.Topics, int(regdata.Idx), pong.Expiration, pong.WaitPeriods)
		return n.state, nil
	case topicQueryPacket:
		// TODO: handle expiration
		topic := pkt.data.(*topicQuery).Topic
		results := net.topictab.getEntries(topic)
		if _, ok := net.ticketStore.tickets[topic]; ok {
			results = append(results, net.tab.self) // we're not registering in our own table but if we're advertising, return ourselves too
		}
		// Cap the answer at 10 entries (presumably to bound packet size).
		if len(results) > 10 {
			results = results[:10]
		}
		var hash common.Hash
		copy(hash[:], pkt.hash)
		net.conn.sendTopicNodes(n, hash, results)
		return n.state, nil
	case topicNodesPacket:
		p := pkt.data.(*topicNodes)
		// A true return is counted like a query timeout — TODO confirm
		// gotTopicNodes' return semantics against its definition.
		if net.ticketStore.gotTopicNodes(n, p.Echo, p.Nodes) {
			n.queryTimeouts++
			if n.queryTimeouts > maxFindnodeFailures && n.state == known {
				return contested, errors.New("too many timeouts")
			}
		}
		return n.state, nil

	default:
		return n.state, errInvalidEvent
	}
}
  1207  
  1208  func (net *Network) checkTopicRegister(data *topicRegister) (*pong, error) {
  1209  	var pongpkt ingressPacket
  1210  	if err := decodePacket(data.Pong, &pongpkt); err != nil {
  1211  		return nil, err
  1212  	}
  1213  	if pongpkt.ev != pongPacket {
  1214  		return nil, errors.New("is not pong packet")
  1215  	}
  1216  	if pongpkt.remoteID != net.tab.self.ID {
  1217  		return nil, errors.New("not signed by us")
  1218  	}
  1219  	// check that we previously authorised all topics
  1220  	// that the other side is trying to register.
  1221  	if rlpHash(data.Topics) != pongpkt.data.(*pong).TopicHash {
  1222  		return nil, errors.New("topic hash mismatch")
  1223  	}
  1224  	if data.Idx < 0 || int(data.Idx) >= len(data.Topics) {
  1225  		return nil, errors.New("topic index out of range")
  1226  	}
  1227  	return pongpkt.data.(*pong), nil
  1228  }
  1229  
  1230  func rlpHash(x interface{}) (h common.Hash) {
  1231  	hw := sha3.NewKeccak256()
  1232  	rlp.Encode(hw, x)
  1233  	hw.Sum(h[:0])
  1234  	return h
  1235  }
  1236  
  1237  func (net *Network) handleNeighboursPacket(n *Node, pkt *ingressPacket) error {
  1238  	if n.pendingNeighbours == nil {
  1239  		return errNoQuery
  1240  	}
  1241  	net.abortTimedEvent(n, neighboursTimeout)
  1242  
  1243  	req := pkt.data.(*neighbors)
  1244  	nodes := make([]*Node, len(req.Nodes))
  1245  	for i, rn := range req.Nodes {
  1246  		nn, err := net.internNodeFromNeighbours(pkt.remoteAddr, rn)
  1247  		if err != nil {
  1248  			log.Debug(fmt.Sprintf("invalid neighbour (%v) from %x@%v: %v", rn.IP, n.ID[:8], pkt.remoteAddr, err))
  1249  			continue
  1250  		}
  1251  		nodes[i] = nn
  1252  		// Start validation of query results immediately.
  1253  		// This fills the table quickly.
  1254  		// TODO: generates way too many packets, maybe do it via queue.
  1255  		if nn.state == unknown {
  1256  			net.transition(nn, verifyinit)
  1257  		}
  1258  	}
  1259  	// TODO: don't ignore second packet
  1260  	n.pendingNeighbours.reply <- nodes
  1261  	n.pendingNeighbours = nil
  1262  	// Now that this query is done, start the next one.
  1263  	n.startNextQuery(net)
  1264  	return nil
  1265  }