github.com/sberex/go-sberex@v1.8.2-0.20181113200658-ed96ac38f7d7/p2p/discv5/net.go (about)

     1  // This file is part of the go-sberex library. The go-sberex library is 
     2  // free software: you can redistribute it and/or modify it under the terms 
     3  // of the GNU Lesser General Public License as published by the Free 
     4  // Software Foundation, either version 3 of the License, or (at your option)
     5  // any later version.
     6  //
     7  // The go-sberex library is distributed in the hope that it will be useful, 
     8  // but WITHOUT ANY WARRANTY; without even the implied warranty of
     9  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser 
    10  // General Public License <http://www.gnu.org/licenses/> for more details.
    11  
    12  package discv5
    13  
    14  import (
    15  	"bytes"
    16  	"crypto/ecdsa"
    17  	"errors"
    18  	"fmt"
    19  	"net"
    20  	"time"
    21  
    22  	"github.com/Sberex/go-sberex/common"
    23  	"github.com/Sberex/go-sberex/common/mclock"
    24  	"github.com/Sberex/go-sberex/crypto"
    25  	"github.com/Sberex/go-sberex/crypto/sha3"
    26  	"github.com/Sberex/go-sberex/log"
    27  	"github.com/Sberex/go-sberex/p2p/netutil"
    28  	"github.com/Sberex/go-sberex/rlp"
    29  )
    30  
// Errors returned by the packet/state-machine handlers.
var (
	// errInvalidEvent: the event is not allowed in the node's current state.
	errInvalidEvent = errors.New("invalid in current state")
	// errNoQuery: presumably a neighbours reply arrived without a pending
	// findnode query — confirm against the handlers using it.
	errNoQuery      = errors.New("no pending query")
	// errWrongAddress: a packet came from an unexpected sender address.
	errWrongAddress = errors.New("unknown sender address")
)

const (
	autoRefreshInterval   = 1 * time.Hour      // cadence of the full table refresh in loop
	bucketRefreshInterval = 1 * time.Minute    // cadence of single-bucket refresh lookups
	seedCount             = 30                 // max seed nodes requested from the database
	seedMaxAge            = 5 * 24 * time.Hour // max age of database seeds considered
	lowPort               = 1024               // neighbours advertising UDP ports <= lowPort are rejected
)

// testTopic is the fixed topic name referenced by the (commented-out)
// stats dump in loop.
const testTopic = "foo"

const (
	// printTestImgLogs enables the verbose radius/wait-period printf
	// logging in loop's stats dump; disabled by default.
	printTestImgLogs = false
)
    50  
// Network manages the table and all protocol interaction.
// All mutable state below the "State of the main loop" marker is owned
// by the loop goroutine; other goroutines communicate with it through
// the request channels.
type Network struct {
	db          *nodeDB // database of known nodes
	conn        transport
	netrestrict *netutil.Netlist // optional whitelist of allowed IP networks

	closed           chan struct{}          // closed when loop is done
	closeReq         chan struct{}          // 'request to close'
	refreshReq       chan []*Node           // lookups ask for refresh on this channel
	refreshResp      chan (<-chan struct{}) // ...and get the channel to block on from this one
	read             chan ingressPacket     // ingress packets arrive here
	timeout          chan timeoutEvent      // expired state-machine timers arrive here
	queryReq         chan *findnodeQuery    // lookups submit findnode queries on this channel
	tableOpReq       chan func()            // closures to run on the loop goroutine (table access)
	tableOpResp      chan struct{}          // signals completion of a tableOpReq closure
	topicRegisterReq chan topicRegisterReq  // topic advertisement start/stop requests
	topicSearchReq   chan topicSearchReq    // topic search configuration requests

	// State of the main loop.
	tab           *Table
	topictab      *topicTable
	ticketStore   *ticketStore
	nursery       []*Node          // fallback/bootstrap nodes used when the table is empty
	nodes         map[NodeID]*Node // tracks active nodes with state != known
	timeoutTimers map[timeoutEvent]*time.Timer

	// Revalidation queues.
	// Nodes put on these queues will be pinged eventually.
	slowRevalidateQueue []*Node
	fastRevalidateQueue []*Node

	// Buffers for state transition.
	sendBuf []*ingressPacket
}
    85  
// transport is implemented by the UDP transport.
// it is an interface so we can test without opening lots of UDP
// sockets and without generating a private key.
type transport interface {
	// sendPing returns the hash of the sent ping packet; it is matched
	// against the echo field of the corresponding pong.
	sendPing(remote *Node, remoteAddr *net.UDPAddr, topics []Topic) (hash []byte)
	sendNeighbours(remote *Node, nodes []*Node)
	sendFindnodeHash(remote *Node, target common.Hash)
	sendTopicRegister(remote *Node, topics []Topic, topicIdx int, pong []byte)
	sendTopicNodes(remote *Node, queryHash common.Hash, nodes []*Node)

	// send transmits an arbitrary packet type and returns its hash.
	send(remote *Node, ptype nodeEvent, p interface{}) (hash []byte)

	localAddr() *net.UDPAddr
	Close()
}
   101  
// findnodeQuery is a pending findnode request issued by a lookup.
// The resulting nodes (or nil, when the query is aborted) are
// delivered on reply.
type findnodeQuery struct {
	remote   *Node
	target   common.Hash
	reply    chan<- []*Node
	nresults int // counter for received nodes
}

// topicRegisterReq asks the loop to start (add == true) or stop
// (add == false) advertising the given topic.
type topicRegisterReq struct {
	add   bool
	topic Topic
}

// topicSearchReq (re)configures a topic search in the loop.
// A zero delay cancels the search for topic.
type topicSearchReq struct {
	topic  Topic
	found  chan<- *Node // registered with the ticket store for result delivery
	lookup chan<- bool  // receives the radius-converged flag after each lookup
	delay  time.Duration
}

// topicSearchResult carries the outcome of one search lookup back to
// the loop goroutine.
type topicSearchResult struct {
	target lookupInfo
	nodes  []*Node
}

// timeoutEvent identifies a pending timed state-machine event for a node.
type timeoutEvent struct {
	ev   nodeEvent
	node *Node
}
   130  
   131  func newNetwork(conn transport, ourPubkey ecdsa.PublicKey, dbPath string, netrestrict *netutil.Netlist) (*Network, error) {
   132  	ourID := PubkeyID(&ourPubkey)
   133  
   134  	var db *nodeDB
   135  	if dbPath != "<no database>" {
   136  		var err error
   137  		if db, err = newNodeDB(dbPath, Version, ourID); err != nil {
   138  			return nil, err
   139  		}
   140  	}
   141  
   142  	tab := newTable(ourID, conn.localAddr())
   143  	net := &Network{
   144  		db:               db,
   145  		conn:             conn,
   146  		netrestrict:      netrestrict,
   147  		tab:              tab,
   148  		topictab:         newTopicTable(db, tab.self),
   149  		ticketStore:      newTicketStore(),
   150  		refreshReq:       make(chan []*Node),
   151  		refreshResp:      make(chan (<-chan struct{})),
   152  		closed:           make(chan struct{}),
   153  		closeReq:         make(chan struct{}),
   154  		read:             make(chan ingressPacket, 100),
   155  		timeout:          make(chan timeoutEvent),
   156  		timeoutTimers:    make(map[timeoutEvent]*time.Timer),
   157  		tableOpReq:       make(chan func()),
   158  		tableOpResp:      make(chan struct{}),
   159  		queryReq:         make(chan *findnodeQuery),
   160  		topicRegisterReq: make(chan topicRegisterReq),
   161  		topicSearchReq:   make(chan topicSearchReq),
   162  		nodes:            make(map[NodeID]*Node),
   163  	}
   164  	go net.loop()
   165  	return net, nil
   166  }
   167  
   168  // Close terminates the network listener and flushes the node database.
   169  func (net *Network) Close() {
   170  	net.conn.Close()
   171  	select {
   172  	case <-net.closed:
   173  	case net.closeReq <- struct{}{}:
   174  		<-net.closed
   175  	}
   176  }
   177  
// Self returns the local node.
// The returned node should not be modified by the caller.
func (net *Network) Self() *Node {
	return net.tab.self
}

// ReadRandomNodes fills the given slice with random nodes from the
// table. It will not write the same node more than once. The nodes in
// the slice are copies and can be modified by the caller.
// If the network has shut down, the closure never runs and 0 is returned.
func (net *Network) ReadRandomNodes(buf []*Node) (n int) {
	net.reqTableOp(func() { n = net.tab.readRandomNodes(buf) })
	return n
}
   191  
   192  // SetFallbackNodes sets the initial points of contact. These nodes
   193  // are used to connect to the network if the table is empty and there
   194  // are no known nodes in the database.
   195  func (net *Network) SetFallbackNodes(nodes []*Node) error {
   196  	nursery := make([]*Node, 0, len(nodes))
   197  	for _, n := range nodes {
   198  		if err := n.validateComplete(); err != nil {
   199  			return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err)
   200  		}
   201  		// Recompute cpy.sha because the node might not have been
   202  		// created by NewNode or ParseNode.
   203  		cpy := *n
   204  		cpy.sha = crypto.Keccak256Hash(n.ID[:])
   205  		nursery = append(nursery, &cpy)
   206  	}
   207  	net.reqRefresh(nursery)
   208  	return nil
   209  }
   210  
   211  // Resolve searches for a specific node with the given ID.
   212  // It returns nil if the node could not be found.
   213  func (net *Network) Resolve(targetID NodeID) *Node {
   214  	result := net.lookup(crypto.Keccak256Hash(targetID[:]), true)
   215  	for _, n := range result {
   216  		if n.ID == targetID {
   217  			return n
   218  		}
   219  	}
   220  	return nil
   221  }
   222  
// Lookup performs a network search for nodes close
// to the given target. It approaches the target by querying
// nodes that are closer to it on each iteration.
// The given target does not need to be an actual node
// identifier.
//
// The local node may be included in the result.
func (net *Network) Lookup(targetID NodeID) []*Node {
	// Distances are measured between hashes, so hash the ID first.
	return net.lookup(crypto.Keccak256Hash(targetID[:]), false)
}
   233  
// lookup runs the iterative Kademlia search for target. On each round
// it queries up to alpha of the closest not-yet-asked nodes and folds
// the replies into the result set. If stopOnMatch is set, it returns
// as soon as a node whose sha equals target is seen. The returned
// slice is ordered by distance to target.
func (net *Network) lookup(target common.Hash, stopOnMatch bool) []*Node {
	var (
		asked          = make(map[NodeID]bool)
		seen           = make(map[NodeID]bool)
		reply          = make(chan []*Node, alpha)
		result         = nodesByDistance{target: target}
		pendingQueries = 0
	)
	// Get initial answers from the local node.
	result.push(net.tab.self, bucketSize)
	for {
		// Ask the α closest nodes that we haven't asked yet.
		for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
			n := result.entries[i]
			if !asked[n.ID] {
				asked[n.ID] = true
				pendingQueries++
				net.reqQueryFindnode(n, target, reply)
			}
		}
		if pendingQueries == 0 {
			// We have asked all closest nodes, stop the search.
			break
		}
		// Wait for the next reply.
		select {
		case nodes := <-reply:
			for _, n := range nodes {
				if n != nil && !seen[n.ID] {
					seen[n.ID] = true
					result.push(n, bucketSize)
					if stopOnMatch && n.sha == target {
						return result.entries
					}
				}
			}
			pendingQueries--
		case <-time.After(respTimeout):
			// forget all pending requests, start new ones
			// (the stale reply channel is abandoned so late answers
			// cannot be mistaken for new ones)
			pendingQueries = 0
			reply = make(chan []*Node, alpha)
		}
	}
	return result.entries
}
   279  
// RegisterTopic starts advertising the given topic and blocks until
// stop is closed or the network shuts down, at which point the
// registration is withdrawn (unless the network closed first).
func (net *Network) RegisterTopic(topic Topic, stop <-chan struct{}) {
	// Submit the registration; give up if the loop is gone.
	select {
	case net.topicRegisterReq <- topicRegisterReq{true, topic}:
	case <-net.closed:
		return
	}
	// Wait for stop (or shutdown), then withdraw the registration.
	select {
	case <-net.closed:
	case <-stop:
		select {
		case net.topicRegisterReq <- topicRegisterReq{false, topic}:
		case <-net.closed:
		}
	}
}
   295  
// SearchTopic searches for nodes advertising topic. The search period
// is (re)configured through setPeriod; closing setPeriod sends a final
// zero-delay request, which cancels the search in the loop, and then
// returns. Results are delivered on found, lookup status on lookup.
func (net *Network) SearchTopic(topic Topic, setPeriod <-chan time.Duration, found chan<- *Node, lookup chan<- bool) {
	for {
		select {
		case <-net.closed:
			return
		case delay, ok := <-setPeriod:
			// Forward the new period even when setPeriod was closed
			// (delay is zero then, which cancels the search).
			select {
			case net.topicSearchReq <- topicSearchReq{topic: topic, found: found, lookup: lookup, delay: delay}:
			case <-net.closed:
				return
			}
			if !ok {
				return
			}
		}
	}
}
   313  
   314  func (net *Network) reqRefresh(nursery []*Node) <-chan struct{} {
   315  	select {
   316  	case net.refreshReq <- nursery:
   317  		return <-net.refreshResp
   318  	case <-net.closed:
   319  		return net.closed
   320  	}
   321  }
   322  
   323  func (net *Network) reqQueryFindnode(n *Node, target common.Hash, reply chan []*Node) bool {
   324  	q := &findnodeQuery{remote: n, target: target, reply: reply}
   325  	select {
   326  	case net.queryReq <- q:
   327  		return true
   328  	case <-net.closed:
   329  		return false
   330  	}
   331  }
   332  
   333  func (net *Network) reqReadPacket(pkt ingressPacket) {
   334  	select {
   335  	case net.read <- pkt:
   336  	case <-net.closed:
   337  	}
   338  }
   339  
   340  func (net *Network) reqTableOp(f func()) (called bool) {
   341  	select {
   342  	case net.tableOpReq <- f:
   343  		<-net.tableOpResp
   344  		return true
   345  	case <-net.closed:
   346  		return false
   347  	}
   348  }
   349  
   350  // TODO: external address handling.
   351  
// topicSearchInfo tracks one active topic search in the loop: the
// channel used to report lookup status and the delay between
// consecutive search lookups.
type topicSearchInfo struct {
	lookupChn chan<- bool
	period    time.Duration
}

// maxSearchCount bounds the number of concurrently running search lookups.
const maxSearchCount = 5
   358  
// loop is the Network's main event loop. It owns all mutable state
// (table, topic table, ticket store, per-node state machines) and
// serializes every interaction through channels, so the handlers it
// calls need no locking. It runs until closeReq fires, then tears
// everything down and closes net.closed.
func (net *Network) loop() {
	var (
		refreshTimer       = time.NewTicker(autoRefreshInterval)
		bucketRefreshTimer = time.NewTimer(bucketRefreshInterval)
		refreshDone        chan struct{} // closed when the 'refresh' lookup has ended
	)

	// Tracking the next ticket to register.
	var (
		nextTicket        *ticketRef
		nextRegisterTimer *time.Timer
		nextRegisterTime  <-chan time.Time
	)
	defer func() {
		if nextRegisterTimer != nil {
			nextRegisterTimer.Stop()
		}
	}()
	// resetNextTicket re-arms the registration timer whenever the
	// ticket store's next candidate changes.
	resetNextTicket := func() {
		ticket, timeout := net.ticketStore.nextFilteredTicket()
		if nextTicket != ticket {
			nextTicket = ticket
			if nextRegisterTimer != nil {
				nextRegisterTimer.Stop()
				nextRegisterTime = nil
			}
			if ticket != nil {
				nextRegisterTimer = time.NewTimer(timeout)
				nextRegisterTime = nextRegisterTimer.C
			}
		}
	}

	// Tracking registration and search lookups.
	var (
		topicRegisterLookupTarget lookupInfo
		topicRegisterLookupDone   chan []*Node
		topicRegisterLookupTick   = time.NewTimer(0)
		searchReqWhenRefreshDone  []topicSearchReq
		searchInfo                = make(map[Topic]topicSearchInfo)
		activeSearchCount         int
	)
	// Buffered so the lookup worker goroutines never block the loop.
	topicSearchLookupDone := make(chan topicSearchResult, 100)
	topicSearch := make(chan Topic, 100)
	// Drain the zero-duration timer so the first tick is deliberate.
	<-topicRegisterLookupTick.C

	statsDump := time.NewTicker(10 * time.Second)

loop:
	for {
		resetNextTicket()

		select {
		case <-net.closeReq:
			log.Trace("<-net.closeReq")
			break loop

		// Ingress packet handling.
		case pkt := <-net.read:
			//fmt.Println("read", pkt.ev)
			log.Trace("<-net.read")
			n := net.internNode(&pkt)
			prestate := n.state
			status := "ok"
			if err := net.handle(n, pkt.ev, &pkt); err != nil {
				status = err.Error()
			}
			log.Trace("", "msg", log.Lazy{Fn: func() string {
				return fmt.Sprintf("<<< (%d) %v from %x@%v: %v -> %v (%v)",
					net.tab.count, pkt.ev, pkt.remoteID[:8], pkt.remoteAddr, prestate, n.state, status)
			}})
			// TODO: persist state if n.state goes >= known, delete if it goes <= known

		// State transition timeouts.
		case timeout := <-net.timeout:
			log.Trace("<-net.timeout")
			if net.timeoutTimers[timeout] == nil {
				// Stale timer (was aborted).
				continue
			}
			delete(net.timeoutTimers, timeout)
			prestate := timeout.node.state
			status := "ok"
			if err := net.handle(timeout.node, timeout.ev, nil); err != nil {
				status = err.Error()
			}
			log.Trace("", "msg", log.Lazy{Fn: func() string {
				return fmt.Sprintf("--- (%d) %v for %x@%v: %v -> %v (%v)",
					net.tab.count, timeout.ev, timeout.node.ID[:8], timeout.node.addr(), prestate, timeout.node.state, status)
			}})

		// Querying.
		case q := <-net.queryReq:
			log.Trace("<-net.queryReq")
			if !q.start(net) {
				q.remote.deferQuery(q)
			}

		// Interacting with the table.
		case f := <-net.tableOpReq:
			log.Trace("<-net.tableOpReq")
			f()
			net.tableOpResp <- struct{}{}

		// Topic registration stuff.
		case req := <-net.topicRegisterReq:
			log.Trace("<-net.topicRegisterReq")
			if !req.add {
				net.ticketStore.removeRegisterTopic(req.topic)
				continue
			}
			net.ticketStore.addTopic(req.topic, true)
			// If we're currently waiting idle (nothing to look up), give the ticket store a
			// chance to start it sooner. This should speed up convergence of the radius
			// determination for new topics.
			// if topicRegisterLookupDone == nil {
			if topicRegisterLookupTarget.target == (common.Hash{}) {
				log.Trace("topicRegisterLookupTarget == null")
				if topicRegisterLookupTick.Stop() {
					<-topicRegisterLookupTick.C
				}
				target, delay := net.ticketStore.nextRegisterLookup()
				topicRegisterLookupTarget = target
				topicRegisterLookupTick.Reset(delay)
			}

		case nodes := <-topicRegisterLookupDone:
			log.Trace("<-topicRegisterLookupDone")
			net.ticketStore.registerLookupDone(topicRegisterLookupTarget, nodes, func(n *Node) []byte {
				net.ping(n, n.addr())
				return n.pingEcho
			})
			target, delay := net.ticketStore.nextRegisterLookup()
			topicRegisterLookupTarget = target
			topicRegisterLookupTick.Reset(delay)
			topicRegisterLookupDone = nil

		case <-topicRegisterLookupTick.C:
			log.Trace("<-topicRegisterLookupTick")
			if (topicRegisterLookupTarget.target == common.Hash{}) {
				target, delay := net.ticketStore.nextRegisterLookup()
				topicRegisterLookupTarget = target
				topicRegisterLookupTick.Reset(delay)
				topicRegisterLookupDone = nil
			} else {
				// Run the registration lookup off the loop goroutine.
				topicRegisterLookupDone = make(chan []*Node)
				target := topicRegisterLookupTarget.target
				go func() { topicRegisterLookupDone <- net.lookup(target, false) }()
			}

		case <-nextRegisterTime:
			log.Trace("<-nextRegisterTime")
			net.ticketStore.ticketRegistered(*nextTicket)
			//fmt.Println("sendTopicRegister", nextTicket.t.node.addr().String(), nextTicket.t.topics, nextTicket.idx, nextTicket.t.pong)
			net.conn.sendTopicRegister(nextTicket.t.node, nextTicket.t.topics, nextTicket.idx, nextTicket.t.pong)

		case req := <-net.topicSearchReq:
			if refreshDone == nil {
				log.Trace("<-net.topicSearchReq")
				info, ok := searchInfo[req.topic]
				if ok {
					// Existing search: zero delay cancels, otherwise
					// just update the period.
					if req.delay == time.Duration(0) {
						delete(searchInfo, req.topic)
						net.ticketStore.removeSearchTopic(req.topic)
					} else {
						info.period = req.delay
						searchInfo[req.topic] = info
					}
					continue
				}
				if req.delay != time.Duration(0) {
					var info topicSearchInfo
					info.period = req.delay
					info.lookupChn = req.lookup
					searchInfo[req.topic] = info
					net.ticketStore.addSearchTopic(req.topic, req.found)
					topicSearch <- req.topic
				}
			} else {
				// A refresh is in progress; replay the request once it
				// has finished.
				searchReqWhenRefreshDone = append(searchReqWhenRefreshDone, req)
			}

		case topic := <-topicSearch:
			if activeSearchCount < maxSearchCount {
				activeSearchCount++
				target := net.ticketStore.nextSearchLookup(topic)
				go func() {
					nodes := net.lookup(target.target, false)
					topicSearchLookupDone <- topicSearchResult{target: target, nodes: nodes}
				}()
			}
			// Schedule the next search round for this topic.
			period := searchInfo[topic].period
			if period != time.Duration(0) {
				go func() {
					time.Sleep(period)
					topicSearch <- topic
				}()
			}

		case res := <-topicSearchLookupDone:
			activeSearchCount--
			if lookupChn := searchInfo[res.target.topic].lookupChn; lookupChn != nil {
				lookupChn <- net.ticketStore.radius[res.target.topic].converged
			}
			net.ticketStore.searchLookupDone(res.target, res.nodes, func(n *Node, topic Topic) []byte {
				if n.state != nil && n.state.canQuery {
					return net.conn.send(n, topicQueryPacket, topicQuery{Topic: topic}) // TODO: set expiration
				} else {
					if n.state == unknown {
						net.ping(n, n.addr())
					}
					return nil
				}
			})

		case <-statsDump.C:
			log.Trace("<-statsDump.C")
			/*r, ok := net.ticketStore.radius[testTopic]
			if !ok {
				fmt.Printf("(%x) no radius @ %v\n", net.tab.self.ID[:8], time.Now())
			} else {
				topics := len(net.ticketStore.tickets)
				tickets := len(net.ticketStore.nodes)
				rad := r.radius / (maxRadius/10000+1)
				fmt.Printf("(%x) topics:%d radius:%d tickets:%d @ %v\n", net.tab.self.ID[:8], topics, rad, tickets, time.Now())
			}*/

			tm := mclock.Now()
			for topic, r := range net.ticketStore.radius {
				if printTestImgLogs {
					rad := r.radius / (maxRadius/1000000 + 1)
					minrad := r.minRadius / (maxRadius/1000000 + 1)
					fmt.Printf("*R %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha[:8], rad)
					fmt.Printf("*MR %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha[:8], minrad)
				}
			}
			for topic, t := range net.topictab.topics {
				wp := t.wcl.nextWaitPeriod(tm)
				if printTestImgLogs {
					fmt.Printf("*W %d %v %016x %d\n", tm/1000000, topic, net.tab.self.sha[:8], wp/1000000)
				}
			}

		// Periodic / lookup-initiated bucket refresh.
		case <-refreshTimer.C:
			log.Trace("<-refreshTimer.C")
			// TODO: ideally we would start the refresh timer after
			// fallback nodes have been set for the first time.
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				net.refresh(refreshDone)
			}
		case <-bucketRefreshTimer.C:
			target := net.tab.chooseBucketRefreshTarget()
			go func() {
				net.lookup(target, false)
				bucketRefreshTimer.Reset(bucketRefreshInterval)
			}()
		case newNursery := <-net.refreshReq:
			log.Trace("<-net.refreshReq")
			if newNursery != nil {
				net.nursery = newNursery
			}
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				net.refresh(refreshDone)
			}
			net.refreshResp <- refreshDone
		case <-refreshDone:
			log.Trace("<-net.refreshDone", "table size", net.tab.count)
			if net.tab.count != 0 {
				refreshDone = nil
				// Replay topic search requests that arrived mid-refresh.
				list := searchReqWhenRefreshDone
				searchReqWhenRefreshDone = nil
				go func() {
					for _, req := range list {
						net.topicSearchReq <- req
					}
				}()
			} else {
				// Table still empty: keep refreshing.
				refreshDone = make(chan struct{})
				net.refresh(refreshDone)
			}
		}
	}
	log.Trace("loop stopped")

	log.Debug(fmt.Sprintf("shutting down"))
	if net.conn != nil {
		net.conn.Close()
	}
	if refreshDone != nil {
		// TODO: wait for pending refresh.
		//<-refreshResults
	}
	// Cancel all pending timeouts.
	for _, timer := range net.timeoutTimers {
		timer.Stop()
	}
	if net.db != nil {
		net.db.close()
	}
	close(net.closed)
}
   663  
   664  // Everything below runs on the Network.loop goroutine
   665  // and can modify Node, Table and Network at any time without locking.
   666  
// refresh seeds the table (from the database, falling back to the
// nursery) and starts a self lookup to fill up the buckets. done is
// closed when the lookup completes, or immediately if no seeds are
// available. Runs on the loop goroutine.
func (net *Network) refresh(done chan<- struct{}) {
	var seeds []*Node
	if net.db != nil {
		seeds = net.db.querySeeds(seedCount, seedMaxAge)
	}
	if len(seeds) == 0 {
		seeds = net.nursery
	}
	if len(seeds) == 0 {
		log.Trace("no seed nodes found")
		close(done)
		return
	}
	for _, n := range seeds {
		log.Debug("", "msg", log.Lazy{Fn: func() string {
			var age string
			if net.db != nil {
				age = time.Since(net.db.lastPong(n.ID)).String()
			} else {
				age = "unknown"
			}
			// NOTE(review): this closure captures the loop variable n,
			// which is reassigned just below; if the Lazy Fn runs later
			// it may describe the interned node instead — confirm.
			return fmt.Sprintf("seed node (age %s): %v", age, n)
		}})
		n = net.internNodeFromDB(n)
		if n.state == unknown {
			net.transition(n, verifyinit)
		}
		// Force-add the seed node so Lookup does something.
		// It will be deleted again if verification fails.
		net.tab.add(n)
	}
	// Start self lookup to fill up the buckets.
	go func() {
		net.Lookup(net.tab.self.ID)
		close(done)
	}()
}
   704  
   705  // Node Interning.
   706  
   707  func (net *Network) internNode(pkt *ingressPacket) *Node {
   708  	if n := net.nodes[pkt.remoteID]; n != nil {
   709  		n.IP = pkt.remoteAddr.IP
   710  		n.UDP = uint16(pkt.remoteAddr.Port)
   711  		n.TCP = uint16(pkt.remoteAddr.Port)
   712  		return n
   713  	}
   714  	n := NewNode(pkt.remoteID, pkt.remoteAddr.IP, uint16(pkt.remoteAddr.Port), uint16(pkt.remoteAddr.Port))
   715  	n.state = unknown
   716  	net.nodes[pkt.remoteID] = n
   717  	return n
   718  }
   719  
   720  func (net *Network) internNodeFromDB(dbn *Node) *Node {
   721  	if n := net.nodes[dbn.ID]; n != nil {
   722  		return n
   723  	}
   724  	n := NewNode(dbn.ID, dbn.IP, dbn.UDP, dbn.TCP)
   725  	n.state = unknown
   726  	net.nodes[n.ID] = n
   727  	return n
   728  }
   729  
   730  func (net *Network) internNodeFromNeighbours(sender *net.UDPAddr, rn rpcNode) (n *Node, err error) {
   731  	if rn.ID == net.tab.self.ID {
   732  		return nil, errors.New("is self")
   733  	}
   734  	if rn.UDP <= lowPort {
   735  		return nil, errors.New("low port")
   736  	}
   737  	n = net.nodes[rn.ID]
   738  	if n == nil {
   739  		// We haven't seen this node before.
   740  		n, err = nodeFromRPC(sender, rn)
   741  		if net.netrestrict != nil && !net.netrestrict.Contains(n.IP) {
   742  			return n, errors.New("not contained in netrestrict whitelist")
   743  		}
   744  		if err == nil {
   745  			n.state = unknown
   746  			net.nodes[n.ID] = n
   747  		}
   748  		return n, err
   749  	}
   750  	if !n.IP.Equal(rn.IP) || n.UDP != rn.UDP || n.TCP != rn.TCP {
   751  		if n.state == known {
   752  			// reject address change if node is known by us
   753  			err = fmt.Errorf("metadata mismatch: got %v, want %v", rn, n)
   754  		} else {
   755  			// accept otherwise; this will be handled nicer with signed ENRs
   756  			n.IP = rn.IP
   757  			n.UDP = rn.UDP
   758  			n.TCP = rn.TCP
   759  		}
   760  	}
   761  	return n, err
   762  }
   763  
// nodeNetGuts is embedded in Node and contains the per-node protocol
// bookkeeping fields.
type nodeNetGuts struct {
	// This is a cached copy of sha3(ID) which is used for node
	// distance calculations. This is part of Node in order to make it
	// possible to write tests that need a node at a certain distance.
	// In those tests, the content of sha will not actually correspond
	// with ID.
	sha common.Hash

	// State machine fields. Access to these fields
	// is restricted to the Network.loop goroutine.
	state             *nodeState
	pingEcho          []byte           // hash of last ping sent by us
	pingTopics        []Topic          // topic set sent by us in last ping
	deferredQueries   []*findnodeQuery // queries that can't be sent yet
	pendingNeighbours *findnodeQuery   // current query, waiting for reply
	queryTimeouts     int              // consecutive findnode timeouts observed
}
   782  
// deferQuery queues a findnode query that cannot be sent yet; it is
// retried through startNextQuery.
func (n *nodeNetGuts) deferQuery(q *findnodeQuery) {
	n.deferredQueries = append(n.deferredQueries, q)
}
   786  
   787  func (n *nodeNetGuts) startNextQuery(net *Network) {
   788  	if len(n.deferredQueries) == 0 {
   789  		return
   790  	}
   791  	nextq := n.deferredQueries[0]
   792  	if nextq.start(net) {
   793  		n.deferredQueries = append(n.deferredQueries[:0], n.deferredQueries[1:]...)
   794  	}
   795  }
   796  
// start attempts to run the query q: local-node queries are answered
// directly from the table, queryable remotes get a findnode packet,
// and unknown remotes are pushed toward verification first. It returns
// false when the query could not be started yet.
func (q *findnodeQuery) start(net *Network) bool {
	// Satisfy queries against the local node directly.
	if q.remote == net.tab.self {
		// NOTE(review): q.target is already a hash; hashing it again
		// here looks suspicious but is the long-standing behavior —
		// confirm before changing.
		closest := net.tab.closest(crypto.Keccak256Hash(q.target[:]), bucketSize)
		q.reply <- closest.entries
		return true
	}
	if q.remote.state.canQuery && q.remote.pendingNeighbours == nil {
		net.conn.sendFindnodeHash(q.remote, q.target)
		net.timedEvent(respTimeout, q.remote, neighboursTimeout)
		q.remote.pendingNeighbours = q
		return true
	}
	// If the node is not known yet, it won't accept queries.
	// Initiate the transition to known.
	// The request will be sent later when the node reaches known state.
	if q.remote.state == unknown {
		net.transition(q.remote, verifyinit)
	}
	return false
}
   818  
// Node Events (the input to the state machine).

// nodeEvent identifies an input to the per-node state machine: either
// a received packet type or an internally generated timeout.
type nodeEvent uint

//go:generate stringer -type=nodeEvent

const (
	invalidEvent nodeEvent = iota // zero is reserved

	// Packet type events.
	// These correspond to packet types in the UDP protocol.
	pingPacket
	pongPacket
	findnodePacket
	neighborsPacket
	findnodeHashPacket
	topicRegisterPacket
	topicQueryPacket
	topicNodesPacket

	// Non-packet events.
	// Event values in this category are allocated outside
	// the packet type range (packet types are encoded as a single byte).
	pongTimeout nodeEvent = iota + 256
	pingTimeout
	neighboursTimeout
)
   846  
// Node State Machine.

// nodeState describes one state of the per-node state machine. handle
// processes an event in this state and returns the next state; enter
// (optional) runs when the state is entered; canQuery reports whether
// findnode queries may be sent to a node in this state.
type nodeState struct {
	name     string
	handle   func(*Network, *Node, nodeEvent, *ingressPacket) (next *nodeState, err error)
	enter    func(*Network, *Node)
	canQuery bool
}

// String returns the state's name, for logging.
func (s *nodeState) String() string {
	return s.name
}

// The individual states, initialized in init below.
var (
	unknown          *nodeState
	verifyinit       *nodeState
	verifywait       *nodeState
	remoteverifywait *nodeState
	known            *nodeState
	contested        *nodeState
	unresponsive     *nodeState
)
   869  
// init wires up the node state machine. The state values are assigned
// here rather than in their declarations because the handler closures
// reference the state variables cyclically.
func init() {
	// unknown: nothing verified about the node. Entering this state
	// removes the node from the table and aborts all pending queries.
	// A ping from the node starts endpoint verification (verifywait).
	unknown = &nodeState{
		name: "unknown",
		enter: func(net *Network, n *Node) {
			net.tab.delete(n)
			n.pingEcho = nil
			// Abort active queries.
			for _, q := range n.deferredQueries {
				q.reply <- nil
			}
			n.deferredQueries = nil
			if n.pendingNeighbours != nil {
				n.pendingNeighbours.reply <- nil
				n.pendingNeighbours = nil
			}
			n.queryTimeouts = 0
		},
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pingPacket:
				// Answer the ping and ping back to verify the endpoint.
				net.handlePing(n, pkt)
				net.ping(n, pkt.remoteAddr)
				return verifywait, nil
			default:
				return unknown, errInvalidEvent
			}
		},
	}

	// verifyinit: we initiated endpoint verification by pinging the
	// node and are waiting for its pong (or for it to ping us).
	verifyinit = &nodeState{
		name: "verifyinit",
		enter: func(net *Network, n *Node) {
			net.ping(n, n.addr())
		},
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pingPacket:
				net.handlePing(n, pkt)
				return verifywait, nil
			case pongPacket:
				// Our ping was answered; wait for the remote side to
				// finish verifying us.
				err := net.handleKnownPong(n, pkt)
				return remoteverifywait, err
			case pongTimeout:
				return unknown, nil
			default:
				return verifyinit, errInvalidEvent
			}
		},
	}

	// verifywait: the node pinged us and we pinged back; waiting for
	// its pong to complete our side of the verification.
	verifywait = &nodeState{
		name: "verifywait",
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pingPacket:
				net.handlePing(n, pkt)
				return verifywait, nil
			case pongPacket:
				err := net.handleKnownPong(n, pkt)
				return known, err
			case pongTimeout:
				return unknown, nil
			default:
				return verifywait, errInvalidEvent
			}
		},
	}

	// remoteverifywait: our verification succeeded; give the remote
	// side respTimeout to finish verifying us before moving to known.
	remoteverifywait = &nodeState{
		name: "remoteverifywait",
		enter: func(net *Network, n *Node) {
			net.timedEvent(respTimeout, n, pingTimeout)
		},
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pingPacket:
				net.handlePing(n, pkt)
				return remoteverifywait, nil
			case pingTimeout:
				return known, nil
			default:
				return remoteverifywait, errInvalidEvent
			}
		},
	}

	// known: endpoint verification completed in both directions. The
	// node is inserted into the table and may be queried.
	known = &nodeState{
		name:     "known",
		canQuery: true,
		enter: func(net *Network, n *Node) {
			n.queryTimeouts = 0
			n.startNextQuery(net)
			// Insert into the table and start revalidation of the last node
			// in the bucket if it is full.
			last := net.tab.add(n)
			if last != nil && last.state == known {
				// TODO: do this asynchronously
				net.transition(last, contested)
			}
		},
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pingPacket:
				net.handlePing(n, pkt)
				return known, nil
			case pongPacket:
				err := net.handleKnownPong(n, pkt)
				return known, err
			default:
				return net.handleQueryEvent(n, ev, pkt)
			}
		},
	}

	// contested: the node's bucket is full and a replacement candidate
	// exists; ping it to decide whether it stays (known) or is evicted
	// (unresponsive).
	contested = &nodeState{
		name:     "contested",
		canQuery: true,
		enter: func(net *Network, n *Node) {
			net.ping(n, n.addr())
		},
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pongPacket:
				// Node is still alive.
				err := net.handleKnownPong(n, pkt)
				return known, err
			case pongTimeout:
				// Evict the node from the table in favor of its replacement.
				net.tab.deleteReplace(n)
				return unresponsive, nil
			case pingPacket:
				net.handlePing(n, pkt)
				return contested, nil
			default:
				return net.handleQueryEvent(n, ev, pkt)
			}
		},
	}

	// unresponsive: the node lost its table slot but any sign of life
	// (ping or pong) promotes it back to known.
	unresponsive = &nodeState{
		name:     "unresponsive",
		canQuery: true,
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pingPacket:
				net.handlePing(n, pkt)
				return known, nil
			case pongPacket:
				err := net.handleKnownPong(n, pkt)
				return known, err
			default:
				return net.handleQueryEvent(n, ev, pkt)
			}
		},
	}
}
  1025  
  1026  // handle processes packets sent by n and events related to n.
  1027  func (net *Network) handle(n *Node, ev nodeEvent, pkt *ingressPacket) error {
  1028  	//fmt.Println("handle", n.addr().String(), n.state, ev)
  1029  	if pkt != nil {
  1030  		if err := net.checkPacket(n, ev, pkt); err != nil {
  1031  			//fmt.Println("check err:", err)
  1032  			return err
  1033  		}
  1034  		// Start the background expiration goroutine after the first
  1035  		// successful communication. Subsequent calls have no effect if it
  1036  		// is already running. We do this here instead of somewhere else
  1037  		// so that the search for seed nodes also considers older nodes
  1038  		// that would otherwise be removed by the expirer.
  1039  		if net.db != nil {
  1040  			net.db.ensureExpirer()
  1041  		}
  1042  	}
  1043  	if n.state == nil {
  1044  		n.state = unknown //???
  1045  	}
  1046  	next, err := n.state.handle(net, n, ev, pkt)
  1047  	net.transition(n, next)
  1048  	//fmt.Println("new state:", n.state)
  1049  	return err
  1050  }
  1051  
  1052  func (net *Network) checkPacket(n *Node, ev nodeEvent, pkt *ingressPacket) error {
  1053  	// Replay prevention checks.
  1054  	switch ev {
  1055  	case pingPacket, findnodeHashPacket, neighborsPacket:
  1056  		// TODO: check date is > last date seen
  1057  		// TODO: check ping version
  1058  	case pongPacket:
  1059  		if !bytes.Equal(pkt.data.(*pong).ReplyTok, n.pingEcho) {
  1060  			// fmt.Println("pong reply token mismatch")
  1061  			return fmt.Errorf("pong reply token mismatch")
  1062  		}
  1063  		n.pingEcho = nil
  1064  	}
  1065  	// Address validation.
  1066  	// TODO: Ideally we would do the following:
  1067  	//  - reject all packets with wrong address except ping.
  1068  	//  - for ping with new address, transition to verifywait but keep the
  1069  	//    previous node (with old address) around. if the new one reaches known,
  1070  	//    swap it out.
  1071  	return nil
  1072  }
  1073  
  1074  func (net *Network) transition(n *Node, next *nodeState) {
  1075  	if n.state != next {
  1076  		n.state = next
  1077  		if next.enter != nil {
  1078  			next.enter(net, n)
  1079  		}
  1080  	}
  1081  
  1082  	// TODO: persist/unpersist node
  1083  }
  1084  
  1085  func (net *Network) timedEvent(d time.Duration, n *Node, ev nodeEvent) {
  1086  	timeout := timeoutEvent{ev, n}
  1087  	net.timeoutTimers[timeout] = time.AfterFunc(d, func() {
  1088  		select {
  1089  		case net.timeout <- timeout:
  1090  		case <-net.closed:
  1091  		}
  1092  	})
  1093  }
  1094  
  1095  func (net *Network) abortTimedEvent(n *Node, ev nodeEvent) {
  1096  	timer := net.timeoutTimers[timeoutEvent{ev, n}]
  1097  	if timer != nil {
  1098  		timer.Stop()
  1099  		delete(net.timeoutTimers, timeoutEvent{ev, n})
  1100  	}
  1101  }
  1102  
  1103  func (net *Network) ping(n *Node, addr *net.UDPAddr) {
  1104  	//fmt.Println("ping", n.addr().String(), n.ID.String(), n.sha.Hex())
  1105  	if n.pingEcho != nil || n.ID == net.tab.self.ID {
  1106  		//fmt.Println(" not sent")
  1107  		return
  1108  	}
  1109  	log.Trace("Pinging remote node", "node", n.ID)
  1110  	n.pingTopics = net.ticketStore.regTopicSet()
  1111  	n.pingEcho = net.conn.sendPing(n, addr, n.pingTopics)
  1112  	net.timedEvent(respTimeout, n, pongTimeout)
  1113  }
  1114  
  1115  func (net *Network) handlePing(n *Node, pkt *ingressPacket) {
  1116  	log.Trace("Handling remote ping", "node", n.ID)
  1117  	ping := pkt.data.(*ping)
  1118  	n.TCP = ping.From.TCP
  1119  	t := net.topictab.getTicket(n, ping.Topics)
  1120  
  1121  	pong := &pong{
  1122  		To:         makeEndpoint(n.addr(), n.TCP), // TODO: maybe use known TCP port from DB
  1123  		ReplyTok:   pkt.hash,
  1124  		Expiration: uint64(time.Now().Add(expiration).Unix()),
  1125  	}
  1126  	ticketToPong(t, pong)
  1127  	net.conn.send(n, pongPacket, pong)
  1128  }
  1129  
  1130  func (net *Network) handleKnownPong(n *Node, pkt *ingressPacket) error {
  1131  	log.Trace("Handling known pong", "node", n.ID)
  1132  	net.abortTimedEvent(n, pongTimeout)
  1133  	now := mclock.Now()
  1134  	ticket, err := pongToTicket(now, n.pingTopics, n, pkt)
  1135  	if err == nil {
  1136  		// fmt.Printf("(%x) ticket: %+v\n", net.tab.self.ID[:8], pkt.data)
  1137  		net.ticketStore.addTicket(now, pkt.data.(*pong).ReplyTok, ticket)
  1138  	} else {
  1139  		log.Trace("Failed to convert pong to ticket", "err", err)
  1140  	}
  1141  	n.pingEcho = nil
  1142  	n.pingTopics = nil
  1143  	return err
  1144  }
  1145  
// handleQueryEvent handles the query-related events shared by the
// known, contested and unresponsive states: findnode/neighbours
// traffic, the v5 topic protocol, and query timeouts. It returns the
// successor state (usually n's current state) for the state machine.
func (net *Network) handleQueryEvent(n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
	switch ev {
	case findnodePacket:
		// Answer with the closest nodes we know to the target's hash.
		target := crypto.Keccak256Hash(pkt.data.(*findnode).Target[:])
		results := net.tab.closest(target, bucketSize).entries
		net.conn.sendNeighbours(n, results)
		return n.state, nil
	case neighborsPacket:
		err := net.handleNeighboursPacket(n, pkt)
		return n.state, err
	case neighboursTimeout:
		// Abort the pending query; repeated timeouts in the known
		// state demote the node to contested.
		if n.pendingNeighbours != nil {
			n.pendingNeighbours.reply <- nil
			n.pendingNeighbours = nil
		}
		n.queryTimeouts++
		if n.queryTimeouts > maxFindnodeFailures && n.state == known {
			return contested, errors.New("too many timeouts")
		}
		return n.state, nil

	// v5

	case findnodeHashPacket:
		// Like findnodePacket, but the target is already a hash.
		results := net.tab.closest(pkt.data.(*findnodeHash).Target, bucketSize).entries
		net.conn.sendNeighbours(n, results)
		return n.state, nil
	case topicRegisterPacket:
		//fmt.Println("got topicRegisterPacket")
		regdata := pkt.data.(*topicRegister)
		pong, err := net.checkTopicRegister(regdata)
		if err != nil {
			//fmt.Println(err)
			return n.state, fmt.Errorf("bad waiting ticket: %v", err)
		}
		net.topictab.useTicket(n, pong.TicketSerial, regdata.Topics, int(regdata.Idx), pong.Expiration, pong.WaitPeriods)
		return n.state, nil
	case topicQueryPacket:
		// TODO: handle expiration
		topic := pkt.data.(*topicQuery).Topic
		results := net.topictab.getEntries(topic)
		if _, ok := net.ticketStore.tickets[topic]; ok {
			results = append(results, net.tab.self) // we're not registering in our own table but if we're advertising, return ourselves too
		}
		if len(results) > 10 {
			results = results[:10]
		}
		var hash common.Hash
		copy(hash[:], pkt.hash)
		net.conn.sendTopicNodes(n, hash, results)
		return n.state, nil
	case topicNodesPacket:
		p := pkt.data.(*topicNodes)
		if net.ticketStore.gotTopicNodes(n, p.Echo, p.Nodes) {
			n.queryTimeouts++
			if n.queryTimeouts > maxFindnodeFailures && n.state == known {
				return contested, errors.New("too many timeouts")
			}
		}
		return n.state, nil

	default:
		return n.state, errInvalidEvent
	}
}
  1211  
// checkTopicRegister validates a topic registration request. The
// embedded waiting ticket must decode to a pong packet that we signed
// ourselves, its topic hash must match the topics being registered,
// and the topic index must be in range. On success the decoded pong is
// returned.
func (net *Network) checkTopicRegister(data *topicRegister) (*pong, error) {
	var pongpkt ingressPacket
	if err := decodePacket(data.Pong, &pongpkt); err != nil {
		return nil, err
	}
	if pongpkt.ev != pongPacket {
		return nil, errors.New("is not pong packet")
	}
	if pongpkt.remoteID != net.tab.self.ID {
		return nil, errors.New("not signed by us")
	}
	// check that we previously authorised all topics
	// that the other side is trying to register.
	if rlpHash(data.Topics) != pongpkt.data.(*pong).TopicHash {
		return nil, errors.New("topic hash mismatch")
	}
	if data.Idx < 0 || int(data.Idx) >= len(data.Topics) {
		return nil, errors.New("topic index out of range")
	}
	return pongpkt.data.(*pong), nil
}
  1233  
  1234  func rlpHash(x interface{}) (h common.Hash) {
  1235  	hw := sha3.NewKeccak256()
  1236  	rlp.Encode(hw, x)
  1237  	hw.Sum(h[:0])
  1238  	return h
  1239  }
  1240  
  1241  func (net *Network) handleNeighboursPacket(n *Node, pkt *ingressPacket) error {
  1242  	if n.pendingNeighbours == nil {
  1243  		return errNoQuery
  1244  	}
  1245  	net.abortTimedEvent(n, neighboursTimeout)
  1246  
  1247  	req := pkt.data.(*neighbors)
  1248  	nodes := make([]*Node, len(req.Nodes))
  1249  	for i, rn := range req.Nodes {
  1250  		nn, err := net.internNodeFromNeighbours(pkt.remoteAddr, rn)
  1251  		if err != nil {
  1252  			log.Debug(fmt.Sprintf("invalid neighbour (%v) from %x@%v: %v", rn.IP, n.ID[:8], pkt.remoteAddr, err))
  1253  			continue
  1254  		}
  1255  		nodes[i] = nn
  1256  		// Start validation of query results immediately.
  1257  		// This fills the table quickly.
  1258  		// TODO: generates way too many packets, maybe do it via queue.
  1259  		if nn.state == unknown {
  1260  			net.transition(nn, verifyinit)
  1261  		}
  1262  	}
  1263  	// TODO: don't ignore second packet
  1264  	n.pendingNeighbours.reply <- nodes
  1265  	n.pendingNeighbours = nil
  1266  	// Now that this query is done, start the next one.
  1267  	n.startNextQuery(net)
  1268  	return nil
  1269  }