github.com/simplechain-org/go-simplechain@v1.0.6/p2p/discv5/net.go (about)

     1  // Copyright 2016 The go-simplechain Authors
     2  // This file is part of the go-simplechain library.
     3  //
     4  // The go-simplechain library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-simplechain library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-simplechain library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package discv5
    18  
    19  import (
    20  	"bytes"
    21  	"crypto/ecdsa"
    22  	"errors"
    23  	"fmt"
    24  	"net"
    25  	"time"
    26  
    27  	"github.com/simplechain-org/go-simplechain/common"
    28  	"github.com/simplechain-org/go-simplechain/common/mclock"
    29  	"github.com/simplechain-org/go-simplechain/crypto"
    30  	"github.com/simplechain-org/go-simplechain/log"
    31  	"github.com/simplechain-org/go-simplechain/p2p/netutil"
    32  	"github.com/simplechain-org/go-simplechain/rlp"
    33  	"golang.org/x/crypto/sha3"
    34  )
    35  
// Errors returned by the node state machine and query handling.
var (
	errInvalidEvent = errors.New("invalid in current state")
	errNoQuery      = errors.New("no pending query")
)
    40  
const (
	autoRefreshInterval   = 1 * time.Hour      // period of the full table refresh
	bucketRefreshInterval = 1 * time.Minute    // period of the single-bucket refresh lookup
	seedCount             = 30                 // max seed nodes loaded from the database on refresh
	seedMaxAge            = 5 * 24 * time.Hour // database seeds older than this are ignored
	lowPort               = 1024               // nodes advertising a UDP port <= lowPort are rejected
)
    48  
// testTopic is referenced by the commented-out stats dump in loop.
const testTopic = "foo"

const (
	// printTestImgLogs enables the "*R"/"*MR"/"*W" radius debug prints
	// emitted from the statsDump branch of loop.
	printTestImgLogs = false
)
    54  
// Network manages the table and all protocol interaction.
type Network struct {
	db          *nodeDB // database of known nodes
	conn        transport
	netrestrict *netutil.Netlist // optional whitelist checked for newly discovered nodes

	closed           chan struct{}          // closed when loop is done
	closeReq         chan struct{}          // 'request to close'
	refreshReq       chan []*Node           // lookups ask for refresh on this channel
	refreshResp      chan (<-chan struct{}) // ...and get the channel to block on from this one
	read             chan ingressPacket     // ingress packets arrive here
	timeout          chan timeoutEvent      // state-transition timeouts fire here
	queryReq         chan *findnodeQuery    // lookups submit findnode queries on this channel
	tableOpReq       chan func()            // table accessors submit closures here...
	tableOpResp      chan struct{}          // ...and wait for completion on this one
	topicRegisterReq chan topicRegisterReq  // topic (un)registration requests
	topicSearchReq   chan topicSearchReq    // topic search start/period-change requests

	// State of the main loop.
	tab           *Table
	topictab      *topicTable
	ticketStore   *ticketStore
	nursery       []*Node          // fallback nodes, used by refresh when db yields no seeds
	nodes         map[NodeID]*Node // tracks active nodes with state != known
	timeoutTimers map[timeoutEvent]*time.Timer // pending state-transition timers
}
    81  
// transport is implemented by the UDP transport.
// it is an interface so we can test without opening lots of UDP
// sockets and without generating a private key.
type transport interface {
	sendPing(remote *Node, remoteAddr *net.UDPAddr, topics []Topic) (hash []byte)
	sendNeighbours(remote *Node, nodes []*Node)
	sendFindnodeHash(remote *Node, target common.Hash)
	sendTopicRegister(remote *Node, topics []Topic, topicIdx int, pong []byte)
	sendTopicNodes(remote *Node, queryHash common.Hash, nodes []*Node)

	// send transmits a packet of the given event type and returns its hash.
	send(remote *Node, ptype nodeEvent, p interface{}) (hash []byte)

	localAddr() *net.UDPAddr
	Close()
}
    97  
// findnodeQuery is an in-flight (or deferred) findnode request against
// a single remote node. The resulting nodes are delivered on reply.
type findnodeQuery struct {
	remote *Node
	target common.Hash
	reply  chan<- []*Node
}
   103  
// topicRegisterReq asks the main loop to start (add == true) or stop
// (add == false) advertising a topic.
type topicRegisterReq struct {
	add   bool
	topic Topic
}
   108  
// topicSearchReq asks the main loop to start a topic search, change its
// period, or stop it (delay == 0). Discovered nodes are delivered on
// found; lookup receives the radius convergence flag after each lookup.
type topicSearchReq struct {
	topic  Topic
	found  chan<- *Node
	lookup chan<- bool
	delay  time.Duration
}
   115  
// topicSearchResult carries the outcome of one topic search lookup
// back to the main loop.
type topicSearchResult struct {
	target lookupInfo
	nodes  []*Node
}
   120  
// timeoutEvent identifies a pending state-transition timeout by the
// (event, node) pair it will fire for. It doubles as the key of
// Network.timeoutTimers.
type timeoutEvent struct {
	ev   nodeEvent
	node *Node
}
   125  
   126  func newNetwork(conn transport, ourPubkey ecdsa.PublicKey, dbPath string, netrestrict *netutil.Netlist) (*Network, error) {
   127  	ourID := PubkeyID(&ourPubkey)
   128  
   129  	var db *nodeDB
   130  	if dbPath != "<no database>" {
   131  		var err error
   132  		if db, err = newNodeDB(dbPath, Version, ourID); err != nil {
   133  			return nil, err
   134  		}
   135  	}
   136  
   137  	tab := newTable(ourID, conn.localAddr())
   138  	net := &Network{
   139  		db:               db,
   140  		conn:             conn,
   141  		netrestrict:      netrestrict,
   142  		tab:              tab,
   143  		topictab:         newTopicTable(db, tab.self),
   144  		ticketStore:      newTicketStore(),
   145  		refreshReq:       make(chan []*Node),
   146  		refreshResp:      make(chan (<-chan struct{})),
   147  		closed:           make(chan struct{}),
   148  		closeReq:         make(chan struct{}),
   149  		read:             make(chan ingressPacket, 100),
   150  		timeout:          make(chan timeoutEvent),
   151  		timeoutTimers:    make(map[timeoutEvent]*time.Timer),
   152  		tableOpReq:       make(chan func()),
   153  		tableOpResp:      make(chan struct{}),
   154  		queryReq:         make(chan *findnodeQuery),
   155  		topicRegisterReq: make(chan topicRegisterReq),
   156  		topicSearchReq:   make(chan topicSearchReq),
   157  		nodes:            make(map[NodeID]*Node),
   158  	}
   159  	go net.loop()
   160  	return net, nil
   161  }
   162  
// Close terminates the network listener and flushes the node database.
func (net *Network) Close() {
	net.conn.Close()
	select {
	case <-net.closed:
		// loop has already exited.
	case net.closeReq <- struct{}{}:
		// Wait for the loop to finish shutting down.
		<-net.closed
	}
}
   172  
// Self returns the local node.
// The returned node should not be modified by the caller.
func (net *Network) Self() *Node {
	// The table's self entry is returned directly, without copying.
	return net.tab.self
}
   178  
// ReadRandomNodes fills the given slice with random nodes from the
// table. It will not write the same node more than once. The nodes in
// the slice are copies and can be modified by the caller.
// If the network is shutting down, the closure is never run and 0 is
// returned.
func (net *Network) ReadRandomNodes(buf []*Node) (n int) {
	net.reqTableOp(func() { n = net.tab.readRandomNodes(buf) })
	return n
}
   186  
   187  // SetFallbackNodes sets the initial points of contact. These nodes
   188  // are used to connect to the network if the table is empty and there
   189  // are no known nodes in the database.
   190  func (net *Network) SetFallbackNodes(nodes []*Node) error {
   191  	nursery := make([]*Node, 0, len(nodes))
   192  	for _, n := range nodes {
   193  		if err := n.validateComplete(); err != nil {
   194  			return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err)
   195  		}
   196  		// Recompute cpy.sha because the node might not have been
   197  		// created by NewNode or ParseNode.
   198  		cpy := *n
   199  		cpy.sha = crypto.Keccak256Hash(n.ID[:])
   200  		nursery = append(nursery, &cpy)
   201  	}
   202  	net.reqRefresh(nursery)
   203  	return nil
   204  }
   205  
   206  // Resolve searches for a specific node with the given ID.
   207  // It returns nil if the node could not be found.
   208  func (net *Network) Resolve(targetID NodeID) *Node {
   209  	result := net.lookup(crypto.Keccak256Hash(targetID[:]), true)
   210  	for _, n := range result {
   211  		if n.ID == targetID {
   212  			return n
   213  		}
   214  	}
   215  	return nil
   216  }
   217  
// Lookup performs a network search for nodes close
// to the given target. It approaches the target by querying
// nodes that are closer to it on each iteration.
// The given target does not need to be an actual node
// identifier.
//
// The local node may be included in the result.
func (net *Network) Lookup(targetID NodeID) []*Node {
	// The search operates on the keccak256 hash of the ID; see lookup.
	return net.lookup(crypto.Keccak256Hash(targetID[:]), false)
}
   228  
// lookup runs the iterative Kademlia-style search for nodes close to
// target, querying up to alpha nodes concurrently. If stopOnMatch is
// set, it returns as soon as a node whose sha equals target is seen.
func (net *Network) lookup(target common.Hash, stopOnMatch bool) []*Node {
	var (
		asked          = make(map[NodeID]bool) // nodes we have queried
		seen           = make(map[NodeID]bool) // nodes seen in any reply
		reply          = make(chan []*Node, alpha)
		result         = nodesByDistance{target: target}
		pendingQueries = 0
	)
	// Get initial answers from the local node.
	result.push(net.tab.self, bucketSize)
	for {
		// Ask the α closest nodes that we haven't asked yet.
		for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
			n := result.entries[i]
			if !asked[n.ID] {
				asked[n.ID] = true
				pendingQueries++
				net.reqQueryFindnode(n, target, reply)
			}
		}
		if pendingQueries == 0 {
			// We have asked all closest nodes, stop the search.
			break
		}
		// Wait for the next reply.
		select {
		case nodes := <-reply:
			for _, n := range nodes {
				if n != nil && !seen[n.ID] {
					seen[n.ID] = true
					result.push(n, bucketSize)
					if stopOnMatch && n.sha == target {
						return result.entries
					}
				}
			}
			pendingQueries--
		case <-time.After(respTimeout):
			// forget all pending requests, start new ones
			// (a fresh reply channel ensures late answers to the old
			// requests are not counted against the new ones)
			pendingQueries = 0
			reply = make(chan []*Node, alpha)
		}
	}
	return result.entries
}
   274  
// RegisterTopic starts advertising the given topic and blocks until
// stop is closed or the network shuts down, at which point the
// registration is withdrawn again.
func (net *Network) RegisterTopic(topic Topic, stop <-chan struct{}) {
	// Submit the registration to the main loop.
	select {
	case net.topicRegisterReq <- topicRegisterReq{true, topic}:
	case <-net.closed:
		return
	}
	// Wait for cancellation, then undo the registration.
	select {
	case <-net.closed:
	case <-stop:
		select {
		case net.topicRegisterReq <- topicRegisterReq{false, topic}:
		case <-net.closed:
		}
	}
}
   290  
// SearchTopic forwards search-period changes received on setPeriod to
// the main loop. It returns when setPeriod is closed or the network
// shuts down.
func (net *Network) SearchTopic(topic Topic, setPeriod <-chan time.Duration, found chan<- *Node, lookup chan<- bool) {
	for {
		select {
		case <-net.closed:
			return
		case delay, ok := <-setPeriod:
			// The request is forwarded even when setPeriod was closed
			// (ok == false): delay is then the zero value, which the
			// loop handler treats as a request to stop the search.
			select {
			case net.topicSearchReq <- topicSearchReq{topic: topic, found: found, lookup: lookup, delay: delay}:
			case <-net.closed:
				return
			}
			if !ok {
				return
			}
		}
	}
}
   308  
// reqRefresh asks the main loop to start a refresh seeded with nursery
// and returns a channel that is closed when the refresh is done. If
// the network is shutting down, net.closed itself is returned so
// callers unblock immediately.
func (net *Network) reqRefresh(nursery []*Node) <-chan struct{} {
	select {
	case net.refreshReq <- nursery:
		return <-net.refreshResp
	case <-net.closed:
		return net.closed
	}
}
   317  
// reqQueryFindnode submits a findnode query against n to the main
// loop. It returns false if the network is shutting down and the
// query was not submitted.
func (net *Network) reqQueryFindnode(n *Node, target common.Hash, reply chan []*Node) bool {
	q := &findnodeQuery{remote: n, target: target, reply: reply}
	select {
	case net.queryReq <- q:
		return true
	case <-net.closed:
		return false
	}
}
   327  
// reqReadPacket hands an ingress packet to the main loop. The packet
// is dropped if the network is shutting down.
func (net *Network) reqReadPacket(pkt ingressPacket) {
	select {
	case net.read <- pkt:
	case <-net.closed:
	}
}
   334  
// reqTableOp runs f on the main loop goroutine, giving it exclusive
// access to the table, and blocks until f has completed. It returns
// false (without running f) if the network is shutting down.
func (net *Network) reqTableOp(f func()) (called bool) {
	select {
	case net.tableOpReq <- f:
		<-net.tableOpResp
		return true
	case <-net.closed:
		return false
	}
}
   344  
// TODO: external address handling.

// topicSearchInfo is the main loop's bookkeeping for one active topic
// search.
type topicSearchInfo struct {
	lookupChn chan<- bool   // receives the radius convergence flag after each lookup
	period    time.Duration // delay between search rounds; 0 stops the search
}

// maxSearchCount bounds the number of concurrently running topic
// search lookups.
const maxSearchCount = 5
   353  
// loop is the main event loop of the Network. It owns all mutable
// state (table, topic table, ticket store, node map); the handlers
// it dispatches to — and everything documented as "runs on the
// Network.loop goroutine" — may modify that state without locking.
// It exits when closeReq fires, then tears down the connection,
// timers and database and closes net.closed.
func (net *Network) loop() {
	var (
		refreshTimer       = time.NewTicker(autoRefreshInterval)
		bucketRefreshTimer = time.NewTimer(bucketRefreshInterval)
		refreshDone        chan struct{} // closed when the 'refresh' lookup has ended
	)

	// Tracking the next ticket to register.
	var (
		nextTicket        *ticketRef
		nextRegisterTimer *time.Timer
		nextRegisterTime  <-chan time.Time
	)
	defer func() {
		if nextRegisterTimer != nil {
			nextRegisterTimer.Stop()
		}
	}()
	// resetNextTicket re-arms the registration timer whenever the
	// ticket store's next filtered ticket changes.
	resetNextTicket := func() {
		ticket, timeout := net.ticketStore.nextFilteredTicket()
		if nextTicket != ticket {
			nextTicket = ticket
			if nextRegisterTimer != nil {
				nextRegisterTimer.Stop()
				nextRegisterTime = nil
			}
			if ticket != nil {
				nextRegisterTimer = time.NewTimer(timeout)
				nextRegisterTime = nextRegisterTimer.C
			}
		}
	}

	// Tracking registration and search lookups.
	var (
		topicRegisterLookupTarget lookupInfo
		topicRegisterLookupDone   chan []*Node
		topicRegisterLookupTick   = time.NewTimer(0)
		searchReqWhenRefreshDone  []topicSearchReq
		searchInfo                = make(map[Topic]topicSearchInfo)
		activeSearchCount         int
	)
	topicSearchLookupDone := make(chan topicSearchResult, 100)
	topicSearch := make(chan Topic, 100)
	// Drain the initial tick so the timer can be Stop/Reset cleanly below.
	<-topicRegisterLookupTick.C

	statsDump := time.NewTicker(10 * time.Second)

loop:
	for {
		resetNextTicket()

		select {
		case <-net.closeReq:
			log.Trace("<-net.closeReq")
			break loop

		// Ingress packet handling.
		case pkt := <-net.read:
			//fmt.Println("read", pkt.ev)
			log.Trace("<-net.read")
			n := net.internNode(&pkt)
			prestate := n.state
			status := "ok"
			if err := net.handle(n, pkt.ev, &pkt); err != nil {
				status = err.Error()
			}
			log.Trace("", "msg", log.Lazy{Fn: func() string {
				return fmt.Sprintf("<<< (%d) %v from %x@%v: %v -> %v (%v)",
					net.tab.count, pkt.ev, pkt.remoteID[:8], pkt.remoteAddr, prestate, n.state, status)
			}})
			// TODO: persist state if n.state goes >= known, delete if it goes <= known

		// State transition timeouts.
		case timeout := <-net.timeout:
			log.Trace("<-net.timeout")
			if net.timeoutTimers[timeout] == nil {
				// Stale timer (was aborted).
				continue
			}
			delete(net.timeoutTimers, timeout)
			prestate := timeout.node.state
			status := "ok"
			if err := net.handle(timeout.node, timeout.ev, nil); err != nil {
				status = err.Error()
			}
			log.Trace("", "msg", log.Lazy{Fn: func() string {
				return fmt.Sprintf("--- (%d) %v for %x@%v: %v -> %v (%v)",
					net.tab.count, timeout.ev, timeout.node.ID[:8], timeout.node.addr(), prestate, timeout.node.state, status)
			}})

		// Querying.
		case q := <-net.queryReq:
			log.Trace("<-net.queryReq")
			if !q.start(net) {
				// The remote node cannot be queried yet; retry when it
				// becomes queryable.
				q.remote.deferQuery(q)
			}

		// Interacting with the table.
		case f := <-net.tableOpReq:
			log.Trace("<-net.tableOpReq")
			f()
			net.tableOpResp <- struct{}{}

		// Topic registration stuff.
		case req := <-net.topicRegisterReq:
			log.Trace("<-net.topicRegisterReq")
			if !req.add {
				net.ticketStore.removeRegisterTopic(req.topic)
				continue
			}
			net.ticketStore.addTopic(req.topic, true)
			// If we're currently waiting idle (nothing to look up), give the ticket store a
			// chance to start it sooner. This should speed up convergence of the radius
			// determination for new topics.
			// if topicRegisterLookupDone == nil {
			if topicRegisterLookupTarget.target == (common.Hash{}) {
				log.Trace("topicRegisterLookupTarget == null")
				if topicRegisterLookupTick.Stop() {
					<-topicRegisterLookupTick.C
				}
				target, delay := net.ticketStore.nextRegisterLookup()
				topicRegisterLookupTarget = target
				topicRegisterLookupTick.Reset(delay)
			}

		case nodes := <-topicRegisterLookupDone:
			log.Trace("<-topicRegisterLookupDone")
			net.ticketStore.registerLookupDone(topicRegisterLookupTarget, nodes, func(n *Node) []byte {
				net.ping(n, n.addr())
				return n.pingEcho
			})
			// Schedule the next registration lookup.
			target, delay := net.ticketStore.nextRegisterLookup()
			topicRegisterLookupTarget = target
			topicRegisterLookupTick.Reset(delay)
			topicRegisterLookupDone = nil

		case <-topicRegisterLookupTick.C:
			log.Trace("<-topicRegisterLookupTick")
			if (topicRegisterLookupTarget.target == common.Hash{}) {
				// No target yet; ask the ticket store for one.
				target, delay := net.ticketStore.nextRegisterLookup()
				topicRegisterLookupTarget = target
				topicRegisterLookupTick.Reset(delay)
				topicRegisterLookupDone = nil
			} else {
				// Run the lookup in the background; the result arrives
				// on topicRegisterLookupDone.
				topicRegisterLookupDone = make(chan []*Node)
				target := topicRegisterLookupTarget.target
				go func() { topicRegisterLookupDone <- net.lookup(target, false) }()
			}

		case <-nextRegisterTime:
			log.Trace("<-nextRegisterTime")
			net.ticketStore.ticketRegistered(*nextTicket)
			//fmt.Println("sendTopicRegister", nextTicket.t.node.addr().String(), nextTicket.t.topics, nextTicket.idx, nextTicket.t.pong)
			net.conn.sendTopicRegister(nextTicket.t.node, nextTicket.t.topics, nextTicket.idx, nextTicket.t.pong)

		case req := <-net.topicSearchReq:
			if refreshDone == nil {
				log.Trace("<-net.topicSearchReq")
				info, ok := searchInfo[req.topic]
				if ok {
					if req.delay == time.Duration(0) {
						// Zero delay stops the search for this topic.
						delete(searchInfo, req.topic)
						net.ticketStore.removeSearchTopic(req.topic)
					} else {
						info.period = req.delay
						searchInfo[req.topic] = info
					}
					continue
				}
				if req.delay != time.Duration(0) {
					var info topicSearchInfo
					info.period = req.delay
					info.lookupChn = req.lookup
					searchInfo[req.topic] = info
					net.ticketStore.addSearchTopic(req.topic, req.found)
					topicSearch <- req.topic
				}
			} else {
				// A refresh is in progress; replay the request once it
				// has finished.
				searchReqWhenRefreshDone = append(searchReqWhenRefreshDone, req)
			}

		case topic := <-topicSearch:
			if activeSearchCount < maxSearchCount {
				activeSearchCount++
				target := net.ticketStore.nextSearchLookup(topic)
				go func() {
					nodes := net.lookup(target.target, false)
					topicSearchLookupDone <- topicSearchResult{target: target, nodes: nodes}
				}()
			}
			period := searchInfo[topic].period
			if period != time.Duration(0) {
				// Schedule the next search round for this topic.
				go func() {
					time.Sleep(period)
					topicSearch <- topic
				}()
			}

		case res := <-topicSearchLookupDone:
			activeSearchCount--
			if lookupChn := searchInfo[res.target.topic].lookupChn; lookupChn != nil {
				lookupChn <- net.ticketStore.radius[res.target.topic].converged
			}
			net.ticketStore.searchLookupDone(res.target, res.nodes, func(n *Node, topic Topic) []byte {
				if n.state != nil && n.state.canQuery {
					return net.conn.send(n, topicQueryPacket, topicQuery{Topic: topic}) // TODO: set expiration
				}
				if n.state == unknown {
					net.ping(n, n.addr())
				}
				return nil
			})

		case <-statsDump.C:
			log.Trace("<-statsDump.C")
			/*r, ok := net.ticketStore.radius[testTopic]
			if !ok {
				fmt.Printf("(%x) no radius @ %v\n", net.tab.self.ID[:8], time.Now())
			} else {
				topics := len(net.ticketStore.tickets)
				tickets := len(net.ticketStore.nodes)
				rad := r.radius / (maxRadius/10000+1)
				fmt.Printf("(%x) topics:%d radius:%d tickets:%d @ %v\n", net.tab.self.ID[:8], topics, rad, tickets, time.Now())
			}*/

			tm := mclock.Now()
			for topic, r := range net.ticketStore.radius {
				if printTestImgLogs {
					rad := r.radius / (maxRadius/1000000 + 1)
					minrad := r.minRadius / (maxRadius/1000000 + 1)
					fmt.Printf("*R %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha[:8], rad)
					fmt.Printf("*MR %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha[:8], minrad)
				}
			}
			for topic, t := range net.topictab.topics {
				wp := t.wcl.nextWaitPeriod(tm)
				if printTestImgLogs {
					fmt.Printf("*W %d %v %016x %d\n", tm/1000000, topic, net.tab.self.sha[:8], wp/1000000)
				}
			}

		// Periodic / lookup-initiated bucket refresh.
		case <-refreshTimer.C:
			log.Trace("<-refreshTimer.C")
			// TODO: ideally we would start the refresh timer after
			// fallback nodes have been set for the first time.
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				net.refresh(refreshDone)
			}
		case <-bucketRefreshTimer.C:
			target := net.tab.chooseBucketRefreshTarget()
			go func() {
				// The timer is re-armed only after the lookup finishes,
				// so at most one bucket refresh runs at a time.
				net.lookup(target, false)
				bucketRefreshTimer.Reset(bucketRefreshInterval)
			}()
		case newNursery := <-net.refreshReq:
			log.Trace("<-net.refreshReq")
			if newNursery != nil {
				net.nursery = newNursery
			}
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				net.refresh(refreshDone)
			}
			net.refreshResp <- refreshDone
		case <-refreshDone:
			log.Trace("<-net.refreshDone", "table size", net.tab.count)
			if net.tab.count != 0 {
				refreshDone = nil
				// Replay topic search requests that arrived during the
				// refresh.
				list := searchReqWhenRefreshDone
				searchReqWhenRefreshDone = nil
				go func() {
					for _, req := range list {
						net.topicSearchReq <- req
					}
				}()
			} else {
				// The table is still empty; keep refreshing.
				refreshDone = make(chan struct{})
				net.refresh(refreshDone)
			}
		}
	}
	log.Trace("loop stopped")

	log.Debug(fmt.Sprintf("shutting down"))
	if net.conn != nil {
		net.conn.Close()
	}
	// TODO: wait for pending refresh.
	// if refreshDone != nil {
	// 	<-refreshResults
	// }
	// Cancel all pending timeouts.
	for _, timer := range net.timeoutTimers {
		timer.Stop()
	}
	if net.db != nil {
		net.db.close()
	}
	close(net.closed)
}
   657  
   658  // Everything below runs on the Network.loop goroutine
   659  // and can modify Node, Table and Network at any time without locking.
   660  
// refresh seeds the table from the database (or, failing that, the
// nursery) and starts a self lookup to fill the buckets. done is
// closed when the lookup finishes; if no seeds are available at all,
// done is closed after a 10 second delay instead so the loop retries.
func (net *Network) refresh(done chan<- struct{}) {
	var seeds []*Node
	if net.db != nil {
		seeds = net.db.querySeeds(seedCount, seedMaxAge)
	}
	if len(seeds) == 0 {
		seeds = net.nursery
	}
	if len(seeds) == 0 {
		log.Trace("no seed nodes found")
		time.AfterFunc(time.Second*10, func() { close(done) })
		return
	}
	for _, n := range seeds {
		log.Debug("", "msg", log.Lazy{Fn: func() string {
			var age string
			if net.db != nil {
				age = time.Since(net.db.lastPong(n.ID)).String()
			} else {
				age = "unknown"
			}
			// NOTE(review): this closure captures the loop variable n;
			// if the Lazy Fn is evaluated after the loop has advanced,
			// it may describe a later seed — confirm log.Lazy timing.
			return fmt.Sprintf("seed node (age %s): %v", age, n)
		}})
		n = net.internNodeFromDB(n)
		if n.state == unknown {
			net.transition(n, verifyinit)
		}
		// Force-add the seed node so Lookup does something.
		// It will be deleted again if verification fails.
		net.tab.add(n)
	}
	// Start self lookup to fill up the buckets.
	go func() {
		net.Lookup(net.tab.self.ID)
		close(done)
	}()
}
   698  
   699  // Node Interning.
   700  
   701  func (net *Network) internNode(pkt *ingressPacket) *Node {
   702  	if n := net.nodes[pkt.remoteID]; n != nil {
   703  		n.IP = pkt.remoteAddr.IP
   704  		n.UDP = uint16(pkt.remoteAddr.Port)
   705  		n.TCP = uint16(pkt.remoteAddr.Port)
   706  		return n
   707  	}
   708  	n := NewNode(pkt.remoteID, pkt.remoteAddr.IP, uint16(pkt.remoteAddr.Port), uint16(pkt.remoteAddr.Port))
   709  	n.state = unknown
   710  	net.nodes[pkt.remoteID] = n
   711  	return n
   712  }
   713  
   714  func (net *Network) internNodeFromDB(dbn *Node) *Node {
   715  	if n := net.nodes[dbn.ID]; n != nil {
   716  		return n
   717  	}
   718  	n := NewNode(dbn.ID, dbn.IP, dbn.UDP, dbn.TCP)
   719  	n.state = unknown
   720  	net.nodes[n.ID] = n
   721  	return n
   722  }
   723  
   724  func (net *Network) internNodeFromNeighbours(sender *net.UDPAddr, rn rpcNode) (n *Node, err error) {
   725  	if rn.ID == net.tab.self.ID {
   726  		return nil, errors.New("is self")
   727  	}
   728  	if rn.UDP <= lowPort {
   729  		return nil, errors.New("low port")
   730  	}
   731  	n = net.nodes[rn.ID]
   732  	if n == nil {
   733  		// We haven't seen this node before.
   734  		n, err = nodeFromRPC(sender, rn)
   735  		if net.netrestrict != nil && !net.netrestrict.Contains(n.IP) {
   736  			return n, errors.New("not contained in netrestrict whitelist")
   737  		}
   738  		if err == nil {
   739  			n.state = unknown
   740  			net.nodes[n.ID] = n
   741  		}
   742  		return n, err
   743  	}
   744  	if !n.IP.Equal(rn.IP) || n.UDP != rn.UDP || n.TCP != rn.TCP {
   745  		if n.state == known {
   746  			// reject address change if node is known by us
   747  			err = fmt.Errorf("metadata mismatch: got %v, want %v", rn, n)
   748  		} else {
   749  			// accept otherwise; this will be handled nicer with signed ENRs
   750  			n.IP = rn.IP
   751  			n.UDP = rn.UDP
   752  			n.TCP = rn.TCP
   753  		}
   754  	}
   755  	return n, err
   756  }
   757  
// nodeNetGuts is embedded in Node and contains fields.
type nodeNetGuts struct {
	// This is a cached copy of sha3(ID) which is used for node
	// distance calculations. This is part of Node in order to make it
	// possible to write tests that need a node at a certain distance.
	// In those tests, the content of sha will not actually correspond
	// with ID.
	sha common.Hash

	// State machine fields. Access to these fields
	// is restricted to the Network.loop goroutine.
	state             *nodeState
	pingEcho          []byte           // hash of last ping sent by us
	pingTopics        []Topic          // topic set sent by us in last ping
	deferredQueries   []*findnodeQuery // queries that can't be sent yet
	pendingNeighbours *findnodeQuery   // current query, waiting for reply
	queryTimeouts     int              // query timeout count; reset when the node enters unknown
}
   776  
// deferQuery queues a findnode query that cannot be sent yet. Deferred
// queries are retried via startNextQuery.
func (n *nodeNetGuts) deferQuery(q *findnodeQuery) {
	n.deferredQueries = append(n.deferredQueries, q)
}
   780  
// startNextQuery attempts to send the oldest deferred query and, on
// success, removes it from the queue.
func (n *nodeNetGuts) startNextQuery(net *Network) {
	if len(n.deferredQueries) == 0 {
		return
	}
	nextq := n.deferredQueries[0]
	if nextq.start(net) {
		// Shift the remaining queries down, reusing the backing array.
		n.deferredQueries = append(n.deferredQueries[:0], n.deferredQueries[1:]...)
	}
}
   790  
// start attempts to issue the query. It returns false if the query
// cannot be sent yet; callers are expected to defer it (deferQuery)
// in that case.
func (q *findnodeQuery) start(net *Network) bool {
	// Satisfy queries against the local node directly.
	if q.remote == net.tab.self {
		closest := net.tab.closest(q.target, bucketSize)
		q.reply <- closest.entries
		return true
	}
	if q.remote.state.canQuery && q.remote.pendingNeighbours == nil {
		net.conn.sendFindnodeHash(q.remote, q.target)
		// Arm a timeout; neighboursTimeout fires if no reply arrives.
		net.timedEvent(respTimeout, q.remote, neighboursTimeout)
		q.remote.pendingNeighbours = q
		return true
	}
	// If the node is not known yet, it won't accept queries.
	// Initiate the transition to known.
	// The request will be sent later when the node reaches known state.
	if q.remote.state == unknown {
		net.transition(q.remote, verifyinit)
	}
	return false
}
   812  
// Node Events (the input to the state machine).

// nodeEvent identifies an input to the per-node state machine: either
// a received packet type or an internal timeout.
type nodeEvent uint

//go:generate stringer -type=nodeEvent

const (

	// Packet type events.
	// These correspond to packet types in the UDP protocol.
	pingPacket = iota + 1
	pongPacket
	findnodePacket
	neighborsPacket
	findnodeHashPacket
	topicRegisterPacket
	topicQueryPacket
	topicNodesPacket

	// Non-packet events.
	// Event values in this category are allocated outside
	// the packet type range (packet types are encoded as a single byte).
	pongTimeout nodeEvent = iota + 256
	pingTimeout
	neighboursTimeout
)
   839  
// Node State Machine.

// nodeState describes one state of the per-node state machine. handle
// processes an event and returns the next state; enter runs when the
// state is entered; canQuery reports whether findnode queries may be
// sent to a node in this state.
type nodeState struct {
	name     string
	handle   func(*Network, *Node, nodeEvent, *ingressPacket) (next *nodeState, err error)
	enter    func(*Network, *Node)
	canQuery bool
}

// String returns the state name, for logging.
func (s *nodeState) String() string {
	return s.name
}
   852  
// The states of the node state machine; initialized in init below.
var (
	unknown          *nodeState
	verifyinit       *nodeState
	verifywait       *nodeState
	remoteverifywait *nodeState
	known            *nodeState
	contested        *nodeState
	unresponsive     *nodeState
)
   862  
// init wires up the node state machine. Each *nodeState value defines how a
// node in that state reacts to packets and timeout events; Network.transition
// runs a state's enter hook when a node moves into it.
func init() {
	// unknown: the node has not been verified. Entering this state removes
	// it from the table and fails any outstanding queries.
	unknown = &nodeState{
		name: "unknown",
		enter: func(net *Network, n *Node) {
			net.tab.delete(n)
			n.pingEcho = nil
			// Abort active queries.
			for _, q := range n.deferredQueries {
				q.reply <- nil
			}
			n.deferredQueries = nil
			if n.pendingNeighbours != nil {
				n.pendingNeighbours.reply <- nil
				n.pendingNeighbours = nil
			}
			n.queryTimeouts = 0
		},
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pingPacket:
				// Answer the ping and ping back to verify the endpoint.
				net.handlePing(n, pkt)
				net.ping(n, pkt.remoteAddr)
				return verifywait, nil
			default:
				return unknown, errInvalidEvent
			}
		},
	}

	// verifyinit: we initiate endpoint verification by pinging the node
	// and wait for its pong.
	verifyinit = &nodeState{
		name: "verifyinit",
		enter: func(net *Network, n *Node) {
			net.ping(n, n.addr())
		},
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pingPacket:
				net.handlePing(n, pkt)
				return verifywait, nil
			case pongPacket:
				err := net.handleKnownPong(n, pkt)
				return remoteverifywait, err
			case pongTimeout:
				return unknown, nil
			default:
				return verifyinit, errInvalidEvent
			}
		},
	}

	// verifywait: the remote pinged us and we pinged back; wait for its
	// pong before accepting it as known.
	verifywait = &nodeState{
		name: "verifywait",
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pingPacket:
				net.handlePing(n, pkt)
				return verifywait, nil
			case pongPacket:
				err := net.handleKnownPong(n, pkt)
				return known, err
			case pongTimeout:
				return unknown, nil
			default:
				return verifywait, errInvalidEvent
			}
		},
	}

	// remoteverifywait: our ping was answered; keep answering pings for one
	// response timeout, then move to known.
	remoteverifywait = &nodeState{
		name: "remoteverifywait",
		enter: func(net *Network, n *Node) {
			net.timedEvent(respTimeout, n, pingTimeout)
		},
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pingPacket:
				net.handlePing(n, pkt)
				return remoteverifywait, nil
			case pingTimeout:
				return known, nil
			default:
				return remoteverifywait, errInvalidEvent
			}
		},
	}

	// known: endpoint verification succeeded; the node is inserted into the
	// table and may be queried.
	known = &nodeState{
		name:     "known",
		canQuery: true,
		enter: func(net *Network, n *Node) {
			n.queryTimeouts = 0
			n.startNextQuery(net)
			// Insert into the table and start revalidation of the last node
			// in the bucket if it is full.
			last := net.tab.add(n)
			if last != nil && last.state == known {
				// TODO: do this asynchronously
				net.transition(last, contested)
			}
		},
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pingPacket:
				net.handlePing(n, pkt)
				return known, nil
			case pongPacket:
				err := net.handleKnownPong(n, pkt)
				return known, err
			default:
				return net.handleQueryEvent(n, ev, pkt)
			}
		},
	}

	// contested: the node's bucket slot is wanted by a new entry; re-ping it
	// and evict it from the table if the pong times out.
	contested = &nodeState{
		name:     "contested",
		canQuery: true,
		enter: func(net *Network, n *Node) {
			net.ping(n, n.addr())
		},
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pongPacket:
				// Node is still alive.
				err := net.handleKnownPong(n, pkt)
				return known, err
			case pongTimeout:
				net.tab.deleteReplace(n)
				return unresponsive, nil
			case pingPacket:
				net.handlePing(n, pkt)
				return contested, nil
			default:
				return net.handleQueryEvent(n, ev, pkt)
			}
		},
	}

	// unresponsive: the node failed the contested re-ping and was replaced
	// in the table; any ping or pong promotes it back to known.
	unresponsive = &nodeState{
		name:     "unresponsive",
		canQuery: true,
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pingPacket:
				net.handlePing(n, pkt)
				return known, nil
			case pongPacket:
				err := net.handleKnownPong(n, pkt)
				return known, err
			default:
				return net.handleQueryEvent(n, ev, pkt)
			}
		},
	}
}
  1018  
  1019  // handle processes packets sent by n and events related to n.
  1020  func (net *Network) handle(n *Node, ev nodeEvent, pkt *ingressPacket) error {
  1021  	//fmt.Println("handle", n.addr().String(), n.state, ev)
  1022  	if pkt != nil {
  1023  		if err := net.checkPacket(n, ev, pkt); err != nil {
  1024  			//fmt.Println("check err:", err)
  1025  			return err
  1026  		}
  1027  		// Start the background expiration goroutine after the first
  1028  		// successful communication. Subsequent calls have no effect if it
  1029  		// is already running. We do this here instead of somewhere else
  1030  		// so that the search for seed nodes also considers older nodes
  1031  		// that would otherwise be removed by the expirer.
  1032  		if net.db != nil {
  1033  			net.db.ensureExpirer()
  1034  		}
  1035  	}
  1036  	if n.state == nil {
  1037  		n.state = unknown //???
  1038  	}
  1039  	next, err := n.state.handle(net, n, ev, pkt)
  1040  	net.transition(n, next)
  1041  	//fmt.Println("new state:", n.state)
  1042  	return err
  1043  }
  1044  
  1045  func (net *Network) checkPacket(n *Node, ev nodeEvent, pkt *ingressPacket) error {
  1046  	// Replay prevention checks.
  1047  	switch ev {
  1048  	case pingPacket, findnodeHashPacket, neighborsPacket:
  1049  		// TODO: check date is > last date seen
  1050  		// TODO: check ping version
  1051  	case pongPacket:
  1052  		if !bytes.Equal(pkt.data.(*pong).ReplyTok, n.pingEcho) {
  1053  			// fmt.Println("pong reply token mismatch")
  1054  			return fmt.Errorf("pong reply token mismatch")
  1055  		}
  1056  		n.pingEcho = nil
  1057  	}
  1058  	// Address validation.
  1059  	// TODO: Ideally we would do the following:
  1060  	//  - reject all packets with wrong address except ping.
  1061  	//  - for ping with new address, transition to verifywait but keep the
  1062  	//    previous node (with old address) around. if the new one reaches known,
  1063  	//    swap it out.
  1064  	return nil
  1065  }
  1066  
  1067  func (net *Network) transition(n *Node, next *nodeState) {
  1068  	if n.state != next {
  1069  		n.state = next
  1070  		if next.enter != nil {
  1071  			next.enter(net, n)
  1072  		}
  1073  	}
  1074  
  1075  	// TODO: persist/unpersist node
  1076  }
  1077  
// timedEvent schedules ev to be delivered for n on the network's event loop
// after duration d. The timer is tracked in net.timeoutTimers so it can be
// cancelled with abortTimedEvent; the send is abandoned if the network is
// closed first.
// NOTE(review): scheduling a second event for the same (ev, n) pair
// overwrites the map entry without stopping the earlier timer — presumably
// callers never do this while one is pending; verify.
func (net *Network) timedEvent(d time.Duration, n *Node, ev nodeEvent) {
	timeout := timeoutEvent{ev, n}
	net.timeoutTimers[timeout] = time.AfterFunc(d, func() {
		select {
		case net.timeout <- timeout:
		case <-net.closed:
		}
	})
}
  1087  
  1088  func (net *Network) abortTimedEvent(n *Node, ev nodeEvent) {
  1089  	timer := net.timeoutTimers[timeoutEvent{ev, n}]
  1090  	if timer != nil {
  1091  		timer.Stop()
  1092  		delete(net.timeoutTimers, timeoutEvent{ev, n})
  1093  	}
  1094  }
  1095  
  1096  func (net *Network) ping(n *Node, addr *net.UDPAddr) {
  1097  	//fmt.Println("ping", n.addr().String(), n.ID.String(), n.sha.Hex())
  1098  	if n.pingEcho != nil || n.ID == net.tab.self.ID {
  1099  		//fmt.Println(" not sent")
  1100  		return
  1101  	}
  1102  	log.Trace("Pinging remote node", "node", n.ID)
  1103  	n.pingTopics = net.ticketStore.regTopicSet()
  1104  	n.pingEcho = net.conn.sendPing(n, addr, n.pingTopics)
  1105  	net.timedEvent(respTimeout, n, pongTimeout)
  1106  }
  1107  
  1108  func (net *Network) handlePing(n *Node, pkt *ingressPacket) {
  1109  	log.Trace("Handling remote ping", "node", n.ID)
  1110  	ping := pkt.data.(*ping)
  1111  	n.TCP = ping.From.TCP
  1112  	t := net.topictab.getTicket(n, ping.Topics)
  1113  
  1114  	pong := &pong{
  1115  		To:         makeEndpoint(n.addr(), n.TCP), // TODO: maybe use known TCP port from DB
  1116  		ReplyTok:   pkt.hash,
  1117  		Expiration: uint64(time.Now().Add(expiration).Unix()),
  1118  	}
  1119  	ticketToPong(t, pong)
  1120  	net.conn.send(n, pongPacket, pong)
  1121  }
  1122  
  1123  func (net *Network) handleKnownPong(n *Node, pkt *ingressPacket) error {
  1124  	log.Trace("Handling known pong", "node", n.ID)
  1125  	net.abortTimedEvent(n, pongTimeout)
  1126  	now := mclock.Now()
  1127  	ticket, err := pongToTicket(now, n.pingTopics, n, pkt)
  1128  	if err == nil {
  1129  		// fmt.Printf("(%x) ticket: %+v\n", net.tab.self.ID[:8], pkt.data)
  1130  		net.ticketStore.addTicket(now, pkt.data.(*pong).ReplyTok, ticket)
  1131  	} else {
  1132  		log.Trace("Failed to convert pong to ticket", "err", err)
  1133  	}
  1134  	n.pingEcho = nil
  1135  	n.pingTopics = nil
  1136  	return err
  1137  }
  1138  
// handleQueryEvent handles packets and timeouts related to findnode and
// topic queries. It is reached from the default case of states with
// canQuery set (known, contested, unresponsive) and returns the state the
// node should transition to.
func (net *Network) handleQueryEvent(n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
	switch ev {
	case findnodePacket:
		// Remote asks for nodes close to a target; distance is measured
		// on the Keccak256 hash of the target.
		target := crypto.Keccak256Hash(pkt.data.(*findnode).Target[:])
		results := net.tab.closest(target, bucketSize).entries
		net.conn.sendNeighbours(n, results)
		return n.state, nil
	case neighborsPacket:
		// Reply to one of our own findnode queries.
		err := net.handleNeighboursPacket(n, pkt)
		return n.state, err
	case neighboursTimeout:
		// Our findnode query went unanswered; fail the pending query and
		// count the timeout against the node.
		if n.pendingNeighbours != nil {
			n.pendingNeighbours.reply <- nil
			n.pendingNeighbours = nil
		}
		n.queryTimeouts++
		if n.queryTimeouts > maxFindnodeFailures && n.state == known {
			return contested, errors.New("too many timeouts")
		}
		return n.state, nil

	// v5

	case findnodeHashPacket:
		// Like findnode, but the target is already a hash.
		results := net.tab.closest(pkt.data.(*findnodeHash).Target, bucketSize).entries
		net.conn.sendNeighbours(n, results)
		return n.state, nil
	case topicRegisterPacket:
		// Remote redeems a previously issued ticket to register for topics.
		regdata := pkt.data.(*topicRegister)
		pong, err := net.checkTopicRegister(regdata)
		if err != nil {
			return n.state, fmt.Errorf("bad waiting ticket: %v", err)
		}
		net.topictab.useTicket(n, pong.TicketSerial, regdata.Topics, int(regdata.Idx), pong.Expiration, pong.WaitPeriods)
		return n.state, nil
	case topicQueryPacket:
		// TODO: handle expiration
		topic := pkt.data.(*topicQuery).Topic
		results := net.topictab.getEntries(topic)
		if _, ok := net.ticketStore.tickets[topic]; ok {
			results = append(results, net.tab.self) // we're not registering in our own table but if we're advertising, return ourselves too
		}
		// Cap the reply at 10 entries.
		if len(results) > 10 {
			results = results[:10]
		}
		var hash common.Hash
		copy(hash[:], pkt.hash)
		net.conn.sendTopicNodes(n, hash, results)
		return n.state, nil
	case topicNodesPacket:
		// Response to one of our topic queries; an unexpected or stale
		// response counts as a query failure.
		p := pkt.data.(*topicNodes)
		if net.ticketStore.gotTopicNodes(n, p.Echo, p.Nodes) {
			n.queryTimeouts++
			if n.queryTimeouts > maxFindnodeFailures && n.state == known {
				return contested, errors.New("too many timeouts")
			}
		}
		return n.state, nil

	default:
		return n.state, errInvalidEvent
	}
}
  1204  
  1205  func (net *Network) checkTopicRegister(data *topicRegister) (*pong, error) {
  1206  	var pongpkt ingressPacket
  1207  	if err := decodePacket(data.Pong, &pongpkt); err != nil {
  1208  		return nil, err
  1209  	}
  1210  	if pongpkt.ev != pongPacket {
  1211  		return nil, errors.New("is not pong packet")
  1212  	}
  1213  	if pongpkt.remoteID != net.tab.self.ID {
  1214  		return nil, errors.New("not signed by us")
  1215  	}
  1216  	// check that we previously authorised all topics
  1217  	// that the other side is trying to register.
  1218  	if rlpHash(data.Topics) != pongpkt.data.(*pong).TopicHash {
  1219  		return nil, errors.New("topic hash mismatch")
  1220  	}
  1221  	if data.Idx >= uint(len(data.Topics)) {
  1222  		return nil, errors.New("topic index out of range")
  1223  	}
  1224  	return pongpkt.data.(*pong), nil
  1225  }
  1226  
  1227  func rlpHash(x interface{}) (h common.Hash) {
  1228  	hw := sha3.NewLegacyKeccak256()
  1229  	rlp.Encode(hw, x)
  1230  	hw.Sum(h[:0])
  1231  	return h
  1232  }
  1233  
  1234  func (net *Network) handleNeighboursPacket(n *Node, pkt *ingressPacket) error {
  1235  	if n.pendingNeighbours == nil {
  1236  		return errNoQuery
  1237  	}
  1238  	net.abortTimedEvent(n, neighboursTimeout)
  1239  
  1240  	req := pkt.data.(*neighbors)
  1241  	nodes := make([]*Node, len(req.Nodes))
  1242  	for i, rn := range req.Nodes {
  1243  		nn, err := net.internNodeFromNeighbours(pkt.remoteAddr, rn)
  1244  		if err != nil {
  1245  			log.Debug(fmt.Sprintf("invalid neighbour (%v) from %x@%v: %v", rn.IP, n.ID[:8], pkt.remoteAddr, err))
  1246  			continue
  1247  		}
  1248  		nodes[i] = nn
  1249  		// Start validation of query results immediately.
  1250  		// This fills the table quickly.
  1251  		// TODO: generates way too many packets, maybe do it via queue.
  1252  		if nn.state == unknown {
  1253  			net.transition(nn, verifyinit)
  1254  		}
  1255  	}
  1256  	// TODO: don't ignore second packet
  1257  	n.pendingNeighbours.reply <- nodes
  1258  	n.pendingNeighbours = nil
  1259  	// Now that this query is done, start the next one.
  1260  	n.startNextQuery(net)
  1261  	return nil
  1262  }