github.com/digdeepmining/go-atheios@v1.5.13-0.20180902133602-d5687a2e6f43/p2p/discv5/net.go (about)

     1  // Copyright 2016 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package discv5
    18  
    19  import (
    20  	"bytes"
    21  	"crypto/ecdsa"
    22  	"errors"
    23  	"fmt"
    24  	"net"
    25  	"time"
    26  
    27  	"github.com/atheioschain/go-atheios/common"
    28  	"github.com/atheioschain/go-atheios/common/mclock"
    29  	"github.com/atheioschain/go-atheios/crypto"
    30  	"github.com/atheioschain/go-atheios/crypto/sha3"
    31  	"github.com/atheioschain/go-atheios/logger"
    32  	"github.com/atheioschain/go-atheios/logger/glog"
    33  	"github.com/atheioschain/go-atheios/p2p/nat"
    34  	"github.com/atheioschain/go-atheios/p2p/netutil"
    35  	"github.com/atheioschain/go-atheios/rlp"
    36  )
    37  
    38  var (
    39  	errInvalidEvent = errors.New("invalid in current state")
    40  	errNoQuery      = errors.New("no pending query")
    41  	errWrongAddress = errors.New("unknown sender address")
    42  )
    43  
    44  const (
    45  	autoRefreshInterval   = 1 * time.Hour
    46  	bucketRefreshInterval = 1 * time.Minute
    47  	seedCount             = 30
    48  	seedMaxAge            = 5 * 24 * time.Hour
    49  	lowPort               = 1024
    50  )
    51  
    52  const testTopic = "foo"
    53  
    54  const (
    55  	printDebugLogs   = false
    56  	printTestImgLogs = false
    57  )
    58  
    59  func debugLog(s string) {
    60  	if printDebugLogs {
    61  		fmt.Println(s)
    62  	}
    63  }
    64  
    65  // Network manages the table and all protocol interaction.
    66  type Network struct {
    67  	db          *nodeDB // database of known nodes
    68  	conn        transport
    69  	netrestrict *netutil.Netlist
    70  
    71  	closed           chan struct{}          // closed when loop is done
    72  	closeReq         chan struct{}          // 'request to close'
    73  	refreshReq       chan []*Node           // lookups ask for refresh on this channel
    74  	refreshResp      chan (<-chan struct{}) // ...and get the channel to block on from this one
    75  	read             chan ingressPacket     // ingress packets arrive here
    76  	timeout          chan timeoutEvent
    77  	queryReq         chan *findnodeQuery // lookups submit findnode queries on this channel
    78  	tableOpReq       chan func()
    79  	tableOpResp      chan struct{}
    80  	topicRegisterReq chan topicRegisterReq
    81  	topicSearchReq   chan topicSearchReq
    82  
    83  	// State of the main loop.
    84  	tab           *Table
    85  	topictab      *topicTable
    86  	ticketStore   *ticketStore
    87  	nursery       []*Node
    88  	nodes         map[NodeID]*Node // tracks active nodes with state != known
    89  	timeoutTimers map[timeoutEvent]*time.Timer
    90  
    91  	// Revalidation queues.
    92  	// Nodes put on these queues will be pinged eventually.
    93  	slowRevalidateQueue []*Node
    94  	fastRevalidateQueue []*Node
    95  
    96  	// Buffers for state transition.
    97  	sendBuf []*ingressPacket
    98  }
    99  
   100  // transport is implemented by the UDP transport.
   101  // It is an interface so we can test without opening lots of UDP
   102  // sockets and without generating a private key.
   103  type transport interface {
   104  	sendPing(remote *Node, remoteAddr *net.UDPAddr, topics []Topic) (hash []byte)
   105  	sendNeighbours(remote *Node, nodes []*Node)
   106  	sendFindnodeHash(remote *Node, target common.Hash)
   107  	sendTopicRegister(remote *Node, topics []Topic, topicIdx int, pong []byte)
   108  	sendTopicNodes(remote *Node, queryHash common.Hash, nodes []*Node)
   109  
   110  	send(remote *Node, ptype nodeEvent, p interface{}) (hash []byte)
   111  
   112  	localAddr() *net.UDPAddr
   113  	Close()
   114  }
   115  
   116  type findnodeQuery struct {
   117  	remote   *Node
   118  	target   common.Hash
   119  	reply    chan<- []*Node
   120  	nresults int // counter for received nodes
   121  }
   122  
   123  type topicRegisterReq struct {
   124  	add   bool
   125  	topic Topic
   126  }
   127  
   128  type topicSearchReq struct {
   129  	topic  Topic
   130  	found  chan<- *Node
   131  	lookup chan<- bool
   132  	delay  time.Duration
   133  }
   134  
   135  type topicSearchResult struct {
   136  	target lookupInfo
   137  	nodes  []*Node
   138  }
   139  
   140  type timeoutEvent struct {
   141  	ev   nodeEvent
   142  	node *Node
   143  }
   144  
   145  func newNetwork(conn transport, ourPubkey ecdsa.PublicKey, natm nat.Interface, dbPath string, netrestrict *netutil.Netlist) (*Network, error) {
   146  	ourID := PubkeyID(&ourPubkey)
   147  
   148  	var db *nodeDB
   149  	if dbPath != "<no database>" {
   150  		var err error
   151  		if db, err = newNodeDB(dbPath, Version, ourID); err != nil {
   152  			return nil, err
   153  		}
   154  	}
   155  
   156  	tab := newTable(ourID, conn.localAddr())
   157  	net := &Network{
   158  		db:               db,
   159  		conn:             conn,
   160  		netrestrict:      netrestrict,
   161  		tab:              tab,
   162  		topictab:         newTopicTable(db, tab.self),
   163  		ticketStore:      newTicketStore(),
   164  		refreshReq:       make(chan []*Node),
   165  		refreshResp:      make(chan (<-chan struct{})),
   166  		closed:           make(chan struct{}),
   167  		closeReq:         make(chan struct{}),
   168  		read:             make(chan ingressPacket, 100),
   169  		timeout:          make(chan timeoutEvent),
   170  		timeoutTimers:    make(map[timeoutEvent]*time.Timer),
   171  		tableOpReq:       make(chan func()),
   172  		tableOpResp:      make(chan struct{}),
   173  		queryReq:         make(chan *findnodeQuery),
   174  		topicRegisterReq: make(chan topicRegisterReq),
   175  		topicSearchReq:   make(chan topicSearchReq),
   176  		nodes:            make(map[NodeID]*Node),
   177  	}
   178  	go net.loop()
   179  	return net, nil
   180  }
   181  
   182  // Close terminates the network listener and flushes the node database.
   183  func (net *Network) Close() {
   184  	net.conn.Close()
   185  	select {
   186  	case <-net.closed:
   187  	case net.closeReq <- struct{}{}:
   188  		<-net.closed
   189  	}
   190  }
   191  
   192  // Self returns the local node.
   193  // The returned node should not be modified by the caller.
   194  func (net *Network) Self() *Node {
   195  	return net.tab.self
   196  }
   197  
   198  // ReadRandomNodes fills the given slice with random nodes from the
   199  // table. It will not write the same node more than once. The nodes in
   200  // the slice are copies and can be modified by the caller.
   201  func (net *Network) ReadRandomNodes(buf []*Node) (n int) {
   202  	net.reqTableOp(func() { n = net.tab.readRandomNodes(buf) })
   203  	return n
   204  }
   205  
   206  // SetFallbackNodes sets the initial points of contact. These nodes
   207  // are used to connect to the network if the table is empty and there
   208  // are no known nodes in the database.
   209  func (net *Network) SetFallbackNodes(nodes []*Node) error {
   210  	nursery := make([]*Node, 0, len(nodes))
   211  	for _, n := range nodes {
   212  		if err := n.validateComplete(); err != nil {
   213  			return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err)
   214  		}
   215  		// Recompute cpy.sha because the node might not have been
   216  		// created by NewNode or ParseNode.
   217  		cpy := *n
   218  		cpy.sha = crypto.Keccak256Hash(n.ID[:])
   219  		nursery = append(nursery, &cpy)
   220  	}
   221  	net.reqRefresh(nursery)
   222  	return nil
   223  }
   224  
   225  // Resolve searches for a specific node with the given ID.
   226  // It returns nil if the node could not be found.
   227  func (net *Network) Resolve(targetID NodeID) *Node {
   228  	result := net.lookup(crypto.Keccak256Hash(targetID[:]), true)
   229  	for _, n := range result {
   230  		if n.ID == targetID {
   231  			return n
   232  		}
   233  	}
   234  	return nil
   235  }
   236  
   237  // Lookup performs a network search for nodes close
   238  // to the given target. It approaches the target by querying
   239  // nodes that are closer to it on each iteration.
   240  // The given target does not need to be an actual node
   241  // identifier.
   242  //
   243  // The local node may be included in the result.
   244  func (net *Network) Lookup(targetID NodeID) []*Node {
   245  	return net.lookup(crypto.Keccak256Hash(targetID[:]), false)
   246  }
   247  
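        // lookup performs an iterative Kademlia-style search for nodes close to
        // target. It repeatedly queries the alpha closest nodes that have not been
        // asked yet, merging their replies into the result set, and stops once every
        // node in the result set has been queried. If stopOnMatch is true, it returns
        // as soon as a node whose sha equals target is seen.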
   248  func (net *Network) lookup(target common.Hash, stopOnMatch bool) []*Node {
   249  	var (
   250  		asked          = make(map[NodeID]bool)
   251  		seen           = make(map[NodeID]bool)
   252  		reply          = make(chan []*Node, alpha)
   253  		result         = nodesByDistance{target: target}
   254  		pendingQueries = 0
   255  	)
   256  	// Get initial answers from the local node.
   257  	result.push(net.tab.self, bucketSize)
   258  	for {
   259  		// Ask the α closest nodes that we haven't asked yet.
   260  		for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
   261  			n := result.entries[i]
   262  			if !asked[n.ID] {
   263  				asked[n.ID] = true
   264  				pendingQueries++
   265  				net.reqQueryFindnode(n, target, reply)
   266  			}
   267  		}
   268  		if pendingQueries == 0 {
   269  			// We have asked all closest nodes, stop the search.
   270  			break
   271  		}
   272  		// Wait for the next reply.
   273  		select {
   274  		case nodes := <-reply:
   275  			for _, n := range nodes {
   276  				if n != nil && !seen[n.ID] {
   277  					seen[n.ID] = true
   278  					result.push(n, bucketSize)
   279  					if stopOnMatch && n.sha == target {
   280  						return result.entries
   281  					}
   282  				}
   283  			}
   284  			pendingQueries--
   285  		case <-time.After(respTimeout):
   286  			// forget all pending requests, start new ones
   287  			pendingQueries = 0
   288  			reply = make(chan []*Node, alpha)
   289  		}
   290  	}
   291  	return result.entries
   292  }
   293  
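        // RegisterTopic registers the local node under the given topic until the
        // stop channel is closed or the network is shut down. It blocks while the
        // registration is active.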
   294  func (net *Network) RegisterTopic(topic Topic, stop <-chan struct{}) {
   295  	select {
   296  	case net.topicRegisterReq <- topicRegisterReq{true, topic}:
   297  	case <-net.closed:
   298  		return
   299  	}
   300  	select {
   301  	case <-net.closed:
   302  	case <-stop:
   303  		select {
   304  		case net.topicRegisterReq <- topicRegisterReq{false, topic}:
   305  		case <-net.closed:
   306  		}
   307  	}
   308  }
   309  
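        // SearchTopic looks for nodes advertising the given topic and delivers them
        // on the found channel. The search period is controlled through setPeriod;
        // sending a zero duration or closing the channel stops the search. After each
        // search lookup, the lookup channel receives whether the topic radius
        // estimate has converged.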
   310  func (net *Network) SearchTopic(topic Topic, setPeriod <-chan time.Duration, found chan<- *Node, lookup chan<- bool) {
   311  	for {
   312  		select {
   313  		case <-net.closed:
   314  			return
   315  		case delay, ok := <-setPeriod:
   316  			select {
   317  			case net.topicSearchReq <- topicSearchReq{topic: topic, found: found, lookup: lookup, delay: delay}:
   318  			case <-net.closed:
   319  				return
   320  			}
   321  			if !ok {
   322  				return
   323  			}
   324  		}
   325  	}
   326  }
   327  
   328  func (net *Network) reqRefresh(nursery []*Node) <-chan struct{} {
   329  	select {
   330  	case net.refreshReq <- nursery:
   331  		return <-net.refreshResp
   332  	case <-net.closed:
   333  		return net.closed
   334  	}
   335  }
   336  
   337  func (net *Network) reqQueryFindnode(n *Node, target common.Hash, reply chan []*Node) bool {
   338  	q := &findnodeQuery{remote: n, target: target, reply: reply}
   339  	select {
   340  	case net.queryReq <- q:
   341  		return true
   342  	case <-net.closed:
   343  		return false
   344  	}
   345  }
   346  
   347  func (net *Network) reqReadPacket(pkt ingressPacket) {
   348  	select {
   349  	case net.read <- pkt:
   350  	case <-net.closed:
   351  	}
   352  }
   353  
   354  func (net *Network) reqTableOp(f func()) (called bool) {
   355  	select {
   356  	case net.tableOpReq <- f:
   357  		<-net.tableOpResp
   358  		return true
   359  	case <-net.closed:
   360  		return false
   361  	}
   362  }
   363  
   364  // TODO: external address handling.
   365  
   366  type topicSearchInfo struct {
   367  	lookupChn chan<- bool
   368  	period    time.Duration
   369  }
   370  
   371  const maxSearchCount = 5
   372  
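        // loop is the main event loop of the Network. It owns the node table, the
        // topic table and the ticket store, and serializes all state transitions:
        // ingress packets, timeouts, queries, refreshes and topic registration or
        // search requests are all funneled through it via channels.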
   373  func (net *Network) loop() {
   374  	var (
   375  		refreshTimer       = time.NewTicker(autoRefreshInterval)
   376  		bucketRefreshTimer = time.NewTimer(bucketRefreshInterval)
   377  		refreshDone        chan struct{} // closed when the 'refresh' lookup has ended
   378  	)
   379  
   380  	// Tracking the next ticket to register.
   381  	var (
   382  		nextTicket        *ticketRef
   383  		nextRegisterTimer *time.Timer
   384  		nextRegisterTime  <-chan time.Time
   385  	)
   386  	defer func() {
   387  		if nextRegisterTimer != nil {
   388  			nextRegisterTimer.Stop()
   389  		}
   390  	}()
   391  	resetNextTicket := func() {
   392  		t, timeout := net.ticketStore.nextFilteredTicket()
   393  		if t != nextTicket {
   394  			nextTicket = t
   395  			if nextRegisterTimer != nil {
   396  				nextRegisterTimer.Stop()
   397  				nextRegisterTime = nil
   398  			}
   399  			if t != nil {
   400  				nextRegisterTimer = time.NewTimer(timeout)
   401  				nextRegisterTime = nextRegisterTimer.C
   402  			}
   403  		}
   404  	}
   405  
   406  	// Tracking registration and search lookups.
   407  	var (
   408  		topicRegisterLookupTarget lookupInfo
   409  		topicRegisterLookupDone   chan []*Node
   410  		topicRegisterLookupTick   = time.NewTimer(0)
   411  		searchReqWhenRefreshDone  []topicSearchReq
   412  		searchInfo                = make(map[Topic]topicSearchInfo)
   413  		activeSearchCount         int
   414  	)
   415  	topicSearchLookupDone := make(chan topicSearchResult, 100)
   416  	topicSearch := make(chan Topic, 100)
   417  	<-topicRegisterLookupTick.C
   418  
   419  	statsDump := time.NewTicker(10 * time.Second)
   420  
   421  loop:
   422  	for {
   423  		resetNextTicket()
   424  
   425  		select {
   426  		case <-net.closeReq:
   427  			debugLog("<-net.closeReq")
   428  			break loop
   429  
   430  		// Ingress packet handling.
   431  		case pkt := <-net.read:
   432  			//fmt.Println("read", pkt.ev)
   433  			debugLog("<-net.read")
   434  			n := net.internNode(&pkt)
   435  			prestate := n.state
   436  			status := "ok"
   437  			if err := net.handle(n, pkt.ev, &pkt); err != nil {
   438  				status = err.Error()
   439  			}
   440  			if glog.V(logger.Detail) {
   441  				glog.Infof("<<< (%d) %v from %x@%v: %v -> %v (%v)",
   442  					net.tab.count, pkt.ev, pkt.remoteID[:8], pkt.remoteAddr, prestate, n.state, status)
   443  			}
   444  			// TODO: persist state if n.state goes >= known, delete if it goes <= known
   445  
   446  		// State transition timeouts.
   447  		case timeout := <-net.timeout:
   448  			debugLog("<-net.timeout")
   449  			if net.timeoutTimers[timeout] == nil {
   450  				// Stale timer (was aborted).
   451  				continue
   452  			}
   453  			delete(net.timeoutTimers, timeout)
   454  			prestate := timeout.node.state
   455  			status := "ok"
   456  			if err := net.handle(timeout.node, timeout.ev, nil); err != nil {
   457  				status = err.Error()
   458  			}
   459  			if glog.V(logger.Detail) {
   460  				glog.Infof("--- (%d) %v for %x@%v: %v -> %v (%v)",
   461  					net.tab.count, timeout.ev, timeout.node.ID[:8], timeout.node.addr(), prestate, timeout.node.state, status)
   462  			}
   463  
   464  		// Querying.
   465  		case q := <-net.queryReq:
   466  			debugLog("<-net.queryReq")
   467  			if !q.start(net) {
   468  				q.remote.deferQuery(q)
   469  			}
   470  
   471  		// Interacting with the table.
   472  		case f := <-net.tableOpReq:
   473  			debugLog("<-net.tableOpReq")
   474  			f()
   475  			net.tableOpResp <- struct{}{}
   476  
   477  		// Topic registration stuff.
   478  		case req := <-net.topicRegisterReq:
   479  			debugLog("<-net.topicRegisterReq")
   480  			if !req.add {
   481  				net.ticketStore.removeRegisterTopic(req.topic)
   482  				continue
   483  			}
   484  			net.ticketStore.addTopic(req.topic, true)
   485  			// If we're currently idle (nothing to look up), give the ticket store a
   486  			// chance to start the next lookup sooner. This should speed up convergence
   487  			// of the radius determination for new topics.
   488  			// if topicRegisterLookupDone == nil {
   489  			if topicRegisterLookupTarget.target == (common.Hash{}) {
   490  				debugLog("topicRegisterLookupTarget == null")
   491  				if topicRegisterLookupTick.Stop() {
   492  					<-topicRegisterLookupTick.C
   493  				}
   494  				target, delay := net.ticketStore.nextRegisterLookup()
   495  				topicRegisterLookupTarget = target
   496  				topicRegisterLookupTick.Reset(delay)
   497  			}
   498  
   499  		case nodes := <-topicRegisterLookupDone:
   500  			debugLog("<-topicRegisterLookupDone")
   501  			net.ticketStore.registerLookupDone(topicRegisterLookupTarget, nodes, func(n *Node) []byte {
   502  				net.ping(n, n.addr())
   503  				return n.pingEcho
   504  			})
   505  			target, delay := net.ticketStore.nextRegisterLookup()
   506  			topicRegisterLookupTarget = target
   507  			topicRegisterLookupTick.Reset(delay)
   508  			topicRegisterLookupDone = nil
   509  
   510  		case <-topicRegisterLookupTick.C:
   511  			debugLog("<-topicRegisterLookupTick")
   512  			if (topicRegisterLookupTarget.target == common.Hash{}) {
   513  				target, delay := net.ticketStore.nextRegisterLookup()
   514  				topicRegisterLookupTarget = target
   515  				topicRegisterLookupTick.Reset(delay)
   516  				topicRegisterLookupDone = nil
   517  			} else {
   518  				topicRegisterLookupDone = make(chan []*Node)
   519  				target := topicRegisterLookupTarget.target
   520  				go func() { topicRegisterLookupDone <- net.lookup(target, false) }()
   521  			}
   522  
   523  		case <-nextRegisterTime:
   524  			debugLog("<-nextRegisterTime")
   525  			net.ticketStore.ticketRegistered(*nextTicket)
   526  			//fmt.Println("sendTopicRegister", nextTicket.t.node.addr().String(), nextTicket.t.topics, nextTicket.idx, nextTicket.t.pong)
   527  			net.conn.sendTopicRegister(nextTicket.t.node, nextTicket.t.topics, nextTicket.idx, nextTicket.t.pong)
   528  
   529  		case req := <-net.topicSearchReq:
   530  			if refreshDone == nil {
   531  				debugLog("<-net.topicSearchReq")
   532  				info, ok := searchInfo[req.topic]
   533  				if ok {
   534  					if req.delay == time.Duration(0) {
   535  						delete(searchInfo, req.topic)
   536  						net.ticketStore.removeSearchTopic(req.topic)
   537  					} else {
   538  						info.period = req.delay
   539  						searchInfo[req.topic] = info
   540  					}
   541  					continue
   542  				}
   543  				if req.delay != time.Duration(0) {
   544  					var info topicSearchInfo
   545  					info.period = req.delay
   546  					info.lookupChn = req.lookup
   547  					searchInfo[req.topic] = info
   548  					net.ticketStore.addSearchTopic(req.topic, req.found)
   549  					topicSearch <- req.topic
   550  				}
   551  			} else {
   552  				searchReqWhenRefreshDone = append(searchReqWhenRefreshDone, req)
   553  			}
   554  
   555  		case topic := <-topicSearch:
   556  			if activeSearchCount < maxSearchCount {
   557  				activeSearchCount++
   558  				target := net.ticketStore.nextSearchLookup(topic)
   559  				go func() {
   560  					nodes := net.lookup(target.target, false)
   561  					topicSearchLookupDone <- topicSearchResult{target: target, nodes: nodes}
   562  				}()
   563  			}
   564  			period := searchInfo[topic].period
   565  			if period != time.Duration(0) {
   566  				go func() {
   567  					time.Sleep(period)
   568  					topicSearch <- topic
   569  				}()
   570  			}
   571  
   572  		case res := <-topicSearchLookupDone:
   573  			activeSearchCount--
   574  			if lookupChn := searchInfo[res.target.topic].lookupChn; lookupChn != nil {
   575  				lookupChn <- net.ticketStore.radius[res.target.topic].converged
   576  			}
   577  			net.ticketStore.searchLookupDone(res.target, res.nodes, func(n *Node) []byte {
   578  				net.ping(n, n.addr())
   579  				return n.pingEcho
   580  			}, func(n *Node, topic Topic) []byte {
   581  				if n.state == known {
   582  					return net.conn.send(n, topicQueryPacket, topicQuery{Topic: topic}) // TODO: set expiration
   583  				} else {
   584  					if n.state == unknown {
   585  						net.ping(n, n.addr())
   586  					}
   587  					return nil
   588  				}
   589  			})
   590  
   591  		case <-statsDump.C:
   592  			debugLog("<-statsDump.C")
   593  			/*r, ok := net.ticketStore.radius[testTopic]
   594  			if !ok {
   595  				fmt.Printf("(%x) no radius @ %v\n", net.tab.self.ID[:8], time.Now())
   596  			} else {
   597  				topics := len(net.ticketStore.tickets)
   598  				tickets := len(net.ticketStore.nodes)
   599  				rad := r.radius / (maxRadius/10000+1)
   600  				fmt.Printf("(%x) topics:%d radius:%d tickets:%d @ %v\n", net.tab.self.ID[:8], topics, rad, tickets, time.Now())
   601  			}*/
   602  
   603  			tm := mclock.Now()
   604  			for topic, r := range net.ticketStore.radius {
   605  				if printTestImgLogs {
   606  					rad := r.radius / (maxRadius/1000000 + 1)
   607  					minrad := r.minRadius / (maxRadius/1000000 + 1)
   608  					fmt.Printf("*R %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha[:8], rad)
   609  					fmt.Printf("*MR %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha[:8], minrad)
   610  				}
   611  			}
   612  			for topic, t := range net.topictab.topics {
   613  				wp := t.wcl.nextWaitPeriod(tm)
   614  				if printTestImgLogs {
   615  					fmt.Printf("*W %d %v %016x %d\n", tm/1000000, topic, net.tab.self.sha[:8], wp/1000000)
   616  				}
   617  			}
   618  
   619  		// Periodic / lookup-initiated bucket refresh.
   620  		case <-refreshTimer.C:
   621  			debugLog("<-refreshTimer.C")
   622  			// TODO: ideally we would start the refresh timer after
   623  			// fallback nodes have been set for the first time.
   624  			if refreshDone == nil {
   625  				refreshDone = make(chan struct{})
   626  				net.refresh(refreshDone)
   627  			}
   628  		case <-bucketRefreshTimer.C:
   629  			target := net.tab.chooseBucketRefreshTarget()
   630  			go func() {
   631  				net.lookup(target, false)
   632  				bucketRefreshTimer.Reset(bucketRefreshInterval)
   633  			}()
   634  		case newNursery := <-net.refreshReq:
   635  			debugLog("<-net.refreshReq")
   636  			if newNursery != nil {
   637  				net.nursery = newNursery
   638  			}
   639  			if refreshDone == nil {
   640  				refreshDone = make(chan struct{})
   641  				net.refresh(refreshDone)
   642  			}
   643  			net.refreshResp <- refreshDone
   644  		case <-refreshDone:
   645  			debugLog("<-net.refreshDone")
   646  			refreshDone = nil
   647  			list := searchReqWhenRefreshDone
   648  			searchReqWhenRefreshDone = nil
   649  			go func() {
   650  				for _, req := range list {
   651  					net.topicSearchReq <- req
   652  				}
   653  			}()
   654  		}
   655  	}
   656  	debugLog("loop stopped")
   657  
   658  	glog.V(logger.Debug).Infof("shutting down")
   659  	if net.conn != nil {
   660  		net.conn.Close()
   661  	}
   662  	if refreshDone != nil {
   663  		// TODO: wait for pending refresh.
   664  		//<-refreshResults
   665  	}
   666  	// Cancel all pending timeouts.
   667  	for _, timer := range net.timeoutTimers {
   668  		timer.Stop()
   669  	}
   670  	if net.db != nil {
   671  		net.db.close()
   672  	}
   673  	close(net.closed)
   674  }
   675  
   676  // Everything below runs on the Network.loop goroutine
   677  // and can modify Node, Table and Network at any time without locking.
   678  
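        // refresh seeds the table from the node database, falling back to the
        // nursery, and starts a self lookup to fill up the buckets. done is closed
        // when the lookup has finished or when no seed nodes could be found.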
   679  func (net *Network) refresh(done chan<- struct{}) {
   680  	var seeds []*Node
   681  	if net.db != nil {
   682  		seeds = net.db.querySeeds(seedCount, seedMaxAge)
   683  	}
   684  	if len(seeds) == 0 {
   685  		seeds = net.nursery
   686  	}
   687  	if len(seeds) == 0 {
   688  		glog.V(logger.Detail).Info("no seed nodes found")
   689  		close(done)
   690  		return
   691  	}
   692  	for _, n := range seeds {
   693  		if glog.V(logger.Debug) {
   694  			var age string
   695  			if net.db != nil {
   696  				age = time.Since(net.db.lastPong(n.ID)).String()
   697  			} else {
   698  				age = "unknown"
   699  			}
   700  			glog.Infof("seed node (age %s): %v", age, n)
   701  		}
   702  		n = net.internNodeFromDB(n)
   703  		if n.state == unknown {
   704  			net.transition(n, verifyinit)
   705  		}
   706  		// Force-add the seed node so Lookup does something.
   707  		// It will be deleted again if verification fails.
   708  		net.tab.add(n)
   709  	}
   710  	// Start self lookup to fill up the buckets.
   711  	go func() {
   712  		net.Lookup(net.tab.self.ID)
   713  		close(done)
   714  	}()
   715  }
   716  
   717  // Node Interning.
   718  
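        // internNode returns the tracked node for the sender of an ingress packet,
        // creating it in the unknown state if it has not been seen before, and
        // updates its endpoint from the packet's source address.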
   719  func (net *Network) internNode(pkt *ingressPacket) *Node {
   720  	if n := net.nodes[pkt.remoteID]; n != nil {
   721  		n.IP = pkt.remoteAddr.IP
   722  		n.UDP = uint16(pkt.remoteAddr.Port)
   723  		n.TCP = uint16(pkt.remoteAddr.Port)
   724  		return n
   725  	}
   726  	n := NewNode(pkt.remoteID, pkt.remoteAddr.IP, uint16(pkt.remoteAddr.Port), uint16(pkt.remoteAddr.Port))
   727  	n.state = unknown
   728  	net.nodes[pkt.remoteID] = n
   729  	return n
   730  }
   731  
   732  func (net *Network) internNodeFromDB(dbn *Node) *Node {
   733  	if n := net.nodes[dbn.ID]; n != nil {
   734  		return n
   735  	}
   736  	n := NewNode(dbn.ID, dbn.IP, dbn.UDP, dbn.TCP)
   737  	n.state = unknown
   738  	net.nodes[n.ID] = n
   739  	return n
   740  }
   741  
   742  func (net *Network) internNodeFromNeighbours(sender *net.UDPAddr, rn rpcNode) (n *Node, err error) {
   743  	if rn.ID == net.tab.self.ID {
   744  		return nil, errors.New("is self")
   745  	}
   746  	if rn.UDP <= lowPort {
   747  		return nil, errors.New("low port")
   748  	}
   749  	n = net.nodes[rn.ID]
   750  	if n == nil {
   751  		// We haven't seen this node before.
   752  		if n, err = nodeFromRPC(sender, rn); err != nil {
   753  			return n, err
   754  		}
   755  		if net.netrestrict != nil && !net.netrestrict.Contains(n.IP) {
   756  			return n, errors.New("not contained in netrestrict whitelist")
   757  		}
   758  		n.state = unknown
   759  		net.nodes[n.ID] = n
   760  		return n, nil
   761  	}
   762  	if !n.IP.Equal(rn.IP) || n.UDP != rn.UDP || n.TCP != rn.TCP {
   763  		err = fmt.Errorf("metadata mismatch: got %v, want %v", rn, n)
   764  	}
   765  	return n, err
   766  }
   767  
   768  // nodeNetGuts is embedded in Node and contains the fields used internally by the Network state machine.
   769  type nodeNetGuts struct {
   770  	// This is a cached copy of sha3(ID) which is used for node
   771  	// distance calculations. This is part of Node in order to make it
   772  	// possible to write tests that need a node at a certain distance.
   773  	// In those tests, the content of sha will not actually correspond
   774  	// with ID.
   775  	sha common.Hash
   776  
   777  	// State machine fields. Access to these fields
   778  	// is restricted to the Network.loop goroutine.
   779  	state             *nodeState
   780  	pingEcho          []byte           // hash of last ping sent by us
   781  	pingTopics        []Topic          // topic set sent by us in last ping
   782  	deferredQueries   []*findnodeQuery // queries that can't be sent yet
   783  	pendingNeighbours *findnodeQuery   // current query, waiting for reply
   784  	queryTimeouts     int
   785  }
   786  
   787  func (n *nodeNetGuts) deferQuery(q *findnodeQuery) {
   788  	n.deferredQueries = append(n.deferredQueries, q)
   789  }
   790  
   791  func (n *nodeNetGuts) startNextQuery(net *Network) {
   792  	if len(n.deferredQueries) == 0 {
   793  		return
   794  	}
   795  	nextq := n.deferredQueries[0]
   796  	if nextq.start(net) {
   797  		n.deferredQueries = append(n.deferredQueries[:0], n.deferredQueries[1:]...)
   798  	}
   799  }
   800  
   801  func (q *findnodeQuery) start(net *Network) bool {
   802  	// Satisfy queries against the local node directly.
   803  	if q.remote == net.tab.self {
   804  		closest := net.tab.closest(crypto.Keccak256Hash(q.target[:]), bucketSize)
   805  		q.reply <- closest.entries
   806  		return true
   807  	}
   808  	if q.remote.state.canQuery && q.remote.pendingNeighbours == nil {
   809  		net.conn.sendFindnodeHash(q.remote, q.target)
   810  		net.timedEvent(respTimeout, q.remote, neighboursTimeout)
   811  		q.remote.pendingNeighbours = q
   812  		return true
   813  	}
   814  	// If the node is not known yet, it won't accept queries.
   815  	// Initiate the transition to known.
   816  	// The request will be sent later when the node reaches known state.
   817  	if q.remote.state == unknown {
   818  		net.transition(q.remote, verifyinit)
   819  	}
   820  	return false
   821  }
   822  
   823  // Node Events (the input to the state machine).
   824  
   825  type nodeEvent uint
   826  
   827  //go:generate stringer -type=nodeEvent
   828  
   829  const (
   830  	invalidEvent nodeEvent = iota // zero is reserved
   831  
   832  	// Packet type events.
   833  	// These correspond to packet types in the UDP protocol.
   834  	pingPacket
   835  	pongPacket
   836  	findnodePacket
   837  	neighborsPacket
   838  	findnodeHashPacket
   839  	topicRegisterPacket
   840  	topicQueryPacket
   841  	topicNodesPacket
   842  
   843  	// Non-packet events.
   844  	// Event values in this category are allocated outside
   845  	// the packet type range (packet types are encoded as a single byte).
   846  	pongTimeout nodeEvent = iota + 256
   847  	pingTimeout
   848  	neighboursTimeout
   849  )
   850  
   851  // Node State Machine.
   852  
   853  type nodeState struct {
   854  	name     string
   855  	handle   func(*Network, *Node, nodeEvent, *ingressPacket) (next *nodeState, err error)
   856  	enter    func(*Network, *Node)
   857  	canQuery bool
   858  }
   859  
   860  func (s *nodeState) String() string {
   861  	return s.name
   862  }
   863  
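        // The states a node can be in, roughly in lifecycle order:
        //
        //	unknown          - not contacted yet, or an earlier verification failed
        //	verifyinit       - we sent a ping and are waiting for the pong
        //	verifywait       - the remote pinged us; we pinged back and wait for its pong
        //	remoteverifywait - we received its pong; waiting for it to ping us or time out
        //	known            - mutually verified, in the table and accepting queries
        //	contested        - another node wants its table slot; pinged, evicted on timeout
        //	unresponsive     - failed revalidation and was removed from the table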
   864  var (
   865  	unknown          *nodeState
   866  	verifyinit       *nodeState
   867  	verifywait       *nodeState
   868  	remoteverifywait *nodeState
   869  	known            *nodeState
   870  	contested        *nodeState
   871  	unresponsive     *nodeState
   872  )
   873  
   874  func init() {
   875  	unknown = &nodeState{
   876  		name: "unknown",
   877  		enter: func(net *Network, n *Node) {
   878  			net.tab.delete(n)
   879  			n.pingEcho = nil
   880  			// Abort active queries.
   881  			for _, q := range n.deferredQueries {
   882  				q.reply <- nil
   883  			}
   884  			n.deferredQueries = nil
   885  			if n.pendingNeighbours != nil {
   886  				n.pendingNeighbours.reply <- nil
   887  				n.pendingNeighbours = nil
   888  			}
   889  			n.queryTimeouts = 0
   890  		},
   891  		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
   892  			switch ev {
   893  			case pingPacket:
   894  				net.handlePing(n, pkt)
   895  				net.ping(n, pkt.remoteAddr)
   896  				return verifywait, nil
   897  			default:
   898  				return unknown, errInvalidEvent
   899  			}
   900  		},
   901  	}
   902  
   903  	verifyinit = &nodeState{
   904  		name: "verifyinit",
   905  		enter: func(net *Network, n *Node) {
   906  			net.ping(n, n.addr())
   907  		},
   908  		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
   909  			switch ev {
   910  			case pingPacket:
   911  				net.handlePing(n, pkt)
   912  				return verifywait, nil
   913  			case pongPacket:
   914  				err := net.handleKnownPong(n, pkt)
   915  				return remoteverifywait, err
   916  			case pongTimeout:
   917  				return unknown, nil
   918  			default:
   919  				return verifyinit, errInvalidEvent
   920  			}
   921  		},
   922  	}
   923  
   924  	verifywait = &nodeState{
   925  		name: "verifywait",
   926  		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
   927  			switch ev {
   928  			case pingPacket:
   929  				net.handlePing(n, pkt)
   930  				return verifywait, nil
   931  			case pongPacket:
   932  				err := net.handleKnownPong(n, pkt)
   933  				return known, err
   934  			case pongTimeout:
   935  				return unknown, nil
   936  			default:
   937  				return verifywait, errInvalidEvent
   938  			}
   939  		},
   940  	}
   941  
   942  	remoteverifywait = &nodeState{
   943  		name: "remoteverifywait",
   944  		enter: func(net *Network, n *Node) {
   945  			net.timedEvent(respTimeout, n, pingTimeout)
   946  		},
   947  		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
   948  			switch ev {
   949  			case pingPacket:
   950  				net.handlePing(n, pkt)
   951  				return remoteverifywait, nil
   952  			case pingTimeout:
   953  				return known, nil
   954  			default:
   955  				return remoteverifywait, errInvalidEvent
   956  			}
   957  		},
   958  	}
   959  
   960  	known = &nodeState{
   961  		name:     "known",
   962  		canQuery: true,
   963  		enter: func(net *Network, n *Node) {
   964  			n.queryTimeouts = 0
   965  			n.startNextQuery(net)
   966  			// Insert into the table and start revalidation of the last node
   967  			// in the bucket if it is full.
   968  			last := net.tab.add(n)
   969  			if last != nil && last.state == known {
   970  				// TODO: do this asynchronously
   971  				net.transition(last, contested)
   972  			}
   973  		},
   974  		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
   975  			switch ev {
   976  			case pingPacket:
   977  				net.handlePing(n, pkt)
   978  				return known, nil
   979  			case pongPacket:
   980  				err := net.handleKnownPong(n, pkt)
   981  				return known, err
   982  			default:
   983  				return net.handleQueryEvent(n, ev, pkt)
   984  			}
   985  		},
   986  	}
   987  
   988  	contested = &nodeState{
   989  		name:     "contested",
   990  		canQuery: true,
   991  		enter: func(net *Network, n *Node) {
   992  			net.ping(n, n.addr())
   993  		},
   994  		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
   995  			switch ev {
   996  			case pongPacket:
   997  				// Node is still alive.
   998  				err := net.handleKnownPong(n, pkt)
   999  				return known, err
  1000  			case pongTimeout:
  1001  				net.tab.deleteReplace(n)
  1002  				return unresponsive, nil
  1003  			case pingPacket:
  1004  				net.handlePing(n, pkt)
  1005  				return contested, nil
  1006  			default:
  1007  				return net.handleQueryEvent(n, ev, pkt)
  1008  			}
  1009  		},
  1010  	}
  1011  
  1012  	unresponsive = &nodeState{
  1013  		name:     "unresponsive",
  1014  		canQuery: true,
  1015  		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
  1016  			switch ev {
  1017  			case pingPacket:
  1018  				net.handlePing(n, pkt)
  1019  				return known, nil
  1020  			case pongPacket:
  1021  				err := net.handleKnownPong(n, pkt)
  1022  				return known, err
  1023  			default:
  1024  				return net.handleQueryEvent(n, ev, pkt)
  1025  			}
  1026  		},
  1027  	}
  1028  }
  1029  
  1030  // handle processes packets sent by n and events related to n.
  1031  func (net *Network) handle(n *Node, ev nodeEvent, pkt *ingressPacket) error {
  1032  	//fmt.Println("handle", n.addr().String(), n.state, ev)
  1033  	if pkt != nil {
  1034  		if err := net.checkPacket(n, ev, pkt); err != nil {
  1035  			//fmt.Println("check err:", err)
  1036  			return err
  1037  		}
  1038  		// Start the background expiration goroutine after the first
  1039  		// successful communication. Subsequent calls have no effect if it
  1040  		// is already running. We do this here instead of somewhere else
  1041  		// so that the search for seed nodes also considers older nodes
  1042  		// that would otherwise be removed by the expirer.
  1043  		if net.db != nil {
  1044  			net.db.ensureExpirer()
  1045  		}
  1046  	}
  1047  	if n.state == nil {
  1048  		n.state = unknown //???
  1049  	}
  1050  	next, err := n.state.handle(net, n, ev, pkt)
  1051  	net.transition(n, next)
  1052  	//fmt.Println("new state:", n.state)
  1053  	return err
  1054  }
  1055  
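        // checkPacket performs replay-prevention checks on an ingress packet before
        // it is fed into the state machine. Currently it only verifies that a pong
        // matches the hash of the ping we sent.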
  1056  func (net *Network) checkPacket(n *Node, ev nodeEvent, pkt *ingressPacket) error {
  1057  	// Replay prevention checks.
  1058  	switch ev {
  1059  	case pingPacket, findnodeHashPacket, neighborsPacket:
  1060  		// TODO: check date is > last date seen
  1061  		// TODO: check ping version
  1062  	case pongPacket:
  1063  		if !bytes.Equal(pkt.data.(*pong).ReplyTok, n.pingEcho) {
  1064  			// fmt.Println("pong reply token mismatch")
  1065  			return fmt.Errorf("pong reply token mismatch")
  1066  		}
  1067  		n.pingEcho = nil
  1068  	}
  1069  	// Address validation.
  1070  	// TODO: Ideally we would do the following:
  1071  	//  - reject all packets with wrong address except ping.
  1072  	//  - for ping with new address, transition to verifywait but keep the
  1073  	//    previous node (with old address) around. if the new one reaches known,
  1074  	//    swap it out.
  1075  	return nil
  1076  }
  1077  
  1078  func (net *Network) transition(n *Node, next *nodeState) {
  1079  	if n.state != next {
  1080  		n.state = next
  1081  		if next.enter != nil {
  1082  			next.enter(net, n)
  1083  		}
  1084  	}
  1085  
  1086  	// TODO: persist/unpersist node
  1087  }
  1088  
  1089  func (net *Network) timedEvent(d time.Duration, n *Node, ev nodeEvent) {
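        // timedEvent delivers ev for n on the timeout channel once d has elapsed,
        // unless the event is cancelled via abortTimedEvent first.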
  1090  	timeout := timeoutEvent{ev, n}
  1091  	net.timeoutTimers[timeout] = time.AfterFunc(d, func() {
  1092  		select {
  1093  		case net.timeout <- timeout:
  1094  		case <-net.closed:
  1095  		}
  1096  	})
  1097  }
  1098  
  1099  func (net *Network) abortTimedEvent(n *Node, ev nodeEvent) {
  1100  	timer := net.timeoutTimers[timeoutEvent{ev, n}]
  1101  	if timer != nil {
  1102  		timer.Stop()
  1103  		delete(net.timeoutTimers, timeoutEvent{ev, n})
  1104  	}
  1105  }
  1106  
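        // ping sends a ping to the given node, unless one is already pending or the
        // node is ourselves, and arms the pong timeout.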
  1107  func (net *Network) ping(n *Node, addr *net.UDPAddr) {
  1108  	//fmt.Println("ping", n.addr().String(), n.ID.String(), n.sha.Hex())
  1109  	if n.pingEcho != nil || n.ID == net.tab.self.ID {
  1110  		//fmt.Println(" not sent")
  1111  		return
  1112  	}
  1113  	debugLog(fmt.Sprintf("ping(node = %x)", n.ID[:8]))
  1114  	n.pingTopics = net.ticketStore.regTopicSet()
  1115  	n.pingEcho = net.conn.sendPing(n, addr, n.pingTopics)
  1116  	net.timedEvent(respTimeout, n, pongTimeout)
  1117  }
  1118  
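        // handlePing answers a ping with a pong carrying a topic ticket for the
        // topics advertised in the ping.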
  1119  func (net *Network) handlePing(n *Node, pkt *ingressPacket) {
  1120  	debugLog(fmt.Sprintf("handlePing(node = %x)", n.ID[:8]))
  1121  	ping := pkt.data.(*ping)
  1122  	n.TCP = ping.From.TCP
  1123  	t := net.topictab.getTicket(n, ping.Topics)
  1124  
  1125  	pong := &pong{
  1126  		To:         makeEndpoint(n.addr(), n.TCP), // TODO: maybe use known TCP port from DB
  1127  		ReplyTok:   pkt.hash,
  1128  		Expiration: uint64(time.Now().Add(expiration).Unix()),
  1129  	}
  1130  	ticketToPong(t, pong)
  1131  	net.conn.send(n, pongPacket, pong)
  1132  }
  1133  
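        // handleKnownPong processes a pong from a node we pinged: it cancels the
        // pending pong timeout and, if the pong carries a valid ticket, stores the
        // ticket for later topic registration.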
  1134  func (net *Network) handleKnownPong(n *Node, pkt *ingressPacket) error {
  1135  	debugLog(fmt.Sprintf("handleKnownPong(node = %x)", n.ID[:8]))
  1136  	net.abortTimedEvent(n, pongTimeout)
  1137  	now := mclock.Now()
  1138  	ticket, err := pongToTicket(now, n.pingTopics, n, pkt)
  1139  	if err == nil {
  1140  		// fmt.Printf("(%x) ticket: %+v\n", net.tab.self.ID[:8], pkt.data)
  1141  		net.ticketStore.addTicket(now, pkt.data.(*pong).ReplyTok, ticket)
  1142  	} else {
  1143  		debugLog(fmt.Sprintf(" error: %v", err))
  1144  	}
  1145  
  1146  	n.pingEcho = nil
  1147  	n.pingTopics = nil
  1148  	return err
  1149  }
  1150  
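        // handleQueryEvent handles findnode and topic packets as well as query
        // timeouts. It is shared by all states that can answer queries (known,
        // contested, unresponsive).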
  1151  func (net *Network) handleQueryEvent(n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
  1152  	switch ev {
  1153  	case findnodePacket:
  1154  		target := crypto.Keccak256Hash(pkt.data.(*findnode).Target[:])
  1155  		results := net.tab.closest(target, bucketSize).entries
  1156  		net.conn.sendNeighbours(n, results)
  1157  		return n.state, nil
  1158  	case neighborsPacket:
  1159  		err := net.handleNeighboursPacket(n, pkt)
  1160  		return n.state, err
  1161  	case neighboursTimeout:
  1162  		if n.pendingNeighbours != nil {
  1163  			n.pendingNeighbours.reply <- nil
  1164  			n.pendingNeighbours = nil
  1165  		}
  1166  		n.queryTimeouts++
  1167  		if n.queryTimeouts > maxFindnodeFailures && n.state == known {
  1168  			return contested, errors.New("too many timeouts")
  1169  		}
  1170  		return n.state, nil
  1171  
  1172  	// v5
  1173  
  1174  	case findnodeHashPacket:
  1175  		results := net.tab.closest(pkt.data.(*findnodeHash).Target, bucketSize).entries
  1176  		net.conn.sendNeighbours(n, results)
  1177  		return n.state, nil
  1178  	case topicRegisterPacket:
  1179  		//fmt.Println("got topicRegisterPacket")
  1180  		regdata := pkt.data.(*topicRegister)
  1181  		pong, err := net.checkTopicRegister(regdata)
  1182  		if err != nil {
  1183  			//fmt.Println(err)
  1184  			return n.state, fmt.Errorf("bad waiting ticket: %v", err)
  1185  		}
  1186  		net.topictab.useTicket(n, pong.TicketSerial, regdata.Topics, int(regdata.Idx), pong.Expiration, pong.WaitPeriods)
  1187  		return n.state, nil
  1188  	case topicQueryPacket:
  1189  		// TODO: handle expiration
  1190  		topic := pkt.data.(*topicQuery).Topic
  1191  		results := net.topictab.getEntries(topic)
  1192  		if _, ok := net.ticketStore.tickets[topic]; ok {
  1193  			results = append(results, net.tab.self) // we're not registering in our own table but if we're advertising, return ourselves too
  1194  		}
  1195  		if len(results) > 10 {
  1196  			results = results[:10]
  1197  		}
  1198  		var hash common.Hash
  1199  		copy(hash[:], pkt.hash)
  1200  		net.conn.sendTopicNodes(n, hash, results)
  1201  		return n.state, nil
  1202  	case topicNodesPacket:
  1203  		p := pkt.data.(*topicNodes)
  1204  		if net.ticketStore.gotTopicNodes(n, p.Echo, p.Nodes) {
  1205  			n.queryTimeouts++
  1206  			if n.queryTimeouts > maxFindnodeFailures && n.state == known {
  1207  				return contested, errors.New("too many timeouts")
  1208  			}
  1209  		}
  1210  		return n.state, nil
  1211  
  1212  	default:
  1213  		return n.state, errInvalidEvent
  1214  	}
  1215  }
  1216  
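        // checkTopicRegister validates a topic registration request: the enclosed
        // ticket must be a pong packet previously issued by us, its topic hash must
        // cover exactly the topics being registered, and the topic index must be in
        // range.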
  1217  func (net *Network) checkTopicRegister(data *topicRegister) (*pong, error) {
  1218  	var pongpkt ingressPacket
  1219  	if err := decodePacket(data.Pong, &pongpkt); err != nil {
  1220  		return nil, err
  1221  	}
  1222  	if pongpkt.ev != pongPacket {
  1223  		return nil, errors.New("is not pong packet")
  1224  	}
  1225  	if pongpkt.remoteID != net.tab.self.ID {
  1226  		return nil, errors.New("not signed by us")
  1227  	}
  1228  	// check that we previously authorised all topics
  1229  	// that the other side is trying to register.
  1230  	if rlpHash(data.Topics) != pongpkt.data.(*pong).TopicHash {
  1231  		return nil, errors.New("topic hash mismatch")
  1232  	}
  1233  	if data.Idx < 0 || int(data.Idx) >= len(data.Topics) {
  1234  		return nil, errors.New("topic index out of range")
  1235  	}
  1236  	return pongpkt.data.(*pong), nil
  1237  }
  1238  
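        // rlpHash returns the Keccak-256 hash of the RLP encoding of x.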
  1239  func rlpHash(x interface{}) (h common.Hash) {
  1240  	hw := sha3.NewKeccak256()
  1241  	rlp.Encode(hw, x)
  1242  	hw.Sum(h[:0])
  1243  	return h
  1244  }
  1245  
  1246  func (net *Network) handleNeighboursPacket(n *Node, pkt *ingressPacket) error {
  1247  	if n.pendingNeighbours == nil {
  1248  		return errNoQuery
  1249  	}
  1250  	net.abortTimedEvent(n, neighboursTimeout)
  1251  
  1252  	req := pkt.data.(*neighbors)
  1253  	nodes := make([]*Node, len(req.Nodes))
  1254  	for i, rn := range req.Nodes {
  1255  		nn, err := net.internNodeFromNeighbours(pkt.remoteAddr, rn)
  1256  		if err != nil {
  1257  			glog.V(logger.Debug).Infof("invalid neighbour (%v) from %x@%v: %v", rn.IP, n.ID[:8], pkt.remoteAddr, err)
  1258  			continue
  1259  		}
  1260  		nodes[i] = nn
  1261  		// Start validation of query results immediately.
  1262  		// This fills the table quickly.
  1263  		// TODO: generates way too many packets, maybe do it via queue.
  1264  		if nn.state == unknown {
  1265  			net.transition(nn, verifyinit)
  1266  		}
  1267  	}
  1268  	// TODO: don't ignore second packet
  1269  	n.pendingNeighbours.reply <- nodes
  1270  	n.pendingNeighbours = nil
  1271  	// Now that this query is done, start the next one.
  1272  	n.startNextQuery(net)
  1273  	return nil
  1274  }