github.com/annchain/OG@v0.0.9/p2p/discv5/net.go

     1  // Copyright 2016 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package discv5
    18  
    19  import (
    20  	"bytes"
    21  	"crypto/ecdsa"
    22  	"errors"
    23  	"fmt"
    24  	"github.com/annchain/OG/arefactor/common/goroutine"
    25  	"github.com/annchain/OG/arefactor/og/types"
    26  	ogcrypto2 "github.com/annchain/OG/deprecated/ogcrypto"
    27  	"github.com/annchain/OG/types/msg"
    28  	"net"
    29  	"time"
    30  
    31  	"github.com/annchain/OG/common/mclock"
    32  	"github.com/annchain/OG/p2p/netutil"
    33  	"golang.org/x/crypto/sha3"
    34  )
    35  
    36  var (
    37  	errInvalidEvent = errors.New("invalid in current state")
    38  	errNoQuery      = errors.New("no pending query")
    39  )
    40  
    41  const (
    42  	autoRefreshInterval   = 1 * time.Hour
    43  	bucketRefreshInterval = 1 * time.Minute
    44  	seedCount             = 30
    45  	seedMaxAge            = 5 * 24 * time.Hour
    46  	lowPort               = 1024
    47  )
    48  
    49  const testTopic = "foo"
    50  
    51  const (
    52  	printTestImgLogs = false
    53  )
    54  
    55  // Network manages the table and all protocol interaction.
    56  type Network struct {
    57  	db          *nodeDB // database of known nodes
    58  	conn        transport
    59  	netrestrict *netutil.Netlist
    60  
    61  	closed           chan struct{}          // closed when loop is done
    62  	closeReq         chan struct{}          // 'request to close'
    63  	refreshReq       chan []*Node           // lookups ask for refresh on this channel
    64  	refreshResp      chan (<-chan struct{}) // ...and get the channel to block on from this one
    65  	read             chan ingressPacket     // ingress packets arrive here
    66  	timeout          chan timeoutEvent
    67  	queryReq         chan *findnodeQuery // lookups submit findnode queries on this channel
    68  	tableOpReq       chan func()
    69  	tableOpResp      chan struct{}
    70  	topicRegisterReq chan topicRegisterReq
    71  	topicSearchReq   chan topicSearchReq
    72  
    73  	// State of the main loop.
    74  	tab           *Table
    75  	topictab      *topicTable
    76  	ticketStore   *ticketStore
    77  	nursery       []*Node
     78  	nodes         map[NodeID]*Node // tracks all interned nodes, whatever their state
    79  	timeoutTimers map[timeoutEvent]*time.Timer
    80  
    81  	// Revalidation queues.
    82  	// Nodes put on these queues will be pinged eventually.
    83  	slowRevalidateQueue []*Node
    84  	fastRevalidateQueue []*Node
    85  
    86  	// Buffers for state transition.
    87  	sendBuf []*ingressPacket
    88  }
    89  
    90  // transport is implemented by the UDP transport.
     91  // It is an interface so we can test without opening lots of UDP
    92  // sockets and without generating a private key.
    93  type transport interface {
    94  	sendPing(remote *Node, remoteAddr *net.UDPAddr, topics []Topic) (hash []byte)
    95  	sendNeighbours(remote *Node, nodes []*Node)
    96  	sendFindnodeHash(remote *Node, target types.Hash)
    97  	sendTopicRegister(remote *Node, topics []Topic, topicIdx int, pong []byte)
    98  	sendTopicNodes(remote *Node, queryHash types.Hash, nodes []*Node)
    99  
   100  	send(remote *Node, ptype nodeEvent, data []byte) (hash []byte)
   101  
   102  	localAddr() *net.UDPAddr
   103  	Close()
   104  }
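// Because transport is an interface, tests can drive a Network entirely in
// memory. A minimal stub might look like the sketch below (illustrative only,
// assuming nothing beyond the interface above; it is not part of this package):
//
//	type nullTransport struct{ addr *net.UDPAddr }
//
//	func (t *nullTransport) sendPing(*Node, *net.UDPAddr, []Topic) []byte  { return []byte{1} }
//	func (t *nullTransport) sendNeighbours(*Node, []*Node)                 {}
//	func (t *nullTransport) sendFindnodeHash(*Node, types.Hash)            {}
//	func (t *nullTransport) sendTopicRegister(*Node, []Topic, int, []byte) {}
//	func (t *nullTransport) sendTopicNodes(*Node, types.Hash, []*Node)     {}
//	func (t *nullTransport) send(*Node, nodeEvent, []byte) []byte          { return []byte{1} }
//	func (t *nullTransport) localAddr() *net.UDPAddr                       { return t.addr }
//	func (t *nullTransport) Close()                                        {}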
   105  
   106  type findnodeQuery struct {
   107  	remote   *Node
   108  	target   types.Hash
   109  	reply    chan<- []*Node
   110  	nresults int // counter for received nodes
   111  }
   112  
   113  type topicRegisterReq struct {
   114  	add   bool
   115  	topic Topic
   116  }
   117  
   118  type topicSearchReq struct {
   119  	topic  Topic
   120  	found  chan<- *Node
   121  	lookup chan<- bool
   122  	delay  time.Duration
   123  }
   124  
   125  type topicSearchResult struct {
   126  	target lookupInfo
   127  	nodes  []*Node
   128  }
   129  
   130  type timeoutEvent struct {
   131  	ev   nodeEvent
   132  	node *Node
   133  }
   134  
   135  func newNetwork(conn transport, ourPubkey ecdsa.PublicKey, dbPath string, netrestrict *netutil.Netlist) (*Network, error) {
   136  	ourID := PubkeyID(&ourPubkey)
   137  
   138  	var db *nodeDB
   139  	if dbPath != "<no database>" {
   140  		var err error
   141  		if db, err = newNodeDB(dbPath, Version, ourID); err != nil {
   142  			return nil, err
   143  		}
   144  	}
   145  
   146  	tab := newTable(ourID, conn.localAddr())
   147  	net := &Network{
   148  		db:               db,
   149  		conn:             conn,
   150  		netrestrict:      netrestrict,
   151  		tab:              tab,
   152  		topictab:         newTopicTable(db, tab.self),
   153  		ticketStore:      newTicketStore(),
   154  		refreshReq:       make(chan []*Node),
   155  		refreshResp:      make(chan (<-chan struct{})),
   156  		closed:           make(chan struct{}),
   157  		closeReq:         make(chan struct{}),
   158  		read:             make(chan ingressPacket, 100),
   159  		timeout:          make(chan timeoutEvent),
   160  		timeoutTimers:    make(map[timeoutEvent]*time.Timer),
   161  		tableOpReq:       make(chan func()),
   162  		tableOpResp:      make(chan struct{}),
   163  		queryReq:         make(chan *findnodeQuery),
   164  		topicRegisterReq: make(chan topicRegisterReq),
   165  		topicSearchReq:   make(chan topicSearchReq),
   166  		nodes:            make(map[NodeID]*Node),
   167  	}
   168  	goroutine.New(net.loop)
   169  	return net, nil
   170  }
   171  
   172  // Close terminates the network listener and flushes the node database.
   173  func (net *Network) Close() {
   174  	net.conn.Close()
   175  	select {
   176  	case <-net.closed:
   177  	case net.closeReq <- struct{}{}:
   178  		<-net.closed
   179  	}
   180  }
   181  
   182  // Self returns the local node.
   183  // The returned node should not be modified by the caller.
   184  func (net *Network) Self() *Node {
   185  	return net.tab.self
   186  }
   187  
   188  // ReadRandomNodes fills the given slice with random nodes from the
   189  // table. It will not write the same node more than once. The nodes in
   190  // the slice are copies and can be modified by the caller.
   191  func (net *Network) ReadRandomNodes(buf []*Node) (n int) {
   192  	net.reqTableOp(func() { n = net.tab.readRandomNodes(buf) })
   193  	return n
   194  }
   195  
   196  // SetFallbackNodes sets the initial points of contact. These nodes
   197  // are used to connect to the network if the table is empty and there
   198  // are no known nodes in the database.
   199  func (net *Network) SetFallbackNodes(nodes []*Node) error {
   200  	nursery := make([]*Node, 0, len(nodes))
   201  	for _, n := range nodes {
   202  		if err := n.validateComplete(); err != nil {
   203  			return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err)
   204  		}
   205  		// Recompute cpy.sha because the node might not have been
   206  		// created by NewNode or ParseNode.
   207  		cpy := *n
   208  		cpy.sha = ogcrypto2.Keccak256Hash(n.ID[:])
   209  		nursery = append(nursery, &cpy)
   210  	}
   211  	net.reqRefresh(nursery)
   212  	return nil
   213  }
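// A typical caller parses enode URLs and hands the result to SetFallbackNodes,
// roughly as follows (illustrative sketch; ntab is an assumed *Network handle
// and the URL is a placeholder):
//
//	n, err := ParseNode("enode://<hex node id>@10.0.0.1:30301")
//	if err == nil {
//		err = ntab.SetFallbackNodes([]*Node{n})
//	}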
   214  
   215  // Resolve searches for a specific node with the given ID.
   216  // It returns nil if the node could not be found.
   217  func (net *Network) Resolve(targetID NodeID) *Node {
   218  	result := net.lookup(ogcrypto2.Keccak256Hash(targetID[:]), true)
   219  	for _, n := range result {
   220  		if n.ID == targetID {
   221  			return n
   222  		}
   223  	}
   224  	return nil
   225  }
   226  
   227  // Lookup performs a network search for nodes close
   228  // to the given target. It approaches the target by querying
   229  // nodes that are closer to it on each iteration.
   230  // The given target does not need to be an actual node
   231  // identifier.
   232  //
   233  // The local node may be included in the result.
   234  func (net *Network) Lookup(targetID NodeID) []*Node {
   235  	return net.lookup(ogcrypto2.Keccak256Hash(targetID[:]), false)
   236  }
   237  
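// lookup runs the iterative Kademlia-style search behind Lookup and Resolve.
// It keeps the bucketSize closest results found so far and queries up to alpha
// of the closest not-yet-asked nodes concurrently, feeding their neighbour
// replies back into the result set until no unasked candidates remain (or,
// with stopOnMatch, until a node whose sha equals the target is seen). When a
// reply times out, all pending queries are forgotten and a fresh round starts.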
   238  func (net *Network) lookup(target types.Hash, stopOnMatch bool) []*Node {
   239  	var (
   240  		asked          = make(map[NodeID]bool)
   241  		seen           = make(map[NodeID]bool)
   242  		reply          = make(chan []*Node, alpha)
   243  		result         = nodesByDistance{target: target}
   244  		pendingQueries = 0
   245  	)
   246  	// Get initial answers from the local node.
   247  	result.push(net.tab.self, bucketSize)
   248  	for {
   249  		// Ask the α closest nodes that we haven't asked yet.
   250  		for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
   251  			n := result.entries[i]
   252  			if !asked[n.ID] {
   253  				asked[n.ID] = true
   254  				pendingQueries++
   255  				net.reqQueryFindnode(n, target, reply)
   256  			}
   257  		}
   258  		if pendingQueries == 0 {
   259  			// We have asked all closest nodes, stop the search.
   260  			break
   261  		}
   262  		// Wait for the next reply.
   263  		select {
   264  		case nodes := <-reply:
   265  			for _, n := range nodes {
   266  				if n != nil && !seen[n.ID] {
   267  					seen[n.ID] = true
   268  					result.push(n, bucketSize)
   269  					if stopOnMatch && n.sha == target {
   270  						return result.entries
   271  					}
   272  				}
   273  			}
   274  			pendingQueries--
   275  		case <-time.After(respTimeout):
   276  			// forget all pending requests, start new ones
   277  			pendingQueries = 0
   278  			reply = make(chan []*Node, alpha)
   279  		}
   280  	}
   281  	return result.entries
   282  }
   283  
   284  func (net *Network) RegisterTopic(topic Topic, stop <-chan struct{}) {
   285  	select {
   286  	case net.topicRegisterReq <- topicRegisterReq{true, topic}:
   287  	case <-net.closed:
   288  		return
   289  	}
   290  	select {
   291  	case <-net.closed:
   292  	case <-stop:
   293  		select {
   294  		case net.topicRegisterReq <- topicRegisterReq{false, topic}:
   295  		case <-net.closed:
   296  		}
   297  	}
   298  }
   299  
   300  func (net *Network) SearchTopic(topic Topic, setPeriod <-chan time.Duration, found chan<- *Node, lookup chan<- bool) {
   301  	for {
   302  		select {
   303  		case <-net.closed:
   304  			return
   305  		case delay, ok := <-setPeriod:
   306  			select {
   307  			case net.topicSearchReq <- topicSearchReq{topic: topic, found: found, lookup: lookup, delay: delay}:
   308  			case <-net.closed:
   309  				return
   310  			}
   311  			if !ok {
   312  				return
   313  			}
   314  		}
   315  	}
   316  }
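// RegisterTopic and SearchTopic block until the registration or search ends,
// so they are normally run on their own goroutines. A search driver might look
// like this (illustrative sketch; ntab is an assumed *Network handle and the
// period and channel sizes are arbitrary):
//
//	setPeriod := make(chan time.Duration, 1)
//	found := make(chan *Node, 16)
//	lookup := make(chan bool, 16)
//	setPeriod <- 10 * time.Second
//	go ntab.SearchTopic("mytopic", setPeriod, found, lookup)
//	// Sending a zero period (or closing setPeriod) stops the search.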
   317  
   318  func (net *Network) reqRefresh(nursery []*Node) <-chan struct{} {
   319  	select {
   320  	case net.refreshReq <- nursery:
   321  		return <-net.refreshResp
   322  	case <-net.closed:
   323  		return net.closed
   324  	}
   325  }
   326  
   327  func (net *Network) reqQueryFindnode(n *Node, target types.Hash, reply chan []*Node) bool {
   328  	q := &findnodeQuery{remote: n, target: target, reply: reply}
   329  	select {
   330  	case net.queryReq <- q:
   331  		return true
   332  	case <-net.closed:
   333  		return false
   334  	}
   335  }
   336  
   337  func (net *Network) reqReadPacket(pkt ingressPacket) {
   338  	select {
   339  	case net.read <- pkt:
   340  	case <-net.closed:
   341  	}
   342  }
   343  
   344  func (net *Network) reqTableOp(f func()) (called bool) {
   345  	select {
   346  	case net.tableOpReq <- f:
   347  		<-net.tableOpResp
   348  		return true
   349  	case <-net.closed:
   350  		return false
   351  	}
   352  }
   353  
   354  // TODO: external address handling.
   355  
   356  type topicSearchInfo struct {
   357  	lookupChn chan<- bool
   358  	period    time.Duration
   359  }
   360  
   361  const maxSearchCount = 5
   362  
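// loop is the heart of the Network: it owns the table, the topic table, the
// ticket store and all per-node state machine data, and it is the only
// goroutine allowed to touch them. Every entry point above (lookups, topic
// registration and search, ingress packets, timeouts, table operations) is
// funnelled into this select loop over a channel, which is why none of that
// state needs locking.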
   363  func (net *Network) loop() {
   364  	var (
   365  		refreshTimer       = time.NewTicker(autoRefreshInterval)
   366  		bucketRefreshTimer = time.NewTimer(bucketRefreshInterval)
   367  		refreshDone        chan struct{} // closed when the 'refresh' lookup has ended
   368  	)
   369  
   370  	// Tracking the next ticket to register.
   371  	var (
   372  		nextTicket        *ticketRef
   373  		nextRegisterTimer *time.Timer
   374  		nextRegisterTime  <-chan time.Time
   375  	)
   376  	defer func() {
   377  		if nextRegisterTimer != nil {
   378  			nextRegisterTimer.Stop()
   379  		}
   380  	}()
   381  	resetNextTicket := func() {
   382  		ticket, timeout := net.ticketStore.nextFilteredTicket()
   383  		if nextTicket != ticket {
   384  			nextTicket = ticket
   385  			if nextRegisterTimer != nil {
   386  				nextRegisterTimer.Stop()
   387  				nextRegisterTime = nil
   388  			}
   389  			if ticket != nil {
   390  				nextRegisterTimer = time.NewTimer(timeout)
   391  				nextRegisterTime = nextRegisterTimer.C
   392  			}
   393  		}
   394  	}
   395  
   396  	// Tracking registration and search lookups.
   397  	var (
   398  		topicRegisterLookupTarget lookupInfo
   399  		topicRegisterLookupDone   chan []*Node
   400  		topicRegisterLookupTick   = time.NewTimer(0)
   401  		searchReqWhenRefreshDone  []topicSearchReq
   402  		searchInfo                = make(map[Topic]topicSearchInfo)
   403  		activeSearchCount         int
   404  	)
   405  	topicSearchLookupDone := make(chan topicSearchResult, 100)
   406  	topicSearch := make(chan Topic, 100)
   407  	<-topicRegisterLookupTick.C
   408  
   409  	statsDump := time.NewTicker(10 * time.Second)
   410  
   411  loop:
   412  	for {
   413  		resetNextTicket()
   414  
   415  		select {
   416  		case <-net.closeReq:
   417  			log.Trace("<-net.closeReq")
   418  			break loop
   419  
   420  		// Ingress packet handling.
   421  		case pkt := <-net.read:
   422  			//fmt.Println("read", pkt.ev)
   423  			log.Trace("<-net.read")
   424  			n := net.internNode(&pkt)
   425  			prestate := n.state
   426  			status := "ok"
   427  			if err := net.handle(n, pkt.ev, &pkt); err != nil {
   428  				status = err.Error()
   429  			}
   430  			msgStr := fmt.Sprintf("<<< (%d) %v from %x@%v: %v -> %v (%v)",
   431  				net.tab.count, pkt.ev, pkt.remoteID[:8], pkt.remoteAddr, prestate, n.state, status)
   432  			log.Trace("", "msg", msgStr)
   433  			// TODO: persist state if n.state goes >= known, delete if it goes <= known
   434  
   435  		// State transition timeouts.
   436  		case timeout := <-net.timeout:
   437  			log.Trace("<-net.timeout")
   438  			if net.timeoutTimers[timeout] == nil {
   439  				// Stale timer (was aborted).
   440  				continue
   441  			}
   442  			delete(net.timeoutTimers, timeout)
   443  			prestate := timeout.node.state
   444  			status := "ok"
   445  			if err := net.handle(timeout.node, timeout.ev, nil); err != nil {
   446  				status = err.Error()
   447  			}
   448  			msgStr := fmt.Sprintf("--- (%d) %v for %x@%v: %v -> %v (%v)",
   449  				net.tab.count, timeout.ev, timeout.node.ID[:8], timeout.node.addr(), prestate, timeout.node.state, status)
   450  			log.Trace("", "msg", msgStr)
   451  
   452  		// Querying.
   453  		case q := <-net.queryReq:
   454  			log.Trace("<-net.queryReq")
   455  			if !q.start(net) {
   456  				q.remote.deferQuery(q)
   457  			}
   458  
   459  		// Interacting with the table.
   460  		case f := <-net.tableOpReq:
   461  			log.Trace("<-net.tableOpReq")
   462  			f()
   463  			net.tableOpResp <- struct{}{}
   464  
   465  		// Topic registration stuff.
   466  		case req := <-net.topicRegisterReq:
   467  			log.Trace("<-net.topicRegisterReq")
   468  			if !req.add {
   469  				net.ticketStore.removeRegisterTopic(req.topic)
   470  				continue
   471  			}
   472  			net.ticketStore.addTopic(req.topic, true)
   473  			// If we're currently waiting idle (nothing to look up), give the ticket store a
   474  			// chance to start it sooner. This should speed up convergence of the radius
   475  			// determination for new topics.
   476  			// if topicRegisterLookupDone == nil {
   477  			if topicRegisterLookupTarget.target == (types.Hash{}) {
   478  				log.Trace("topicRegisterLookupTarget == null")
   479  				if topicRegisterLookupTick.Stop() {
   480  					<-topicRegisterLookupTick.C
   481  				}
   482  				target, delay := net.ticketStore.nextRegisterLookup()
   483  				topicRegisterLookupTarget = target
   484  				topicRegisterLookupTick.Reset(delay)
   485  			}
   486  
   487  		case nodes := <-topicRegisterLookupDone:
   488  			log.Trace("<-topicRegisterLookupDone")
   489  			net.ticketStore.registerLookupDone(topicRegisterLookupTarget, nodes, func(n *Node) []byte {
   490  				net.ping(n, n.addr())
   491  				return n.pingEcho
   492  			})
   493  			target, delay := net.ticketStore.nextRegisterLookup()
   494  			topicRegisterLookupTarget = target
   495  			topicRegisterLookupTick.Reset(delay)
   496  			topicRegisterLookupDone = nil
   497  
   498  		case <-topicRegisterLookupTick.C:
   499  			log.Trace("<-topicRegisterLookupTick")
   500  			if topicRegisterLookupTarget.target.Empty() {
   501  				target, delay := net.ticketStore.nextRegisterLookup()
   502  				topicRegisterLookupTarget = target
   503  				topicRegisterLookupTick.Reset(delay)
   504  				topicRegisterLookupDone = nil
   505  			} else {
   506  				topicRegisterLookupDone = make(chan []*Node)
   507  				target := topicRegisterLookupTarget.target
   508  				goroutine.New(func() { topicRegisterLookupDone <- net.lookup(target, false) })
   509  			}
   510  
   511  		case <-nextRegisterTime:
   512  			log.Trace("<-nextRegisterTime")
   513  			net.ticketStore.ticketRegistered(*nextTicket)
   514  			//fmt.Println("sendTopicRegister", nextTicket.t.node.addr().String(), nextTicket.t.topics, nextTicket.idx, nextTicket.t.pong)
   515  			net.conn.sendTopicRegister(nextTicket.t.node, nextTicket.t.topics, nextTicket.idx, nextTicket.t.pong)
   516  
   517  		case req := <-net.topicSearchReq:
   518  			if refreshDone == nil {
   519  				log.Trace("<-net.topicSearchReq")
   520  				info, ok := searchInfo[req.topic]
   521  				if ok {
   522  					if req.delay == time.Duration(0) {
   523  						delete(searchInfo, req.topic)
   524  						net.ticketStore.removeSearchTopic(req.topic)
   525  					} else {
   526  						info.period = req.delay
   527  						searchInfo[req.topic] = info
   528  					}
   529  					continue
   530  				}
   531  				if req.delay != time.Duration(0) {
   532  					var info topicSearchInfo
   533  					info.period = req.delay
   534  					info.lookupChn = req.lookup
   535  					searchInfo[req.topic] = info
   536  					net.ticketStore.addSearchTopic(req.topic, req.found)
   537  					topicSearch <- req.topic
   538  				}
   539  			} else {
   540  				searchReqWhenRefreshDone = append(searchReqWhenRefreshDone, req)
   541  			}
   542  
   543  		case topic := <-topicSearch:
   544  			if activeSearchCount < maxSearchCount {
   545  				activeSearchCount++
   546  				target := net.ticketStore.nextSearchLookup(topic)
   547  				goroutine.New(func() {
   548  					nodes := net.lookup(target.target, false)
   549  					topicSearchLookupDone <- topicSearchResult{target: target, nodes: nodes}
   550  				})
   551  			}
   552  			period := searchInfo[topic].period
   553  			if period != time.Duration(0) {
   554  				goroutine.New(func() {
   555  					time.Sleep(period)
   556  					topicSearch <- topic
   557  				})
   558  			}
   559  
   560  		case res := <-topicSearchLookupDone:
   561  			activeSearchCount--
   562  			if lookupChn := searchInfo[res.target.topic].lookupChn; lookupChn != nil {
   563  				lookupChn <- net.ticketStore.radius[res.target.topic].converged
   564  			}
   565  			net.ticketStore.searchLookupDone(res.target, res.nodes, func(n *Node, topic Topic) []byte {
   566  				if n.state != nil && n.state.canQuery {
   567  					tp := TopicQuery{Topic: string(topic)}
   568  					data, _ := tp.MarshalMsg(nil)
   569  					return net.conn.send(n, topicQueryPacket, data) // TODO: set expiration
   570  				} else {
   571  					if n.state == unknown {
   572  						net.ping(n, n.addr())
   573  					}
   574  					return nil
   575  				}
   576  			})
   577  
   578  		case <-statsDump.C:
   579  			log.Trace("<-statsDump.C")
   580  			/*r, ok := net.ticketStore.radius[testTopic]
   581  			if !ok {
   582  				fmt.Printf("(%x) no radius @ %v\n", net.tab.self.ID[:8], time.Now())
   583  			} else {
   584  				topics := len(net.ticketStore.tickets)
   585  				tickets := len(net.ticketStore.nodes)
   586  				rad := r.radius / (maxRadius/10000+1)
   587  				fmt.Printf("(%x) topics:%d radius:%d tickets:%d @ %v\n", net.tab.self.ID[:8], topics, rad, tickets, time.Now())
   588  			}*/
   589  
   590  			tm := mclock.Now()
   591  			for topic, r := range net.ticketStore.radius {
   592  				if printTestImgLogs {
   593  					rad := r.radius / (maxRadius/1000000 + 1)
   594  					minrad := r.minRadius / (maxRadius/1000000 + 1)
   595  					fmt.Printf("*R %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha.Bytes[:8], rad)
   596  					fmt.Printf("*MR %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha.Bytes[:8], minrad)
   597  				}
   598  			}
   599  			for topic, t := range net.topictab.topics {
   600  				wp := t.wcl.nextWaitPeriod(tm)
   601  				if printTestImgLogs {
   602  					fmt.Printf("*W %d %v %016x %d\n", tm/1000000, topic, net.tab.self.sha.Bytes[:8], wp/1000000)
   603  				}
   604  			}
   605  
   606  		// Periodic / lookup-initiated bucket refresh.
   607  		case <-refreshTimer.C:
   608  			log.Trace("<-refreshTimer.C")
   609  			// TODO: ideally we would start the refresh timer after
   610  			// fallback nodes have been set for the first time.
   611  			if refreshDone == nil {
   612  				refreshDone = make(chan struct{})
   613  				net.refresh(refreshDone)
   614  			}
   615  		case <-bucketRefreshTimer.C:
   616  			target := net.tab.chooseBucketRefreshTarget()
   617  			goroutine.New(func() {
   618  				net.lookup(target, false)
   619  				bucketRefreshTimer.Reset(bucketRefreshInterval)
   620  			})
   621  		case newNursery := <-net.refreshReq:
   622  			log.Trace("<-net.refreshReq")
   623  			if newNursery != nil {
   624  				net.nursery = newNursery
   625  			}
   626  			if refreshDone == nil {
   627  				refreshDone = make(chan struct{})
   628  				net.refresh(refreshDone)
   629  			}
   630  			net.refreshResp <- refreshDone
   631  		case <-refreshDone:
   632  			log.Trace("<-net.refreshDone", "table size", net.tab.count)
   633  			if net.tab.count != 0 {
   634  				refreshDone = nil
   635  				list := searchReqWhenRefreshDone
   636  				searchReqWhenRefreshDone = nil
   637  				goroutine.New(func() {
   638  					for _, req := range list {
   639  						net.topicSearchReq <- req
   640  					}
   641  				})
   642  			} else {
   643  				refreshDone = make(chan struct{})
   644  				net.refresh(refreshDone)
   645  			}
   646  		}
   647  	}
   648  	log.Trace("loop stopped")
   649  
   650  	log.Debug("shutting down")
   651  	if net.conn != nil {
   652  		net.conn.Close()
   653  	}
   654  	if refreshDone != nil {
   655  		// TODO: wait for pending refresh.
   656  		//<-refreshResults
   657  	}
   658  	// Cancel all pending timeouts.
   659  	for _, timer := range net.timeoutTimers {
   660  		timer.Stop()
   661  	}
   662  	if net.db != nil {
   663  		net.db.close()
   664  	}
   665  	close(net.closed)
   666  }
   667  
   668  // Everything below runs on the Network.loop goroutine
   669  // and can modify Node, Table and Network at any time without locking.
   670  
   671  func (net *Network) refresh(done chan<- struct{}) {
   672  	var seeds []*Node
   673  	if net.db != nil {
   674  		seeds = net.db.querySeeds(seedCount, seedMaxAge)
   675  	}
   676  	if len(seeds) == 0 {
   677  		seeds = net.nursery
   678  	}
   679  	if len(seeds) == 0 {
   680  		log.Trace("no seed nodes found")
   681  		close(done)
   682  		return
   683  	}
   684  	for _, n := range seeds {
   685  		var age string
   686  		if net.db != nil {
   687  			age = time.Since(net.db.lastPong(n.ID)).String()
   688  		} else {
   689  			age = "unknown"
   690  		}
   691  		msgStr := fmt.Sprintf("seed node (age %s): %v", age, n)
   692  		log.Debug("", "msg", msgStr)
   693  		n = net.internNodeFromDB(n)
   694  		if n.state == unknown {
   695  			net.transition(n, verifyinit)
   696  		}
   697  		// Force-add the seed node so Lookup does something.
   698  		// It will be deleted again if verification fails.
   699  		net.tab.add(n)
   700  	}
   701  	// Start self lookup to fill up the buckets.
   702  	goroutine.New(func() {
   703  		net.Lookup(net.tab.self.ID)
   704  		close(done)
   705  	})
   706  }
   707  
   708  // Node Interning.
   709  
   710  func (net *Network) internNode(pkt *ingressPacket) *Node {
   711  	if n := net.nodes[pkt.remoteID]; n != nil {
   712  		n.IP = pkt.remoteAddr.IP
   713  		n.UDP = uint16(pkt.remoteAddr.Port)
   714  		n.TCP = uint16(pkt.remoteAddr.Port)
   715  		return n
   716  	}
   717  	n := NewNode(pkt.remoteID, pkt.remoteAddr.IP, uint16(pkt.remoteAddr.Port), uint16(pkt.remoteAddr.Port))
   718  	n.state = unknown
   719  	net.nodes[pkt.remoteID] = n
   720  	return n
   721  }
   722  
   723  func (net *Network) internNodeFromDB(dbn *Node) *Node {
   724  	if n := net.nodes[dbn.ID]; n != nil {
   725  		return n
   726  	}
   727  	n := NewNode(dbn.ID, dbn.IP, dbn.UDP, dbn.TCP)
   728  	n.state = unknown
   729  	net.nodes[n.ID] = n
   730  	return n
   731  }
   732  
   733  func (network *Network) internNodeFromNeighbours(sender *net.UDPAddr, rn RpcNode) (n *Node, err error) {
   734  	if rn.ID == network.tab.self.ID {
   735  		return nil, errors.New("is self")
   736  	}
   737  	if rn.UDP <= lowPort {
   738  		return nil, errors.New("low port")
   739  	}
   740  	n = network.nodes[rn.ID]
   741  	if n == nil {
   742  		// We haven't seen this node before.
   743  		n, err = nodeFromRPC(sender, rn) // n may be nil when err != nil
   744  		if err == nil && network.netrestrict != nil && !network.netrestrict.Contains(n.IP) {
   745  			return n, errors.New("not contained in netrestrict whitelist")
   746  		}
   747  		if err == nil {
   748  			n.state = unknown
   749  			network.nodes[n.ID] = n
   750  		}
   751  		return n, err
   752  	}
   753  	IP := net.IP(n.IP)
   754  	if !IP.Equal(rn.IP) || n.UDP != rn.UDP || n.TCP != rn.TCP {
   755  		if n.state == known {
   756  			// reject address change if node is known by us
   757  			err = fmt.Errorf("metadata mismatch: got %v, want %v", rn, n)
   758  		} else {
   759  			// accept otherwise; this will be handled more cleanly with signed ENRs
   760  			n.IP = rn.IP
   761  			n.UDP = rn.UDP
   762  			n.TCP = rn.TCP
   763  		}
   764  	}
   765  	return n, err
   766  }
   767  
   768  // nodeNetGuts is embedded in Node and contains the fields used by the discovery state machine.
   769  type nodeNetGuts struct {
   770  	// This is a cached copy of sha3(ID) which is used for node
   771  	// distance calculations. This is part of Node in order to make it
   772  	// possible to write tests that need a node at a certain distance.
   773  	// In those tests, the content of sha will not actually correspond
   774  	// with ID.
   775  	sha types.Hash
   776  
   777  	// State machine fields. Access to these fields
   778  	// is restricted to the Network.loop goroutine.
   779  	state             *nodeState
   780  	pingEcho          []byte           // hash of last ping sent by us
   781  	pingTopics        []Topic          // topic set sent by us in last ping
   782  	deferredQueries   []*findnodeQuery // queries that can't be sent yet
   783  	pendingNeighbours *findnodeQuery   // current query, waiting for reply
   784  	queryTimeouts     int
   785  }
   786  
   787  func (n *nodeNetGuts) deferQuery(q *findnodeQuery) {
   788  	n.deferredQueries = append(n.deferredQueries, q)
   789  }
   790  
   791  func (n *nodeNetGuts) startNextQuery(net *Network) {
   792  	if len(n.deferredQueries) == 0 {
   793  		return
   794  	}
   795  	nextq := n.deferredQueries[0]
   796  	if nextq.start(net) {
   797  		n.deferredQueries = append(n.deferredQueries[:0], n.deferredQueries[1:]...)
   798  	}
   799  }
   800  
   801  func (q *findnodeQuery) start(net *Network) bool {
   802  	// Satisfy queries against the local node directly.
   803  	if q.remote == net.tab.self {
   804  		closest := net.tab.closest(ogcrypto2.Keccak256Hash(q.target.Bytes[:]), bucketSize)
   805  		q.reply <- closest.entries
   806  		return true
   807  	}
   808  	if q.remote.state.canQuery && q.remote.pendingNeighbours == nil {
   809  		net.conn.sendFindnodeHash(q.remote, q.target)
   810  		net.timedEvent(respTimeout, q.remote, neighboursTimeout)
   811  		q.remote.pendingNeighbours = q
   812  		return true
   813  	}
   814  	// If the node is not known yet, it won't accept queries.
   815  	// Initiate the transition to known.
   816  	// The request will be sent later when the node reaches known state.
   817  	if q.remote.state == unknown {
   818  		net.transition(q.remote, verifyinit)
   819  	}
   820  	return false
   821  }
   822  
   823  // Node Events (the input to the state machine).
   824  
   825  type nodeEvent uint
   826  
   827  //go:generate stringer -type=nodeEvent
   828  
   829  const (
   830  
   831  	// Packet type events.
   832  	// These correspond to packet types in the UDP protocol.
   833  	pingPacket = iota + 1
   834  	pongPacket
   835  	findnodePacket
   836  	neighborsPacket
   837  	findnodeHashPacket
   838  	topicRegisterPacket
   839  	topicQueryPacket
   840  	topicNodesPacket
   841  
   842  	// Non-packet events.
   843  	// Event values in this category are allocated outside
   844  	// the packet type range (packet types are encoded as a single byte).
   845  	pongTimeout nodeEvent = iota + 256
   846  	pingTimeout
   847  	neighboursTimeout
   848  )
   849  
   850  // Node State Machine.
   851  
   852  type nodeState struct {
   853  	name     string
   854  	handle   func(*Network, *Node, nodeEvent, *ingressPacket) (next *nodeState, err error)
   855  	enter    func(*Network, *Node)
   856  	canQuery bool
   857  }
   858  
   859  func (s *nodeState) String() string {
   860  	return s.name
   861  }
   862  
   863  var (
   864  	unknown          *nodeState
   865  	verifyinit       *nodeState
   866  	verifywait       *nodeState
   867  	remoteverifywait *nodeState
   868  	known            *nodeState
   869  	contested        *nodeState
   870  	unresponsive     *nodeState
   871  )
   872  
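// The node states below form a small verification state machine. In outline
// (derived from the handlers in init below):
//
//	unknown          --ping-->  verifywait
//	verifyinit       --ping-->  verifywait;  --pong--> remoteverifywait;  --pong timeout--> unknown
//	verifywait       --pong-->  known;       --pong timeout--> unknown
//	remoteverifywait --ping timeout--> known
//	known            (on a full bucket, the evicted candidate is moved to contested)
//	contested        --pong-->  known;       --pong timeout--> unresponsive
//	unresponsive     --ping or pong--> known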
   873  func init() {
   874  	unknown = &nodeState{
   875  		name: "unknown",
   876  		enter: func(net *Network, n *Node) {
   877  			net.tab.delete(n)
   878  			n.pingEcho = nil
   879  			// Abort active queries.
   880  			for _, q := range n.deferredQueries {
   881  				q.reply <- nil
   882  			}
   883  			n.deferredQueries = nil
   884  			if n.pendingNeighbours != nil {
   885  				n.pendingNeighbours.reply <- nil
   886  				n.pendingNeighbours = nil
   887  			}
   888  			n.queryTimeouts = 0
   889  		},
   890  		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
   891  			switch ev {
   892  			case pingPacket:
   893  				net.handlePing(n, pkt)
   894  				net.ping(n, pkt.remoteAddr)
   895  				return verifywait, nil
   896  			default:
   897  				return unknown, errInvalidEvent
   898  			}
   899  		},
   900  	}
   901  
   902  	verifyinit = &nodeState{
   903  		name: "verifyinit",
   904  		enter: func(net *Network, n *Node) {
   905  			net.ping(n, n.addr())
   906  		},
   907  		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
   908  			switch ev {
   909  			case pingPacket:
   910  				net.handlePing(n, pkt)
   911  				return verifywait, nil
   912  			case pongPacket:
   913  				err := net.handleKnownPong(n, pkt)
   914  				return remoteverifywait, err
   915  			case pongTimeout:
   916  				return unknown, nil
   917  			default:
   918  				return verifyinit, errInvalidEvent
   919  			}
   920  		},
   921  	}
   922  
   923  	verifywait = &nodeState{
   924  		name: "verifywait",
   925  		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
   926  			switch ev {
   927  			case pingPacket:
   928  				net.handlePing(n, pkt)
   929  				return verifywait, nil
   930  			case pongPacket:
   931  				err := net.handleKnownPong(n, pkt)
   932  				return known, err
   933  			case pongTimeout:
   934  				return unknown, nil
   935  			default:
   936  				return verifywait, errInvalidEvent
   937  			}
   938  		},
   939  	}
   940  
   941  	remoteverifywait = &nodeState{
   942  		name: "remoteverifywait",
   943  		enter: func(net *Network, n *Node) {
   944  			net.timedEvent(respTimeout, n, pingTimeout)
   945  		},
   946  		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
   947  			switch ev {
   948  			case pingPacket:
   949  				net.handlePing(n, pkt)
   950  				return remoteverifywait, nil
   951  			case pingTimeout:
   952  				return known, nil
   953  			default:
   954  				return remoteverifywait, errInvalidEvent
   955  			}
   956  		},
   957  	}
   958  
   959  	known = &nodeState{
   960  		name:     "known",
   961  		canQuery: true,
   962  		enter: func(net *Network, n *Node) {
   963  			n.queryTimeouts = 0
   964  			n.startNextQuery(net)
   965  			// Insert into the table and start revalidation of the last node
   966  			// in the bucket if it is full.
   967  			last := net.tab.add(n)
   968  			if last != nil && last.state == known {
   969  				// TODO: do this asynchronously
   970  				net.transition(last, contested)
   971  			}
   972  		},
   973  		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
   974  			switch ev {
   975  			case pingPacket:
   976  				net.handlePing(n, pkt)
   977  				return known, nil
   978  			case pongPacket:
   979  				err := net.handleKnownPong(n, pkt)
   980  				return known, err
   981  			default:
   982  				return net.handleQueryEvent(n, ev, pkt)
   983  			}
   984  		},
   985  	}
   986  
   987  	contested = &nodeState{
   988  		name:     "contested",
   989  		canQuery: true,
   990  		enter: func(net *Network, n *Node) {
   991  			net.ping(n, n.addr())
   992  		},
   993  		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
   994  			switch ev {
   995  			case pongPacket:
   996  				// Node is still alive.
   997  				err := net.handleKnownPong(n, pkt)
   998  				return known, err
   999  			case pongTimeout:
  1000  				net.tab.deleteReplace(n)
  1001  				return unresponsive, nil
  1002  			case pingPacket:
  1003  				net.handlePing(n, pkt)
  1004  				return contested, nil
  1005  			default:
  1006  				return net.handleQueryEvent(n, ev, pkt)
  1007  			}
  1008  		},
  1009  	}
  1010  
  1011  	unresponsive = &nodeState{
  1012  		name:     "unresponsive",
  1013  		canQuery: true,
  1014  		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
  1015  			switch ev {
  1016  			case pingPacket:
  1017  				net.handlePing(n, pkt)
  1018  				return known, nil
  1019  			case pongPacket:
  1020  				err := net.handleKnownPong(n, pkt)
  1021  				return known, err
  1022  			default:
  1023  				return net.handleQueryEvent(n, ev, pkt)
  1024  			}
  1025  		},
  1026  	}
  1027  }
  1028  
  1029  // handle processes packets sent by n and events related to n.
  1030  func (net *Network) handle(n *Node, ev nodeEvent, pkt *ingressPacket) error {
  1031  	//fmt.Println("handle", n.addr().String(), n.state, ev)
  1032  	if pkt != nil {
  1033  		if err := net.checkPacket(n, ev, pkt); err != nil {
  1034  			//fmt.Println("check err:", err)
  1035  			return err
  1036  		}
  1037  		// Start the background expiration goroutine after the first
  1038  		// successful communication. Subsequent calls have no effect if it
  1039  		// is already running. We do this here instead of somewhere else
  1040  		// so that the search for seed nodes also considers older nodes
  1041  		// that would otherwise be removed by the expirer.
  1042  		if net.db != nil {
  1043  			net.db.ensureExpirer()
  1044  		}
  1045  	}
  1046  	if n.state == nil {
  1047  		n.state = unknown // defensively treat nodes without an assigned state as unknown
  1048  	}
  1049  	next, err := n.state.handle(net, n, ev, pkt)
  1050  	net.transition(n, next)
  1051  	//fmt.Println("new state:", n.state)
  1052  	return err
  1053  }
  1054  
  1055  func (net *Network) checkPacket(n *Node, ev nodeEvent, pkt *ingressPacket) error {
  1056  	// Replay prevention checks.
  1057  	switch ev {
  1058  	case pingPacket, findnodeHashPacket, neighborsPacket:
  1059  		// TODO: check date is > last date seen
  1060  		// TODO: check ping version
  1061  	case pongPacket:
  1062  		if !bytes.Equal(pkt.data.(*Pong).ReplyTok, n.pingEcho) {
  1063  			// fmt.Println("pong reply token mismatch")
  1064  			return fmt.Errorf("pong reply token mismatch")
  1065  		}
  1066  		n.pingEcho = nil
  1067  	}
  1068  	// Address validation.
  1069  	// TODO: Ideally we would do the following:
  1070  	//  - reject all packets with wrong address except ping.
  1071  	//  - for ping with new address, transition to verifywait but keep the
  1072  	//    previous node (with old address) around. if the new one reaches known,
  1073  	//    swap it out.
  1074  	return nil
  1075  }
  1076  
  1077  func (net *Network) transition(n *Node, next *nodeState) {
  1078  	if n.state != next {
  1079  		n.state = next
  1080  		if next.enter != nil {
  1081  			next.enter(net, n)
  1082  		}
  1083  	}
  1084  
  1085  	// TODO: persist/unpersist node
  1086  }
  1087  
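// timedEvent delivers ev for n to the main loop after d unless it is cancelled
// first via abortTimedEvent. The timer is tracked in timeoutTimers so that a
// firing which races with its cancellation is recognised as stale in the
// <-net.timeout case of the loop.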
  1088  func (net *Network) timedEvent(d time.Duration, n *Node, ev nodeEvent) {
  1089  	timeout := timeoutEvent{ev, n}
  1090  	net.timeoutTimers[timeout] = time.AfterFunc(d, func() {
  1091  		select {
  1092  		case net.timeout <- timeout:
  1093  		case <-net.closed:
  1094  		}
  1095  	})
  1096  }
  1097  
  1098  func (net *Network) abortTimedEvent(n *Node, ev nodeEvent) {
  1099  	timer := net.timeoutTimers[timeoutEvent{ev, n}]
  1100  	if timer != nil {
  1101  		timer.Stop()
  1102  		delete(net.timeoutTimers, timeoutEvent{ev, n})
  1103  	}
  1104  }
  1105  
  1106  func (net *Network) ping(n *Node, addr *net.UDPAddr) {
  1107  	//fmt.Println("ping", n.addr().String(), n.ID.String(), n.sha.Hex())
  1108  	if n.pingEcho != nil || n.ID == net.tab.self.ID {
  1109  		//fmt.Println(" not sent")
  1110  		return
  1111  	}
  1112  	log.Trace("Pinging remote node", "node", n.ID)
  1113  	n.pingTopics = net.ticketStore.regTopicSet()
  1114  	n.pingEcho = net.conn.sendPing(n, addr, n.pingTopics)
  1115  	net.timedEvent(respTimeout, n, pongTimeout)
  1116  }
  1117  
  1118  func (net *Network) handlePing(n *Node, pkt *ingressPacket) {
  1119  	log.Trace("Handling remote ping", "node", n.ID)
  1120  	ping := pkt.data.(*Ping)
  1121  	n.TCP = ping.From.TCP
  1122  	var topics []Topic
  1123  	for _, v := range ping.Topics {
  1124  		topics = append(topics, Topic(v))
  1125  	}
  1126  	t := net.topictab.getTicket(n, topics)
  1127  
  1128  	pong := &Pong{
  1129  		To:         makeEndpoint(n.addr(), n.TCP), // TODO: maybe use known TCP port from DB
  1130  		ReplyTok:   pkt.hash,
  1131  		Expiration: uint64(time.Now().Add(expiration).Unix()),
  1132  	}
  1133  	ticketToPong(t, pong)
  1134  	data, _ := pong.MarshalMsg(nil)
  1135  	net.conn.send(n, pongPacket, data)
  1136  }
  1137  
  1138  func (net *Network) handleKnownPong(n *Node, pkt *ingressPacket) error {
  1139  	log.Trace("Handling known pong", "node", n.ID)
  1140  	net.abortTimedEvent(n, pongTimeout)
  1141  	now := mclock.Now()
  1142  	ticket, err := pongToTicket(now, n.pingTopics, n, pkt)
  1143  	if err == nil {
  1144  		// fmt.Printf("(%x) ticket: %+v\n", net.tab.self.ID[:8], pkt.data)
  1145  		net.ticketStore.addTicket(now, pkt.data.(*Pong).ReplyTok, ticket)
  1146  	} else {
  1147  		log.Trace("Failed to convert pong to ticket", "err", err)
  1148  	}
  1149  	n.pingEcho = nil
  1150  	n.pingTopics = nil
  1151  	return err
  1152  }
  1153  
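// handleQueryEvent covers the packet types and timeouts that are handled the
// same way in every queryable state; the known, contested and unresponsive
// handlers fall through to it for events they do not treat specially.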
  1154  func (net *Network) handleQueryEvent(n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
  1155  	switch ev {
  1156  	case findnodePacket:
  1157  		target := ogcrypto2.Keccak256Hash(pkt.data.(*Findnode).Target[:])
  1158  		results := net.tab.closest(target, bucketSize).entries
  1159  		net.conn.sendNeighbours(n, results)
  1160  		return n.state, nil
  1161  	case neighborsPacket:
  1162  		err := net.handleNeighboursPacket(n, pkt)
  1163  		return n.state, err
  1164  	case neighboursTimeout:
  1165  		if n.pendingNeighbours != nil {
  1166  			n.pendingNeighbours.reply <- nil
  1167  			n.pendingNeighbours = nil
  1168  		}
  1169  		n.queryTimeouts++
  1170  		if n.queryTimeouts > maxFindnodeFailures && n.state == known {
  1171  			return contested, errors.New("too many timeouts")
  1172  		}
  1173  		return n.state, nil
  1174  
  1175  	// v5
  1176  
  1177  	case findnodeHashPacket:
  1178  		var tpHash types.Hash
  1179  		tpHash.Bytes = pkt.data.(*FindnodeHash).Target
  1180  		results := net.tab.closest(tpHash, bucketSize).entries
  1181  		net.conn.sendNeighbours(n, results)
  1182  		return n.state, nil
  1183  	case topicRegisterPacket:
  1184  		//fmt.Println("got topicRegisterPacket")
  1185  		regdata := pkt.data.(*TopicRegister)
  1186  		pong, err := net.checkTopicRegister(regdata)
  1187  		if err != nil {
  1188  			//fmt.Println(err)
  1189  			return n.state, fmt.Errorf("bad waiting ticket: %v", err)
  1190  		}
  1191  		var topics []Topic
  1192  		for _, v := range regdata.Topics {
  1193  			topics = append(topics, Topic(v))
  1194  		}
  1195  		net.topictab.useTicket(n, pong.TicketSerial, topics, int(regdata.Idx), pong.Expiration, pong.WaitPeriods)
  1196  		return n.state, nil
  1197  	case topicQueryPacket:
  1198  		// TODO: handle expiration
  1199  		topicString := pkt.data.(*TopicQuery).Topic
  1200  		topic := Topic(topicString)
  1201  		results := net.topictab.getEntries(topic)
  1202  		if _, ok := net.ticketStore.tickets[topic]; ok {
  1203  			results = append(results, net.tab.self) // we're not registering in our own table but if we're advertising, return ourselves too
  1204  		}
  1205  		if len(results) > 10 {
  1206  			results = results[:10]
  1207  		}
  1208  		var hash types.Hash
  1209  		copy(hash.Bytes[:], pkt.hash)
  1210  		net.conn.sendTopicNodes(n, hash, results)
  1211  		return n.state, nil
  1212  	case topicNodesPacket:
  1213  		p := pkt.data.(*TopicNodes)
  1214  		tpHash := types.Hash{}
  1215  		tpHash.Bytes = p.Echo
  1216  		if net.ticketStore.gotTopicNodes(n, tpHash, p.Nodes) {
  1217  			n.queryTimeouts++
  1218  			if n.queryTimeouts > maxFindnodeFailures && n.state == known {
  1219  				return contested, errors.New("too many timeouts")
  1220  			}
  1221  		}
  1222  		return n.state, nil
  1223  
  1224  	default:
  1225  		return n.state, errInvalidEvent
  1226  	}
  1227  }
  1228  
  1229  func (net *Network) checkTopicRegister(data *TopicRegister) (*Pong, error) {
  1230  	var pongpkt ingressPacket
  1231  	if err := decodePacket(data.Pong, &pongpkt); err != nil {
  1232  		return nil, err
  1233  	}
  1234  	if pongpkt.ev != pongPacket {
  1235  		return nil, errors.New("is not pong packet")
  1236  	}
  1237  	if pongpkt.remoteID != net.tab.self.ID {
  1238  		return nil, errors.New("not signed by us")
  1239  	}
  1240  	// check that we previously authorised all topics
  1241  	// that the other side is trying to register.
  1242  	t := stringsToTopics(data.Topics)
  1243  	btHash := CommonHash(rlpHash(&t).Bytes)
  1244  	if btHash != pongpkt.data.(*Pong).TopicHash {
  1245  		return nil, errors.New("topic hash mismatch")
  1246  	}
  1247  	if int(data.Idx) < 0 || int(data.Idx) >= len(data.Topics) {
  1248  		return nil, errors.New("topic index out of range")
  1249  	}
  1250  	return pongpkt.data.(*Pong), nil
  1251  }
  1252  
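// rlpHash keeps its name from the upstream go-ethereum code, but in this fork
// it hashes the msgp encoding of x rather than an RLP encoding (note the
// commented-out rlp.Encode call below).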
  1253  func rlpHash(x msg.MsgpMember) (h types.Hash) {
  1254  	hw := sha3.NewLegacyKeccak256()
  1255  	//rlp.Encode(hw, x)
  1256  	d, _ := x.MarshalMsg(nil)
  1257  	hw.Write(d)
  1258  	hw.Sum(h.Bytes[:0])
  1259  	return h
  1260  }
  1261  
  1262  func (net *Network) handleNeighboursPacket(n *Node, pkt *ingressPacket) error {
  1263  	if n.pendingNeighbours == nil {
  1264  		return errNoQuery
  1265  	}
  1266  	net.abortTimedEvent(n, neighboursTimeout)
  1267  
  1268  	req := pkt.data.(*Neighbors)
  1269  	nodes := make([]*Node, len(req.Nodes))
  1270  	for i, rn := range req.Nodes {
  1271  		nn, err := net.internNodeFromNeighbours(pkt.remoteAddr, rn)
  1272  		if err != nil {
  1273  			log.Debug(fmt.Sprintf("invalid neighbour (%v) from %x@%v: %v", rn.IP, n.ID[:8], pkt.remoteAddr, err))
  1274  			continue
  1275  		}
  1276  		nodes[i] = nn
  1277  		// Start validation of query results immediately.
  1278  		// This fills the table quickly.
  1279  		// TODO: generates way too many packets, maybe do it via queue.
  1280  		if nn.state == unknown {
  1281  			net.transition(nn, verifyinit)
  1282  		}
  1283  	}
  1284  	// TODO: don't ignore second packet
  1285  	n.pendingNeighbours.reply <- nodes
  1286  	n.pendingNeighbours = nil
  1287  	// Now that this query is done, start the next one.
  1288  	n.startNextQuery(net)
  1289  	return nil
  1290  }