// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package discv5

import (
	"bytes"
	"crypto/ecdsa"
	"errors"
	"fmt"
	"net"
	"time"

	"github.com/gochain-io/gochain/common"
	"github.com/gochain-io/gochain/common/mclock"
	"github.com/gochain-io/gochain/crypto"
	"github.com/gochain-io/gochain/crypto/sha3"
	"github.com/gochain-io/gochain/log"
	"github.com/gochain-io/gochain/p2p/netutil"
	"github.com/gochain-io/gochain/rlp"
)

// Errors returned by the node state machine and packet validation.
var (
	errInvalidEvent = errors.New("invalid in current state")
	errNoQuery      = errors.New("no pending query")
	errWrongAddress = errors.New("unknown sender address")
)

// Tuning parameters for table maintenance and seeding.
const (
	autoRefreshInterval   = 1 * time.Hour   // period of the full table refresh
	bucketRefreshInterval = 1 * time.Minute // period of the single random-bucket refresh
	seedCount             = 30              // max seed nodes pulled from the database per refresh
	seedMaxAge            = 5 * 24 * time.Hour
	lowPort               = 1024 // remote nodes advertising UDP ports <= lowPort are rejected
)

// testTopic is only referenced from (commented-out) debug dumps in loop.
const testTopic = "foo"

const (
	// printTestImgLogs enables the "*R"/"*MR"/"*W" radius/wait-period
	// log lines used to produce test images. Off in production.
	printTestImgLogs = false
)

// Network manages the table and all protocol interaction.
type Network struct {
	db          *nodeDB // database of known nodes
	conn        transport
	netrestrict *netutil.Netlist

	closed           chan struct{}          // closed when loop is done
	closeReq         chan struct{}          // 'request to close'
	refreshReq       chan []*Node           // lookups ask for refresh on this channel
	refreshResp      chan (<-chan struct{}) // ...and get the channel to block on from this one
	read             chan ingressPacket     // ingress packets arrive here
	timeout          chan timeoutEvent      // expired timedEvent timers fire into here
	queryReq         chan *findnodeQuery    // lookups submit findnode queries on this channel
	tableOpReq       chan func()            // arbitrary table operations, run on the loop goroutine
	tableOpResp      chan struct{}          // signals completion of a tableOpReq function
	topicRegisterReq chan topicRegisterReq
	topicSearchReq   chan topicSearchReq

	// State of the main loop.
	// These fields must only be touched from the loop goroutine.
	tab           *Table
	topictab      *topicTable
	ticketStore   *ticketStore
	nursery       []*Node
	nodes         map[NodeID]*Node // tracks active nodes with state != known
	timeoutTimers map[timeoutEvent]*time.Timer

	// Revalidation queues.
	// Nodes put on these queues will be pinged eventually.
	slowRevalidateQueue []*Node
	fastRevalidateQueue []*Node

	// Buffers for state transition.
	sendBuf []*ingressPacket
}

// transport is implemented by the UDP transport.
// it is an interface so we can test without opening lots of UDP
// sockets and without generating a private key.
type transport interface {
	sendPing(remote *Node, remoteAddr *net.UDPAddr, topics []Topic) (hash []byte)
	sendNeighbours(remote *Node, nodes []*Node)
	sendFindnodeHash(remote *Node, target common.Hash)
	sendTopicRegister(remote *Node, topics []Topic, topicIdx int, pong []byte)
	sendTopicNodes(remote *Node, queryHash common.Hash, nodes []*Node)

	send(remote *Node, ptype nodeEvent, p interface{}) (hash []byte)

	localAddr() *net.UDPAddr
	Close()
}

// findnodeQuery is a pending findnode request submitted through queryReq.
type findnodeQuery struct {
	remote   *Node
	target   common.Hash
	reply    chan<- []*Node
	nresults int // counter for received nodes
}

// topicRegisterReq asks the loop to start (add=true) or stop (add=false)
// advertising a topic.
type topicRegisterReq struct {
	add   bool
	topic Topic
}

// topicSearchReq configures a topic search; a zero delay cancels it.
type topicSearchReq struct {
	topic  Topic
	found  chan<- *Node
	lookup chan<- bool
	delay  time.Duration
}

// topicSearchResult carries the nodes found by one search lookup.
type topicSearchResult struct {
	target lookupInfo
	nodes  []*Node
}

// timeoutEvent identifies a pending per-node timeout; it doubles as the
// key of Network.timeoutTimers.
type timeoutEvent struct {
	ev   nodeEvent
	node *Node
}

// newNetwork creates the Network and starts its main loop goroutine.
// The special dbPath "<no database>" runs without a persistent node database.
func newNetwork(conn transport, ourPubkey ecdsa.PublicKey, dbPath string, netrestrict *netutil.Netlist) (*Network, error) {
	ourID := PubkeyID(&ourPubkey)

	var db *nodeDB
	if dbPath != "<no database>" {
		var err error
		if db, err = newNodeDB(dbPath, Version, ourID); err != nil {
			return nil, err
		}
	}

	tab := newTable(ourID, conn.localAddr())
	net := &Network{
		db:               db,
		conn:             conn,
		netrestrict:      netrestrict,
		tab:              tab,
		topictab:         newTopicTable(db, tab.self),
		ticketStore:      newTicketStore(),
		refreshReq:       make(chan []*Node),
		refreshResp:      make(chan (<-chan struct{})),
		closed:           make(chan struct{}),
		closeReq:         make(chan struct{}),
		read:             make(chan ingressPacket, 100),
		timeout:          make(chan timeoutEvent),
		timeoutTimers:    make(map[timeoutEvent]*time.Timer),
		tableOpReq:       make(chan func()),
		tableOpResp:      make(chan struct{}),
		queryReq:         make(chan *findnodeQuery),
		topicRegisterReq: make(chan topicRegisterReq),
		topicSearchReq:   make(chan topicSearchReq),
		nodes:            make(map[NodeID]*Node),
	}
	go net.loop()
	return net, nil
}

// Close terminates the network listener and flushes the node database.
func (net *Network) Close() {
	net.conn.Close()
	select {
	case <-net.closed:
		// loop already exited.
	case net.closeReq <- struct{}{}:
		<-net.closed
	}
}

// Self returns the local node.
// The returned node should not be modified by the caller.
func (net *Network) Self() *Node {
	return net.tab.self
}

// ReadRandomNodes fills the given slice with random nodes from the
// table. It will not write the same node more than once. The nodes in
// the slice are copies and can be modified by the caller.
func (net *Network) ReadRandomNodes(buf []*Node) (n int) {
	net.reqTableOp(func() { n = net.tab.readRandomNodes(buf) })
	return n
}

// SetFallbackNodes sets the initial points of contact. These nodes
// are used to connect to the network if the table is empty and there
// are no known nodes in the database.
func (net *Network) SetFallbackNodes(nodes []*Node) error {
	nursery := make([]*Node, 0, len(nodes))
	for _, n := range nodes {
		if err := n.validateComplete(); err != nil {
			return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err)
		}
		// Recompute cpy.sha because the node might not have been
		// created by NewNode or ParseNode.
		cpy := *n
		cpy.sha = crypto.Keccak256Hash(n.ID[:])
		nursery = append(nursery, &cpy)
	}
	net.reqRefresh(nursery)
	return nil
}

// Resolve searches for a specific node with the given ID.
// It returns nil if the node could not be found.
func (net *Network) Resolve(targetID NodeID) *Node {
	result := net.lookup(crypto.Keccak256Hash(targetID[:]), true)
	for _, n := range result {
		if n.ID == targetID {
			return n
		}
	}
	return nil
}

// Lookup performs a network search for nodes close
// to the given target. It approaches the target by querying
// nodes that are closer to it on each iteration.
// The given target does not need to be an actual node
// identifier.
//
// The local node may be included in the result.
func (net *Network) Lookup(targetID NodeID) []*Node {
	return net.lookup(crypto.Keccak256Hash(targetID[:]), false)
}

// lookup runs the iterative Kademlia lookup for target. With stopOnMatch
// set, it returns as soon as a node whose sha equals target is seen.
func (net *Network) lookup(target common.Hash, stopOnMatch bool) []*Node {
	var (
		asked          = make(map[NodeID]bool)
		seen           = make(map[NodeID]bool)
		reply          = make(chan []*Node, alpha)
		result         = nodesByDistance{target: target}
		pendingQueries = 0
	)
	// Get initial answers from the local node.
	result.push(net.tab.self, bucketSize)
	for {
		// Ask the α closest nodes that we haven't asked yet.
		for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
			n := result.entries[i]
			if !asked[n.ID] {
				asked[n.ID] = true
				pendingQueries++
				net.reqQueryFindnode(n, target, reply)
			}
		}
		if pendingQueries == 0 {
			// We have asked all closest nodes, stop the search.
			break
		}
		// Wait for the next reply.
		select {
		case nodes := <-reply:
			for _, n := range nodes {
				if n != nil && !seen[n.ID] {
					seen[n.ID] = true
					result.push(n, bucketSize)
					if stopOnMatch && n.sha == target {
						return result.entries
					}
				}
			}
			pendingQueries--
		case <-time.After(respTimeout):
			// forget all pending requests, start new ones
			pendingQueries = 0
			reply = make(chan []*Node, alpha)
		}
	}
	return result.entries
}

// RegisterTopic advertises the given topic until stop is closed
// (or the network shuts down). It blocks for that whole duration.
func (net *Network) RegisterTopic(topic Topic, stop <-chan struct{}) {
	select {
	case net.topicRegisterReq <- topicRegisterReq{true, topic}:
	case <-net.closed:
		return
	}
	select {
	case <-net.closed:
	case <-stop:
		select {
		case net.topicRegisterReq <- topicRegisterReq{false, topic}:
		case <-net.closed:
		}
	}
}

// SearchTopic searches for nodes advertising topic. The search period is
// (re)configured through setPeriod; closing setPeriod or sending a zero
// duration stops the search. Found nodes are delivered on found.
func (net *Network) SearchTopic(topic Topic, setPeriod <-chan time.Duration, found chan<- *Node, lookup chan<- bool) {
	for {
		select {
		case <-net.closed:
			return
		case delay, ok := <-setPeriod:
			select {
			case net.topicSearchReq <- topicSearchReq{topic: topic, found: found, lookup: lookup, delay: delay}:
			case <-net.closed:
				return
			}
			if !ok {
				return
			}
		}
	}
}

// reqRefresh asks the loop to start a refresh and returns a channel that
// is closed when the refresh is done. If the network is already closed,
// the closed channel itself is returned so callers unblock immediately.
func (net *Network) reqRefresh(nursery []*Node) <-chan struct{} {
	select {
	case net.refreshReq <- nursery:
		return <-net.refreshResp
	case <-net.closed:
		return net.closed
	}
}

// reqQueryFindnode submits a findnode query for the loop to dispatch.
// It reports whether the query was accepted (false after shutdown).
func (net *Network) reqQueryFindnode(n *Node, target common.Hash, reply chan []*Node) bool {
	q := &findnodeQuery{remote: n, target: target, reply: reply}
	select {
	case net.queryReq <- q:
		return true
	case <-net.closed:
		return false
	}
}

// reqReadPacket hands an ingress packet to the loop; drops it on shutdown.
func (net *Network) reqReadPacket(pkt ingressPacket) {
	select {
	case net.read <- pkt:
	case <-net.closed:
	}
}

// reqTableOp runs f on the loop goroutine and waits for it to finish.
// It reports whether f was actually called (false after shutdown).
func (net *Network) reqTableOp(f func()) (called bool) {
	select {
	case net.tableOpReq <- f:
		<-net.tableOpResp
		return true
	case <-net.closed:
		return false
	}
}

// TODO: external address handling.

// topicSearchInfo is the loop's per-topic search bookkeeping.
type topicSearchInfo struct {
	lookupChn chan<- bool
	period    time.Duration
}

// maxSearchCount bounds the number of concurrent search lookups.
const maxSearchCount = 5

// loop is the main event loop. All table, topic-table and node state is
// owned by this goroutine; other goroutines interact with it only through
// the Network channels.
func (net *Network) loop() {
	var (
		refreshTimer       = time.NewTicker(autoRefreshInterval)
		bucketRefreshTimer = time.NewTimer(bucketRefreshInterval)
		refreshDone        chan struct{} // closed when the 'refresh' lookup has ended
	)

	// Tracking the next ticket to register.
	var (
		nextTicket        *ticketRef
		nextRegisterTimer *time.Timer
		nextRegisterTime  <-chan time.Time
	)
	defer func() {
		if nextRegisterTimer != nil {
			nextRegisterTimer.Stop()
		}
	}()
	// resetNextTicket re-arms the registration timer whenever the ticket
	// store's choice of next ticket changes.
	resetNextTicket := func() {
		ticket, timeout := net.ticketStore.nextFilteredTicket()
		if nextTicket != ticket {
			nextTicket = ticket
			if nextRegisterTimer != nil {
				nextRegisterTimer.Stop()
				nextRegisterTime = nil
			}
			if ticket != nil {
				nextRegisterTimer = time.NewTimer(timeout)
				nextRegisterTime = nextRegisterTimer.C
			}
		}
	}

	// Tracking registration and search lookups.
	var (
		topicRegisterLookupTarget lookupInfo
		topicRegisterLookupDone   chan []*Node
		topicRegisterLookupTick   = time.NewTimer(0)
		searchReqWhenRefreshDone  []topicSearchReq
		searchInfo                = make(map[Topic]topicSearchInfo)
		activeSearchCount         int
	)
	topicSearchLookupDone := make(chan topicSearchResult, 100)
	topicSearch := make(chan Topic, 100)
	// Drain the timer's initial tick so the first Reset behaves predictably.
	<-topicRegisterLookupTick.C

	statsDump := time.NewTicker(10 * time.Second)

loop:
	for {
		resetNextTicket()

		select {
		case <-net.closeReq:
			log.Trace("<-net.closeReq")
			break loop

		// Ingress packet handling.
		case pkt := <-net.read:
			//fmt.Println("read", pkt.ev)
			log.Trace("<-net.read")
			n := net.internNode(&pkt)
			prestate := n.state
			status := "ok"
			if err := net.handle(n, pkt.ev, &pkt); err != nil {
				status = err.Error()
			}
			if log.Tracing() {
				log.Trace("", "msg", log.Lazy{Fn: func() string {
					return fmt.Sprintf("<<< (%d) %v from %x@%v: %v -> %v (%v)",
						net.tab.count, pkt.ev, pkt.remoteID[:8], pkt.remoteAddr, prestate, n.state, status)
				}})
			}
			// TODO: persist state if n.state goes >= known, delete if it goes <= known

		// State transition timeouts.
		case timeout := <-net.timeout:
			log.Trace("<-net.timeout")
			if net.timeoutTimers[timeout] == nil {
				// Stale timer (was aborted).
				continue
			}
			delete(net.timeoutTimers, timeout)
			prestate := timeout.node.state
			status := "ok"
			if err := net.handle(timeout.node, timeout.ev, nil); err != nil {
				status = err.Error()
			}
			log.Trace("", "msg", log.Lazy{Fn: func() string {
				return fmt.Sprintf("--- (%d) %v for %x@%v: %v -> %v (%v)",
					net.tab.count, timeout.ev, timeout.node.ID[:8], timeout.node.addr(), prestate, timeout.node.state, status)
			}})

		// Querying.
		case q := <-net.queryReq:
			log.Trace("<-net.queryReq")
			if !q.start(net) {
				// The remote can't take queries yet; retry once it is verified.
				q.remote.deferQuery(q)
			}

		// Interacting with the table.
		case f := <-net.tableOpReq:
			log.Trace("<-net.tableOpReq")
			f()
			net.tableOpResp <- struct{}{}

		// Topic registration stuff.
		case req := <-net.topicRegisterReq:
			log.Trace("<-net.topicRegisterReq")
			if !req.add {
				net.ticketStore.removeRegisterTopic(req.topic)
				continue
			}
			net.ticketStore.addTopic(req.topic, true)
			// If we're currently waiting idle (nothing to look up), give the ticket store a
			// chance to start it sooner. This should speed up convergence of the radius
			// determination for new topics.
			// if topicRegisterLookupDone == nil {
			if topicRegisterLookupTarget.target == (common.Hash{}) {
				log.Trace("topicRegisterLookupTarget == null")
				if topicRegisterLookupTick.Stop() {
					<-topicRegisterLookupTick.C
				}
				target, delay := net.ticketStore.nextRegisterLookup()
				topicRegisterLookupTarget = target
				topicRegisterLookupTick.Reset(delay)
			}

		case nodes := <-topicRegisterLookupDone:
			log.Trace("<-topicRegisterLookupDone")
			net.ticketStore.registerLookupDone(topicRegisterLookupTarget, nodes, func(n *Node) []byte {
				net.ping(n, n.addr())
				return n.pingEcho
			})
			target, delay := net.ticketStore.nextRegisterLookup()
			topicRegisterLookupTarget = target
			topicRegisterLookupTick.Reset(delay)
			topicRegisterLookupDone = nil

		case <-topicRegisterLookupTick.C:
			log.Trace("<-topicRegisterLookupTick")
			if (topicRegisterLookupTarget.target == common.Hash{}) {
				// No target yet; ask the ticket store again later.
				target, delay := net.ticketStore.nextRegisterLookup()
				topicRegisterLookupTarget = target
				topicRegisterLookupTick.Reset(delay)
				topicRegisterLookupDone = nil
			} else {
				topicRegisterLookupDone = make(chan []*Node)
				target := topicRegisterLookupTarget.target
				go func() { topicRegisterLookupDone <- net.lookup(target, false) }()
			}

		case <-nextRegisterTime:
			log.Trace("<-nextRegisterTime")
			net.ticketStore.ticketRegistered(*nextTicket)
			//fmt.Println("sendTopicRegister", nextTicket.t.node.addr().String(), nextTicket.t.topics, nextTicket.idx, nextTicket.t.pong)
			net.conn.sendTopicRegister(nextTicket.t.node, nextTicket.t.topics, nextTicket.idx, nextTicket.t.pong)

		case req := <-net.topicSearchReq:
			if refreshDone == nil {
				log.Trace("<-net.topicSearchReq")
				info, ok := searchInfo[req.topic]
				if ok {
					if req.delay == time.Duration(0) {
						// Zero delay cancels an existing search.
						delete(searchInfo, req.topic)
						net.ticketStore.removeSearchTopic(req.topic)
					} else {
						info.period = req.delay
						searchInfo[req.topic] = info
					}
					continue
				}
				if req.delay != time.Duration(0) {
					var info topicSearchInfo
					info.period = req.delay
					info.lookupChn = req.lookup
					searchInfo[req.topic] = info
					net.ticketStore.addSearchTopic(req.topic, req.found)
					topicSearch <- req.topic
				}
			} else {
				// A refresh is in progress; replay the request afterwards.
				searchReqWhenRefreshDone = append(searchReqWhenRefreshDone, req)
			}

		case topic := <-topicSearch:
			if activeSearchCount < maxSearchCount {
				activeSearchCount++
				target := net.ticketStore.nextSearchLookup(topic)
				go func() {
					nodes := net.lookup(target.target, false)
					topicSearchLookupDone <- topicSearchResult{target: target, nodes: nodes}
				}()
			}
			period := searchInfo[topic].period
			if period != time.Duration(0) {
				// Re-queue the topic after its search period.
				go func() {
					time.Sleep(period)
					topicSearch <- topic
				}()
			}

		case res := <-topicSearchLookupDone:
			activeSearchCount--
			if lookupChn := searchInfo[res.target.topic].lookupChn; lookupChn != nil {
				lookupChn <- net.ticketStore.radius[res.target.topic].converged
			}
			net.ticketStore.searchLookupDone(res.target, res.nodes, func(n *Node, topic Topic) []byte {
				if n.state != nil && n.state.canQuery {
					return net.conn.send(n, topicQueryPacket, topicQuery{Topic: topic}) // TODO: set expiration
				} else {
					if n.state == unknown {
						net.ping(n, n.addr())
					}
					return nil
				}
			})

		case <-statsDump.C:
			log.Trace("<-statsDump.C")
			/*r, ok := net.ticketStore.radius[testTopic]
			if !ok {
				fmt.Printf("(%x) no radius @ %v\n", net.tab.self.ID[:8], time.Now())
			} else {
				topics := len(net.ticketStore.tickets)
				tickets := len(net.ticketStore.nodes)
				rad := r.radius / (maxRadius/10000+1)
				fmt.Printf("(%x) topics:%d radius:%d tickets:%d @ %v\n", net.tab.self.ID[:8], topics, rad, tickets, time.Now())
			}*/

			tm := mclock.Now()
			for topic, r := range net.ticketStore.radius {
				if printTestImgLogs {
					rad := r.radius / (maxRadius/1000000 + 1)
					minrad := r.minRadius / (maxRadius/1000000 + 1)
					fmt.Printf("*R %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha[:8], rad)
					fmt.Printf("*MR %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha[:8], minrad)
				}
			}
			for topic, t := range net.topictab.topics {
				wp := t.wcl.nextWaitPeriod(tm)
				if printTestImgLogs {
					fmt.Printf("*W %d %v %016x %d\n", tm/1000000, topic, net.tab.self.sha[:8], wp/1000000)
				}
			}

		// Periodic / lookup-initiated bucket refresh.
		case <-refreshTimer.C:
			log.Trace("<-refreshTimer.C")
			// TODO: ideally we would start the refresh timer after
			// fallback nodes have been set for the first time.
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				net.refresh(refreshDone)
			}
		case <-bucketRefreshTimer.C:
			target := net.tab.chooseBucketRefreshTarget()
			go func() {
				net.lookup(target, false)
				bucketRefreshTimer.Reset(bucketRefreshInterval)
			}()
		case newNursery := <-net.refreshReq:
			log.Trace("<-net.refreshReq")
			if newNursery != nil {
				net.nursery = newNursery
			}
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				net.refresh(refreshDone)
			}
			net.refreshResp <- refreshDone
		case <-refreshDone:
			log.Trace("<-net.refreshDone", "table size", net.tab.count)
			if net.tab.count != 0 {
				refreshDone = nil
				list := searchReqWhenRefreshDone
				searchReqWhenRefreshDone = nil
				go func() {
					for _, req := range list {
						net.topicSearchReq <- req
					}
				}()
			} else {
				// Table still empty after refresh; keep trying.
				refreshDone = make(chan struct{})
				net.refresh(refreshDone)
			}
		}
	}
	log.Trace("loop stopped")

	log.Debug(fmt.Sprintf("shutting down"))
	if net.conn != nil {
		net.conn.Close()
	}
	if refreshDone != nil {
		// TODO: wait for pending refresh.
		//<-refreshResults
	}
	// Cancel all pending timeouts.
	for _, timer := range net.timeoutTimers {
		timer.Stop()
	}
	if net.db != nil {
		net.db.close()
	}
	close(net.closed)
}

// Everything below runs on the Network.loop goroutine
// and can modify Node, Table and Network at any time without locking.

// refresh seeds the table (from the database or the nursery) and starts a
// self lookup. done is closed when the refresh has finished, including the
// no-seeds case where nothing could be done.
func (net *Network) refresh(done chan<- struct{}) {
	var seeds []*Node
	if net.db != nil {
		seeds = net.db.querySeeds(seedCount, seedMaxAge)
	}
	if len(seeds) == 0 {
		seeds = net.nursery
	}
	if len(seeds) == 0 {
		log.Trace("no seed nodes found")
		close(done)
		return
	}
	for _, n := range seeds {
		log.Debug("", "msg", log.Lazy{Fn: func() string {
			var age string
			if net.db != nil {
				age = time.Since(net.db.lastPong(n.ID)).String()
			} else {
				age = "unknown"
			}
			return fmt.Sprintf("seed node (age %s): %v", age, n)
		}})
		n = net.internNodeFromDB(n)
		if n.state == unknown {
			net.transition(n, verifyinit)
		}
		// Force-add the seed node so Lookup does something.
		// It will be deleted again if verification fails.
		net.tab.add(n)
	}
	// Start self lookup to fill up the buckets.
	go func() {
		net.Lookup(net.tab.self.ID)
		close(done)
	}()
}

// Node Interning.
713 714 func (net *Network) internNode(pkt *ingressPacket) *Node { 715 if n := net.nodes[pkt.remoteID]; n != nil { 716 n.IP = pkt.remoteAddr.IP 717 n.UDP = uint16(pkt.remoteAddr.Port) 718 n.TCP = uint16(pkt.remoteAddr.Port) 719 return n 720 } 721 n := NewNode(pkt.remoteID, pkt.remoteAddr.IP, uint16(pkt.remoteAddr.Port), uint16(pkt.remoteAddr.Port)) 722 n.state = unknown 723 net.nodes[pkt.remoteID] = n 724 return n 725 } 726 727 func (net *Network) internNodeFromDB(dbn *Node) *Node { 728 if n := net.nodes[dbn.ID]; n != nil { 729 return n 730 } 731 n := NewNode(dbn.ID, dbn.IP, dbn.UDP, dbn.TCP) 732 n.state = unknown 733 net.nodes[n.ID] = n 734 return n 735 } 736 737 func (net *Network) internNodeFromNeighbours(sender *net.UDPAddr, rn rpcNode) (n *Node, err error) { 738 if rn.ID == net.tab.self.ID { 739 return nil, errors.New("is self") 740 } 741 if rn.UDP <= lowPort { 742 return nil, errors.New("low port") 743 } 744 n = net.nodes[rn.ID] 745 if n == nil { 746 // We haven't seen this node before. 747 n, err = nodeFromRPC(sender, rn) 748 if net.netrestrict != nil && !net.netrestrict.Contains(n.IP) { 749 return n, errors.New("not contained in netrestrict whitelist") 750 } 751 if err == nil { 752 n.state = unknown 753 net.nodes[n.ID] = n 754 } 755 return n, err 756 } 757 if !n.IP.Equal(rn.IP) || n.UDP != rn.UDP || n.TCP != rn.TCP { 758 if n.state == known { 759 // reject address change if node is known by us 760 err = fmt.Errorf("metadata mismatch: got %v, want %v", rn, n) 761 } else { 762 // accept otherwise; this will be handled nicer with signed ENRs 763 n.IP = rn.IP 764 n.UDP = rn.UDP 765 n.TCP = rn.TCP 766 } 767 } 768 return n, err 769 } 770 771 // nodeNetGuts is embedded in Node and contains fields. 772 type nodeNetGuts struct { 773 // This is a cached copy of sha3(ID) which is used for node 774 // distance calculations. This is part of Node in order to make it 775 // possible to write tests that need a node at a certain distance. 
776 // In those tests, the content of sha will not actually correspond 777 // with ID. 778 sha common.Hash 779 780 // State machine fields. Access to these fields 781 // is restricted to the Network.loop goroutine. 782 state *nodeState 783 pingEcho []byte // hash of last ping sent by us 784 pingTopics []Topic // topic set sent by us in last ping 785 deferredQueries []*findnodeQuery // queries that can't be sent yet 786 pendingNeighbours *findnodeQuery // current query, waiting for reply 787 queryTimeouts int 788 } 789 790 func (n *nodeNetGuts) deferQuery(q *findnodeQuery) { 791 n.deferredQueries = append(n.deferredQueries, q) 792 } 793 794 func (n *nodeNetGuts) startNextQuery(net *Network) { 795 if len(n.deferredQueries) == 0 { 796 return 797 } 798 nextq := n.deferredQueries[0] 799 if nextq.start(net) { 800 n.deferredQueries = append(n.deferredQueries[:0], n.deferredQueries[1:]...) 801 } 802 } 803 804 func (q *findnodeQuery) start(net *Network) bool { 805 // Satisfy queries against the local node directly. 806 if q.remote == net.tab.self { 807 closest := net.tab.closest(crypto.Keccak256Hash(q.target[:]), bucketSize) 808 q.reply <- closest.entries 809 return true 810 } 811 if q.remote.state.canQuery && q.remote.pendingNeighbours == nil { 812 net.conn.sendFindnodeHash(q.remote, q.target) 813 net.timedEvent(respTimeout, q.remote, neighboursTimeout) 814 q.remote.pendingNeighbours = q 815 return true 816 } 817 // If the node is not known yet, it won't accept queries. 818 // Initiate the transition to known. 819 // The request will be sent later when the node reaches known state. 820 if q.remote.state == unknown { 821 net.transition(q.remote, verifyinit) 822 } 823 return false 824 } 825 826 // Node Events (the input to the state machine). 827 828 type nodeEvent uint 829 830 //go:generate stringer -type=nodeEvent 831 832 const ( 833 invalidEvent nodeEvent = iota // zero is reserved 834 835 // Packet type events. 836 // These correspond to packet types in the UDP protocol. 
837 pingPacket 838 pongPacket 839 findnodePacket 840 neighborsPacket 841 findnodeHashPacket 842 topicRegisterPacket 843 topicQueryPacket 844 topicNodesPacket 845 846 // Non-packet events. 847 // Event values in this category are allocated outside 848 // the packet type range (packet types are encoded as a single byte). 849 pongTimeout nodeEvent = iota + 256 850 pingTimeout 851 neighboursTimeout 852 ) 853 854 // Node State Machine. 855 856 type nodeState struct { 857 name string 858 handle func(*Network, *Node, nodeEvent, *ingressPacket) (next *nodeState, err error) 859 enter func(*Network, *Node) 860 canQuery bool 861 } 862 863 func (s *nodeState) String() string { 864 return s.name 865 } 866 867 var ( 868 unknown *nodeState 869 verifyinit *nodeState 870 verifywait *nodeState 871 remoteverifywait *nodeState 872 known *nodeState 873 contested *nodeState 874 unresponsive *nodeState 875 ) 876 877 func init() { 878 unknown = &nodeState{ 879 name: "unknown", 880 enter: func(net *Network, n *Node) { 881 net.tab.delete(n) 882 n.pingEcho = nil 883 // Abort active queries. 
884 for _, q := range n.deferredQueries { 885 q.reply <- nil 886 } 887 n.deferredQueries = nil 888 if n.pendingNeighbours != nil { 889 n.pendingNeighbours.reply <- nil 890 n.pendingNeighbours = nil 891 } 892 n.queryTimeouts = 0 893 }, 894 handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) { 895 switch ev { 896 case pingPacket: 897 net.handlePing(n, pkt) 898 net.ping(n, pkt.remoteAddr) 899 return verifywait, nil 900 default: 901 return unknown, errInvalidEvent 902 } 903 }, 904 } 905 906 verifyinit = &nodeState{ 907 name: "verifyinit", 908 enter: func(net *Network, n *Node) { 909 net.ping(n, n.addr()) 910 }, 911 handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) { 912 switch ev { 913 case pingPacket: 914 net.handlePing(n, pkt) 915 return verifywait, nil 916 case pongPacket: 917 err := net.handleKnownPong(n, pkt) 918 return remoteverifywait, err 919 case pongTimeout: 920 return unknown, nil 921 default: 922 return verifyinit, errInvalidEvent 923 } 924 }, 925 } 926 927 verifywait = &nodeState{ 928 name: "verifywait", 929 handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) { 930 switch ev { 931 case pingPacket: 932 net.handlePing(n, pkt) 933 return verifywait, nil 934 case pongPacket: 935 err := net.handleKnownPong(n, pkt) 936 return known, err 937 case pongTimeout: 938 return unknown, nil 939 default: 940 return verifywait, errInvalidEvent 941 } 942 }, 943 } 944 945 remoteverifywait = &nodeState{ 946 name: "remoteverifywait", 947 enter: func(net *Network, n *Node) { 948 net.timedEvent(respTimeout, n, pingTimeout) 949 }, 950 handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) { 951 switch ev { 952 case pingPacket: 953 net.handlePing(n, pkt) 954 return remoteverifywait, nil 955 case pingTimeout: 956 return known, nil 957 default: 958 return remoteverifywait, errInvalidEvent 959 } 960 }, 961 } 962 963 known = &nodeState{ 
964 name: "known", 965 canQuery: true, 966 enter: func(net *Network, n *Node) { 967 n.queryTimeouts = 0 968 n.startNextQuery(net) 969 // Insert into the table and start revalidation of the last node 970 // in the bucket if it is full. 971 last := net.tab.add(n) 972 if last != nil && last.state == known { 973 // TODO: do this asynchronously 974 net.transition(last, contested) 975 } 976 }, 977 handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) { 978 switch ev { 979 case pingPacket: 980 net.handlePing(n, pkt) 981 return known, nil 982 case pongPacket: 983 err := net.handleKnownPong(n, pkt) 984 return known, err 985 default: 986 return net.handleQueryEvent(n, ev, pkt) 987 } 988 }, 989 } 990 991 contested = &nodeState{ 992 name: "contested", 993 canQuery: true, 994 enter: func(net *Network, n *Node) { 995 net.ping(n, n.addr()) 996 }, 997 handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) { 998 switch ev { 999 case pongPacket: 1000 // Node is still alive. 1001 err := net.handleKnownPong(n, pkt) 1002 return known, err 1003 case pongTimeout: 1004 net.tab.deleteReplace(n) 1005 return unresponsive, nil 1006 case pingPacket: 1007 net.handlePing(n, pkt) 1008 return contested, nil 1009 default: 1010 return net.handleQueryEvent(n, ev, pkt) 1011 } 1012 }, 1013 } 1014 1015 unresponsive = &nodeState{ 1016 name: "unresponsive", 1017 canQuery: true, 1018 handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) { 1019 switch ev { 1020 case pingPacket: 1021 net.handlePing(n, pkt) 1022 return known, nil 1023 case pongPacket: 1024 err := net.handleKnownPong(n, pkt) 1025 return known, err 1026 default: 1027 return net.handleQueryEvent(n, ev, pkt) 1028 } 1029 }, 1030 } 1031 } 1032 1033 // handle processes packets sent by n and events related to n. 
func (net *Network) handle(n *Node, ev nodeEvent, pkt *ingressPacket) error {
	//fmt.Println("handle", n.addr().String(), n.state, ev)
	if pkt != nil {
		if err := net.checkPacket(n, ev, pkt); err != nil {
			//fmt.Println("check err:", err)
			return err
		}
		// Start the background expiration goroutine after the first
		// successful communication. Subsequent calls have no effect if it
		// is already running. We do this here instead of somewhere else
		// so that the search for seed nodes also considers older nodes
		// that would otherwise be removed by the expirer.
		if net.db != nil {
			net.db.ensureExpirer()
		}
	}
	if n.state == nil {
		n.state = unknown //???
	}
	// Dispatch to the current state's handler, then apply the transition
	// it requested (which may be a no-op when next == n.state).
	next, err := n.state.handle(net, n, ev, pkt)
	net.transition(n, next)
	//fmt.Println("new state:", n.state)
	return err
}

// checkPacket performs replay-prevention checks on an ingress packet
// before it reaches the state machine.
func (net *Network) checkPacket(n *Node, ev nodeEvent, pkt *ingressPacket) error {
	// Replay prevention checks.
	switch ev {
	case pingPacket, findnodeHashPacket, neighborsPacket:
		// TODO: check date is > last date seen
		// TODO: check ping version
	case pongPacket:
		// A pong must echo the hash of our last ping.
		if !bytes.Equal(pkt.data.(*pong).ReplyTok, n.pingEcho) {
			// fmt.Println("pong reply token mismatch")
			return fmt.Errorf("pong reply token mismatch")
		}
		n.pingEcho = nil
	}
	// Address validation.
	// TODO: Ideally we would do the following:
	//  - reject all packets with wrong address except ping.
	//  - for ping with new address, transition to verifywait but keep the
	//    previous node (with old address) around. if the new one reaches known,
	//    swap it out.
	return nil
}

// transition moves n to state next, running next's enter hook if the
// state actually changes.
func (net *Network) transition(n *Node, next *nodeState) {
	if n.state != next {
		n.state = next
		if next.enter != nil {
			next.enter(net, n)
		}
	}

	// TODO: persist/unpersist node
}

// timedEvent schedules ev to be delivered to the loop for n after d,
// unless aborted first via abortTimedEvent.
func (net *Network) timedEvent(d time.Duration, n *Node, ev nodeEvent) {
	timeout := timeoutEvent{ev, n}
	net.timeoutTimers[timeout] = time.AfterFunc(d, func() {
		select {
		case net.timeout <- timeout:
		case <-net.closed:
		}
	})
}

// abortTimedEvent cancels a pending timedEvent for (n, ev), if any.
func (net *Network) abortTimedEvent(n *Node, ev nodeEvent) {
	timer := net.timeoutTimers[timeoutEvent{ev, n}]
	if timer != nil {
		timer.Stop()
		delete(net.timeoutTimers, timeoutEvent{ev, n})
	}
}

// ping sends a ping to n (recording its hash in n.pingEcho) and arms the
// pong timeout. It is a no-op while a ping is already outstanding or when
// n is the local node.
func (net *Network) ping(n *Node, addr *net.UDPAddr) {
	//fmt.Println("ping", n.addr().String(), n.ID.String(), n.sha.Hex())
	if n.pingEcho != nil || n.ID == net.tab.self.ID {
		//fmt.Println(" not sent")
		return
	}
	log.Trace("Pinging remote node", "node", n.ID)
	n.pingTopics = net.ticketStore.regTopicSet()
	n.pingEcho = net.conn.sendPing(n, addr, n.pingTopics)
	net.timedEvent(respTimeout, n, pongTimeout)
}

// handlePing answers a remote ping with a pong that echoes the ping hash
// and carries a topic-registration ticket.
func (net *Network) handlePing(n *Node, pkt *ingressPacket) {
	log.Trace("Handling remote ping", "node", n.ID)
	ping := pkt.data.(*ping)
	n.TCP = ping.From.TCP
	t := net.topictab.getTicket(n, ping.Topics)

	pong := &pong{
		To:         makeEndpoint(n.addr(), n.TCP), // TODO: maybe use known TCP port from DB
		ReplyTok:   pkt.hash,
		Expiration: uint64(time.Now().Add(expiration).Unix()),
	}
	ticketToPong(t, pong)
	net.conn.send(n, pongPacket, pong)
}

// handleKnownPong processes a pong whose reply token already passed
// checkPacket, converting it into a topic ticket when possible and
// clearing the outstanding-ping state.
func (net *Network) handleKnownPong(n *Node, pkt *ingressPacket) error {
	log.Trace("Handling known pong", "node", n.ID)
	net.abortTimedEvent(n, pongTimeout)
	now := mclock.Now()
	ticket, err := pongToTicket(now, n.pingTopics, n, pkt)
	if err == nil {
		// fmt.Printf("(%x) ticket: %+v\n", net.tab.self.ID[:8], pkt.data)
		net.ticketStore.addTicket(now, pkt.data.(*pong).ReplyTok, ticket)
	} else {
		log.Trace("Failed to convert pong to ticket", "err", err)
	}
	n.pingEcho = nil
	n.pingTopics = nil
	return err
}

// handleQueryEvent handles the query-related events shared by the known,
// contested and unresponsive states. It returns the (possibly changed)
// next state for the node.
func (net *Network) handleQueryEvent(n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
	switch ev {
	case findnodePacket:
		target := crypto.Keccak256Hash(pkt.data.(*findnode).Target[:])
		results := net.tab.closest(target, bucketSize).entries
		net.conn.sendNeighbours(n, results)
		return n.state, nil
	case neighborsPacket:
		err := net.handleNeighboursPacket(n, pkt)
		return n.state, err
	case neighboursTimeout:
		if n.pendingNeighbours != nil {
			n.pendingNeighbours.reply <- nil
			n.pendingNeighbours = nil
		}
		n.queryTimeouts++
		if n.queryTimeouts > maxFindnodeFailures && n.state == known {
			return contested, errors.New("too many timeouts")
		}
		return n.state, nil

	// v5

	case findnodeHashPacket:
		results := net.tab.closest(pkt.data.(*findnodeHash).Target, bucketSize).entries
		net.conn.sendNeighbours(n, results)
		return n.state, nil
	case topicRegisterPacket:
		//fmt.Println("got topicRegisterPacket")
		regdata := pkt.data.(*topicRegister)
		pong, err := net.checkTopicRegister(regdata)
		if err != nil {
			//fmt.Println(err)
			return n.state, fmt.Errorf("bad waiting ticket: %v", err)
		}
		net.topictab.useTicket(n, pong.TicketSerial, regdata.Topics, int(regdata.Idx), pong.Expiration, pong.WaitPeriods)
		return n.state, nil
	case topicQueryPacket:
		// TODO: handle expiration
		topic := pkt.data.(*topicQuery).Topic
		results := net.topictab.getEntries(topic)
		if _, ok := net.ticketStore.tickets[topic]; ok {
			results = append(results, net.tab.self) // we're not registering in our own table but if we're advertising, return ourselves too
		}
		if len(results) > 10 {
			results = results[:10]
		}
		var hash common.Hash
		copy(hash[:], pkt.hash)
		net.conn.sendTopicNodes(n, hash, results)
		return n.state, nil
	case topicNodesPacket:
		p := pkt.data.(*topicNodes)
		if net.ticketStore.gotTopicNodes(n, p.Echo, p.Nodes) {
			n.queryTimeouts++
			if n.queryTimeouts > maxFindnodeFailures && n.state == known {
				return contested, errors.New("too many timeouts")
			}
		}
		return n.state, nil

	default:
		return n.state, errInvalidEvent
	}
}

// checkTopicRegister validates the embedded pong of a topic-register
// packet: it must decode, be a pong, and be signed by the local node.
// NOTE(review): the remainder of this function lies beyond the visible
// chunk; the body below reproduces only what is in view.
func (net *Network) checkTopicRegister(data *topicRegister) (*pong, error) {
	var pongpkt ingressPacket
	if err := decodePacket(data.Pong, &pongpkt); err != nil {
		return nil, err
	}
	if pongpkt.ev != pongPacket {
		return nil, errors.New("is not pong packet")
	}
	if pongpkt.remoteID != net.tab.self.ID {
		return nil, errors.New("not signed by us")
	}
	// check that we previously authorised all topics
	// that the other side is trying to register.
1232 if rlpHash(data.Topics) != pongpkt.data.(*pong).TopicHash { 1233 return nil, errors.New("topic hash mismatch") 1234 } 1235 if data.Idx < 0 || int(data.Idx) >= len(data.Topics) { 1236 return nil, errors.New("topic index out of range") 1237 } 1238 return pongpkt.data.(*pong), nil 1239 } 1240 1241 func rlpHash(x interface{}) (h common.Hash) { 1242 hw := sha3.NewKeccak256SingleSum() 1243 rlp.Encode(hw, x) 1244 hw.Sum(h[:0]) 1245 return h 1246 } 1247 1248 func (net *Network) handleNeighboursPacket(n *Node, pkt *ingressPacket) error { 1249 if n.pendingNeighbours == nil { 1250 return errNoQuery 1251 } 1252 net.abortTimedEvent(n, neighboursTimeout) 1253 1254 req := pkt.data.(*neighbors) 1255 nodes := make([]*Node, len(req.Nodes)) 1256 for i, rn := range req.Nodes { 1257 nn, err := net.internNodeFromNeighbours(pkt.remoteAddr, rn) 1258 if err != nil { 1259 log.Debug(fmt.Sprintf("invalid neighbour (%v) from %x@%v: %v", rn.IP, n.ID[:8], pkt.remoteAddr, err)) 1260 continue 1261 } 1262 nodes[i] = nn 1263 // Start validation of query results immediately. 1264 // This fills the table quickly. 1265 // TODO: generates way too many packets, maybe do it via queue. 1266 if nn.state == unknown { 1267 net.transition(nn, verifyinit) 1268 } 1269 } 1270 // TODO: don't ignore second packet 1271 n.pendingNeighbours.reply <- nodes 1272 n.pendingNeighbours = nil 1273 // Now that this query is done, start the next one. 1274 n.startNextQuery(net) 1275 return nil 1276 }