github.com/core-coin/go-core/v2@v2.1.9/p2p/discv5/net.go

// Copyright 2016 by the Authors
// This file is part of the go-core library.
//
// The go-core library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-core library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.

package discv5

import (
	"bytes"
	"errors"
	"fmt"
	"net"
	"time"

	"golang.org/x/crypto/sha3"

	"github.com/core-coin/go-core/v2/common"
	"github.com/core-coin/go-core/v2/common/mclock"
	"github.com/core-coin/go-core/v2/crypto"
	"github.com/core-coin/go-core/v2/log"
	"github.com/core-coin/go-core/v2/p2p/netutil"
	"github.com/core-coin/go-core/v2/rlp"
)

var (
	errInvalidEvent = errors.New("invalid in current state")
	errNoQuery      = errors.New("no pending query")
)

const (
	autoRefreshInterval   = 1 * time.Hour
	bucketRefreshInterval = 1 * time.Minute
	seedCount             = 30
	seedMaxAge            = 5 * 24 * time.Hour
	lowPort               = 1024
)

const testTopic = "foo"

const (
	printTestImgLogs = false
)

// Network manages the table and all protocol interaction.
type Network struct {
	db          *nodeDB // database of known nodes
	conn        transport
	netrestrict *netutil.Netlist

	closed           chan struct{}          // closed when loop is done
	closeReq         chan struct{}          // 'request to close'
	refreshReq       chan []*Node           // lookups ask for refresh on this channel
	refreshResp      chan (<-chan struct{}) // ...and get the channel to block on from this one
	read             chan ingressPacket     // ingress packets arrive here
	timeout          chan timeoutEvent
	queryReq         chan *findnodeQuery // lookups submit findnode queries on this channel
	tableOpReq       chan func()
	tableOpResp      chan struct{}
	topicRegisterReq chan topicRegisterReq
	topicSearchReq   chan topicSearchReq

	// State of the main loop.
	tab           *Table
	topictab      *topicTable
	ticketStore   *ticketStore
	nursery       []*Node
	nodes         map[NodeID]*Node // tracks active nodes with state != known
	timeoutTimers map[timeoutEvent]*time.Timer
}
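
// A minimal usage sketch of the Network API defined in this file
// (illustrative only; obtaining the *Network is assumed to go through this
// package's UDP listener setup, e.g. a ListenUDP-style helper, and
// bootnodes, targetID and stop are supplied by the caller):
//
//	ntab, _ := ListenUDP(...)                  // hypothetical setup, error handling elided
//	_ = ntab.SetFallbackNodes(bootnodes)       // seed the empty table
//	nodes := ntab.Lookup(targetID)             // iterative search for nodes near targetID
//	go ntab.RegisterTopic(Topic("demo"), stop) // advertise until stop is closed
//	defer ntab.Close()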

// transport is implemented by the UDP transport.
// it is an interface so we can test without opening lots of UDP
// sockets and without generating a private key.
type transport interface {
	sendPing(remote *Node, remoteAddr *net.UDPAddr, topics []Topic) (hash []byte)
	sendNeighbours(remote *Node, nodes []*Node)
	sendFindnodeHash(remote *Node, target common.Hash)
	sendTopicRegister(remote *Node, topics []Topic, topicIdx int, pong []byte)
	sendTopicNodes(remote *Node, queryHash common.Hash, nodes []*Node)

	send(remote *Node, ptype nodeEvent, p interface{}) (hash []byte)

	localAddr() *net.UDPAddr
	Close()
}
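
// A no-op transport is enough to exercise the state machine in tests; a
// minimal sketch satisfying the interface could look like the following
// (illustrative only; the package's own tests may use a richer in-memory fake):
//
//	type nullTransport struct{ addr *net.UDPAddr }
//
//	func (t nullTransport) sendPing(*Node, *net.UDPAddr, []Topic) []byte  { return nil }
//	func (t nullTransport) sendNeighbours(*Node, []*Node)                 {}
//	func (t nullTransport) sendFindnodeHash(*Node, common.Hash)           {}
//	func (t nullTransport) sendTopicRegister(*Node, []Topic, int, []byte) {}
//	func (t nullTransport) sendTopicNodes(*Node, common.Hash, []*Node)    {}
//	func (t nullTransport) send(*Node, nodeEvent, interface{}) []byte     { return nil }
//	func (t nullTransport) localAddr() *net.UDPAddr                       { return t.addr }
//	func (t nullTransport) Close()                                        {}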

type findnodeQuery struct {
	remote *Node
	target common.Hash
	reply  chan<- []*Node
}

type topicRegisterReq struct {
	add   bool
	topic Topic
}

type topicSearchReq struct {
	topic  Topic
	found  chan<- *Node
	lookup chan<- bool
	delay  time.Duration
}

type topicSearchResult struct {
	target lookupInfo
	nodes  []*Node
}

type timeoutEvent struct {
	ev   nodeEvent
	node *Node
}

func newNetwork(conn transport, ourPubkey crypto.PublicKey, dbPath string, netrestrict *netutil.Netlist) (*Network, error) {
	ourID := PubkeyID(&ourPubkey)

	var db *nodeDB
	if dbPath != "<no database>" {
		var err error
		if db, err = newNodeDB(dbPath, Version, ourID); err != nil {
			return nil, err
		}
	}

	tab := newTable(ourID, conn.localAddr())
	net := &Network{
		db:               db,
		conn:             conn,
		netrestrict:      netrestrict,
		tab:              tab,
		topictab:         newTopicTable(db, tab.self),
		ticketStore:      newTicketStore(),
		refreshReq:       make(chan []*Node),
		refreshResp:      make(chan (<-chan struct{})),
		closed:           make(chan struct{}),
		closeReq:         make(chan struct{}),
		read:             make(chan ingressPacket, 100),
		timeout:          make(chan timeoutEvent),
		timeoutTimers:    make(map[timeoutEvent]*time.Timer),
		tableOpReq:       make(chan func()),
		tableOpResp:      make(chan struct{}),
		queryReq:         make(chan *findnodeQuery),
		topicRegisterReq: make(chan topicRegisterReq),
		topicSearchReq:   make(chan topicSearchReq),
		nodes:            make(map[NodeID]*Node),
	}
	go net.loop()
	return net, nil
}

// Close terminates the network listener and flushes the node database.
func (net *Network) Close() {
	net.conn.Close()
	select {
	case <-net.closed:
	case net.closeReq <- struct{}{}:
		<-net.closed
	}
}

// Self returns the local node.
// The returned node should not be modified by the caller.
func (net *Network) Self() *Node {
	return net.tab.self
}

// ReadRandomNodes fills the given slice with random nodes from the
// table. It will not write the same node more than once. The nodes in
// the slice are copies and can be modified by the caller.
func (net *Network) ReadRandomNodes(buf []*Node) (n int) {
	net.reqTableOp(func() { n = net.tab.readRandomNodes(buf) })
	return n
}

// SetFallbackNodes sets the initial points of contact. These nodes
// are used to connect to the network if the table is empty and there
// are no known nodes in the database.
func (net *Network) SetFallbackNodes(nodes []*Node) error {
	nursery := make([]*Node, 0, len(nodes))
	for _, n := range nodes {
		if err := n.validateComplete(); err != nil {
			return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err)
		}
		// Recompute cpy.sha because the node might not have been
		// created by NewNode or ParseNode.
		cpy := *n
		cpy.sha = crypto.SHA3Hash(n.ID[:])
		nursery = append(nursery, &cpy)
	}
	net.reqRefresh(nursery)
	return nil
}

// Resolve searches for a specific node with the given ID.
// It returns nil if the node could not be found.
func (net *Network) Resolve(targetID NodeID) *Node {
	result := net.lookup(crypto.SHA3Hash(targetID[:]), true)
	for _, n := range result {
		if n.ID == targetID {
			return n
		}
	}
	return nil
}

// Lookup performs a network search for nodes close
// to the given target. It approaches the target by querying
// nodes that are closer to it on each iteration.
// The given target does not need to be an actual node
// identifier.
//
// The local node may be included in the result.
func (net *Network) Lookup(targetID NodeID) []*Node {
	return net.lookup(crypto.SHA3Hash(targetID[:]), false)
}

func (net *Network) lookup(target common.Hash, stopOnMatch bool) []*Node {
	var (
		asked          = make(map[NodeID]bool)
		seen           = make(map[NodeID]bool)
		reply          = make(chan []*Node, alpha)
		result         = nodesByDistance{target: target}
		pendingQueries = 0
	)
	// Get initial answers from the local node.
	result.push(net.tab.self, bucketSize)
	for {
		// Ask the α closest nodes that we haven't asked yet.
		for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
			n := result.entries[i]
			if !asked[n.ID] {
				asked[n.ID] = true
				pendingQueries++
				net.reqQueryFindnode(n, target, reply)
			}
		}
		if pendingQueries == 0 {
			// We have asked all closest nodes, stop the search.
			break
		}
		// Wait for the next reply.
		select {
		case nodes := <-reply:
			for _, n := range nodes {
				if n != nil && !seen[n.ID] {
					seen[n.ID] = true
					result.push(n, bucketSize)
					if stopOnMatch && n.sha == target {
						return result.entries
					}
				}
			}
			pendingQueries--
		case <-time.After(respTimeout):
			// forget all pending requests, start new ones
			pendingQueries = 0
			reply = make(chan []*Node, alpha)
		}
	}
	return result.entries
}

func (net *Network) RegisterTopic(topic Topic, stop <-chan struct{}) {
	select {
	case net.topicRegisterReq <- topicRegisterReq{true, topic}:
	case <-net.closed:
		return
	}
	select {
	case <-net.closed:
	case <-stop:
		select {
		case net.topicRegisterReq <- topicRegisterReq{false, topic}:
		case <-net.closed:
		}
	}
}

func (net *Network) SearchTopic(topic Topic, setPeriod <-chan time.Duration, found chan<- *Node, lookup chan<- bool) {
	for {
		select {
		case <-net.closed:
			return
		case delay, ok := <-setPeriod:
			select {
			case net.topicSearchReq <- topicSearchReq{topic: topic, found: found, lookup: lookup, delay: delay}:
			case <-net.closed:
				return
			}
			if !ok {
				return
			}
		}
	}
}
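
// A sketch of how RegisterTopic and SearchTopic above are typically driven
// (illustrative; channel names and goroutine structure are the caller's
// choice, and ntab stands for a running *Network):
//
//	stop := make(chan struct{})
//	go ntab.RegisterTopic(Topic("demo"), stop) // advertise until close(stop)
//
//	setPeriod := make(chan time.Duration, 1)
//	found := make(chan *Node)
//	setPeriod <- 10 * time.Second // initial lookup period
//	go ntab.SearchTopic(Topic("demo"), setPeriod, found, nil)
//	// ... consume discovered nodes from found ...
//	close(setPeriod) // the zero-value period cancels the search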

func (net *Network) reqRefresh(nursery []*Node) <-chan struct{} {
	select {
	case net.refreshReq <- nursery:
		return <-net.refreshResp
	case <-net.closed:
		return net.closed
	}
}

func (net *Network) reqQueryFindnode(n *Node, target common.Hash, reply chan []*Node) bool {
	q := &findnodeQuery{remote: n, target: target, reply: reply}
	select {
	case net.queryReq <- q:
		return true
	case <-net.closed:
		return false
	}
}

func (net *Network) reqReadPacket(pkt ingressPacket) {
	select {
	case net.read <- pkt:
	case <-net.closed:
	}
}

func (net *Network) reqTableOp(f func()) (called bool) {
	select {
	case net.tableOpReq <- f:
		<-net.tableOpResp
		return true
	case <-net.closed:
		return false
	}
}

// TODO: external address handling.

type topicSearchInfo struct {
	lookupChn chan<- bool
	period    time.Duration
}

const maxSearchCount = 5
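
// loop is the heart of the Network: it owns the table, the topic table, the
// ticket store and all node state, and everything that mutates them runs on
// this single goroutine (see the note further below). The topic-registration
// machinery it drives works roughly as follows: a register lookup walks the
// network toward the target chosen by the ticket store, registerLookupDone
// pings the discovered nodes to collect the tickets carried in their pongs,
// nextFilteredTicket picks the ticket to redeem next, and when
// nextRegisterTime fires the ticket is spent with sendTopicRegister.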

func (net *Network) loop() {
	var (
		refreshTimer       = time.NewTicker(autoRefreshInterval)
		bucketRefreshTimer = time.NewTimer(bucketRefreshInterval)
		refreshDone        chan struct{} // closed when the 'refresh' lookup has ended
	)
	defer refreshTimer.Stop()
	defer bucketRefreshTimer.Stop()

	// Tracking the next ticket to register.
	var (
		nextTicket        *ticketRef
		nextRegisterTimer *time.Timer
		nextRegisterTime  <-chan time.Time
	)
	defer func() {
		if nextRegisterTimer != nil {
			nextRegisterTimer.Stop()
		}
	}()
	resetNextTicket := func() {
		ticket, timeout := net.ticketStore.nextFilteredTicket()
		if nextTicket != ticket {
			nextTicket = ticket
			if nextRegisterTimer != nil {
				nextRegisterTimer.Stop()
				nextRegisterTime = nil
			}
			if ticket != nil {
				nextRegisterTimer = time.NewTimer(timeout)
				nextRegisterTime = nextRegisterTimer.C
			}
		}
	}

	// Tracking registration and search lookups.
	var (
		topicRegisterLookupTarget lookupInfo
		topicRegisterLookupDone   chan []*Node
		topicRegisterLookupTick   = time.NewTimer(0)
		searchReqWhenRefreshDone  []topicSearchReq
		searchInfo                = make(map[Topic]topicSearchInfo)
		activeSearchCount         int
	)
	defer topicRegisterLookupTick.Stop()
	topicSearchLookupDone := make(chan topicSearchResult, 100)
	topicSearch := make(chan Topic, 100)
	<-topicRegisterLookupTick.C

	statsDump := time.NewTicker(10 * time.Second)
	defer statsDump.Stop()

loop:
	for {
		resetNextTicket()

		select {
		case <-net.closeReq:
			log.Trace("<-net.closeReq")
			break loop

		// Ingress packet handling.
		case pkt := <-net.read:
			//fmt.Println("read", pkt.ev)
			log.Trace("<-net.read")
			n := net.internNode(&pkt)
			prestate := n.state
			status := "ok"
			if err := net.handle(n, pkt.ev, &pkt); err != nil {
				status = err.Error()
			}
			log.Trace("", "msg", log.Lazy{Fn: func() string {
				return fmt.Sprintf("<<< (%d) %v from %x@%v: %v -> %v (%v)",
					net.tab.count, pkt.ev, pkt.remoteID[:8], pkt.remoteAddr, prestate, n.state, status)
			}})
			// TODO: persist state if n.state goes >= known, delete if it goes <= known

		// State transition timeouts.
		case timeout := <-net.timeout:
			log.Trace("<-net.timeout")
			if net.timeoutTimers[timeout] == nil {
				// Stale timer (was aborted).
				continue
			}
			delete(net.timeoutTimers, timeout)
			prestate := timeout.node.state
			status := "ok"
			if err := net.handle(timeout.node, timeout.ev, nil); err != nil {
				status = err.Error()
			}
			log.Trace("", "msg", log.Lazy{Fn: func() string {
				return fmt.Sprintf("--- (%d) %v for %x@%v: %v -> %v (%v)",
					net.tab.count, timeout.ev, timeout.node.ID[:8], timeout.node.addr(), prestate, timeout.node.state, status)
			}})

		// Querying.
		case q := <-net.queryReq:
			log.Trace("<-net.queryReq")
			if !q.start(net) {
				q.remote.deferQuery(q)
			}

		// Interacting with the table.
		case f := <-net.tableOpReq:
			log.Trace("<-net.tableOpReq")
			f()
			net.tableOpResp <- struct{}{}

		// Topic registration stuff.
		case req := <-net.topicRegisterReq:
			log.Trace("<-net.topicRegisterReq")
			if !req.add {
				net.ticketStore.removeRegisterTopic(req.topic)
				continue
			}
			net.ticketStore.addTopic(req.topic, true)
			// If we're currently waiting idle (nothing to look up), give the ticket store a
			// chance to start it sooner. This should speed up convergence of the radius
			// determination for new topics.
			// if topicRegisterLookupDone == nil {
			if topicRegisterLookupTarget.target == (common.Hash{}) {
				log.Trace("topicRegisterLookupTarget == null")
				if topicRegisterLookupTick.Stop() {
					<-topicRegisterLookupTick.C
				}
				target, delay := net.ticketStore.nextRegisterLookup()
				topicRegisterLookupTarget = target
				topicRegisterLookupTick.Reset(delay)
			}

		case nodes := <-topicRegisterLookupDone:
			log.Trace("<-topicRegisterLookupDone")
			net.ticketStore.registerLookupDone(topicRegisterLookupTarget, nodes, func(n *Node) []byte {
				net.ping(n, n.addr())
				return n.pingEcho
			})
			target, delay := net.ticketStore.nextRegisterLookup()
			topicRegisterLookupTarget = target
			topicRegisterLookupTick.Reset(delay)
			topicRegisterLookupDone = nil

		case <-topicRegisterLookupTick.C:
			log.Trace("<-topicRegisterLookupTick")
			if (topicRegisterLookupTarget.target == common.Hash{}) {
				target, delay := net.ticketStore.nextRegisterLookup()
				topicRegisterLookupTarget = target
				topicRegisterLookupTick.Reset(delay)
				topicRegisterLookupDone = nil
			} else {
				topicRegisterLookupDone = make(chan []*Node)
				target := topicRegisterLookupTarget.target
				go func() { topicRegisterLookupDone <- net.lookup(target, false) }()
			}

		case <-nextRegisterTime:
			log.Trace("<-nextRegisterTime")
			net.ticketStore.ticketRegistered(*nextTicket)
			//fmt.Println("sendTopicRegister", nextTicket.t.node.addr().String(), nextTicket.t.topics, nextTicket.idx, nextTicket.t.pong)
			net.conn.sendTopicRegister(nextTicket.t.node, nextTicket.t.topics, nextTicket.idx, nextTicket.t.pong)

		case req := <-net.topicSearchReq:
			if refreshDone == nil {
				log.Trace("<-net.topicSearchReq")
				info, ok := searchInfo[req.topic]
				if ok {
					if req.delay == time.Duration(0) {
						delete(searchInfo, req.topic)
						net.ticketStore.removeSearchTopic(req.topic)
					} else {
						info.period = req.delay
						searchInfo[req.topic] = info
					}
					continue
				}
				if req.delay != time.Duration(0) {
					var info topicSearchInfo
					info.period = req.delay
					info.lookupChn = req.lookup
					searchInfo[req.topic] = info
					net.ticketStore.addSearchTopic(req.topic, req.found)
					topicSearch <- req.topic
				}
			} else {
				searchReqWhenRefreshDone = append(searchReqWhenRefreshDone, req)
			}

		case topic := <-topicSearch:
			if activeSearchCount < maxSearchCount {
				activeSearchCount++
				target := net.ticketStore.nextSearchLookup(topic)
				go func() {
					nodes := net.lookup(target.target, false)
					topicSearchLookupDone <- topicSearchResult{target: target, nodes: nodes}
				}()
			}
			period := searchInfo[topic].period
			if period != time.Duration(0) {
				go func() {
					time.Sleep(period)
					topicSearch <- topic
				}()
			}

		case res := <-topicSearchLookupDone:
			activeSearchCount--
			if lookupChn := searchInfo[res.target.topic].lookupChn; lookupChn != nil {
				lookupChn <- net.ticketStore.radius[res.target.topic].converged
			}
			net.ticketStore.searchLookupDone(res.target, res.nodes, func(n *Node, topic Topic) []byte {
				if n.state != nil && n.state.canQuery {
					return net.conn.send(n, topicQueryPacket, topicQuery{Topic: topic}) // TODO: set expiration
				}
				if n.state == unknown {
					net.ping(n, n.addr())
				}
				return nil
			})

		case <-statsDump.C:
			log.Trace("<-statsDump.C")
			/*r, ok := net.ticketStore.radius[testTopic]
			if !ok {
				fmt.Printf("(%x) no radius @ %v\n", net.tab.self.ID[:8], time.Now())
			} else {
				topics := len(net.ticketStore.tickets)
				tickets := len(net.ticketStore.nodes)
				rad := r.radius / (maxRadius/10000+1)
				fmt.Printf("(%x) topics:%d radius:%d tickets:%d @ %v\n", net.tab.self.ID[:8], topics, rad, tickets, time.Now())
			}*/

			tm := mclock.Now()
			for topic, r := range net.ticketStore.radius {
				if printTestImgLogs {
					rad := r.radius / (maxRadius/1000000 + 1)
					minrad := r.minRadius / (maxRadius/1000000 + 1)
					fmt.Printf("*R %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha[:8], rad)
					fmt.Printf("*MR %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha[:8], minrad)
				}
			}
			for topic, t := range net.topictab.topics {
				wp := t.wcl.nextWaitPeriod(tm)
				if printTestImgLogs {
					fmt.Printf("*W %d %v %016x %d\n", tm/1000000, topic, net.tab.self.sha[:8], wp/1000000)
				}
			}

		// Periodic / lookup-initiated bucket refresh.
		case <-refreshTimer.C:
			log.Trace("<-refreshTimer.C")
			// TODO: ideally we would start the refresh timer after
			// fallback nodes have been set for the first time.
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				net.refresh(refreshDone)
			}
		case <-bucketRefreshTimer.C:
			target := net.tab.chooseBucketRefreshTarget()
			go func() {
				net.lookup(target, false)
				bucketRefreshTimer.Reset(bucketRefreshInterval)
			}()
		case newNursery := <-net.refreshReq:
			log.Trace("<-net.refreshReq")
			if newNursery != nil {
				net.nursery = newNursery
			}
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				net.refresh(refreshDone)
			}
			net.refreshResp <- refreshDone
		case <-refreshDone:
			log.Trace("<-net.refreshDone", "table size", net.tab.count)
			if net.tab.count != 0 {
				refreshDone = nil
				list := searchReqWhenRefreshDone
				searchReqWhenRefreshDone = nil
				go func() {
					for _, req := range list {
						net.topicSearchReq <- req
					}
				}()
			} else {
				refreshDone = make(chan struct{})
				net.refresh(refreshDone)
			}
		}
	}
	log.Trace("loop stopped")

	log.Debug("shutting down")
	if net.conn != nil {
		net.conn.Close()
	}
	// TODO: wait for pending refresh.
	// if refreshDone != nil {
	// 	<-refreshResults
	// }
	// Cancel all pending timeouts.
	for _, timer := range net.timeoutTimers {
		timer.Stop()
	}
	if net.db != nil {
		net.db.close()
	}
	close(net.closed)
}

// Everything below runs on the Network.loop goroutine
// and can modify Node, Table and Network at any time without locking.

func (net *Network) refresh(done chan<- struct{}) {
	var seeds []*Node
	if net.db != nil {
		seeds = net.db.querySeeds(seedCount, seedMaxAge)
	}
	if len(seeds) == 0 {
		seeds = net.nursery
	}
	if len(seeds) == 0 {
		log.Trace("no seed nodes found")
		time.AfterFunc(time.Second*10, func() { close(done) })
		return
	}
	for _, n := range seeds {
		log.Debug("", "msg", log.Lazy{Fn: func() string {
			var age string
			if net.db != nil {
				age = time.Since(net.db.lastPong(n.ID)).String()
			} else {
				age = "unknown"
			}
			return fmt.Sprintf("seed node (age %s): %v", age, n)
		}})
		n = net.internNodeFromDB(n)
		if n.state == unknown {
			net.transition(n, verifyinit)
		}
		// Force-add the seed node so Lookup does something.
		// It will be deleted again if verification fails.
		net.tab.add(n)
	}
	// Start self lookup to fill up the buckets.
	go func() {
		net.Lookup(net.tab.self.ID)
		close(done)
	}()
}

// Node Interning.

func (net *Network) internNode(pkt *ingressPacket) *Node {
	if n := net.nodes[pkt.remoteID]; n != nil {
		n.IP = pkt.remoteAddr.IP
		n.UDP = uint16(pkt.remoteAddr.Port)
		n.TCP = uint16(pkt.remoteAddr.Port)
		return n
	}
	n := NewNode(pkt.remoteID, pkt.remoteAddr.IP, uint16(pkt.remoteAddr.Port), uint16(pkt.remoteAddr.Port))
	n.state = unknown
	net.nodes[pkt.remoteID] = n
	return n
}

func (net *Network) internNodeFromDB(dbn *Node) *Node {
	if n := net.nodes[dbn.ID]; n != nil {
		return n
	}
	n := NewNode(dbn.ID, dbn.IP, dbn.UDP, dbn.TCP)
	n.state = unknown
	net.nodes[n.ID] = n
	return n
}

func (net *Network) internNodeFromNeighbours(sender *net.UDPAddr, rn rpcNode) (n *Node, err error) {
	if rn.ID == net.tab.self.ID {
		return nil, errors.New("is self")
	}
	if rn.UDP <= lowPort {
		return nil, errors.New("low port")
	}
	n = net.nodes[rn.ID]
	if n == nil {
		// We haven't seen this node before.
		n, err = nodeFromRPC(sender, rn)
		if net.netrestrict != nil && !net.netrestrict.Contains(n.IP) {
			return n, errors.New("not contained in netrestrict whitelist")
		}
		if err == nil {
			n.state = unknown
			net.nodes[n.ID] = n
		}
		return n, err
	}
	if !n.IP.Equal(rn.IP) || n.UDP != rn.UDP || n.TCP != rn.TCP {
		if n.state == known {
			// reject address change if node is known by us
			err = fmt.Errorf("metadata mismatch: got %v, want %v", rn, n)
		} else {
			// accept otherwise; this will be handled nicer with signed ENRs
			n.IP = rn.IP
			n.UDP = rn.UDP
			n.TCP = rn.TCP
		}
	}
	return n, err
}
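
// The interning helpers above keep at most one *Node per NodeID in net.nodes,
// so the state machine below can rely on pointer identity when comparing
// nodes. Like everything past loop, they run only on the Network.loop
// goroutine and therefore need no locking.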

// nodeNetGuts is embedded in Node and contains the fields
// used by the network state machine.
type nodeNetGuts struct {
	// This is a cached copy of sha3(ID) which is used for node
	// distance calculations. This is part of Node in order to make it
	// possible to write tests that need a node at a certain distance.
	// In those tests, the content of sha will not actually correspond
	// with ID.
	sha common.Hash

	// State machine fields. Access to these fields
	// is restricted to the Network.loop goroutine.
	state             *nodeState
	pingEcho          []byte           // hash of last ping sent by us
	pingTopics        []Topic          // topic set sent by us in last ping
	deferredQueries   []*findnodeQuery // queries that can't be sent yet
	pendingNeighbours *findnodeQuery   // current query, waiting for reply
	queryTimeouts     int
}

func (n *nodeNetGuts) deferQuery(q *findnodeQuery) {
	n.deferredQueries = append(n.deferredQueries, q)
}

func (n *nodeNetGuts) startNextQuery(net *Network) {
	if len(n.deferredQueries) == 0 {
		return
	}
	nextq := n.deferredQueries[0]
	if nextq.start(net) {
		n.deferredQueries = append(n.deferredQueries[:0], n.deferredQueries[1:]...)
	}
}

func (q *findnodeQuery) start(net *Network) bool {
	// Satisfy queries against the local node directly.
	if q.remote == net.tab.self {
		closest := net.tab.closest(q.target, bucketSize)
		q.reply <- closest.entries
		return true
	}
	if q.remote.state.canQuery && q.remote.pendingNeighbours == nil {
		net.conn.sendFindnodeHash(q.remote, q.target)
		net.timedEvent(respTimeout, q.remote, neighboursTimeout)
		q.remote.pendingNeighbours = q
		return true
	}
	// If the node is not known yet, it won't accept queries.
	// Initiate the transition to known.
	// The request will be sent later when the node reaches known state.
	if q.remote.state == unknown {
		net.transition(q.remote, verifyinit)
	}
	return false
}

// Node Events (the input to the state machine).

type nodeEvent uint

//go:generate stringer -type=nodeEvent

const (

	// Packet type events.
	// These correspond to packet types in the UDP protocol.
	pingPacket = iota + 1
	pongPacket
	findnodePacket
	neighborsPacket
	findnodeHashPacket
	topicRegisterPacket
	topicQueryPacket
	topicNodesPacket

	// Non-packet events.
	// Event values in this category are allocated outside
	// the packet type range (packet types are encoded as a single byte).
	pongTimeout nodeEvent = iota + 256
	pingTimeout
	neighboursTimeout
)

// Node State Machine.

type nodeState struct {
	name     string
	handle   func(*Network, *Node, nodeEvent, *ingressPacket) (next *nodeState, err error)
	enter    func(*Network, *Node)
	canQuery bool
}

func (s *nodeState) String() string {
	return s.name
}

var (
	unknown          *nodeState
	verifyinit       *nodeState
	verifywait       *nodeState
	remoteverifywait *nodeState
	known            *nodeState
	contested        *nodeState
	unresponsive     *nodeState
)
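
// The transitions wired up in init() below form the following state machine
// (summarised from the handle/enter functions; canQuery marks states whose
// nodes accept findnode/topic queries):
//
//	unknown          --ping-->           verifywait
//	verifyinit       --ping-->           verifywait
//	verifyinit       --pong-->           remoteverifywait
//	verifyinit       --pong timeout-->   unknown
//	verifywait       --pong-->           known
//	verifywait       --pong timeout-->   unknown
//	remoteverifywait --ping timeout-->   known
//	known            --bucket contest--> contested (its bucket is full and a newer node was added)
//	contested        --pong-->           known
//	contested        --pong timeout-->   unresponsive (and removal from the table)
//	unresponsive     --ping or pong-->   known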

func init() {
	unknown = &nodeState{
		name: "unknown",
		enter: func(net *Network, n *Node) {
			net.tab.delete(n)
			n.pingEcho = nil
			// Abort active queries.
			for _, q := range n.deferredQueries {
				q.reply <- nil
			}
			n.deferredQueries = nil
			if n.pendingNeighbours != nil {
				n.pendingNeighbours.reply <- nil
				n.pendingNeighbours = nil
			}
			n.queryTimeouts = 0
		},
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pingPacket:
				net.handlePing(n, pkt)
				net.ping(n, pkt.remoteAddr)
				return verifywait, nil
			default:
				return unknown, errInvalidEvent
			}
		},
	}

	verifyinit = &nodeState{
		name: "verifyinit",
		enter: func(net *Network, n *Node) {
			net.ping(n, n.addr())
		},
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pingPacket:
				net.handlePing(n, pkt)
				return verifywait, nil
			case pongPacket:
				err := net.handleKnownPong(n, pkt)
				return remoteverifywait, err
			case pongTimeout:
				return unknown, nil
			default:
				return verifyinit, errInvalidEvent
			}
		},
	}

	verifywait = &nodeState{
		name: "verifywait",
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pingPacket:
				net.handlePing(n, pkt)
				return verifywait, nil
			case pongPacket:
				err := net.handleKnownPong(n, pkt)
				return known, err
			case pongTimeout:
				return unknown, nil
			default:
				return verifywait, errInvalidEvent
			}
		},
	}

	remoteverifywait = &nodeState{
		name: "remoteverifywait",
		enter: func(net *Network, n *Node) {
			net.timedEvent(respTimeout, n, pingTimeout)
		},
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pingPacket:
				net.handlePing(n, pkt)
				return remoteverifywait, nil
			case pingTimeout:
				return known, nil
			default:
				return remoteverifywait, errInvalidEvent
			}
		},
	}

	known = &nodeState{
		name:     "known",
		canQuery: true,
		enter: func(net *Network, n *Node) {
			n.queryTimeouts = 0
			n.startNextQuery(net)
			// Insert into the table and start revalidation of the last node
			// in the bucket if it is full.
			last := net.tab.add(n)
			if last != nil && last.state == known {
				// TODO: do this asynchronously
				net.transition(last, contested)
			}
		},
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pingPacket:
				net.handlePing(n, pkt)
				return known, nil
			case pongPacket:
				err := net.handleKnownPong(n, pkt)
				return known, err
			default:
				return net.handleQueryEvent(n, ev, pkt)
			}
		},
	}

	contested = &nodeState{
		name:     "contested",
		canQuery: true,
		enter: func(net *Network, n *Node) {
			net.ping(n, n.addr())
		},
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pongPacket:
				// Node is still alive.
				err := net.handleKnownPong(n, pkt)
				return known, err
			case pongTimeout:
				net.tab.deleteReplace(n)
				return unresponsive, nil
			case pingPacket:
				net.handlePing(n, pkt)
				return contested, nil
			default:
				return net.handleQueryEvent(n, ev, pkt)
			}
		},
	}

	unresponsive = &nodeState{
		name:     "unresponsive",
		canQuery: true,
		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
			switch ev {
			case pingPacket:
				net.handlePing(n, pkt)
				return known, nil
			case pongPacket:
				err := net.handleKnownPong(n, pkt)
				return known, err
			default:
				return net.handleQueryEvent(n, ev, pkt)
			}
		},
	}
}

// handle processes packets sent by n and events related to n.
func (net *Network) handle(n *Node, ev nodeEvent, pkt *ingressPacket) error {
	//fmt.Println("handle", n.addr().String(), n.state, ev)
	if pkt != nil {
		if err := net.checkPacket(n, ev, pkt); err != nil {
			//fmt.Println("check err:", err)
			return err
		}
		// Start the background expiration goroutine after the first
		// successful communication. Subsequent calls have no effect if it
		// is already running. We do this here instead of somewhere else
		// so that the search for seed nodes also considers older nodes
		// that would otherwise be removed by the expirer.
		if net.db != nil {
			net.db.ensureExpirer()
		}
	}
	if ev == pongTimeout {
		n.pingEcho = nil // clean up if pongtimeout
	}
	if n.state == nil {
		n.state = unknown //???
	}
	next, err := n.state.handle(net, n, ev, pkt)
	net.transition(n, next)
	//fmt.Println("new state:", n.state)
	return err
}

func (net *Network) checkPacket(n *Node, ev nodeEvent, pkt *ingressPacket) error {
	// Replay prevention checks.
	switch ev {
	case pingPacket, findnodeHashPacket, neighborsPacket:
		// TODO: check date is > last date seen
		// TODO: check ping version
	case pongPacket:
		if !bytes.Equal(pkt.data.(*pong).ReplyTok, n.pingEcho) {
			// fmt.Println("pong reply token mismatch")
			return fmt.Errorf("pong reply token mismatch")
		}
		n.pingEcho = nil
	}
	// Address validation.
	// TODO: Ideally we would do the following:
	//  - reject all packets with wrong address except ping.
	//  - for ping with new address, transition to verifywait but keep the
	//    previous node (with old address) around. if the new one reaches known,
	//    swap it out.
	return nil
}

func (net *Network) transition(n *Node, next *nodeState) {
	if n.state != next {
		n.state = next
		if next.enter != nil {
			next.enter(net, n)
		}
	}

	// TODO: persist/unpersist node
}

func (net *Network) timedEvent(d time.Duration, n *Node, ev nodeEvent) {
	timeout := timeoutEvent{ev, n}
	net.timeoutTimers[timeout] = time.AfterFunc(d, func() {
		select {
		case net.timeout <- timeout:
		case <-net.closed:
		}
	})
}

func (net *Network) abortTimedEvent(n *Node, ev nodeEvent) {
	timer := net.timeoutTimers[timeoutEvent{ev, n}]
	if timer != nil {
		timer.Stop()
		delete(net.timeoutTimers, timeoutEvent{ev, n})
	}
}
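
// ping/pong pairing: ping records the hash of the outgoing ping in n.pingEcho
// and arms a pongTimeout. checkPacket only accepts a pong whose ReplyTok
// equals that hash, and both handleKnownPong and the timeout path clear
// pingEcho again, so at most one ping per node is in flight.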

func (net *Network) ping(n *Node, addr *net.UDPAddr) {
	//fmt.Println("ping", n.addr().String(), n.ID.String(), n.sha.Hex())
	if n.pingEcho != nil || n.ID == net.tab.self.ID {
		//fmt.Println(" not sent")
		return
	}
	log.Trace("Pinging remote node", "node", n.ID)
	n.pingTopics = net.ticketStore.regTopicSet()
	n.pingEcho = net.conn.sendPing(n, addr, n.pingTopics)
	net.timedEvent(respTimeout, n, pongTimeout)
}

func (net *Network) handlePing(n *Node, pkt *ingressPacket) {
	log.Trace("Handling remote ping", "node", n.ID)
	ping := pkt.data.(*ping)
	n.TCP = ping.From.TCP
	t := net.topictab.getTicket(n, ping.Topics)

	pong := &pong{
		To:         makeEndpoint(n.addr(), n.TCP), // TODO: maybe use known TCP port from DB
		ReplyTok:   pkt.hash,
		Expiration: uint64(time.Now().Add(expiration).Unix()),
	}
	ticketToPong(t, pong)
	net.conn.send(n, pongPacket, pong)
}

func (net *Network) handleKnownPong(n *Node, pkt *ingressPacket) error {
	log.Trace("Handling known pong", "node", n.ID)
	net.abortTimedEvent(n, pongTimeout)
	now := mclock.Now()
	ticket, err := pongToTicket(now, n.pingTopics, n, pkt)
	if err == nil {
		// fmt.Printf("(%x) ticket: %+v\n", net.tab.self.ID[:8], pkt.data)
		net.ticketStore.addTicket(now, pkt.data.(*pong).ReplyTok, ticket)
	} else {
		log.Trace("Failed to convert pong to ticket", "err", err)
	}
	n.pingEcho = nil
	n.pingTopics = nil
	return err
}

func (net *Network) handleQueryEvent(n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
	switch ev {
	case findnodePacket:
		target := crypto.SHA3Hash(pkt.data.(*findnode).Target[:])
		results := net.tab.closest(target, bucketSize).entries
		net.conn.sendNeighbours(n, results)
		return n.state, nil
	case neighborsPacket:
		err := net.handleNeighboursPacket(n, pkt)
		return n.state, err
	case neighboursTimeout:
		if n.pendingNeighbours != nil {
			n.pendingNeighbours.reply <- nil
			n.pendingNeighbours = nil
		}
		n.queryTimeouts++
		if n.queryTimeouts > maxFindnodeFailures && n.state == known {
			return contested, errors.New("too many timeouts")
		}
		return n.state, nil

	// v5

	case findnodeHashPacket:
		results := net.tab.closest(pkt.data.(*findnodeHash).Target, bucketSize).entries
		net.conn.sendNeighbours(n, results)
		return n.state, nil
	case topicRegisterPacket:
		//fmt.Println("got topicRegisterPacket")
		regdata := pkt.data.(*topicRegister)
		pong, err := net.checkTopicRegister(regdata)
		if err != nil {
			//fmt.Println(err)
			return n.state, fmt.Errorf("bad waiting ticket: %v", err)
		}
		net.topictab.useTicket(n, pong.TicketSerial, regdata.Topics, int(regdata.Idx), pong.Expiration, pong.WaitPeriods)
		return n.state, nil
	case topicQueryPacket:
		// TODO: handle expiration
		topic := pkt.data.(*topicQuery).Topic
		results := net.topictab.getEntries(topic)
		if _, ok := net.ticketStore.tickets[topic]; ok {
			results = append(results, net.tab.self) // we're not registering in our own table but if we're advertising, return ourselves too
		}
		if len(results) > 10 {
			results = results[:10]
		}
		var hash common.Hash
		copy(hash[:], pkt.hash)
		net.conn.sendTopicNodes(n, hash, results)
		return n.state, nil
	case topicNodesPacket:
		p := pkt.data.(*topicNodes)
		if net.ticketStore.gotTopicNodes(n, p.Echo, p.Nodes) {
			n.queryTimeouts++
			if n.queryTimeouts > maxFindnodeFailures && n.state == known {
				return contested, errors.New("too many timeouts")
			}
		}
		return n.state, nil

	default:
		return n.state, errInvalidEvent
	}
}

func (net *Network) checkTopicRegister(data *topicRegister) (*pong, error) {
	var pongpkt ingressPacket
	if err := decodePacket(data.Pong, &pongpkt); err != nil {
		return nil, err
	}
	if pongpkt.ev != pongPacket {
		return nil, errors.New("is not pong packet")
	}
	if pongpkt.remoteID != net.tab.self.ID {
		return nil, errors.New("not signed by us")
	}
	// check that we previously authorised all topics
	// that the other side is trying to register.
	if rlpHash(data.Topics) != pongpkt.data.(*pong).TopicHash {
		return nil, errors.New("topic hash mismatch")
	}
	if data.Idx >= uint(len(data.Topics)) {
		return nil, errors.New("topic index out of range")
	}
	return pongpkt.data.(*pong), nil
}

func rlpHash(x interface{}) (h common.Hash) {
	hw := sha3.New256()
	rlp.Encode(hw, x)
	hw.Sum(h[:0])
	return h
}

func (net *Network) handleNeighboursPacket(n *Node, pkt *ingressPacket) error {
	if n.pendingNeighbours == nil {
		return errNoQuery
	}
	net.abortTimedEvent(n, neighboursTimeout)

	req := pkt.data.(*neighbors)
	nodes := make([]*Node, len(req.Nodes))
	for i, rn := range req.Nodes {
		nn, err := net.internNodeFromNeighbours(pkt.remoteAddr, rn)
		if err != nil {
			log.Debug(fmt.Sprintf("invalid neighbour (%v) from %x@%v: %v", rn.IP, n.ID[:8], pkt.remoteAddr, err))
			continue
		}
		nodes[i] = nn
		// Start validation of query results immediately.
		// This fills the table quickly.
		// TODO: generates way too many packets, maybe do it via queue.
		if nn.state == unknown {
			net.transition(nn, verifyinit)
		}
	}
	// TODO: don't ignore second packet
	n.pendingNeighbours.reply <- nodes
	n.pendingNeighbours = nil
	// Now that this query is done, start the next one.
	n.startNextQuery(net)
	return nil
}