github.com/codingfuture/orig-energi3@v0.8.4/p2p/discv5/net.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package discv5

import (
    "bytes"
    "crypto/ecdsa"
    "errors"
    "fmt"
    "net"
    "time"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/common/mclock"
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/p2p/netutil"
    "github.com/ethereum/go-ethereum/rlp"
    "golang.org/x/crypto/sha3"
)

var (
    errInvalidEvent = errors.New("invalid in current state")
    errNoQuery      = errors.New("no pending query")
)

const (
    autoRefreshInterval   = 1 * time.Hour
    bucketRefreshInterval = 1 * time.Minute
    seedCount             = 30
    seedMaxAge            = 5 * 24 * time.Hour
    lowPort               = 1024
)

const testTopic = "foo"

const (
    printTestImgLogs = false
)

// Network manages the table and all protocol interaction.
type Network struct {
    db          *nodeDB // database of known nodes
    conn        transport
    netrestrict *netutil.Netlist

    closed           chan struct{}          // closed when loop is done
    closeReq         chan struct{}          // 'request to close'
    refreshReq       chan []*Node           // lookups ask for refresh on this channel
    refreshResp      chan (<-chan struct{}) // ...and get the channel to block on from this one
    read             chan ingressPacket     // ingress packets arrive here
    timeout          chan timeoutEvent
    queryReq         chan *findnodeQuery // lookups submit findnode queries on this channel
    tableOpReq       chan func()
    tableOpResp      chan struct{}
    topicRegisterReq chan topicRegisterReq
    topicSearchReq   chan topicSearchReq

    // State of the main loop.
    tab           *Table
    topictab      *topicTable
    ticketStore   *ticketStore
    nursery       []*Node
    nodes         map[NodeID]*Node // tracks active nodes with state != known
    timeoutTimers map[timeoutEvent]*time.Timer

    // Revalidation queues.
    // Nodes put on these queues will be pinged eventually.
    slowRevalidateQueue []*Node
    fastRevalidateQueue []*Node

    // Buffers for state transition.
    sendBuf []*ingressPacket
}
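
// All of the mutable state above is confined to the loop goroutine. Other
// goroutines interact with it exclusively through the request channels,
// always pairing the request with a <-net.closed case so callers cannot
// block after shutdown. A sketch of that pattern, as used by reqTableOp
// further down in this file:
//
//     select {
//     case net.tableOpReq <- f: // hand the closure to loop()
//         <-net.tableOpResp // wait until loop() has run it
//     case <-net.closed: // give up if the network is shut down
//     }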

// transport is implemented by the UDP transport.
// it is an interface so we can test without opening lots of UDP
// sockets and without generating a private key.
type transport interface {
    sendPing(remote *Node, remoteAddr *net.UDPAddr, topics []Topic) (hash []byte)
    sendNeighbours(remote *Node, nodes []*Node)
    sendFindnodeHash(remote *Node, target common.Hash)
    sendTopicRegister(remote *Node, topics []Topic, topicIdx int, pong []byte)
    sendTopicNodes(remote *Node, queryHash common.Hash, nodes []*Node)

    send(remote *Node, ptype nodeEvent, p interface{}) (hash []byte)

    localAddr() *net.UDPAddr
    Close()
}

type findnodeQuery struct {
    remote   *Node
    target   common.Hash
    reply    chan<- []*Node
    nresults int // counter for received nodes
}

type topicRegisterReq struct {
    add   bool
    topic Topic
}

type topicSearchReq struct {
    topic  Topic
    found  chan<- *Node
    lookup chan<- bool
    delay  time.Duration
}

type topicSearchResult struct {
    target lookupInfo
    nodes  []*Node
}

type timeoutEvent struct {
    ev   nodeEvent
    node *Node
}

func newNetwork(conn transport, ourPubkey ecdsa.PublicKey, dbPath string, netrestrict *netutil.Netlist) (*Network, error) {
    ourID := PubkeyID(&ourPubkey)

    var db *nodeDB
    if dbPath != "<no database>" {
        var err error
        if db, err = newNodeDB(dbPath, Version, ourID); err != nil {
            return nil, err
        }
    }

    tab := newTable(ourID, conn.localAddr())
    net := &Network{
        db:               db,
        conn:             conn,
        netrestrict:      netrestrict,
        tab:              tab,
        topictab:         newTopicTable(db, tab.self),
        ticketStore:      newTicketStore(),
        refreshReq:       make(chan []*Node),
        refreshResp:      make(chan (<-chan struct{})),
        closed:           make(chan struct{}),
        closeReq:         make(chan struct{}),
        read:             make(chan ingressPacket, 100),
        timeout:          make(chan timeoutEvent),
        timeoutTimers:    make(map[timeoutEvent]*time.Timer),
        tableOpReq:       make(chan func()),
        tableOpResp:      make(chan struct{}),
        queryReq:         make(chan *findnodeQuery),
        topicRegisterReq: make(chan topicRegisterReq),
        topicSearchReq:   make(chan topicSearchReq),
        nodes:            make(map[NodeID]*Node),
    }
    go net.loop()
    return net, nil
}

// Close terminates the network listener and flushes the node database.
func (net *Network) Close() {
    net.conn.Close()
    select {
    case <-net.closed:
    case net.closeReq <- struct{}{}:
        <-net.closed
    }
}

// Self returns the local node.
// The returned node should not be modified by the caller.
func (net *Network) Self() *Node {
    return net.tab.self
}

// ReadRandomNodes fills the given slice with random nodes from the
// table. It will not write the same node more than once. The nodes in
// the slice are copies and can be modified by the caller.
func (net *Network) ReadRandomNodes(buf []*Node) (n int) {
    net.reqTableOp(func() { n = net.tab.readRandomNodes(buf) })
    return n
}

// SetFallbackNodes sets the initial points of contact. These nodes
// are used to connect to the network if the table is empty and there
// are no known nodes in the database.
func (net *Network) SetFallbackNodes(nodes []*Node) error {
    nursery := make([]*Node, 0, len(nodes))
    for _, n := range nodes {
        if err := n.validateComplete(); err != nil {
            return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err)
        }
        // Recompute cpy.sha because the node might not have been
        // created by NewNode or ParseNode.
        cpy := *n
        cpy.sha = crypto.Keccak256Hash(n.ID[:])
        nursery = append(nursery, &cpy)
    }
    net.reqRefresh(nursery)
    return nil
}

// Resolve searches for a specific node with the given ID.
// It returns nil if the node could not be found.
func (net *Network) Resolve(targetID NodeID) *Node {
    result := net.lookup(crypto.Keccak256Hash(targetID[:]), true)
    for _, n := range result {
        if n.ID == targetID {
            return n
        }
    }
    return nil
}

// Lookup performs a network search for nodes close
// to the given target. It approaches the target by querying
// nodes that are closer to it on each iteration.
// The given target does not need to be an actual node
// identifier.
//
// The local node may be included in the result.
func (net *Network) Lookup(targetID NodeID) []*Node {
    return net.lookup(crypto.Keccak256Hash(targetID[:]), false)
}

func (net *Network) lookup(target common.Hash, stopOnMatch bool) []*Node {
    var (
        asked          = make(map[NodeID]bool)
        seen           = make(map[NodeID]bool)
        reply          = make(chan []*Node, alpha)
        result         = nodesByDistance{target: target}
        pendingQueries = 0
    )
    // Get initial answers from the local node.
    result.push(net.tab.self, bucketSize)
    for {
        // Ask the α closest nodes that we haven't asked yet.
        for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
            n := result.entries[i]
            if !asked[n.ID] {
                asked[n.ID] = true
                pendingQueries++
                net.reqQueryFindnode(n, target, reply)
            }
        }
        if pendingQueries == 0 {
            // We have asked all closest nodes, stop the search.
            break
        }
        // Wait for the next reply.
        select {
        case nodes := <-reply:
            for _, n := range nodes {
                if n != nil && !seen[n.ID] {
                    seen[n.ID] = true
                    result.push(n, bucketSize)
                    if stopOnMatch && n.sha == target {
                        return result.entries
                    }
                }
            }
            pendingQueries--
        case <-time.After(respTimeout):
            // forget all pending requests, start new ones
            pendingQueries = 0
            reply = make(chan []*Node, alpha)
        }
    }
    return result.entries
}

func (net *Network) RegisterTopic(topic Topic, stop <-chan struct{}) {
    select {
    case net.topicRegisterReq <- topicRegisterReq{true, topic}:
    case <-net.closed:
        return
    }
    select {
    case <-net.closed:
    case <-stop:
        select {
        case net.topicRegisterReq <- topicRegisterReq{false, topic}:
        case <-net.closed:
        }
    }
}

func (net *Network) SearchTopic(topic Topic, setPeriod <-chan time.Duration, found chan<- *Node, lookup chan<- bool) {
    for {
        select {
        case <-net.closed:
            return
        case delay, ok := <-setPeriod:
            select {
            case net.topicSearchReq <- topicSearchReq{topic: topic, found: found, lookup: lookup, delay: delay}:
            case <-net.closed:
                return
            }
            if !ok {
                return
            }
        }
    }
}

func (net *Network) reqRefresh(nursery []*Node) <-chan struct{} {
    select {
    case net.refreshReq <- nursery:
        return <-net.refreshResp
    case <-net.closed:
        return net.closed
    }
}

func (net *Network) reqQueryFindnode(n *Node, target common.Hash, reply chan []*Node) bool {
    q := &findnodeQuery{remote: n, target: target, reply: reply}
    select {
    case net.queryReq <- q:
        return true
    case <-net.closed:
        return false
    }
}
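
// SearchTopic above is driven entirely through channels: the search starts
// once a period is received on setPeriod, sending a zero duration (or
// closing setPeriod) stops it, discovered nodes arrive on found, and lookup
// reports radius convergence per lookup. A caller-side sketch; the variable
// names, buffer sizes and topic string are illustrative only:
//
//     setPeriod := make(chan time.Duration, 1)
//     found := make(chan *Node, 16)
//     lookup := make(chan bool, 16)
//     setPeriod <- time.Minute // search roughly once a minute
//     go ntab.SearchTopic("mytopic", setPeriod, found, lookup)
//     // ...consume found; send 0 or close setPeriod to stop.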

func (net *Network) reqReadPacket(pkt ingressPacket) {
    select {
    case net.read <- pkt:
    case <-net.closed:
    }
}

func (net *Network) reqTableOp(f func()) (called bool) {
    select {
    case net.tableOpReq <- f:
        <-net.tableOpResp
        return true
    case <-net.closed:
        return false
    }
}

// TODO: external address handling.

type topicSearchInfo struct {
    lookupChn chan<- bool
    period    time.Duration
}

const maxSearchCount = 5

func (net *Network) loop() {
    var (
        refreshTimer       = time.NewTicker(autoRefreshInterval)
        bucketRefreshTimer = time.NewTimer(bucketRefreshInterval)
        refreshDone        chan struct{} // closed when the 'refresh' lookup has ended
    )

    // Tracking the next ticket to register.
    var (
        nextTicket        *ticketRef
        nextRegisterTimer *time.Timer
        nextRegisterTime  <-chan time.Time
    )
    defer func() {
        if nextRegisterTimer != nil {
            nextRegisterTimer.Stop()
        }
    }()
    resetNextTicket := func() {
        ticket, timeout := net.ticketStore.nextFilteredTicket()
        if nextTicket != ticket {
            nextTicket = ticket
            if nextRegisterTimer != nil {
                nextRegisterTimer.Stop()
                nextRegisterTime = nil
            }
            if ticket != nil {
                nextRegisterTimer = time.NewTimer(timeout)
                nextRegisterTime = nextRegisterTimer.C
            }
        }
    }

    // Tracking registration and search lookups.
    var (
        topicRegisterLookupTarget lookupInfo
        topicRegisterLookupDone   chan []*Node
        topicRegisterLookupTick   = time.NewTimer(0)
        searchReqWhenRefreshDone  []topicSearchReq
        searchInfo                = make(map[Topic]topicSearchInfo)
        activeSearchCount         int
    )
    topicSearchLookupDone := make(chan topicSearchResult, 100)
    topicSearch := make(chan Topic, 100)
    <-topicRegisterLookupTick.C

    statsDump := time.NewTicker(10 * time.Second)

loop:
    for {
        resetNextTicket()

        select {
        case <-net.closeReq:
            log.Trace("<-net.closeReq")
            break loop

        // Ingress packet handling.
        case pkt := <-net.read:
            //fmt.Println("read", pkt.ev)
            log.Trace("<-net.read")
            n := net.internNode(&pkt)
            prestate := n.state
            status := "ok"
            if err := net.handle(n, pkt.ev, &pkt); err != nil {
                status = err.Error()
            }
            log.Trace("", "msg", log.Lazy{Fn: func() string {
                return fmt.Sprintf("<<< (%d) %v from %x@%v: %v -> %v (%v)",
                    net.tab.count, pkt.ev, pkt.remoteID[:8], pkt.remoteAddr, prestate, n.state, status)
            }})
            // TODO: persist state if n.state goes >= known, delete if it goes <= known

        // State transition timeouts.
        case timeout := <-net.timeout:
            log.Trace("<-net.timeout")
            if net.timeoutTimers[timeout] == nil {
                // Stale timer (was aborted).
                continue
            }
            delete(net.timeoutTimers, timeout)
            prestate := timeout.node.state
            status := "ok"
            if err := net.handle(timeout.node, timeout.ev, nil); err != nil {
                status = err.Error()
            }
            log.Trace("", "msg", log.Lazy{Fn: func() string {
                return fmt.Sprintf("--- (%d) %v for %x@%v: %v -> %v (%v)",
                    net.tab.count, timeout.ev, timeout.node.ID[:8], timeout.node.addr(), prestate, timeout.node.state, status)
            }})

        // Querying.
        case q := <-net.queryReq:
            log.Trace("<-net.queryReq")
            if !q.start(net) {
                q.remote.deferQuery(q)
            }

        // Interacting with the table.
        case f := <-net.tableOpReq:
            log.Trace("<-net.tableOpReq")
            f()
            net.tableOpResp <- struct{}{}

        // Topic registration stuff.
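        // The cases below implement ticket-based topic registration: a
        // register request adds the topic to the ticket store, lookups
        // driven by topicRegisterLookupTick collect tickets from nodes
        // near the topic hash, and nextRegisterTime fires when the best
        // pending ticket becomes usable and is sent via sendTopicRegister.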
        case req := <-net.topicRegisterReq:
            log.Trace("<-net.topicRegisterReq")
            if !req.add {
                net.ticketStore.removeRegisterTopic(req.topic)
                continue
            }
            net.ticketStore.addTopic(req.topic, true)
            // If we're currently waiting idle (nothing to look up), give the ticket store a
            // chance to start it sooner. This should speed up convergence of the radius
            // determination for new topics.
            // if topicRegisterLookupDone == nil {
            if topicRegisterLookupTarget.target == (common.Hash{}) {
                log.Trace("topicRegisterLookupTarget == null")
                if topicRegisterLookupTick.Stop() {
                    <-topicRegisterLookupTick.C
                }
                target, delay := net.ticketStore.nextRegisterLookup()
                topicRegisterLookupTarget = target
                topicRegisterLookupTick.Reset(delay)
            }

        case nodes := <-topicRegisterLookupDone:
            log.Trace("<-topicRegisterLookupDone")
            net.ticketStore.registerLookupDone(topicRegisterLookupTarget, nodes, func(n *Node) []byte {
                net.ping(n, n.addr())
                return n.pingEcho
            })
            target, delay := net.ticketStore.nextRegisterLookup()
            topicRegisterLookupTarget = target
            topicRegisterLookupTick.Reset(delay)
            topicRegisterLookupDone = nil

        case <-topicRegisterLookupTick.C:
            log.Trace("<-topicRegisterLookupTick")
            if (topicRegisterLookupTarget.target == common.Hash{}) {
                target, delay := net.ticketStore.nextRegisterLookup()
                topicRegisterLookupTarget = target
                topicRegisterLookupTick.Reset(delay)
                topicRegisterLookupDone = nil
            } else {
                topicRegisterLookupDone = make(chan []*Node)
                target := topicRegisterLookupTarget.target
                go func() { topicRegisterLookupDone <- net.lookup(target, false) }()
            }

        case <-nextRegisterTime:
            log.Trace("<-nextRegisterTime")
            net.ticketStore.ticketRegistered(*nextTicket)
            //fmt.Println("sendTopicRegister", nextTicket.t.node.addr().String(), nextTicket.t.topics, nextTicket.idx, nextTicket.t.pong)
            net.conn.sendTopicRegister(nextTicket.t.node, nextTicket.t.topics, nextTicket.idx, nextTicket.t.pong)

        case req := <-net.topicSearchReq:
            if refreshDone == nil {
                log.Trace("<-net.topicSearchReq")
                info, ok := searchInfo[req.topic]
                if ok {
                    if req.delay == time.Duration(0) {
                        delete(searchInfo, req.topic)
                        net.ticketStore.removeSearchTopic(req.topic)
                    } else {
                        info.period = req.delay
                        searchInfo[req.topic] = info
                    }
                    continue
                }
                if req.delay != time.Duration(0) {
                    var info topicSearchInfo
                    info.period = req.delay
                    info.lookupChn = req.lookup
                    searchInfo[req.topic] = info
                    net.ticketStore.addSearchTopic(req.topic, req.found)
                    topicSearch <- req.topic
                }
            } else {
                searchReqWhenRefreshDone = append(searchReqWhenRefreshDone, req)
            }

        case topic := <-topicSearch:
            if activeSearchCount < maxSearchCount {
                activeSearchCount++
                target := net.ticketStore.nextSearchLookup(topic)
                go func() {
                    nodes := net.lookup(target.target, false)
                    topicSearchLookupDone <- topicSearchResult{target: target, nodes: nodes}
                }()
            }
            period := searchInfo[topic].period
            if period != time.Duration(0) {
                go func() {
                    time.Sleep(period)
                    topicSearch <- topic
                }()
            }

        case res := <-topicSearchLookupDone:
            activeSearchCount--
            if lookupChn := searchInfo[res.target.topic].lookupChn; lookupChn != nil {
                lookupChn <- net.ticketStore.radius[res.target.topic].converged
            }
            net.ticketStore.searchLookupDone(res.target, res.nodes, func(n *Node, topic Topic) []byte {
                if n.state != nil && n.state.canQuery {
                    return net.conn.send(n, topicQueryPacket, topicQuery{Topic: topic}) // TODO: set expiration
                }
                if n.state == unknown {
                    net.ping(n, n.addr())
                }
                return nil
            })

        case <-statsDump.C:
            log.Trace("<-statsDump.C")
            /*r, ok := net.ticketStore.radius[testTopic]
            if !ok {
                fmt.Printf("(%x) no radius @ %v\n", net.tab.self.ID[:8], time.Now())
            } else {
                topics := len(net.ticketStore.tickets)
                tickets := len(net.ticketStore.nodes)
                rad := r.radius / (maxRadius/10000+1)
                fmt.Printf("(%x) topics:%d radius:%d tickets:%d @ %v\n", net.tab.self.ID[:8], topics, rad, tickets, time.Now())
            }*/

            tm := mclock.Now()
            for topic, r := range net.ticketStore.radius {
                if printTestImgLogs {
                    rad := r.radius / (maxRadius/1000000 + 1)
                    minrad := r.minRadius / (maxRadius/1000000 + 1)
                    fmt.Printf("*R %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha[:8], rad)
                    fmt.Printf("*MR %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha[:8], minrad)
                }
            }
            for topic, t := range net.topictab.topics {
                wp := t.wcl.nextWaitPeriod(tm)
                if printTestImgLogs {
                    fmt.Printf("*W %d %v %016x %d\n", tm/1000000, topic, net.tab.self.sha[:8], wp/1000000)
                }
            }

        // Periodic / lookup-initiated bucket refresh.
        case <-refreshTimer.C:
            log.Trace("<-refreshTimer.C")
            // TODO: ideally we would start the refresh timer after
            // fallback nodes have been set for the first time.
            if refreshDone == nil {
                refreshDone = make(chan struct{})
                net.refresh(refreshDone)
            }
        case <-bucketRefreshTimer.C:
            target := net.tab.chooseBucketRefreshTarget()
            go func() {
                net.lookup(target, false)
                bucketRefreshTimer.Reset(bucketRefreshInterval)
            }()
        case newNursery := <-net.refreshReq:
            log.Trace("<-net.refreshReq")
            if newNursery != nil {
                net.nursery = newNursery
            }
            if refreshDone == nil {
                refreshDone = make(chan struct{})
                net.refresh(refreshDone)
            }
            net.refreshResp <- refreshDone
        case <-refreshDone:
            log.Trace("<-net.refreshDone", "table size", net.tab.count)
            if net.tab.count != 0 {
                refreshDone = nil
                list := searchReqWhenRefreshDone
                searchReqWhenRefreshDone = nil
                go func() {
                    for _, req := range list {
                        net.topicSearchReq <- req
                    }
                }()
            } else {
                refreshDone = make(chan struct{})
                net.refresh(refreshDone)
            }
        }
    }
    log.Trace("loop stopped")

    log.Debug(fmt.Sprintf("shutting down"))
    if net.conn != nil {
        net.conn.Close()
    }
    if refreshDone != nil {
        // TODO: wait for pending refresh.
        //<-refreshResults
    }
    // Cancel all pending timeouts.
    for _, timer := range net.timeoutTimers {
        timer.Stop()
    }
    if net.db != nil {
        net.db.close()
    }
    close(net.closed)
}

// Everything below runs on the Network.loop goroutine
// and can modify Node, Table and Network at any time without locking.
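//
// refresh (below) seeds the table from the node database (up to seedCount
// entries no older than seedMaxAge), falling back to the nursery set by
// SetFallbackNodes; with no seeds at all it simply retries ten seconds
// later, otherwise it force-adds the seeds and starts a self-lookup to
// fill the buckets.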

func (net *Network) refresh(done chan<- struct{}) {
    var seeds []*Node
    if net.db != nil {
        seeds = net.db.querySeeds(seedCount, seedMaxAge)
    }
    if len(seeds) == 0 {
        seeds = net.nursery
    }
    if len(seeds) == 0 {
        log.Trace("no seed nodes found")
        time.AfterFunc(time.Second*10, func() { close(done) })
        return
    }
    for _, n := range seeds {
        log.Debug("", "msg", log.Lazy{Fn: func() string {
            var age string
            if net.db != nil {
                age = time.Since(net.db.lastPong(n.ID)).String()
            } else {
                age = "unknown"
            }
            return fmt.Sprintf("seed node (age %s): %v", age, n)
        }})
        n = net.internNodeFromDB(n)
        if n.state == unknown {
            net.transition(n, verifyinit)
        }
        // Force-add the seed node so Lookup does something.
        // It will be deleted again if verification fails.
        net.tab.add(n)
    }
    // Start self lookup to fill up the buckets.
    go func() {
        net.Lookup(net.tab.self.ID)
        close(done)
    }()
}

// Node Interning.

func (net *Network) internNode(pkt *ingressPacket) *Node {
    if n := net.nodes[pkt.remoteID]; n != nil {
        n.IP = pkt.remoteAddr.IP
        n.UDP = uint16(pkt.remoteAddr.Port)
        n.TCP = uint16(pkt.remoteAddr.Port)
        return n
    }
    n := NewNode(pkt.remoteID, pkt.remoteAddr.IP, uint16(pkt.remoteAddr.Port), uint16(pkt.remoteAddr.Port))
    n.state = unknown
    net.nodes[pkt.remoteID] = n
    return n
}

func (net *Network) internNodeFromDB(dbn *Node) *Node {
    if n := net.nodes[dbn.ID]; n != nil {
        return n
    }
    n := NewNode(dbn.ID, dbn.IP, dbn.UDP, dbn.TCP)
    n.state = unknown
    net.nodes[n.ID] = n
    return n
}

func (net *Network) internNodeFromNeighbours(sender *net.UDPAddr, rn rpcNode) (n *Node, err error) {
    if rn.ID == net.tab.self.ID {
        return nil, errors.New("is self")
    }
    if rn.UDP <= lowPort {
        return nil, errors.New("low port")
    }
    n = net.nodes[rn.ID]
    if n == nil {
        // We haven't seen this node before.
        n, err = nodeFromRPC(sender, rn)
        if net.netrestrict != nil && !net.netrestrict.Contains(n.IP) {
            return n, errors.New("not contained in netrestrict whitelist")
        }
        if err == nil {
            n.state = unknown
            net.nodes[n.ID] = n
        }
        return n, err
    }
    if !n.IP.Equal(rn.IP) || n.UDP != rn.UDP || n.TCP != rn.TCP {
        if n.state == known {
            // reject address change if node is known by us
            err = fmt.Errorf("metadata mismatch: got %v, want %v", rn, n)
        } else {
            // accept otherwise; this will be handled nicer with signed ENRs
            n.IP = rn.IP
            n.UDP = rn.UDP
            n.TCP = rn.TCP
        }
    }
    return n, err
}
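
// The per-node state machine defined in init() further below, roughly:
//
//     unknown --ping received--> verifywait --pong received--> known
//     unknown --we ping--------> verifyinit --pong received--> remoteverifywait --ping timeout--> known
//     known (last node of a full bucket) --> contested --pong received--> known
//                                                      --pong timeout---> unresponsive
//
// Only states with canQuery set (known, contested, unresponsive) accept
// findnode/topic queries; for other states the query is deferred via
// deferQuery and sent once the node reaches a queryable state.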

// nodeNetGuts is embedded in Node and contains fields.
type nodeNetGuts struct {
    // This is a cached copy of sha3(ID) which is used for node
    // distance calculations. This is part of Node in order to make it
    // possible to write tests that need a node at a certain distance.
    // In those tests, the content of sha will not actually correspond
    // with ID.
    sha common.Hash

    // State machine fields. Access to these fields
    // is restricted to the Network.loop goroutine.
    state             *nodeState
    pingEcho          []byte           // hash of last ping sent by us
    pingTopics        []Topic          // topic set sent by us in last ping
    deferredQueries   []*findnodeQuery // queries that can't be sent yet
    pendingNeighbours *findnodeQuery   // current query, waiting for reply
    queryTimeouts     int
}

func (n *nodeNetGuts) deferQuery(q *findnodeQuery) {
    n.deferredQueries = append(n.deferredQueries, q)
}

func (n *nodeNetGuts) startNextQuery(net *Network) {
    if len(n.deferredQueries) == 0 {
        return
    }
    nextq := n.deferredQueries[0]
    if nextq.start(net) {
        n.deferredQueries = append(n.deferredQueries[:0], n.deferredQueries[1:]...)
    }
}

func (q *findnodeQuery) start(net *Network) bool {
    // Satisfy queries against the local node directly.
    if q.remote == net.tab.self {
        closest := net.tab.closest(q.target, bucketSize)
        q.reply <- closest.entries
        return true
    }
    if q.remote.state.canQuery && q.remote.pendingNeighbours == nil {
        net.conn.sendFindnodeHash(q.remote, q.target)
        net.timedEvent(respTimeout, q.remote, neighboursTimeout)
        q.remote.pendingNeighbours = q
        return true
    }
    // If the node is not known yet, it won't accept queries.
    // Initiate the transition to known.
    // The request will be sent later when the node reaches known state.
    if q.remote.state == unknown {
        net.transition(q.remote, verifyinit)
    }
    return false
}

// Node Events (the input to the state machine).

type nodeEvent uint

//go:generate stringer -type=nodeEvent

const (

    // Packet type events.
    // These correspond to packet types in the UDP protocol.
    pingPacket = iota + 1
    pongPacket
    findnodePacket
    neighborsPacket
    findnodeHashPacket
    topicRegisterPacket
    topicQueryPacket
    topicNodesPacket

    // Non-packet events.
    // Event values in this category are allocated outside
    // the packet type range (packet types are encoded as a single byte).
    pongTimeout nodeEvent = iota + 256
    pingTimeout
    neighboursTimeout
)

// Node State Machine.

type nodeState struct {
    name     string
    handle   func(*Network, *Node, nodeEvent, *ingressPacket) (next *nodeState, err error)
    enter    func(*Network, *Node)
    canQuery bool
}

func (s *nodeState) String() string {
    return s.name
}

var (
    unknown          *nodeState
    verifyinit       *nodeState
    verifywait       *nodeState
    remoteverifywait *nodeState
    known            *nodeState
    contested        *nodeState
    unresponsive     *nodeState
)

func init() {
    unknown = &nodeState{
        name: "unknown",
        enter: func(net *Network, n *Node) {
            net.tab.delete(n)
            n.pingEcho = nil
            // Abort active queries.
            for _, q := range n.deferredQueries {
                q.reply <- nil
            }
            n.deferredQueries = nil
            if n.pendingNeighbours != nil {
                n.pendingNeighbours.reply <- nil
                n.pendingNeighbours = nil
            }
            n.queryTimeouts = 0
        },
        handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
            switch ev {
            case pingPacket:
                net.handlePing(n, pkt)
                net.ping(n, pkt.remoteAddr)
                return verifywait, nil
            default:
                return unknown, errInvalidEvent
            }
        },
    }

    verifyinit = &nodeState{
        name: "verifyinit",
        enter: func(net *Network, n *Node) {
            net.ping(n, n.addr())
        },
        handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
            switch ev {
            case pingPacket:
                net.handlePing(n, pkt)
                return verifywait, nil
            case pongPacket:
                err := net.handleKnownPong(n, pkt)
                return remoteverifywait, err
            case pongTimeout:
                return unknown, nil
            default:
                return verifyinit, errInvalidEvent
            }
        },
    }

    verifywait = &nodeState{
        name: "verifywait",
        handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
            switch ev {
            case pingPacket:
                net.handlePing(n, pkt)
                return verifywait, nil
            case pongPacket:
                err := net.handleKnownPong(n, pkt)
                return known, err
            case pongTimeout:
                return unknown, nil
            default:
                return verifywait, errInvalidEvent
            }
        },
    }

    remoteverifywait = &nodeState{
        name: "remoteverifywait",
        enter: func(net *Network, n *Node) {
            net.timedEvent(respTimeout, n, pingTimeout)
        },
        handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
            switch ev {
            case pingPacket:
                net.handlePing(n, pkt)
                return remoteverifywait, nil
            case pingTimeout:
                return known, nil
            default:
                return remoteverifywait, errInvalidEvent
            }
        },
    }

    known = &nodeState{
        name:     "known",
        canQuery: true,
        enter: func(net *Network, n *Node) {
            n.queryTimeouts = 0
            n.startNextQuery(net)
            // Insert into the table and start revalidation of the last node
            // in the bucket if it is full.
            last := net.tab.add(n)
            if last != nil && last.state == known {
                // TODO: do this asynchronously
                net.transition(last, contested)
            }
        },
        handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
            switch ev {
            case pingPacket:
                net.handlePing(n, pkt)
                return known, nil
            case pongPacket:
                err := net.handleKnownPong(n, pkt)
                return known, err
            default:
                return net.handleQueryEvent(n, ev, pkt)
            }
        },
    }

    contested = &nodeState{
        name:     "contested",
        canQuery: true,
        enter: func(net *Network, n *Node) {
            net.ping(n, n.addr())
        },
        handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
            switch ev {
            case pongPacket:
                // Node is still alive.
                err := net.handleKnownPong(n, pkt)
                return known, err
            case pongTimeout:
                net.tab.deleteReplace(n)
                return unresponsive, nil
            case pingPacket:
                net.handlePing(n, pkt)
                return contested, nil
            default:
                return net.handleQueryEvent(n, ev, pkt)
            }
        },
    }

    unresponsive = &nodeState{
        name:     "unresponsive",
        canQuery: true,
        handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
            switch ev {
            case pingPacket:
                net.handlePing(n, pkt)
                return known, nil
            case pongPacket:
                err := net.handleKnownPong(n, pkt)
                return known, err
            default:
                return net.handleQueryEvent(n, ev, pkt)
            }
        },
    }
}

// handle processes packets sent by n and events related to n.
func (net *Network) handle(n *Node, ev nodeEvent, pkt *ingressPacket) error {
    //fmt.Println("handle", n.addr().String(), n.state, ev)
    if pkt != nil {
        if err := net.checkPacket(n, ev, pkt); err != nil {
            //fmt.Println("check err:", err)
            return err
        }
        // Start the background expiration goroutine after the first
        // successful communication. Subsequent calls have no effect if it
        // is already running. We do this here instead of somewhere else
        // so that the search for seed nodes also considers older nodes
        // that would otherwise be removed by the expirer.
        if net.db != nil {
            net.db.ensureExpirer()
        }
    }
    if n.state == nil {
        n.state = unknown //???
    }
    next, err := n.state.handle(net, n, ev, pkt)
    net.transition(n, next)
    //fmt.Println("new state:", n.state)
    return err
}

func (net *Network) checkPacket(n *Node, ev nodeEvent, pkt *ingressPacket) error {
    // Replay prevention checks.
    switch ev {
    case pingPacket, findnodeHashPacket, neighborsPacket:
        // TODO: check date is > last date seen
        // TODO: check ping version
    case pongPacket:
        if !bytes.Equal(pkt.data.(*pong).ReplyTok, n.pingEcho) {
            // fmt.Println("pong reply token mismatch")
            return fmt.Errorf("pong reply token mismatch")
        }
        n.pingEcho = nil
    }
    // Address validation.
    // TODO: Ideally we would do the following:
    // - reject all packets with wrong address except ping.
    // - for ping with new address, transition to verifywait but keep the
    //   previous node (with old address) around. if the new one reaches known,
    //   swap it out.
    return nil
}

func (net *Network) transition(n *Node, next *nodeState) {
    if n.state != next {
        n.state = next
        if next.enter != nil {
            next.enter(net, n)
        }
    }

    // TODO: persist/unpersist node
}

func (net *Network) timedEvent(d time.Duration, n *Node, ev nodeEvent) {
    timeout := timeoutEvent{ev, n}
    net.timeoutTimers[timeout] = time.AfterFunc(d, func() {
        select {
        case net.timeout <- timeout:
        case <-net.closed:
        }
    })
}

func (net *Network) abortTimedEvent(n *Node, ev nodeEvent) {
    timer := net.timeoutTimers[timeoutEvent{ev, n}]
    if timer != nil {
        timer.Stop()
        delete(net.timeoutTimers, timeoutEvent{ev, n})
    }
}

func (net *Network) ping(n *Node, addr *net.UDPAddr) {
    //fmt.Println("ping", n.addr().String(), n.ID.String(), n.sha.Hex())
    if n.pingEcho != nil || n.ID == net.tab.self.ID {
        //fmt.Println(" not sent")
        return
    }
    log.Trace("Pinging remote node", "node", n.ID)
    n.pingTopics = net.ticketStore.regTopicSet()
    n.pingEcho = net.conn.sendPing(n, addr, n.pingTopics)
    net.timedEvent(respTimeout, n, pongTimeout)
}

func (net *Network) handlePing(n *Node, pkt *ingressPacket) {
    log.Trace("Handling remote ping", "node", n.ID)
    ping := pkt.data.(*ping)
    n.TCP = ping.From.TCP
    t := net.topictab.getTicket(n, ping.Topics)

    pong := &pong{
        To:         makeEndpoint(n.addr(), n.TCP), // TODO: maybe use known TCP port from DB
        ReplyTok:   pkt.hash,
        Expiration: uint64(time.Now().Add(expiration).Unix()),
    }
    ticketToPong(t, pong)
    net.conn.send(n, pongPacket, pong)
}

func (net *Network) handleKnownPong(n *Node, pkt *ingressPacket) error {
    log.Trace("Handling known pong", "node", n.ID)
    net.abortTimedEvent(n, pongTimeout)
    now := mclock.Now()
    ticket, err := pongToTicket(now, n.pingTopics, n, pkt)
    if err == nil {
        // fmt.Printf("(%x) ticket: %+v\n", net.tab.self.ID[:8], pkt.data)
        net.ticketStore.addTicket(now, pkt.data.(*pong).ReplyTok, ticket)
    } else {
        log.Trace("Failed to convert pong to ticket", "err", err)
    }
    n.pingEcho = nil
    n.pingTopics = nil
    return err
}

func (net *Network) handleQueryEvent(n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
    switch ev {
    case findnodePacket:
        target := crypto.Keccak256Hash(pkt.data.(*findnode).Target[:])
        results := net.tab.closest(target, bucketSize).entries
        net.conn.sendNeighbours(n, results)
        return n.state, nil
    case neighborsPacket:
        err := net.handleNeighboursPacket(n, pkt)
        return n.state, err
    case neighboursTimeout:
        if n.pendingNeighbours != nil {
            n.pendingNeighbours.reply <- nil
            n.pendingNeighbours = nil
        }
        n.queryTimeouts++
        if n.queryTimeouts > maxFindnodeFailures && n.state == known {
            return contested, errors.New("too many timeouts")
        }
        return n.state, nil

    // v5

    case findnodeHashPacket:
        results := net.tab.closest(pkt.data.(*findnodeHash).Target, bucketSize).entries
        net.conn.sendNeighbours(n, results)
        return n.state, nil
    case topicRegisterPacket:
        //fmt.Println("got topicRegisterPacket")
        regdata := pkt.data.(*topicRegister)
        pong, err := net.checkTopicRegister(regdata)
        if err != nil {
            //fmt.Println(err)
            return n.state, fmt.Errorf("bad waiting ticket: %v", err)
        }
        net.topictab.useTicket(n, pong.TicketSerial, regdata.Topics, int(regdata.Idx), pong.Expiration, pong.WaitPeriods)
        return n.state, nil
    case topicQueryPacket:
        // TODO: handle expiration
        topic := pkt.data.(*topicQuery).Topic
        results := net.topictab.getEntries(topic)
        if _, ok := net.ticketStore.tickets[topic]; ok {
            results = append(results, net.tab.self) // we're not registering in our own table but if we're advertising, return ourselves too
        }
        if len(results) > 10 {
            results = results[:10]
        }
        var hash common.Hash
        copy(hash[:], pkt.hash)
        net.conn.sendTopicNodes(n, hash, results)
        return n.state, nil
    case topicNodesPacket:
        p := pkt.data.(*topicNodes)
        if net.ticketStore.gotTopicNodes(n, p.Echo, p.Nodes) {
            n.queryTimeouts++
            if n.queryTimeouts > maxFindnodeFailures && n.state == known {
                return contested, errors.New("too many timeouts")
            }
        }
        return n.state, nil

    default:
        return n.state, errInvalidEvent
    }
}

func (net *Network) checkTopicRegister(data *topicRegister) (*pong, error) {
    var pongpkt ingressPacket
    if err := decodePacket(data.Pong, &pongpkt); err != nil {
        return nil, err
    }
    if pongpkt.ev != pongPacket {
        return nil, errors.New("is not pong packet")
    }
    if pongpkt.remoteID != net.tab.self.ID {
        return nil, errors.New("not signed by us")
    }
    // check that we previously authorised all topics
    // that the other side is trying to register.
    if rlpHash(data.Topics) != pongpkt.data.(*pong).TopicHash {
        return nil, errors.New("topic hash mismatch")
    }
    if data.Idx >= uint(len(data.Topics)) {
        return nil, errors.New("topic index out of range")
    }
    return pongpkt.data.(*pong), nil
}

func rlpHash(x interface{}) (h common.Hash) {
    hw := sha3.NewLegacyKeccak256()
    rlp.Encode(hw, x)
    hw.Sum(h[:0])
    return h
}

func (net *Network) handleNeighboursPacket(n *Node, pkt *ingressPacket) error {
    if n.pendingNeighbours == nil {
        return errNoQuery
    }
    net.abortTimedEvent(n, neighboursTimeout)

    req := pkt.data.(*neighbors)
    nodes := make([]*Node, len(req.Nodes))
    for i, rn := range req.Nodes {
        nn, err := net.internNodeFromNeighbours(pkt.remoteAddr, rn)
        if err != nil {
            log.Debug(fmt.Sprintf("invalid neighbour (%v) from %x@%v: %v", rn.IP, n.ID[:8], pkt.remoteAddr, err))
            continue
        }
        nodes[i] = nn
        // Start validation of query results immediately.
        // This fills the table quickly.
        // TODO: generates way too many packets, maybe do it via queue.
        if nn.state == unknown {
            net.transition(nn, verifyinit)
        }
    }
    // TODO: don't ignore second packet
    n.pendingNeighbours.reply <- nodes
    n.pendingNeighbours = nil
    // Now that this query is done, start the next one.
    n.startNextQuery(net)
    return nil
}
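
// A caller-side sketch of the exported API defined in this file. How the
// *Network is constructed from a UDP socket lives in the package's listener
// code, not here; assume ntab is such a *Network, bootnodes a parsed
// []*Node, someID a NodeID and stop a chan struct{} (all illustrative):
//
//     if err := ntab.SetFallbackNodes(bootnodes); err != nil {
//         // bad bootstrap/fallback node
//     }
//     results := ntab.Lookup(someID)         // iterative Kademlia-style search
//     node := ntab.Resolve(someID)           // nil if the node cannot be found
//     go ntab.RegisterTopic("mytopic", stop) // advertise until stop is closed
//     ntab.Close()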