github.com/jincm/wesharechain@v0.0.0-20210122032815-1537409ce26a/chain/swarm/network/stream/stream.go (about) 1 // Copyright 2018 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package stream 18 19 import ( 20 "context" 21 "errors" 22 "fmt" 23 "math" 24 "reflect" 25 "sync" 26 "time" 27 28 "github.com/ethereum/go-ethereum/metrics" 29 "github.com/ethereum/go-ethereum/p2p" 30 "github.com/ethereum/go-ethereum/p2p/enode" 31 "github.com/ethereum/go-ethereum/p2p/protocols" 32 "github.com/ethereum/go-ethereum/rpc" 33 "github.com/ethereum/go-ethereum/swarm/log" 34 "github.com/ethereum/go-ethereum/swarm/network" 35 "github.com/ethereum/go-ethereum/swarm/network/stream/intervals" 36 "github.com/ethereum/go-ethereum/swarm/state" 37 "github.com/ethereum/go-ethereum/swarm/storage" 38 ) 39 40 const ( 41 Low uint8 = iota 42 Mid 43 High 44 Top 45 PriorityQueue = 4 // number of priority queues - Low, Mid, High, Top 46 PriorityQueueCap = 4096 // queue capacity 47 HashSize = 32 48 ) 49 50 // Enumerate options for syncing and retrieval 51 type SyncingOption int 52 type RetrievalOption int 53 54 // Syncing options 55 const ( 56 // Syncing disabled 57 SyncingDisabled SyncingOption = iota 58 // Register the client and the server but not subscribe 59 SyncingRegisterOnly 60 // Both client and server funcs are registered, subscribe sent automatically 61 SyncingAutoSubscribe 62 ) 63 64 const ( 65 // Retrieval disabled. Used mostly for tests to isolate syncing features (i.e. syncing only) 66 RetrievalDisabled RetrievalOption = iota 67 // Only the client side of the retrieve request is registered. 68 // (light nodes do not serve retrieve requests) 69 // once the client is registered, subscription to retrieve request stream is always sent 70 RetrievalClientOnly 71 // Both client and server funcs are registered, subscribe sent automatically 72 RetrievalEnabled 73 ) 74 75 // subscriptionFunc is used to determine what to do in order to perform subscriptions 76 // usually we would start to really subscribe to nodes, but for tests other functionality may be needed 77 // (see TestRequestPeerSubscriptions in streamer_test.go) 78 var subscriptionFunc = doRequestSubscription 79 80 // Registry registry for outgoing and incoming streamer constructors 81 type Registry struct { 82 addr enode.ID 83 api *API 84 skipCheck bool 85 clientMu sync.RWMutex 86 serverMu sync.RWMutex 87 peersMu sync.RWMutex 88 serverFuncs map[string]func(*Peer, string, bool) (Server, error) 89 clientFuncs map[string]func(*Peer, string, bool) (Client, error) 90 peers map[enode.ID]*Peer 91 delivery *Delivery 92 intervalsStore state.Store 93 autoRetrieval bool // automatically subscribe to retrieve request stream 94 maxPeerServers int 95 spec *protocols.Spec //this protocol's spec 96 balance protocols.Balance //implements protocols.Balance, for accounting 97 prices protocols.Prices //implements protocols.Prices, provides prices to accounting 98 quit chan struct{} // terminates registry goroutines 99 } 100 101 // RegistryOptions holds optional values for NewRegistry constructor. 102 type RegistryOptions struct { 103 SkipCheck bool 104 Syncing SyncingOption // Defines syncing behavior 105 Retrieval RetrievalOption // Defines retrieval behavior 106 SyncUpdateDelay time.Duration 107 MaxPeerServers int // The limit of servers for each peer in registry 108 } 109 110 // NewRegistry is Streamer constructor 111 func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.SyncChunkStore, intervalsStore state.Store, options *RegistryOptions, balance protocols.Balance) *Registry { 112 if options == nil { 113 options = &RegistryOptions{} 114 } 115 if options.SyncUpdateDelay <= 0 { 116 options.SyncUpdateDelay = 15 * time.Second 117 } 118 // check if retrieval has been disabled 119 retrieval := options.Retrieval != RetrievalDisabled 120 121 quit := make(chan struct{}) 122 123 streamer := &Registry{ 124 addr: localID, 125 skipCheck: options.SkipCheck, 126 serverFuncs: make(map[string]func(*Peer, string, bool) (Server, error)), 127 clientFuncs: make(map[string]func(*Peer, string, bool) (Client, error)), 128 peers: make(map[enode.ID]*Peer), 129 delivery: delivery, 130 intervalsStore: intervalsStore, 131 autoRetrieval: retrieval, 132 maxPeerServers: options.MaxPeerServers, 133 balance: balance, 134 quit: quit, 135 } 136 137 streamer.setupSpec() 138 139 streamer.api = NewAPI(streamer) 140 delivery.getPeer = streamer.getPeer 141 142 // if retrieval is enabled, register the server func, so that retrieve requests will be served (non-light nodes only) 143 if options.Retrieval == RetrievalEnabled { 144 streamer.RegisterServerFunc(swarmChunkServerStreamName, func(_ *Peer, _ string, live bool) (Server, error) { 145 if !live { 146 return nil, errors.New("only live retrieval requests supported") 147 } 148 return NewSwarmChunkServer(delivery.chunkStore), nil 149 }) 150 } 151 152 // if retrieval is not disabled, register the client func (both light nodes and normal nodes can issue retrieve requests) 153 if options.Retrieval != RetrievalDisabled { 154 streamer.RegisterClientFunc(swarmChunkServerStreamName, func(p *Peer, t string, live bool) (Client, error) { 155 return NewSwarmSyncerClient(p, syncChunkStore, NewStream(swarmChunkServerStreamName, t, live)) 156 }) 157 } 158 159 // If syncing is not disabled, the syncing functions are registered (both client and server) 160 if options.Syncing != SyncingDisabled { 161 RegisterSwarmSyncerServer(streamer, syncChunkStore) 162 RegisterSwarmSyncerClient(streamer, syncChunkStore) 163 } 164 165 // if syncing is set to automatically subscribe to the syncing stream, start the subscription process 166 if options.Syncing == SyncingAutoSubscribe { 167 // latestIntC function ensures that 168 // - receiving from the in chan is not blocked by processing inside the for loop 169 // - the latest int value is delivered to the loop after the processing is done 170 // In context of NeighbourhoodDepthC: 171 // after the syncing is done updating inside the loop, we do not need to update on the intermediate 172 // depth changes, only to the latest one 173 latestIntC := func(in <-chan int) <-chan int { 174 out := make(chan int, 1) 175 176 go func() { 177 defer close(out) 178 179 for { 180 select { 181 case i, ok := <-in: 182 if !ok { 183 return 184 } 185 select { 186 case <-out: 187 default: 188 } 189 out <- i 190 case <-quit: 191 return 192 } 193 } 194 }() 195 196 return out 197 } 198 199 kad := streamer.delivery.kad 200 // get notification channels from Kademlia before returning 201 // from this function to avoid race with Close method and 202 // the goroutine created below 203 depthC := latestIntC(kad.NeighbourhoodDepthC()) 204 addressBookSizeC := latestIntC(kad.AddrCountC()) 205 206 go func() { 207 // wait for kademlia table to be healthy 208 // but return if Registry is closed before 209 select { 210 case <-time.After(options.SyncUpdateDelay): 211 case <-quit: 212 return 213 } 214 215 // initial requests for syncing subscription to peers 216 streamer.updateSyncing() 217 218 for depth := range depthC { 219 log.Debug("Kademlia neighbourhood depth change", "depth", depth) 220 221 // Prevent too early sync subscriptions by waiting until there are no 222 // new peers connecting. Sync streams updating will be done after no 223 // peers are connected for at least SyncUpdateDelay period. 224 timer := time.NewTimer(options.SyncUpdateDelay) 225 // Hard limit to sync update delay, preventing long delays 226 // on a very dynamic network 227 maxTimer := time.NewTimer(3 * time.Minute) 228 loop: 229 for { 230 select { 231 case <-maxTimer.C: 232 // force syncing update when a hard timeout is reached 233 log.Trace("Sync subscriptions update on hard timeout") 234 // request for syncing subscription to new peers 235 streamer.updateSyncing() 236 break loop 237 case <-timer.C: 238 // start syncing as no new peers has been added to kademlia 239 // for some time 240 log.Trace("Sync subscriptions update") 241 // request for syncing subscription to new peers 242 streamer.updateSyncing() 243 break loop 244 case size := <-addressBookSizeC: 245 log.Trace("Kademlia address book size changed on depth change", "size", size) 246 // new peers has been added to kademlia, 247 // reset the timer to prevent early sync subscriptions 248 if !timer.Stop() { 249 <-timer.C 250 } 251 timer.Reset(options.SyncUpdateDelay) 252 case <-quit: 253 break loop 254 } 255 } 256 timer.Stop() 257 maxTimer.Stop() 258 } 259 }() 260 } 261 262 return streamer 263 } 264 265 // This is an accounted protocol, therefore we need to provide a pricing Hook to the spec 266 // For simulations to be able to run multiple nodes and not override the hook's balance, 267 // we need to construct a spec instance per node instance 268 func (r *Registry) setupSpec() { 269 // first create the "bare" spec 270 r.createSpec() 271 // now create the pricing object 272 r.createPriceOracle() 273 // if balance is nil, this node has been started without swap support (swapEnabled flag is false) 274 if r.balance != nil && !reflect.ValueOf(r.balance).IsNil() { 275 // swap is enabled, so setup the hook 276 r.spec.Hook = protocols.NewAccounting(r.balance, r.prices) 277 } 278 } 279 280 // RegisterClient registers an incoming streamer constructor 281 func (r *Registry) RegisterClientFunc(stream string, f func(*Peer, string, bool) (Client, error)) { 282 r.clientMu.Lock() 283 defer r.clientMu.Unlock() 284 285 r.clientFuncs[stream] = f 286 } 287 288 // RegisterServer registers an outgoing streamer constructor 289 func (r *Registry) RegisterServerFunc(stream string, f func(*Peer, string, bool) (Server, error)) { 290 r.serverMu.Lock() 291 defer r.serverMu.Unlock() 292 293 r.serverFuncs[stream] = f 294 } 295 296 // GetClient accessor for incoming streamer constructors 297 func (r *Registry) GetClientFunc(stream string) (func(*Peer, string, bool) (Client, error), error) { 298 r.clientMu.RLock() 299 defer r.clientMu.RUnlock() 300 301 f := r.clientFuncs[stream] 302 if f == nil { 303 return nil, fmt.Errorf("stream %v not registered", stream) 304 } 305 return f, nil 306 } 307 308 // GetServer accessor for incoming streamer constructors 309 func (r *Registry) GetServerFunc(stream string) (func(*Peer, string, bool) (Server, error), error) { 310 r.serverMu.RLock() 311 defer r.serverMu.RUnlock() 312 313 f := r.serverFuncs[stream] 314 if f == nil { 315 return nil, fmt.Errorf("stream %v not registered", stream) 316 } 317 return f, nil 318 } 319 320 func (r *Registry) RequestSubscription(peerId enode.ID, s Stream, h *Range, prio uint8) error { 321 // check if the stream is registered 322 if _, err := r.GetServerFunc(s.Name); err != nil { 323 return err 324 } 325 326 peer := r.getPeer(peerId) 327 if peer == nil { 328 return fmt.Errorf("peer not found %v", peerId) 329 } 330 331 if _, err := peer.getServer(s); err != nil { 332 if e, ok := err.(*notFoundError); ok && e.t == "server" { 333 // request subscription only if the server for this stream is not created 334 log.Debug("RequestSubscription ", "peer", peerId, "stream", s, "history", h) 335 return peer.Send(context.TODO(), &RequestSubscriptionMsg{ 336 Stream: s, 337 History: h, 338 Priority: prio, 339 }) 340 } 341 return err 342 } 343 log.Trace("RequestSubscription: already subscribed", "peer", peerId, "stream", s, "history", h) 344 return nil 345 } 346 347 // Subscribe initiates the streamer 348 func (r *Registry) Subscribe(peerId enode.ID, s Stream, h *Range, priority uint8) error { 349 // check if the stream is registered 350 if _, err := r.GetClientFunc(s.Name); err != nil { 351 return err 352 } 353 354 peer := r.getPeer(peerId) 355 if peer == nil { 356 return fmt.Errorf("peer not found %v", peerId) 357 } 358 359 var to uint64 360 if !s.Live && h != nil { 361 to = h.To 362 } 363 364 err := peer.setClientParams(s, newClientParams(priority, to)) 365 if err != nil { 366 return err 367 } 368 if s.Live && h != nil { 369 if err := peer.setClientParams( 370 getHistoryStream(s), 371 newClientParams(getHistoryPriority(priority), h.To), 372 ); err != nil { 373 return err 374 } 375 } 376 377 msg := &SubscribeMsg{ 378 Stream: s, 379 History: h, 380 Priority: priority, 381 } 382 log.Debug("Subscribe ", "peer", peerId, "stream", s, "history", h) 383 384 return peer.SendPriority(context.TODO(), msg, priority) 385 } 386 387 func (r *Registry) Unsubscribe(peerId enode.ID, s Stream) error { 388 peer := r.getPeer(peerId) 389 if peer == nil { 390 return fmt.Errorf("peer not found %v", peerId) 391 } 392 393 msg := &UnsubscribeMsg{ 394 Stream: s, 395 } 396 log.Debug("Unsubscribe ", "peer", peerId, "stream", s) 397 398 if err := peer.Send(context.TODO(), msg); err != nil { 399 return err 400 } 401 return peer.removeClient(s) 402 } 403 404 // Quit sends the QuitMsg to the peer to remove the 405 // stream peer client and terminate the streaming. 406 func (r *Registry) Quit(peerId enode.ID, s Stream) error { 407 peer := r.getPeer(peerId) 408 if peer == nil { 409 log.Debug("stream quit: peer not found", "peer", peerId, "stream", s) 410 // if the peer is not found, abort the request 411 return nil 412 } 413 414 msg := &QuitMsg{ 415 Stream: s, 416 } 417 log.Debug("Quit ", "peer", peerId, "stream", s) 418 419 return peer.Send(context.TODO(), msg) 420 } 421 422 func (r *Registry) Close() error { 423 // Stop sending neighborhood depth change and address count 424 // change from Kademlia that were initiated in NewRegistry constructor. 425 r.delivery.kad.CloseNeighbourhoodDepthC() 426 r.delivery.kad.CloseAddrCountC() 427 close(r.quit) 428 return r.intervalsStore.Close() 429 } 430 431 func (r *Registry) getPeer(peerId enode.ID) *Peer { 432 r.peersMu.RLock() 433 defer r.peersMu.RUnlock() 434 435 return r.peers[peerId] 436 } 437 438 func (r *Registry) setPeer(peer *Peer) { 439 r.peersMu.Lock() 440 r.peers[peer.ID()] = peer 441 metrics.GetOrRegisterGauge("registry.peers", nil).Update(int64(len(r.peers))) 442 r.peersMu.Unlock() 443 } 444 445 func (r *Registry) deletePeer(peer *Peer) { 446 r.peersMu.Lock() 447 delete(r.peers, peer.ID()) 448 metrics.GetOrRegisterGauge("registry.peers", nil).Update(int64(len(r.peers))) 449 r.peersMu.Unlock() 450 } 451 452 func (r *Registry) peersCount() (c int) { 453 r.peersMu.Lock() 454 c = len(r.peers) 455 r.peersMu.Unlock() 456 return 457 } 458 459 // Run protocol run function 460 func (r *Registry) Run(p *network.BzzPeer) error { 461 sp := NewPeer(p.Peer, r) 462 r.setPeer(sp) 463 defer r.deletePeer(sp) 464 defer close(sp.quit) 465 defer sp.close() 466 467 if r.autoRetrieval && !p.LightNode { 468 err := r.Subscribe(p.ID(), NewStream(swarmChunkServerStreamName, "", true), nil, Top) 469 if err != nil { 470 return err 471 } 472 } 473 474 return sp.Run(sp.HandleMsg) 475 } 476 477 // updateSyncing subscribes to SYNC streams by iterating over the 478 // kademlia connections and bins. If there are existing SYNC streams 479 // and they are no longer required after iteration, request to Quit 480 // them will be send to appropriate peers. 481 func (r *Registry) updateSyncing() { 482 kad := r.delivery.kad 483 // map of all SYNC streams for all peers 484 // used at the and of the function to remove servers 485 // that are not needed anymore 486 subs := make(map[enode.ID]map[Stream]struct{}) 487 r.peersMu.RLock() 488 for id, peer := range r.peers { 489 peer.serverMu.RLock() 490 for stream := range peer.servers { 491 if stream.Name == "SYNC" { 492 if _, ok := subs[id]; !ok { 493 subs[id] = make(map[Stream]struct{}) 494 } 495 subs[id][stream] = struct{}{} 496 } 497 } 498 peer.serverMu.RUnlock() 499 } 500 r.peersMu.RUnlock() 501 502 // start requesting subscriptions from peers 503 r.requestPeerSubscriptions(kad, subs) 504 505 // remove SYNC servers that do not need to be subscribed 506 for id, streams := range subs { 507 if len(streams) == 0 { 508 continue 509 } 510 peer := r.getPeer(id) 511 if peer == nil { 512 continue 513 } 514 for stream := range streams { 515 log.Debug("Remove sync server", "peer", id, "stream", stream) 516 err := r.Quit(peer.ID(), stream) 517 if err != nil && err != p2p.ErrShuttingDown { 518 log.Error("quit", "err", err, "peer", peer.ID(), "stream", stream) 519 } 520 } 521 } 522 } 523 524 // requestPeerSubscriptions calls on each live peer in the kademlia table 525 // and sends a `RequestSubscription` to peers according to their bin 526 // and their relationship with kademlia's depth. 527 // Also check `TestRequestPeerSubscriptions` in order to understand the 528 // expected behavior. 529 // The function expects: 530 // * the kademlia 531 // * a map of subscriptions 532 // * the actual function to subscribe 533 // (in case of the test, it doesn't do real subscriptions) 534 func (r *Registry) requestPeerSubscriptions(kad *network.Kademlia, subs map[enode.ID]map[Stream]struct{}) { 535 536 var startPo int 537 var endPo int 538 var ok bool 539 540 // kademlia's depth 541 kadDepth := kad.NeighbourhoodDepth() 542 // request subscriptions for all nodes and bins 543 // nil as base takes the node's base; we need to pass 255 as `EachConn` runs 544 // from deepest bins backwards 545 kad.EachConn(nil, 255, func(p *network.Peer, po int) bool { 546 // nodes that do not provide stream protocol 547 // should not be subscribed, e.g. bootnodes 548 if !p.HasCap("stream") { 549 return true 550 } 551 //if the peer's bin is shallower than the kademlia depth, 552 //only the peer's bin should be subscribed 553 if po < kadDepth { 554 startPo = po 555 endPo = po 556 } else { 557 //if the peer's bin is equal or deeper than the kademlia depth, 558 //each bin from the depth up to k.MaxProxDisplay should be subscribed 559 startPo = kadDepth 560 endPo = kad.MaxProxDisplay 561 } 562 563 for bin := startPo; bin <= endPo; bin++ { 564 //do the actual subscription 565 ok = subscriptionFunc(r, p, uint8(bin), subs) 566 } 567 return ok 568 }) 569 } 570 571 // doRequestSubscription sends the actual RequestSubscription to the peer 572 func doRequestSubscription(r *Registry, p *network.Peer, bin uint8, subs map[enode.ID]map[Stream]struct{}) bool { 573 log.Debug("Requesting subscription by registry:", "registry", r.addr, "peer", p.ID(), "bin", bin) 574 // bin is always less then 256 and it is safe to convert it to type uint8 575 stream := NewStream("SYNC", FormatSyncBinKey(bin), true) 576 if streams, ok := subs[p.ID()]; ok { 577 // delete live and history streams from the map, so that it won't be removed with a Quit request 578 delete(streams, stream) 579 delete(streams, getHistoryStream(stream)) 580 } 581 err := r.RequestSubscription(p.ID(), stream, NewRange(0, 0), High) 582 if err != nil { 583 log.Debug("Request subscription", "err", err, "peer", p.ID(), "stream", stream) 584 return false 585 } 586 return true 587 } 588 589 func (r *Registry) runProtocol(p *p2p.Peer, rw p2p.MsgReadWriter) error { 590 peer := protocols.NewPeer(p, rw, r.spec) 591 bp := network.NewBzzPeer(peer) 592 np := network.NewPeer(bp, r.delivery.kad) 593 r.delivery.kad.On(np) 594 defer r.delivery.kad.Off(np) 595 return r.Run(bp) 596 } 597 598 // HandleMsg is the message handler that delegates incoming messages 599 func (p *Peer) HandleMsg(ctx context.Context, msg interface{}) error { 600 select { 601 case <-p.streamer.quit: 602 log.Trace("message received after the streamer is closed", "peer", p.ID()) 603 // return without an error since streamer is closed and 604 // no messages should be handled as other subcomponents like 605 // storage leveldb may be closed 606 return nil 607 default: 608 } 609 610 switch msg := msg.(type) { 611 612 case *SubscribeMsg: 613 return p.handleSubscribeMsg(ctx, msg) 614 615 case *SubscribeErrorMsg: 616 return p.handleSubscribeErrorMsg(msg) 617 618 case *UnsubscribeMsg: 619 return p.handleUnsubscribeMsg(msg) 620 621 case *OfferedHashesMsg: 622 return p.handleOfferedHashesMsg(ctx, msg) 623 624 case *TakeoverProofMsg: 625 return p.handleTakeoverProofMsg(ctx, msg) 626 627 case *WantedHashesMsg: 628 return p.handleWantedHashesMsg(ctx, msg) 629 630 case *ChunkDeliveryMsgRetrieval: 631 // handling chunk delivery is the same for retrieval and syncing, so let's cast the msg 632 return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, ((*ChunkDeliveryMsg)(msg))) 633 634 case *ChunkDeliveryMsgSyncing: 635 // handling chunk delivery is the same for retrieval and syncing, so let's cast the msg 636 return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, ((*ChunkDeliveryMsg)(msg))) 637 638 case *RetrieveRequestMsg: 639 return p.streamer.delivery.handleRetrieveRequestMsg(ctx, p, msg) 640 641 case *RequestSubscriptionMsg: 642 return p.handleRequestSubscription(ctx, msg) 643 644 case *QuitMsg: 645 return p.handleQuitMsg(msg) 646 647 default: 648 return fmt.Errorf("unknown message type: %T", msg) 649 } 650 } 651 652 type server struct { 653 Server 654 stream Stream 655 priority uint8 656 currentBatch []byte 657 sessionIndex uint64 658 } 659 660 // setNextBatch adjusts passed interval based on session index and whether 661 // stream is live or history. It calls Server SetNextBatch with adjusted 662 // interval and returns batch hashes and their interval. 663 func (s *server) setNextBatch(from, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) { 664 if s.stream.Live { 665 if from == 0 { 666 from = s.sessionIndex 667 } 668 if to <= from || from >= s.sessionIndex { 669 to = math.MaxUint64 670 } 671 } else { 672 if (to < from && to != 0) || from > s.sessionIndex { 673 return nil, 0, 0, nil, nil 674 } 675 if to == 0 || to > s.sessionIndex { 676 to = s.sessionIndex 677 } 678 } 679 return s.SetNextBatch(from, to) 680 } 681 682 // Server interface for outgoing peer Streamer 683 type Server interface { 684 // SessionIndex is called when a server is initialized 685 // to get the current cursor state of the stream data. 686 // Based on this index, live and history stream intervals 687 // will be adjusted before calling SetNextBatch. 688 SessionIndex() (uint64, error) 689 SetNextBatch(uint64, uint64) (hashes []byte, from uint64, to uint64, proof *HandoverProof, err error) 690 GetData(context.Context, []byte) ([]byte, error) 691 Close() 692 } 693 694 type client struct { 695 Client 696 stream Stream 697 priority uint8 698 sessionAt uint64 699 to uint64 700 next chan error 701 quit chan struct{} 702 703 intervalsKey string 704 intervalsStore state.Store 705 } 706 707 func peerStreamIntervalsKey(p *Peer, s Stream) string { 708 return p.ID().String() + s.String() 709 } 710 711 func (c *client) AddInterval(start, end uint64) (err error) { 712 i := &intervals.Intervals{} 713 if err = c.intervalsStore.Get(c.intervalsKey, i); err != nil { 714 return err 715 } 716 i.Add(start, end) 717 return c.intervalsStore.Put(c.intervalsKey, i) 718 } 719 720 func (c *client) NextInterval() (start, end uint64, err error) { 721 i := &intervals.Intervals{} 722 err = c.intervalsStore.Get(c.intervalsKey, i) 723 if err != nil { 724 return 0, 0, err 725 } 726 start, end = i.Next() 727 return start, end, nil 728 } 729 730 // Client interface for incoming peer Streamer 731 type Client interface { 732 NeedData(context.Context, []byte) func(context.Context) error 733 BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error) 734 Close() 735 } 736 737 func (c *client) nextBatch(from uint64) (nextFrom uint64, nextTo uint64) { 738 if c.to > 0 && from >= c.to { 739 return 0, 0 740 } 741 if c.stream.Live { 742 return from, 0 743 } else if from >= c.sessionAt { 744 if c.to > 0 { 745 return from, c.to 746 } 747 return from, math.MaxUint64 748 } 749 nextFrom, nextTo, err := c.NextInterval() 750 if err != nil { 751 log.Error("next intervals", "stream", c.stream) 752 return 753 } 754 if nextTo > c.to { 755 nextTo = c.to 756 } 757 if nextTo == 0 { 758 nextTo = c.sessionAt 759 } 760 return 761 } 762 763 func (c *client) batchDone(p *Peer, req *OfferedHashesMsg, hashes []byte) error { 764 if tf := c.BatchDone(req.Stream, req.From, hashes, req.Root); tf != nil { 765 tp, err := tf() 766 if err != nil { 767 return err 768 } 769 770 if err := p.SendPriority(context.TODO(), tp, c.priority); err != nil { 771 return err 772 } 773 if c.to > 0 && tp.Takeover.End >= c.to { 774 return p.streamer.Unsubscribe(p.Peer.ID(), req.Stream) 775 } 776 return nil 777 } 778 return c.AddInterval(req.From, req.To) 779 } 780 781 func (c *client) close() { 782 select { 783 case <-c.quit: 784 default: 785 close(c.quit) 786 } 787 c.Close() 788 } 789 790 // clientParams store parameters for the new client 791 // between a subscription and initial offered hashes request handling. 792 type clientParams struct { 793 priority uint8 794 to uint64 795 // signal when the client is created 796 clientCreatedC chan struct{} 797 } 798 799 func newClientParams(priority uint8, to uint64) *clientParams { 800 return &clientParams{ 801 priority: priority, 802 to: to, 803 clientCreatedC: make(chan struct{}), 804 } 805 } 806 807 func (c *clientParams) waitClient(ctx context.Context) error { 808 select { 809 case <-ctx.Done(): 810 return ctx.Err() 811 case <-c.clientCreatedC: 812 return nil 813 } 814 } 815 816 func (c *clientParams) clientCreated() { 817 close(c.clientCreatedC) 818 } 819 820 // GetSpec returns the streamer spec to callers 821 // This used to be a global variable but for simulations with 822 // multiple nodes its fields (notably the Hook) would be overwritten 823 func (r *Registry) GetSpec() *protocols.Spec { 824 return r.spec 825 } 826 827 func (r *Registry) createSpec() { 828 // Spec is the spec of the streamer protocol 829 var spec = &protocols.Spec{ 830 Name: "stream", 831 Version: 8, 832 MaxMsgSize: 10 * 1024 * 1024, 833 Messages: []interface{}{ 834 UnsubscribeMsg{}, 835 OfferedHashesMsg{}, 836 WantedHashesMsg{}, 837 TakeoverProofMsg{}, 838 SubscribeMsg{}, 839 RetrieveRequestMsg{}, 840 ChunkDeliveryMsgRetrieval{}, 841 SubscribeErrorMsg{}, 842 RequestSubscriptionMsg{}, 843 QuitMsg{}, 844 ChunkDeliveryMsgSyncing{}, 845 }, 846 } 847 r.spec = spec 848 } 849 850 // An accountable message needs some meta information attached to it 851 // in order to evaluate the correct price 852 type StreamerPrices struct { 853 priceMatrix map[reflect.Type]*protocols.Price 854 registry *Registry 855 } 856 857 // Price implements the accounting interface and returns the price for a specific message 858 func (sp *StreamerPrices) Price(msg interface{}) *protocols.Price { 859 t := reflect.TypeOf(msg).Elem() 860 return sp.priceMatrix[t] 861 } 862 863 // Instead of hardcoding the price, get it 864 // through a function - it could be quite complex in the future 865 func (sp *StreamerPrices) getRetrieveRequestMsgPrice() uint64 { 866 return uint64(1) 867 } 868 869 // Instead of hardcoding the price, get it 870 // through a function - it could be quite complex in the future 871 func (sp *StreamerPrices) getChunkDeliveryMsgRetrievalPrice() uint64 { 872 return uint64(1) 873 } 874 875 // createPriceOracle sets up a matrix which can be queried to get 876 // the price for a message via the Price method 877 func (r *Registry) createPriceOracle() { 878 sp := &StreamerPrices{ 879 registry: r, 880 } 881 sp.priceMatrix = map[reflect.Type]*protocols.Price{ 882 reflect.TypeOf(ChunkDeliveryMsgRetrieval{}): { 883 Value: sp.getChunkDeliveryMsgRetrievalPrice(), // arbitrary price for now 884 PerByte: true, 885 Payer: protocols.Receiver, 886 }, 887 reflect.TypeOf(RetrieveRequestMsg{}): { 888 Value: sp.getRetrieveRequestMsgPrice(), // arbitrary price for now 889 PerByte: false, 890 Payer: protocols.Sender, 891 }, 892 } 893 r.prices = sp 894 } 895 896 func (r *Registry) Protocols() []p2p.Protocol { 897 return []p2p.Protocol{ 898 { 899 Name: r.spec.Name, 900 Version: r.spec.Version, 901 Length: r.spec.Length(), 902 Run: r.runProtocol, 903 }, 904 } 905 } 906 907 func (r *Registry) APIs() []rpc.API { 908 return []rpc.API{ 909 { 910 Namespace: "stream", 911 Version: "3.0", 912 Service: r.api, 913 Public: true, 914 }, 915 } 916 } 917 918 func (r *Registry) Start(server *p2p.Server) error { 919 log.Info("Streamer started") 920 return nil 921 } 922 923 func (r *Registry) Stop() error { 924 return nil 925 } 926 927 type Range struct { 928 From, To uint64 929 } 930 931 func NewRange(from, to uint64) *Range { 932 return &Range{ 933 From: from, 934 To: to, 935 } 936 } 937 938 func (r *Range) String() string { 939 return fmt.Sprintf("%v-%v", r.From, r.To) 940 } 941 942 func getHistoryPriority(priority uint8) uint8 { 943 if priority == 0 { 944 return 0 945 } 946 return priority - 1 947 } 948 949 func getHistoryStream(s Stream) Stream { 950 return NewStream(s.Name, s.Key, false) 951 } 952 953 type API struct { 954 streamer *Registry 955 } 956 957 func NewAPI(r *Registry) *API { 958 return &API{ 959 streamer: r, 960 } 961 } 962 963 func (api *API) SubscribeStream(peerId enode.ID, s Stream, history *Range, priority uint8) error { 964 return api.streamer.Subscribe(peerId, s, history, priority) 965 } 966 967 func (api *API) UnsubscribeStream(peerId enode.ID, s Stream) error { 968 return api.streamer.Unsubscribe(peerId, s) 969 } 970 971 /* 972 GetPeerSubscriptions is a API function which allows to query a peer for stream subscriptions it has. 973 It can be called via RPC. 974 It returns a map of node IDs with an array of string representations of Stream objects. 975 */ 976 func (api *API) GetPeerSubscriptions() map[string][]string { 977 //create the empty map 978 pstreams := make(map[string][]string) 979 980 //iterate all streamer peers 981 api.streamer.peersMu.RLock() 982 defer api.streamer.peersMu.RUnlock() 983 984 for id, p := range api.streamer.peers { 985 var streams []string 986 //every peer has a map of stream servers 987 //every stream server represents a subscription 988 p.serverMu.RLock() 989 for s := range p.servers { 990 //append the string representation of the stream 991 //to the list for this peer 992 streams = append(streams, s.String()) 993 } 994 p.serverMu.RUnlock() 995 //set the array of stream servers to the map 996 pstreams[id.String()] = streams 997 } 998 return pstreams 999 }