github.com/shyftnetwork/go-empyrean@v1.8.3-0.20191127201940-fbfca9338f04/swarm/network/stream/stream.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"context"
	"errors"
	"fmt"
	"math"
	"reflect"
	"sync"
	"time"

	"github.com/ShyftNetwork/go-empyrean/metrics"
	"github.com/ShyftNetwork/go-empyrean/p2p"
	"github.com/ShyftNetwork/go-empyrean/p2p/enode"
	"github.com/ShyftNetwork/go-empyrean/p2p/protocols"
	"github.com/ShyftNetwork/go-empyrean/rpc"
	"github.com/ShyftNetwork/go-empyrean/swarm/log"
	"github.com/ShyftNetwork/go-empyrean/swarm/network"
	"github.com/ShyftNetwork/go-empyrean/swarm/network/stream/intervals"
	"github.com/ShyftNetwork/go-empyrean/swarm/state"
	"github.com/ShyftNetwork/go-empyrean/swarm/storage"
)

const (
	Low uint8 = iota
	Mid
	High
	Top
	PriorityQueue    = 4    // number of priority queues - Low, Mid, High, Top
	PriorityQueueCap = 4096 // queue capacity
	HashSize         = 32
)

// Enumerated options for syncing and retrieval
type SyncingOption int
type RetrievalOption int

// Syncing options
const (
	// Syncing disabled
	SyncingDisabled SyncingOption = iota
	// Register the client and the server, but do not subscribe
	SyncingRegisterOnly
	// Both client and server funcs are registered, subscribe sent automatically
	SyncingAutoSubscribe
)

const (
	// Retrieval disabled. Used mostly in tests to isolate syncing features (i.e. syncing only)
	RetrievalDisabled RetrievalOption = iota
	// Only the client side of the retrieve request is registered
	// (light nodes do not serve retrieve requests).
	// Once the client is registered, subscription to the retrieve request stream is always sent.
	RetrievalClientOnly
	// Both client and server funcs are registered, subscribe sent automatically
	RetrievalEnabled
)
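// Illustrative sketch, not part of the original file: how the options above are
// typically combined when configuring a node. The pairings are an assumption
// based on the comments in this file (light nodes do not serve retrieve
// requests and do not sync); RegistryOptions is defined further below.
func exampleOptionsFor(lightNode bool) *RegistryOptions {
	if lightNode {
		// a light node only issues retrieve requests
		return &RegistryOptions{
			Syncing:   SyncingDisabled,
			Retrieval: RetrievalClientOnly,
		}
	}
	// a full node serves retrievals and subscribes to syncing automatically
	return &RegistryOptions{
		Syncing:   SyncingAutoSubscribe,
		Retrieval: RetrievalEnabled,
	}
}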
// subscriptionFunc determines what to do in order to perform subscriptions.
// Usually we would really subscribe to nodes, but tests may need other functionality
// (see TestRequestPeerSubscriptions in streamer_test.go).
var subscriptionFunc func(r *Registry, p *network.Peer, bin uint8, subs map[enode.ID]map[Stream]struct{}) bool = doRequestSubscription

// Registry is a registry for outgoing and incoming streamer constructors
type Registry struct {
	addr           enode.ID
	api            *API
	skipCheck      bool
	clientMu       sync.RWMutex
	serverMu       sync.RWMutex
	peersMu        sync.RWMutex
	serverFuncs    map[string]func(*Peer, string, bool) (Server, error)
	clientFuncs    map[string]func(*Peer, string, bool) (Client, error)
	peers          map[enode.ID]*Peer
	delivery       *Delivery
	intervalsStore state.Store
	autoRetrieval  bool // automatically subscribe to retrieve request stream
	maxPeerServers int
	spec           *protocols.Spec   // this protocol's spec
	balance        protocols.Balance // implements protocols.Balance, for accounting
	prices         protocols.Prices  // implements protocols.Prices, provides prices to accounting
}

// RegistryOptions holds optional values for the NewRegistry constructor.
type RegistryOptions struct {
	SkipCheck       bool
	Syncing         SyncingOption   // Defines syncing behavior
	Retrieval       RetrievalOption // Defines retrieval behavior
	SyncUpdateDelay time.Duration
	MaxPeerServers  int // The limit of servers for each peer in registry
}
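// Illustrative sketch, not part of the original file: constructing a Registry.
// The delivery, netStore and stateStore arguments are hypothetical placeholders
// for components wired up elsewhere in swarm; passing a nil balance leaves
// accounting (swap) disabled.
func exampleNewRegistry(localID enode.ID, delivery *Delivery, netStore storage.SyncChunkStore, stateStore state.Store) *Registry {
	options := &RegistryOptions{
		Syncing:         SyncingAutoSubscribe,
		Retrieval:       RetrievalEnabled,
		SyncUpdateDelay: 15 * time.Second, // the default applied when left zero
	}
	return NewRegistry(localID, delivery, netStore, stateStore, options, nil)
}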
// NewRegistry is the Streamer constructor
func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.SyncChunkStore, intervalsStore state.Store, options *RegistryOptions, balance protocols.Balance) *Registry {
	if options == nil {
		options = &RegistryOptions{}
	}
	if options.SyncUpdateDelay <= 0 {
		options.SyncUpdateDelay = 15 * time.Second
	}
	// check if retrieval has been disabled
	retrieval := options.Retrieval != RetrievalDisabled

	streamer := &Registry{
		addr:           localID,
		skipCheck:      options.SkipCheck,
		serverFuncs:    make(map[string]func(*Peer, string, bool) (Server, error)),
		clientFuncs:    make(map[string]func(*Peer, string, bool) (Client, error)),
		peers:          make(map[enode.ID]*Peer),
		delivery:       delivery,
		intervalsStore: intervalsStore,
		autoRetrieval:  retrieval,
		maxPeerServers: options.MaxPeerServers,
		balance:        balance,
	}

	streamer.setupSpec()

	streamer.api = NewAPI(streamer)
	delivery.getPeer = streamer.getPeer

	// if retrieval is enabled, register the server func, so that retrieve requests will be served (non-light nodes only)
	if options.Retrieval == RetrievalEnabled {
		streamer.RegisterServerFunc(swarmChunkServerStreamName, func(_ *Peer, _ string, live bool) (Server, error) {
			if !live {
				return nil, errors.New("only live retrieval requests supported")
			}
			return NewSwarmChunkServer(delivery.chunkStore), nil
		})
	}

	// if retrieval is not disabled, register the client func (both light nodes and normal nodes can issue retrieve requests)
	if options.Retrieval != RetrievalDisabled {
		streamer.RegisterClientFunc(swarmChunkServerStreamName, func(p *Peer, t string, live bool) (Client, error) {
			return NewSwarmSyncerClient(p, syncChunkStore, NewStream(swarmChunkServerStreamName, t, live))
		})
	}

	// if syncing is not disabled, register the syncing functions (both client and server)
	if options.Syncing != SyncingDisabled {
		RegisterSwarmSyncerServer(streamer, syncChunkStore)
		RegisterSwarmSyncerClient(streamer, syncChunkStore)
	}

	// if syncing is set to automatically subscribe to the syncing stream, start the subscription process
	if options.Syncing == SyncingAutoSubscribe {
		// latestIntC ensures that
		// - receiving from the in chan is not blocked by processing inside the for loop
		// - the latest int value is delivered to the loop after the processing is done
		// In the context of NeighbourhoodDepthC:
		// after the syncing is done updating inside the loop, we do not need to update on the intermediate
		// depth changes, only on the latest one
		latestIntC := func(in <-chan int) <-chan int {
			out := make(chan int, 1)

			go func() {
				defer close(out)

				for i := range in {
					select {
					case <-out:
					default:
					}
					out <- i
				}
			}()

			return out
		}

		go func() {
			// wait for kademlia table to be healthy
			time.Sleep(options.SyncUpdateDelay)

			kad := streamer.delivery.kad
			depthC := latestIntC(kad.NeighbourhoodDepthC())
			addressBookSizeC := latestIntC(kad.AddrCountC())

			// initial requests for syncing subscription to peers
			streamer.updateSyncing()

			for depth := range depthC {
				log.Debug("Kademlia neighbourhood depth change", "depth", depth)

				// Prevent too early sync subscriptions by waiting until there are no
				// new peers connecting. Sync streams updating will be done after no
				// peers are connected for at least SyncUpdateDelay period.
				timer := time.NewTimer(options.SyncUpdateDelay)
				// Hard limit to sync update delay, preventing long delays
				// on a very dynamic network
				maxTimer := time.NewTimer(3 * time.Minute)
			loop:
				for {
					select {
					case <-maxTimer.C:
						// force syncing update when a hard timeout is reached
						log.Trace("Sync subscriptions update on hard timeout")
						// request syncing subscriptions to new peers
						streamer.updateSyncing()
						break loop
					case <-timer.C:
						// start syncing as no new peers have been added to kademlia
						// for some time
						log.Trace("Sync subscriptions update")
						// request syncing subscriptions to new peers
						streamer.updateSyncing()
						break loop
					case size := <-addressBookSizeC:
						log.Trace("Kademlia address book size changed on depth change", "size", size)
						// new peers have been added to kademlia,
						// reset the timer to prevent early sync subscriptions
						if !timer.Stop() {
							<-timer.C
						}
						timer.Reset(options.SyncUpdateDelay)
					}
				}
				timer.Stop()
				maxTimer.Stop()
			}
		}()
	}

	return streamer
}
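// Illustrative sketch, not part of the original file: the timer reset idiom
// used in the subscription loop above. Stop reports false when the timer has
// already fired, in which case the channel must be drained before Reset;
// otherwise a stale tick would fire immediately after the reset.
func exampleResetTimer(timer *time.Timer, d time.Duration) {
	if !timer.Stop() {
		<-timer.C // drain the already-delivered tick
	}
	timer.Reset(d)
}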
// This is an accounted protocol, therefore we need to provide a pricing Hook to the spec.
// For simulations to be able to run multiple nodes and not override the hook's balance,
// we need to construct a spec instance per node instance.
func (r *Registry) setupSpec() {
	// first create the "bare" spec
	r.createSpec()
	// now create the pricing object
	r.createPriceOracle()
	// if balance is nil, this node has been started without swap support (swapEnabled flag is false)
	if r.balance != nil && !reflect.ValueOf(r.balance).IsNil() {
		// swap is enabled, so set up the hook
		r.spec.Hook = protocols.NewAccounting(r.balance, r.prices)
	}
}
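// Illustrative sketch, not part of the original file: the typed-nil pitfall
// that the double check in setupSpec guards against. An interface value that
// wraps a nil pointer is itself non-nil, so a plain nil comparison is not
// enough. probeBalance is a hypothetical placeholder type for this sketch.
type probeBalance struct{}

func exampleTypedNil() (plainNilCheck, reflectNilCheck bool) {
	var p *probeBalance   // typed nil pointer
	var i interface{} = p // interface now holds (type=*probeBalance, value=nil)
	plainNilCheck = i == nil                     // false: the interface itself is non-nil
	reflectNilCheck = reflect.ValueOf(i).IsNil() // true: the wrapped pointer is nil
	return plainNilCheck, reflectNilCheck
}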
// RegisterClientFunc registers an incoming streamer constructor
func (r *Registry) RegisterClientFunc(stream string, f func(*Peer, string, bool) (Client, error)) {
	r.clientMu.Lock()
	defer r.clientMu.Unlock()

	r.clientFuncs[stream] = f
}

// RegisterServerFunc registers an outgoing streamer constructor
func (r *Registry) RegisterServerFunc(stream string, f func(*Peer, string, bool) (Server, error)) {
	r.serverMu.Lock()
	defer r.serverMu.Unlock()

	r.serverFuncs[stream] = f
}

// GetClientFunc is an accessor for incoming streamer constructors
func (r *Registry) GetClientFunc(stream string) (func(*Peer, string, bool) (Client, error), error) {
	r.clientMu.RLock()
	defer r.clientMu.RUnlock()

	f := r.clientFuncs[stream]
	if f == nil {
		return nil, fmt.Errorf("stream %v not registered", stream)
	}
	return f, nil
}

// GetServerFunc is an accessor for outgoing streamer constructors
func (r *Registry) GetServerFunc(stream string) (func(*Peer, string, bool) (Server, error), error) {
	r.serverMu.RLock()
	defer r.serverMu.RUnlock()

	f := r.serverFuncs[stream]
	if f == nil {
		return nil, fmt.Errorf("stream %v not registered", stream)
	}
	return f, nil
}

func (r *Registry) RequestSubscription(peerId enode.ID, s Stream, h *Range, prio uint8) error {
	// check if the stream is registered
	if _, err := r.GetServerFunc(s.Name); err != nil {
		return err
	}

	peer := r.getPeer(peerId)
	if peer == nil {
		return fmt.Errorf("peer not found %v", peerId)
	}

	if _, err := peer.getServer(s); err != nil {
		if e, ok := err.(*notFoundError); ok && e.t == "server" {
			// request subscription only if the server for this stream is not created
			log.Debug("RequestSubscription ", "peer", peerId, "stream", s, "history", h)
			return peer.Send(context.TODO(), &RequestSubscriptionMsg{
				Stream:   s,
				History:  h,
				Priority: prio,
			})
		}
		return err
	}
	log.Trace("RequestSubscription: already subscribed", "peer", peerId, "stream", s, "history", h)
	return nil
}

// Subscribe initiates the streamer
func (r *Registry) Subscribe(peerId enode.ID, s Stream, h *Range, priority uint8) error {
	// check if the stream is registered
	if _, err := r.GetClientFunc(s.Name); err != nil {
		return err
	}

	peer := r.getPeer(peerId)
	if peer == nil {
		return fmt.Errorf("peer not found %v", peerId)
	}

	var to uint64
	if !s.Live && h != nil {
		to = h.To
	}

	err := peer.setClientParams(s, newClientParams(priority, to))
	if err != nil {
		return err
	}
	if s.Live && h != nil {
		if err := peer.setClientParams(
			getHistoryStream(s),
			newClientParams(getHistoryPriority(priority), h.To),
		); err != nil {
			return err
		}
	}

	msg := &SubscribeMsg{
		Stream:   s,
		History:  h,
		Priority: priority,
	}
	log.Debug("Subscribe ", "peer", peerId, "stream", s, "history", h)

	return peer.SendPriority(context.TODO(), msg, priority)
}

func (r *Registry) Unsubscribe(peerId enode.ID, s Stream) error {
	peer := r.getPeer(peerId)
	if peer == nil {
		return fmt.Errorf("peer not found %v", peerId)
	}

	msg := &UnsubscribeMsg{
		Stream: s,
	}
	log.Debug("Unsubscribe ", "peer", peerId, "stream", s)

	if err := peer.Send(context.TODO(), msg); err != nil {
		return err
	}
	return peer.removeClient(s)
}

// Quit sends the QuitMsg to the peer to remove the
// stream peer client and terminate the streaming.
func (r *Registry) Quit(peerId enode.ID, s Stream) error {
	peer := r.getPeer(peerId)
	if peer == nil {
		log.Debug("stream quit: peer not found", "peer", peerId, "stream", s)
		// if the peer is not found, abort the request
		return nil
	}

	msg := &QuitMsg{
		Stream: s,
	}
	log.Debug("Quit ", "peer", peerId, "stream", s)

	return peer.Send(context.TODO(), msg)
}
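// Illustrative sketch, not part of the original file: the client-side
// subscription lifecycle using the methods above. A live SYNC stream for bin 3
// is requested together with its history, then torn down once it is no longer
// needed. peerID is a placeholder for a connected peer. Note that a live
// subscription with a history range also registers the corresponding history
// stream one priority level lower (see getHistoryPriority).
func exampleSubscribeLifecycle(r *Registry, peerID enode.ID) error {
	s := NewStream("SYNC", FormatSyncBinKey(3), true)
	if err := r.Subscribe(peerID, s, NewRange(0, 0), Top); err != nil {
		return err
	}
	// ... the stream runs; eventually:
	return r.Unsubscribe(peerID, s)
}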
func (r *Registry) Close() error {
	return r.intervalsStore.Close()
}

func (r *Registry) getPeer(peerId enode.ID) *Peer {
	r.peersMu.RLock()
	defer r.peersMu.RUnlock()

	return r.peers[peerId]
}

func (r *Registry) setPeer(peer *Peer) {
	r.peersMu.Lock()
	r.peers[peer.ID()] = peer
	metrics.GetOrRegisterGauge("registry.peers", nil).Update(int64(len(r.peers)))
	r.peersMu.Unlock()
}

func (r *Registry) deletePeer(peer *Peer) {
	r.peersMu.Lock()
	delete(r.peers, peer.ID())
	metrics.GetOrRegisterGauge("registry.peers", nil).Update(int64(len(r.peers)))
	r.peersMu.Unlock()
}

func (r *Registry) peersCount() (c int) {
	r.peersMu.Lock()
	c = len(r.peers)
	r.peersMu.Unlock()
	return
}

// Run is the protocol run function
func (r *Registry) Run(p *network.BzzPeer) error {
	sp := NewPeer(p.Peer, r)
	r.setPeer(sp)
	defer r.deletePeer(sp)
	defer close(sp.quit)
	defer sp.close()

	if r.autoRetrieval && !p.LightNode {
		err := r.Subscribe(p.ID(), NewStream(swarmChunkServerStreamName, "", true), nil, Top)
		if err != nil {
			return err
		}
	}

	return sp.Run(sp.HandleMsg)
}

// updateSyncing subscribes to SYNC streams by iterating over the
// kademlia connections and bins. If there are existing SYNC streams
// that are no longer required after the iteration, a request to Quit
// them will be sent to the appropriate peers.
func (r *Registry) updateSyncing() {
	kad := r.delivery.kad
	// map of all SYNC streams for all peers,
	// used at the end of the function to remove servers
	// that are no longer needed
	subs := make(map[enode.ID]map[Stream]struct{})
	r.peersMu.RLock()
	for id, peer := range r.peers {
		peer.serverMu.RLock()
		for stream := range peer.servers {
			if stream.Name == "SYNC" {
				if _, ok := subs[id]; !ok {
					subs[id] = make(map[Stream]struct{})
				}
				subs[id][stream] = struct{}{}
			}
		}
		peer.serverMu.RUnlock()
	}
	r.peersMu.RUnlock()

	// start requesting subscriptions from peers
	r.requestPeerSubscriptions(kad, subs)

	// remove SYNC servers that do not need to be subscribed
	for id, streams := range subs {
		if len(streams) == 0 {
			continue
		}
		peer := r.getPeer(id)
		if peer == nil {
			continue
		}
		for stream := range streams {
			log.Debug("Remove sync server", "peer", id, "stream", stream)
			err := r.Quit(peer.ID(), stream)
			if err != nil && err != p2p.ErrShuttingDown {
				log.Error("quit", "err", err, "peer", peer.ID(), "stream", stream)
			}
		}
	}
}

// requestPeerSubscriptions calls on each live peer in the kademlia table
// and sends a `RequestSubscription` to peers according to their bin
// and their relationship with kademlia's depth.
// Also check `TestRequestPeerSubscriptions` in order to understand the
// expected behavior.
// The function uses:
// * the kademlia
// * a map of subscriptions
// * the actual function to subscribe (the package-level subscriptionFunc;
//   in tests it doesn't do real subscriptions)
func (r *Registry) requestPeerSubscriptions(kad *network.Kademlia, subs map[enode.ID]map[Stream]struct{}) {

	var startPo int
	var endPo int
	var ok bool

	// kademlia's depth
	kadDepth := kad.NeighbourhoodDepth()
	// request subscriptions for all nodes and bins;
	// nil as base takes the node's base; we need to pass 255 as `EachConn` runs
	// from the deepest bins backwards
	kad.EachConn(nil, 255, func(p *network.Peer, po int) bool {
		// if the peer's bin is shallower than the kademlia depth,
		// only the peer's bin should be subscribed
		if po < kadDepth {
			startPo = po
			endPo = po
		} else {
			// if the peer's bin is equal to or deeper than the kademlia depth,
			// each bin from the depth up to k.MaxProxDisplay should be subscribed
			startPo = kadDepth
			endPo = kad.MaxProxDisplay
		}

		for bin := startPo; bin <= endPo; bin++ {
			// do the actual subscription
			ok = subscriptionFunc(r, p, uint8(bin), subs)
		}
		return ok
	})
}
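// Illustrative worked example, not part of the original file: the bin
// selection rule from requestPeerSubscriptions in isolation. With a kademlia
// depth of 5 and a MaxProxDisplay of 16 (the usual display cap, assumed here),
// a peer in bin 2 is subscribed for bin 2 only, while a peer in bin 7 is
// subscribed for every bin from 5 through 16.
func exampleBinsToSubscribe(po, kadDepth, maxProxDisplay int) (startPo, endPo int) {
	if po < kadDepth {
		// shallow peer: subscribe only to its own bin
		return po, po
	}
	// peer within the neighbourhood: subscribe from the depth outwards
	return kadDepth, maxProxDisplay
}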
// doRequestSubscription sends the actual RequestSubscription to the peer
func doRequestSubscription(r *Registry, p *network.Peer, bin uint8, subs map[enode.ID]map[Stream]struct{}) bool {
	log.Debug("Requesting subscription by registry:", "registry", r.addr, "peer", p.ID(), "bin", bin)
	// bin is always less than 256 and it is safe to convert it to type uint8
	stream := NewStream("SYNC", FormatSyncBinKey(bin), true)
	if streams, ok := subs[p.ID()]; ok {
		// delete live and history streams from the map, so that they won't be removed with a Quit request
		delete(streams, stream)
		delete(streams, getHistoryStream(stream))
	}
	err := r.RequestSubscription(p.ID(), stream, NewRange(0, 0), High)
	if err != nil {
		log.Debug("Request subscription", "err", err, "peer", p.ID(), "stream", stream)
		return false
	}
	return true
}

func (r *Registry) runProtocol(p *p2p.Peer, rw p2p.MsgReadWriter) error {
	peer := protocols.NewPeer(p, rw, r.spec)
	bp := network.NewBzzPeer(peer)
	np := network.NewPeer(bp, r.delivery.kad)
	r.delivery.kad.On(np)
	defer r.delivery.kad.Off(np)
	return r.Run(bp)
}

// HandleMsg is the message handler that delegates incoming messages
func (p *Peer) HandleMsg(ctx context.Context, msg interface{}) error {
	switch msg := msg.(type) {

	case *SubscribeMsg:
		return p.handleSubscribeMsg(ctx, msg)

	case *SubscribeErrorMsg:
		return p.handleSubscribeErrorMsg(msg)

	case *UnsubscribeMsg:
		return p.handleUnsubscribeMsg(msg)

	case *OfferedHashesMsg:
		return p.handleOfferedHashesMsg(ctx, msg)

	case *TakeoverProofMsg:
		return p.handleTakeoverProofMsg(ctx, msg)

	case *WantedHashesMsg:
		return p.handleWantedHashesMsg(ctx, msg)

	case *ChunkDeliveryMsgRetrieval:
		// handling chunk delivery is the same for retrieval and syncing, so let's cast the msg
		return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, (*ChunkDeliveryMsg)(msg))

	case *ChunkDeliveryMsgSyncing:
		// handling chunk delivery is the same for retrieval and syncing, so let's cast the msg
		return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, (*ChunkDeliveryMsg)(msg))

	case *RetrieveRequestMsg:
		return p.streamer.delivery.handleRetrieveRequestMsg(ctx, p, msg)

	case *RequestSubscriptionMsg:
		return p.handleRequestSubscription(ctx, msg)

	case *QuitMsg:
		return p.handleQuitMsg(msg)

	default:
		return fmt.Errorf("unknown message type: %T", msg)
	}
}

type server struct {
	Server
	stream       Stream
	priority     uint8
	currentBatch []byte
	sessionIndex uint64
}

// setNextBatch adjusts the passed interval based on the session index and on
// whether the stream is live or history. It calls the Server's SetNextBatch
// with the adjusted interval and returns batch hashes and their interval.
func (s *server) setNextBatch(from, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
	if s.stream.Live {
		if from == 0 {
			from = s.sessionIndex
		}
		if to <= from || from >= s.sessionIndex {
			to = math.MaxUint64
		}
	} else {
		if (to < from && to != 0) || from > s.sessionIndex {
			return nil, 0, 0, nil, nil
		}
		if to == 0 || to > s.sessionIndex {
			to = s.sessionIndex
		}
	}
	return s.SetNextBatch(from, to)
}
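// Illustrative worked example, not part of the original file: the clamping
// rules of setNextBatch spelled out for a server whose sessionIndex is 100
// (an assumed value).
func exampleSetNextBatchClamping() {
	const sessionIndex uint64 = 100

	// Live stream, first request (from == 0, to == 0): start at the session
	// cursor and leave the upper bound open.
	from, to := uint64(0), uint64(0)
	if from == 0 {
		from = sessionIndex
	}
	if to <= from || from >= sessionIndex {
		to = math.MaxUint64
	}
	// from == 100, to == math.MaxUint64

	// History stream, open-ended request (to == 0): clamp the upper bound to
	// the session cursor, so history never overlaps the live stream.
	hFrom, hTo := uint64(0), uint64(0)
	if hTo == 0 || hTo > sessionIndex {
		hTo = sessionIndex
	}
	// hFrom == 0, hTo == 100
	_, _, _, _ = from, to, hFrom, hTo
}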
// Server interface for outgoing peer Streamer
type Server interface {
	// SessionIndex is called when a server is initialized
	// to get the current cursor state of the stream data.
	// Based on this index, live and history stream intervals
	// will be adjusted before calling SetNextBatch.
	SessionIndex() (uint64, error)
	SetNextBatch(uint64, uint64) (hashes []byte, from uint64, to uint64, proof *HandoverProof, err error)
	GetData(context.Context, []byte) ([]byte, error)
	Close()
}

type client struct {
	Client
	stream    Stream
	priority  uint8
	sessionAt uint64
	to        uint64
	next      chan error
	quit      chan struct{}

	intervalsKey   string
	intervalsStore state.Store
}

func peerStreamIntervalsKey(p *Peer, s Stream) string {
	return p.ID().String() + s.String()
}

func (c client) AddInterval(start, end uint64) (err error) {
	i := &intervals.Intervals{}
	err = c.intervalsStore.Get(c.intervalsKey, i)
	if err != nil {
		return err
	}
	i.Add(start, end)
	return c.intervalsStore.Put(c.intervalsKey, i)
}

func (c client) NextInterval() (start, end uint64, err error) {
	i := &intervals.Intervals{}
	err = c.intervalsStore.Get(c.intervalsKey, i)
	if err != nil {
		return 0, 0, err
	}
	start, end = i.Next()
	return start, end, nil
}

// Client interface for incoming peer Streamer
type Client interface {
	NeedData(context.Context, []byte) func(context.Context) error
	BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error)
	Close()
}
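// Illustrative skeleton, not part of the original file: the minimal shape of a
// Client implementation. A real client, such as the swarm syncer client,
// fetches missing chunks in NeedData; this no-op variant declines everything.
// The meaning of the nil returns is an assumption about how the streamer
// interprets them (a nil wait function meaning the chunk is not needed).
type noopClient struct{}

var _ Client = noopClient{}

func (noopClient) NeedData(ctx context.Context, hash []byte) func(context.Context) error {
	return nil // no wait function: this client never requests the chunk
}

func (noopClient) BatchDone(s Stream, from uint64, hashes, root []byte) func() (*TakeoverProof, error) {
	return nil // no takeover proof to produce
}

func (noopClient) Close() {}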
func (c *client) nextBatch(from uint64) (nextFrom uint64, nextTo uint64) {
	if c.to > 0 && from >= c.to {
		return 0, 0
	}
	if c.stream.Live {
		return from, 0
	} else if from >= c.sessionAt {
		if c.to > 0 {
			return from, c.to
		}
		return from, math.MaxUint64
	}
	nextFrom, nextTo, err := c.NextInterval()
	if err != nil {
		log.Error("next intervals", "stream", c.stream)
		return
	}
	if nextTo > c.to {
		nextTo = c.to
	}
	if nextTo == 0 {
		nextTo = c.sessionAt
	}
	return
}

func (c *client) batchDone(p *Peer, req *OfferedHashesMsg, hashes []byte) error {
	if tf := c.BatchDone(req.Stream, req.From, hashes, req.Root); tf != nil {
		tp, err := tf()
		if err != nil {
			return err
		}
		if err := p.SendPriority(context.TODO(), tp, c.priority); err != nil {
			return err
		}
		if c.to > 0 && tp.Takeover.End >= c.to {
			return p.streamer.Unsubscribe(p.Peer.ID(), req.Stream)
		}
		return nil
	}
	// TODO: make a test case for testing if the interval is added when the batch is done
	if err := c.AddInterval(req.From, req.To); err != nil {
		return err
	}
	return nil
}

func (c *client) close() {
	select {
	case <-c.quit:
	default:
		close(c.quit)
	}
	c.Close()
}

// clientParams stores parameters for the new client
// between a subscription and the initial offered hashes request handling.
type clientParams struct {
	priority uint8
	to       uint64
	// signal when the client is created
	clientCreatedC chan struct{}
}

func newClientParams(priority uint8, to uint64) *clientParams {
	return &clientParams{
		priority:       priority,
		to:             to,
		clientCreatedC: make(chan struct{}),
	}
}

func (c *clientParams) waitClient(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-c.clientCreatedC:
		return nil
	}
}

func (c *clientParams) clientCreated() {
	close(c.clientCreatedC)
}

// GetSpec returns the streamer spec to callers.
// This used to be a global variable, but for simulations with
// multiple nodes its fields (notably the Hook) would be overwritten.
func (r *Registry) GetSpec() *protocols.Spec {
	return r.spec
}

func (r *Registry) createSpec() {
	// Spec is the spec of the streamer protocol
	var spec = &protocols.Spec{
		Name:       "stream",
		Version:    8,
		MaxMsgSize: 10 * 1024 * 1024,
		Messages: []interface{}{
			UnsubscribeMsg{},
			OfferedHashesMsg{},
			WantedHashesMsg{},
			TakeoverProofMsg{},
			SubscribeMsg{},
			RetrieveRequestMsg{},
			ChunkDeliveryMsgRetrieval{},
			SubscribeErrorMsg{},
			RequestSubscriptionMsg{},
			QuitMsg{},
			ChunkDeliveryMsgSyncing{},
		},
	}
	r.spec = spec
}

// An accountable message needs some meta information attached to it
// in order to evaluate the correct price
type StreamerPrices struct {
	priceMatrix map[reflect.Type]*protocols.Price
	registry    *Registry
}

// Price implements the accounting interface and returns the price for a specific message
func (sp *StreamerPrices) Price(msg interface{}) *protocols.Price {
	t := reflect.TypeOf(msg).Elem()
	return sp.priceMatrix[t]
}

// Instead of hardcoding the price, get it
// through a function - it could be quite complex in the future
func (sp *StreamerPrices) getRetrieveRequestMsgPrice() uint64 {
	return uint64(1)
}

// Instead of hardcoding the price, get it
// through a function - it could be quite complex in the future
func (sp *StreamerPrices) getChunkDeliveryMsgRetrievalPrice() uint64 {
	return uint64(1)
}

// createPriceOracle sets up a matrix which can be queried to get
// the price for a message via the Price method
func (r *Registry) createPriceOracle() {
	sp := &StreamerPrices{
		registry: r,
	}
	sp.priceMatrix = map[reflect.Type]*protocols.Price{
		reflect.TypeOf(ChunkDeliveryMsgRetrieval{}): {
			Value:   sp.getChunkDeliveryMsgRetrievalPrice(), // arbitrary price for now
			PerByte: true,
			Payer:   protocols.Receiver,
		},
		reflect.TypeOf(RetrieveRequestMsg{}): {
			Value:   sp.getRetrieveRequestMsgPrice(), // arbitrary price for now
			PerByte: false,
			Payer:   protocols.Sender,
		},
	}
	r.prices = sp
}
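// Illustrative sketch, not part of the original file: querying the price
// oracle built above. Price expects a pointer, since it calls
// reflect.TypeOf(msg).Elem() to look up the element type in the matrix.
// Per the matrix: chunk deliveries are paid per byte by the receiver,
// retrieve requests per message by the sender.
func examplePriceLookup(r *Registry) (deliveryPrice, requestPrice *protocols.Price) {
	deliveryPrice = r.prices.Price(&ChunkDeliveryMsgRetrieval{})
	requestPrice = r.prices.Price(&RetrieveRequestMsg{})
	return deliveryPrice, requestPrice
}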
func (r *Registry) Protocols() []p2p.Protocol {
	return []p2p.Protocol{
		{
			Name:    r.spec.Name,
			Version: r.spec.Version,
			Length:  r.spec.Length(),
			Run:     r.runProtocol,
		},
	}
}

func (r *Registry) APIs() []rpc.API {
	return []rpc.API{
		{
			Namespace: "stream",
			Version:   "3.0",
			Service:   r.api,
			Public:    true,
		},
	}
}

func (r *Registry) Start(server *p2p.Server) error {
	log.Info("Streamer started")
	return nil
}

func (r *Registry) Stop() error {
	return nil
}

type Range struct {
	From, To uint64
}

func NewRange(from, to uint64) *Range {
	return &Range{
		From: from,
		To:   to,
	}
}

func (r *Range) String() string {
	return fmt.Sprintf("%v-%v", r.From, r.To)
}

func getHistoryPriority(priority uint8) uint8 {
	if priority == 0 {
		return 0
	}
	return priority - 1
}

func getHistoryStream(s Stream) Stream {
	return NewStream(s.Name, s.Key, false)
}

type API struct {
	streamer *Registry
}

func NewAPI(r *Registry) *API {
	return &API{
		streamer: r,
	}
}

func (api *API) SubscribeStream(peerId enode.ID, s Stream, history *Range, priority uint8) error {
	return api.streamer.Subscribe(peerId, s, history, priority)
}

func (api *API) UnsubscribeStream(peerId enode.ID, s Stream) error {
	return api.streamer.Unsubscribe(peerId, s)
}
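// Illustrative sketch, not part of the original file: calling the API above
// over RPC. With Namespace "stream", the methods are exposed under the usual
// namespace_methodName convention as stream_subscribeStream and
// stream_unsubscribeStream; the client handle and peer ID are placeholders.
func exampleRPCSubscribe(client *rpc.Client, peerID enode.ID) error {
	s := NewStream("SYNC", FormatSyncBinKey(1), false)
	// subscribe to history bin 1 in the range 0-100 at Mid priority
	return client.Call(nil, "stream_subscribeStream", peerID, s, NewRange(0, 100), Mid)
}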