github.com/alexdevranger/node-1.8.27@v0.0.0-20221128213301-aa5841e41d2d/swarm/network/stream/stream.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-dubxcoin library.
//
// The go-dubxcoin library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-dubxcoin library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-dubxcoin library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"context"
	"errors"
	"fmt"
	"math"
	"reflect"
	"sync"
	"time"

	"github.com/alexdevranger/node-1.8.27/metrics"
	"github.com/alexdevranger/node-1.8.27/p2p"
	"github.com/alexdevranger/node-1.8.27/p2p/enode"
	"github.com/alexdevranger/node-1.8.27/p2p/protocols"
	"github.com/alexdevranger/node-1.8.27/rpc"
	"github.com/alexdevranger/node-1.8.27/swarm/log"
	"github.com/alexdevranger/node-1.8.27/swarm/network"
	"github.com/alexdevranger/node-1.8.27/swarm/network/stream/intervals"
	"github.com/alexdevranger/node-1.8.27/swarm/state"
	"github.com/alexdevranger/node-1.8.27/swarm/storage"
)
const (
	Low uint8 = iota
	Mid
	High
	Top
	PriorityQueue    = 4    // number of priority queues - Low, Mid, High, Top
	PriorityQueueCap = 4096 // queue capacity
	HashSize         = 32
)
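
// The priority constants index the per-peer priority queues; a higher value
// means more urgent delivery. Within this file, for example, Registry.Run
// subscribes to the retrieve request stream with Top priority, while
// doRequestSubscription requests SYNC streams with High priority.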

// Enumerate options for syncing and retrieval
type SyncingOption int
type RetrievalOption int

// Syncing options
const (
	// Syncing disabled
	SyncingDisabled SyncingOption = iota
	// Register the client and the server but do not subscribe
	SyncingRegisterOnly
	// Both client and server funcs are registered; a subscribe message is sent automatically
	SyncingAutoSubscribe
)

const (
	// Retrieval disabled. Used mostly for tests to isolate syncing features (i.e. syncing only)
	RetrievalDisabled RetrievalOption = iota
	// Only the client side of the retrieve request is registered
	// (light nodes do not serve retrieve requests).
	// Once the client is registered, a subscription to the retrieve request stream is always sent.
	RetrievalClientOnly
	// Both client and server funcs are registered; a subscribe message is sent automatically
	RetrievalEnabled
)

// subscriptionFunc determines how subscriptions are performed. Normally it
// issues real subscription requests to nodes, but tests may substitute other
// behavior (see TestRequestPeerSubscriptions in streamer_test.go).
var subscriptionFunc func(r *Registry, p *network.Peer, bin uint8, subs map[enode.ID]map[Stream]struct{}) bool = doRequestSubscription

// Registry is the registry for outgoing and incoming streamer constructors
type Registry struct {
	addr           enode.ID
	api            *API
	skipCheck      bool
	clientMu       sync.RWMutex
	serverMu       sync.RWMutex
	peersMu        sync.RWMutex
	serverFuncs    map[string]func(*Peer, string, bool) (Server, error)
	clientFuncs    map[string]func(*Peer, string, bool) (Client, error)
	peers          map[enode.ID]*Peer
	delivery       *Delivery
	intervalsStore state.Store
	autoRetrieval  bool // automatically subscribe to retrieve request stream
	maxPeerServers int
	spec           *protocols.Spec   // this protocol's spec
	balance        protocols.Balance // implements protocols.Balance, for accounting
	prices         protocols.Prices  // implements protocols.Prices, provides prices to accounting
}

// RegistryOptions holds optional values for the NewRegistry constructor.
type RegistryOptions struct {
	SkipCheck       bool
	Syncing         SyncingOption   // Defines syncing behavior
	Retrieval       RetrievalOption // Defines retrieval behavior
	SyncUpdateDelay time.Duration
	MaxPeerServers  int // The limit of servers for each peer in registry
}

// NewRegistry is the Streamer constructor
func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.SyncChunkStore, intervalsStore state.Store, options *RegistryOptions, balance protocols.Balance) *Registry {
	if options == nil {
		options = &RegistryOptions{}
	}
	if options.SyncUpdateDelay <= 0 {
		options.SyncUpdateDelay = 15 * time.Second
	}
	// retrieval is enabled unless it has been explicitly disabled
	retrieval := options.Retrieval != RetrievalDisabled

	streamer := &Registry{
		addr:           localID,
		skipCheck:      options.SkipCheck,
		serverFuncs:    make(map[string]func(*Peer, string, bool) (Server, error)),
		clientFuncs:    make(map[string]func(*Peer, string, bool) (Client, error)),
		peers:          make(map[enode.ID]*Peer),
		delivery:       delivery,
		intervalsStore: intervalsStore,
		autoRetrieval:  retrieval,
		maxPeerServers: options.MaxPeerServers,
		balance:        balance,
	}

	streamer.setupSpec()

	streamer.api = NewAPI(streamer)
	delivery.getPeer = streamer.getPeer

	// if retrieval is enabled, register the server func, so that retrieve requests will be served (non-light nodes only)
	if options.Retrieval == RetrievalEnabled {
		streamer.RegisterServerFunc(swarmChunkServerStreamName, func(_ *Peer, _ string, live bool) (Server, error) {
			if !live {
				return nil, errors.New("only live retrieval requests supported")
			}
			return NewSwarmChunkServer(delivery.chunkStore), nil
		})
	}

	// if retrieval is not disabled, register the client func (both light nodes and normal nodes can issue retrieve requests)
	if options.Retrieval != RetrievalDisabled {
		streamer.RegisterClientFunc(swarmChunkServerStreamName, func(p *Peer, t string, live bool) (Client, error) {
			return NewSwarmSyncerClient(p, syncChunkStore, NewStream(swarmChunkServerStreamName, t, live))
		})
	}

	// If syncing is not disabled, the syncing functions are registered (both client and server)
	if options.Syncing != SyncingDisabled {
		RegisterSwarmSyncerServer(streamer, syncChunkStore)
		RegisterSwarmSyncerClient(streamer, syncChunkStore)
	}

	// if syncing is set to automatically subscribe to the syncing stream, start the subscription process
	if options.Syncing == SyncingAutoSubscribe {
		// latestIntC ensures that
		//   - receiving from the in chan is not blocked by processing inside the for loop
		//   - the latest int value is delivered to the loop after the processing is done
		// In the context of NeighbourhoodDepthC:
		// after syncing is done updating inside the loop, we do not need to react to
		// intermediate depth changes, only to the latest one
		latestIntC := func(in <-chan int) <-chan int {
			out := make(chan int, 1)

			go func() {
				defer close(out)

				for i := range in {
					select {
					case <-out:
					default:
					}
					out <- i
				}
			}()

			return out
		}

		go func() {
			// wait for the kademlia table to be healthy
			time.Sleep(options.SyncUpdateDelay)

			kad := streamer.delivery.kad
			depthC := latestIntC(kad.NeighbourhoodDepthC())
			addressBookSizeC := latestIntC(kad.AddrCountC())

			// initial requests for syncing subscription to peers
			streamer.updateSyncing()

			for depth := range depthC {
				log.Debug("Kademlia neighbourhood depth change", "depth", depth)

				// Prevent too early sync subscriptions by waiting until there are no
				// new peers connecting. Sync streams updating will be done after no
				// peers are connected for at least SyncUpdateDelay period.
				timer := time.NewTimer(options.SyncUpdateDelay)
				// Hard limit to sync update delay, preventing long delays
				// on a very dynamic network
				maxTimer := time.NewTimer(3 * time.Minute)
			loop:
				for {
					select {
					case <-maxTimer.C:
						// force syncing update when a hard timeout is reached
						log.Trace("Sync subscriptions update on hard timeout")
						// request for syncing subscription to new peers
						streamer.updateSyncing()
						break loop
					case <-timer.C:
						// start syncing as no new peers have been added to kademlia
						// for some time
						log.Trace("Sync subscriptions update")
						// request for syncing subscription to new peers
						streamer.updateSyncing()
						break loop
					case size := <-addressBookSizeC:
						log.Trace("Kademlia address book size changed on depth change", "size", size)
						// new peers have been added to kademlia,
						// reset the timer to prevent early sync subscriptions
						if !timer.Stop() {
							<-timer.C
						}
						timer.Reset(options.SyncUpdateDelay)
					}
				}
				timer.Stop()
				maxTimer.Stop()
			}
		}()
	}

	return streamer
}
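
// A minimal construction sketch (hedged: localID, delivery, netStore and
// stateStore are illustrative names for values the caller is assumed to have
// initialized elsewhere):
//
//	registry := NewRegistry(localID, delivery, netStore, stateStore, &RegistryOptions{
//		Syncing:         SyncingAutoSubscribe,
//		Retrieval:       RetrievalEnabled,
//		SyncUpdateDelay: 15 * time.Second,
//	}, nil) // a nil balance leaves the accounting hook disabled (see setupSpec)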

// This is an accounted protocol, therefore we need to provide a pricing Hook to the spec.
// For simulations to be able to run multiple nodes and not override the hook's balance,
// we need to construct a spec instance per node instance.
func (r *Registry) setupSpec() {
	// first create the "bare" spec
	r.createSpec()
	// now create the pricing object
	r.createPriceOracle()
	// if balance is nil, this node has been started without swap support (swapEnabled flag is false)
	if r.balance != nil && !reflect.ValueOf(r.balance).IsNil() {
		// swap is enabled, so set up the hook
		r.spec.Hook = protocols.NewAccounting(r.balance, r.prices)
	}
}

// RegisterClientFunc registers an incoming streamer constructor
func (r *Registry) RegisterClientFunc(stream string, f func(*Peer, string, bool) (Client, error)) {
	r.clientMu.Lock()
	defer r.clientMu.Unlock()

	r.clientFuncs[stream] = f
}

// RegisterServerFunc registers an outgoing streamer constructor
func (r *Registry) RegisterServerFunc(stream string, f func(*Peer, string, bool) (Server, error)) {
	r.serverMu.Lock()
	defer r.serverMu.Unlock()

	r.serverFuncs[stream] = f
}

// GetClientFunc is the accessor for incoming streamer constructors
func (r *Registry) GetClientFunc(stream string) (func(*Peer, string, bool) (Client, error), error) {
	r.clientMu.RLock()
	defer r.clientMu.RUnlock()

	f := r.clientFuncs[stream]
	if f == nil {
		return nil, fmt.Errorf("stream %v not registered", stream)
	}
	return f, nil
}

// GetServerFunc is the accessor for outgoing streamer constructors
func (r *Registry) GetServerFunc(stream string) (func(*Peer, string, bool) (Server, error), error) {
	r.serverMu.RLock()
	defer r.serverMu.RUnlock()

	f := r.serverFuncs[stream]
	if f == nil {
		return nil, fmt.Errorf("stream %v not registered", stream)
	}
	return f, nil
}

// RequestSubscription sends a RequestSubscriptionMsg to the given peer,
// asking it to subscribe to the stream, unless a server for this stream
// already exists for that peer.
func (r *Registry) RequestSubscription(peerId enode.ID, s Stream, h *Range, prio uint8) error {
	// check if the stream is registered
	if _, err := r.GetServerFunc(s.Name); err != nil {
		return err
	}

	peer := r.getPeer(peerId)
	if peer == nil {
		return fmt.Errorf("peer not found %v", peerId)
	}

	if _, err := peer.getServer(s); err != nil {
		if e, ok := err.(*notFoundError); ok && e.t == "server" {
			// request subscription only if the server for this stream is not created
			log.Debug("RequestSubscription ", "peer", peerId, "stream", s, "history", h)
			return peer.Send(context.TODO(), &RequestSubscriptionMsg{
				Stream:   s,
				History:  h,
				Priority: prio,
			})
		}
		return err
	}
	log.Trace("RequestSubscription: already subscribed", "peer", peerId, "stream", s, "history", h)
	return nil
}
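
// An illustrative call (hedged: peerID stands for a connected peer's
// enode.ID), mirroring what doRequestSubscription below does for SYNC streams:
//
//	err := registry.RequestSubscription(peerID, NewStream("SYNC", FormatSyncBinKey(2), true), NewRange(0, 0), High)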

// Subscribe initiates a stream subscription with the given peer
func (r *Registry) Subscribe(peerId enode.ID, s Stream, h *Range, priority uint8) error {
	// check if the stream is registered
	if _, err := r.GetClientFunc(s.Name); err != nil {
		return err
	}

	peer := r.getPeer(peerId)
	if peer == nil {
		return fmt.Errorf("peer not found %v", peerId)
	}

	var to uint64
	if !s.Live && h != nil {
		to = h.To
	}

	err := peer.setClientParams(s, newClientParams(priority, to))
	if err != nil {
		return err
	}
	if s.Live && h != nil {
		if err := peer.setClientParams(
			getHistoryStream(s),
			newClientParams(getHistoryPriority(priority), h.To),
		); err != nil {
			return err
		}
	}

	msg := &SubscribeMsg{
		Stream:   s,
		History:  h,
		Priority: priority,
	}
	log.Debug("Subscribe ", "peer", peerId, "stream", s, "history", h)

	return peer.SendPriority(context.TODO(), msg, priority, "")
}
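
// A usage sketch (hedged: peerID is assumed to identify a connected peer).
// Subscribing to a live stream with a history range also registers the
// corresponding history stream client, one priority level lower:
//
//	s := NewStream("SYNC", FormatSyncBinKey(2), true)
//	err := registry.Subscribe(peerID, s, NewRange(0, 100), Mid)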

func (r *Registry) Unsubscribe(peerId enode.ID, s Stream) error {
	peer := r.getPeer(peerId)
	if peer == nil {
		return fmt.Errorf("peer not found %v", peerId)
	}

	msg := &UnsubscribeMsg{
		Stream: s,
	}
	log.Debug("Unsubscribe ", "peer", peerId, "stream", s)

	if err := peer.Send(context.TODO(), msg); err != nil {
		return err
	}
	return peer.removeClient(s)
}

// Quit sends the QuitMsg to the peer to remove the
// stream peer client and terminate the streaming.
func (r *Registry) Quit(peerId enode.ID, s Stream) error {
	peer := r.getPeer(peerId)
	if peer == nil {
		log.Debug("stream quit: peer not found", "peer", peerId, "stream", s)
		// if the peer is not found, abort the request
		return nil
	}

	msg := &QuitMsg{
		Stream: s,
	}
	log.Debug("Quit ", "peer", peerId, "stream", s)

	return peer.Send(context.TODO(), msg)
}

func (r *Registry) Close() error {
	return r.intervalsStore.Close()
}

func (r *Registry) getPeer(peerId enode.ID) *Peer {
	r.peersMu.RLock()
	defer r.peersMu.RUnlock()

	return r.peers[peerId]
}

func (r *Registry) setPeer(peer *Peer) {
	r.peersMu.Lock()
	r.peers[peer.ID()] = peer
	metrics.GetOrRegisterGauge("registry.peers", nil).Update(int64(len(r.peers)))
	r.peersMu.Unlock()
}

func (r *Registry) deletePeer(peer *Peer) {
	r.peersMu.Lock()
	delete(r.peers, peer.ID())
	metrics.GetOrRegisterGauge("registry.peers", nil).Update(int64(len(r.peers)))
	r.peersMu.Unlock()
}

func (r *Registry) peersCount() (c int) {
	r.peersMu.Lock()
	c = len(r.peers)
	r.peersMu.Unlock()
	return
}

// Run is the protocol run function
func (r *Registry) Run(p *network.BzzPeer) error {
	sp := NewPeer(p.Peer, r)
	r.setPeer(sp)
	defer r.deletePeer(sp)
	defer close(sp.quit)
	defer sp.close()

	if r.autoRetrieval && !p.LightNode {
		err := r.Subscribe(p.ID(), NewStream(swarmChunkServerStreamName, "", true), nil, Top)
		if err != nil {
			return err
		}
	}

	return sp.Run(sp.HandleMsg)
}

// updateSyncing subscribes to SYNC streams by iterating over the
// kademlia connections and bins. If there are existing SYNC streams
// that are no longer required after the iteration, Quit requests
// will be sent to the appropriate peers.
func (r *Registry) updateSyncing() {
	kad := r.delivery.kad
	// map of all SYNC streams for all peers,
	// used at the end of the function to remove servers
	// that are not needed anymore
	subs := make(map[enode.ID]map[Stream]struct{})
	r.peersMu.RLock()
	for id, peer := range r.peers {
		peer.serverMu.RLock()
		for stream := range peer.servers {
			if stream.Name == "SYNC" {
				if _, ok := subs[id]; !ok {
					subs[id] = make(map[Stream]struct{})
				}
				subs[id][stream] = struct{}{}
			}
		}
		peer.serverMu.RUnlock()
	}
	r.peersMu.RUnlock()

	// start requesting subscriptions from peers
	r.requestPeerSubscriptions(kad, subs)

	// remove SYNC servers that do not need to be subscribed
	for id, streams := range subs {
		if len(streams) == 0 {
			continue
		}
		peer := r.getPeer(id)
		if peer == nil {
			continue
		}
		for stream := range streams {
			log.Debug("Remove sync server", "peer", id, "stream", stream)
			err := r.Quit(peer.ID(), stream)
			if err != nil && err != p2p.ErrShuttingDown {
				log.Error("quit", "err", err, "peer", peer.ID(), "stream", stream)
			}
		}
	}
}

// requestPeerSubscriptions iterates over each live peer in the kademlia table
// and sends a `RequestSubscription` to peers according to their bin
// and their relationship with kademlia's depth.
// Also check `TestRequestPeerSubscriptions` in order to understand the
// expected behavior.
// The function expects:
//   * the kademlia
//   * a map of subscriptions
// The actual subscription call is made through the package-level
// subscriptionFunc variable (tests replace it so that no real
// subscriptions are made).
func (r *Registry) requestPeerSubscriptions(kad *network.Kademlia, subs map[enode.ID]map[Stream]struct{}) {
	var startPo int
	var endPo int
	var ok bool

	// kademlia's depth
	kadDepth := kad.NeighbourhoodDepth()
	// request subscriptions for all nodes and bins;
	// nil as base takes the node's base; we need to pass 255 as `EachConn` runs
	// from deepest bins backwards
	kad.EachConn(nil, 255, func(p *network.Peer, po int) bool {
		// nodes that do not provide stream protocol
		// should not be subscribed, e.g. bootnodes
		if !p.HasCap("stream") {
			return true
		}
		// if the peer's bin is shallower than the kademlia depth,
		// only the peer's bin should be subscribed
		if po < kadDepth {
			startPo = po
			endPo = po
		} else {
			// if the peer's bin is equal to or deeper than the kademlia depth,
			// each bin from the depth up to k.MaxProxDisplay should be subscribed
			startPo = kadDepth
			endPo = kad.MaxProxDisplay
		}

		for bin := startPo; bin <= endPo; bin++ {
			// do the actual subscription
			ok = subscriptionFunc(r, p, uint8(bin), subs)
		}
		return ok
	})
}
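
// A worked example of the bin selection above (hedged: MaxProxDisplay is
// assumed to be at its usual default of 16): with kadDepth = 5, a peer at
// po 2 (shallower than depth) gets a single subscription request for bin 2,
// while a peer at po 7 (at or deeper than depth) gets requests for every bin
// from 5 through MaxProxDisplay.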

// doRequestSubscription sends the actual RequestSubscription to the peer
func doRequestSubscription(r *Registry, p *network.Peer, bin uint8, subs map[enode.ID]map[Stream]struct{}) bool {
	log.Debug("Requesting subscription by registry:", "registry", r.addr, "peer", p.ID(), "bin", bin)
	// bin is always less than 256 and it is safe to convert it to type uint8
	stream := NewStream("SYNC", FormatSyncBinKey(bin), true)
	if streams, ok := subs[p.ID()]; ok {
		// delete live and history streams from the map, so that they won't be removed with a Quit request
		delete(streams, stream)
		delete(streams, getHistoryStream(stream))
	}
	err := r.RequestSubscription(p.ID(), stream, NewRange(0, 0), High)
	if err != nil {
		log.Debug("Request subscription", "err", err, "peer", p.ID(), "stream", stream)
		return false
	}
	return true
}

func (r *Registry) runProtocol(p *p2p.Peer, rw p2p.MsgReadWriter) error {
	peer := protocols.NewPeer(p, rw, r.spec)
	bp := network.NewBzzPeer(peer)
	np := network.NewPeer(bp, r.delivery.kad)
	r.delivery.kad.On(np)
	defer r.delivery.kad.Off(np)
	return r.Run(bp)
}

// HandleMsg is the message handler that delegates incoming messages
func (p *Peer) HandleMsg(ctx context.Context, msg interface{}) error {
	switch msg := msg.(type) {

	case *SubscribeMsg:
		return p.handleSubscribeMsg(ctx, msg)

	case *SubscribeErrorMsg:
		return p.handleSubscribeErrorMsg(msg)

	case *UnsubscribeMsg:
		return p.handleUnsubscribeMsg(msg)

	case *OfferedHashesMsg:
		return p.handleOfferedHashesMsg(ctx, msg)

	case *TakeoverProofMsg:
		return p.handleTakeoverProofMsg(ctx, msg)

	case *WantedHashesMsg:
		return p.handleWantedHashesMsg(ctx, msg)

	case *ChunkDeliveryMsgRetrieval:
		// handling chunk delivery is the same for retrieval and syncing, so let's cast the msg
		return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, ((*ChunkDeliveryMsg)(msg)))

	case *ChunkDeliveryMsgSyncing:
		// handling chunk delivery is the same for retrieval and syncing, so let's cast the msg
		return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, ((*ChunkDeliveryMsg)(msg)))

	case *RetrieveRequestMsg:
		return p.streamer.delivery.handleRetrieveRequestMsg(ctx, p, msg)

	case *RequestSubscriptionMsg:
		return p.handleRequestSubscription(ctx, msg)

	case *QuitMsg:
		return p.handleQuitMsg(msg)

	default:
		return fmt.Errorf("unknown message type: %T", msg)
	}
}

type server struct {
	Server
	stream       Stream
	priority     uint8
	currentBatch []byte
	sessionIndex uint64
}

// setNextBatch adjusts the passed interval based on the session index and on
// whether the stream is live or history. It calls Server.SetNextBatch with
// the adjusted interval and returns batch hashes and their interval.
func (s *server) setNextBatch(from, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
	if s.stream.Live {
		if from == 0 {
			from = s.sessionIndex
		}
		if to <= from || from >= s.sessionIndex {
			to = math.MaxUint64
		}
	} else {
		if (to < from && to != 0) || from > s.sessionIndex {
			return nil, 0, 0, nil, nil
		}
		if to == 0 || to > s.sessionIndex {
			to = s.sessionIndex
		}
	}
	return s.SetNextBatch(from, to)
}
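
// A worked example of the adjustment above, with sessionIndex = 42: a live
// request (from=0, to=0) becomes SetNextBatch(42, math.MaxUint64), i.e.
// open-ended from the session cursor onwards, while a history request
// (from=0, to=0) becomes SetNextBatch(0, 42), i.e. capped at the state of
// the stream when the session started.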

// Server interface for outgoing peer Streamer
type Server interface {
	// SessionIndex is called when a server is initialized
	// to get the current cursor state of the stream data.
	// Based on this index, live and history stream intervals
	// will be adjusted before calling SetNextBatch.
	SessionIndex() (uint64, error)
	SetNextBatch(uint64, uint64) (hashes []byte, from uint64, to uint64, proof *HandoverProof, err error)
	GetData(context.Context, []byte) ([]byte, error)
	Close()
}

type client struct {
	Client
	stream    Stream
	priority  uint8
	sessionAt uint64
	to        uint64
	next      chan error
	quit      chan struct{}

	intervalsKey   string
	intervalsStore state.Store
}

func peerStreamIntervalsKey(p *Peer, s Stream) string {
	return p.ID().String() + s.String()
}

func (c client) AddInterval(start, end uint64) (err error) {
	i := &intervals.Intervals{}
	err = c.intervalsStore.Get(c.intervalsKey, i)
	if err != nil {
		return err
	}
	i.Add(start, end)
	return c.intervalsStore.Put(c.intervalsKey, i)
}

func (c client) NextInterval() (start, end uint64, err error) {
	i := &intervals.Intervals{}
	err = c.intervalsStore.Get(c.intervalsKey, i)
	if err != nil {
		return 0, 0, err
	}
	start, end = i.Next()
	return start, end, nil
}
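
// A brief sketch of how these two helpers cooperate (hedged: the exact
// return values depend on the intervals.Intervals implementation):
// AddInterval persists a range of stream indexes as synced under the
// peer+stream key, and NextInterval reads the stored set back and asks it
// for the next unsynced range, which nextBatch below then clamps to the
// session boundaries.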

// Client interface for incoming peer Streamer
type Client interface {
	NeedData(context.Context, []byte) func(context.Context) error
	BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error)
	Close()
}

func (c *client) nextBatch(from uint64) (nextFrom uint64, nextTo uint64) {
	if c.to > 0 && from >= c.to {
		return 0, 0
	}
	if c.stream.Live {
		return from, 0
	} else if from >= c.sessionAt {
		if c.to > 0 {
			return from, c.to
		}
		return from, math.MaxUint64
	}
	nextFrom, nextTo, err := c.NextInterval()
	if err != nil {
		log.Error("next intervals", "stream", c.stream, "err", err)
		return
	}
	if nextTo > c.to {
		nextTo = c.to
	}
	if nextTo == 0 {
		nextTo = c.sessionAt
	}
	return
}
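
// Tracing nextBatch (an illustration, assuming sessionAt = 100 and no upper
// bound, i.e. c.to = 0): a live stream always yields (from, 0), an
// open-ended batch; a history request starting at or past the session start,
// say from = 100, yields (100, math.MaxUint64); and a history request below
// the session start consults the stored intervals, capping the result at
// sessionAt so that history syncing does not cross into the live session.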

func (c *client) batchDone(p *Peer, req *OfferedHashesMsg, hashes []byte) error {
	if tf := c.BatchDone(req.Stream, req.From, hashes, req.Root); tf != nil {
		tp, err := tf()
		if err != nil {
			return err
		}

		if err := p.SendPriority(context.TODO(), tp, c.priority, ""); err != nil {
			return err
		}
		if c.to > 0 && tp.Takeover.End >= c.to {
			return p.streamer.Unsubscribe(p.Peer.ID(), req.Stream)
		}
		return nil
	}
	// TODO: make a test case for testing if the interval is added when the batch is done
	if err := c.AddInterval(req.From, req.To); err != nil {
		return err
	}
	return nil
}

func (c *client) close() {
	select {
	case <-c.quit:
	default:
		close(c.quit)
	}
	c.Close()
}

// clientParams stores parameters for the new client
// between a subscription and initial offered hashes request handling.
type clientParams struct {
	priority uint8
	to       uint64
	// signal when the client is created
	clientCreatedC chan struct{}
}

func newClientParams(priority uint8, to uint64) *clientParams {
	return &clientParams{
		priority:       priority,
		to:             to,
		clientCreatedC: make(chan struct{}),
	}
}

func (c *clientParams) waitClient(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-c.clientCreatedC:
		return nil
	}
}

func (c *clientParams) clientCreated() {
	close(c.clientCreatedC)
}

// GetSpec returns the streamer spec to callers.
// This used to be a global variable but for simulations with
// multiple nodes its fields (notably the Hook) would be overwritten.
func (r *Registry) GetSpec() *protocols.Spec {
	return r.spec
}

func (r *Registry) createSpec() {
	// Spec is the spec of the streamer protocol
	var spec = &protocols.Spec{
		Name:       "stream",
		Version:    8,
		MaxMsgSize: 10 * 1024 * 1024,
		Messages: []interface{}{
			UnsubscribeMsg{},
			OfferedHashesMsg{},
			WantedHashesMsg{},
			TakeoverProofMsg{},
			SubscribeMsg{},
			RetrieveRequestMsg{},
			ChunkDeliveryMsgRetrieval{},
			SubscribeErrorMsg{},
			RequestSubscriptionMsg{},
			QuitMsg{},
			ChunkDeliveryMsgSyncing{},
		},
	}
	r.spec = spec
}

// StreamerPrices attaches the meta information an accountable message needs
// in order to evaluate the correct price
type StreamerPrices struct {
	priceMatrix map[reflect.Type]*protocols.Price
	registry    *Registry
}

// Price implements the accounting interface and returns the price for a specific message
func (sp *StreamerPrices) Price(msg interface{}) *protocols.Price {
	t := reflect.TypeOf(msg).Elem()
	return sp.priceMatrix[t]
}

// Instead of hardcoding the price, get it
// through a function - it could be quite complex in the future
func (sp *StreamerPrices) getRetrieveRequestMsgPrice() uint64 {
	return uint64(1)
}

// Instead of hardcoding the price, get it
// through a function - it could be quite complex in the future
func (sp *StreamerPrices) getChunkDeliveryMsgRetrievalPrice() uint64 {
	return uint64(1)
}

// createPriceOracle sets up a matrix which can be queried to get
// the price for a message via the Price method
func (r *Registry) createPriceOracle() {
	sp := &StreamerPrices{
		registry: r,
	}
	sp.priceMatrix = map[reflect.Type]*protocols.Price{
		reflect.TypeOf(ChunkDeliveryMsgRetrieval{}): {
			Value:   sp.getChunkDeliveryMsgRetrievalPrice(), // arbitrary price for now
			PerByte: true,
			Payer:   protocols.Receiver,
		},
		reflect.TypeOf(RetrieveRequestMsg{}): {
			Value:   sp.getRetrieveRequestMsgPrice(), // arbitrary price for now
			PerByte: false,
			Payer:   protocols.Sender,
		},
	}
	r.prices = sp
}
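
// A lookup sketch: Price expects a pointer (it calls reflect.TypeOf(msg).Elem()
// to look up the concrete message type), so querying the oracle for a
// retrieval delivery looks like
//
//	p := r.prices.Price(&ChunkDeliveryMsgRetrieval{}) // per-byte price, paid by the receiver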

func (r *Registry) Protocols() []p2p.Protocol {
	return []p2p.Protocol{
		{
			Name:    r.spec.Name,
			Version: r.spec.Version,
			Length:  r.spec.Length(),
			Run:     r.runProtocol,
		},
	}
}

func (r *Registry) APIs() []rpc.API {
	return []rpc.API{
		{
			Namespace: "stream",
			Version:   "3.0",
			Service:   r.api,
			Public:    true,
		},
	}
}

func (r *Registry) Start(server *p2p.Server) error {
	log.Info("Streamer started")
	return nil
}

func (r *Registry) Stop() error {
	return nil
}

type Range struct {
	From, To uint64
}

func NewRange(from, to uint64) *Range {
	return &Range{
		From: from,
		To:   to,
	}
}

func (r *Range) String() string {
	return fmt.Sprintf("%v-%v", r.From, r.To)
}

func getHistoryPriority(priority uint8) uint8 {
	if priority == 0 {
		return 0
	}
	return priority - 1
}

func getHistoryStream(s Stream) Stream {
	return NewStream(s.Name, s.Key, false)
}

type API struct {
	streamer *Registry
}

func NewAPI(r *Registry) *API {
	return &API{
		streamer: r,
	}
}

func (api *API) SubscribeStream(peerId enode.ID, s Stream, history *Range, priority uint8) error {
	return api.streamer.Subscribe(peerId, s, history, priority)
}

func (api *API) UnsubscribeStream(peerId enode.ID, s Stream) error {
	return api.streamer.Unsubscribe(peerId, s)
}
/*
GetPeerSubscriptions is an API function that can be called via RPC.
It returns a map of node IDs to arrays of string representations of
the Stream objects each peer is subscribed to.
*/
func (api *API) GetPeerSubscriptions() map[string][]string {
	// create the empty map
	pstreams := make(map[string][]string)

	// iterate over all streamer peers
	api.streamer.peersMu.RLock()
	defer api.streamer.peersMu.RUnlock()

	for id, p := range api.streamer.peers {
		var streams []string
		// every peer has a map of stream servers;
		// every stream server represents a subscription
		p.serverMu.RLock()
		for s := range p.servers {
			// append the string representation of the stream
			// to the list for this peer
			streams = append(streams, s.String())
		}
		p.serverMu.RUnlock()
		// set the array of stream servers to the map
		pstreams[id.String()] = streams
	}
	return pstreams
}
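
// An invocation sketch (hedged: assumes the node exposes the "stream" RPC
// namespace over HTTP on the usual port, and that method names follow the
// geth convention of lowercasing the first letter):
//
//	curl -X POST -H "Content-Type: application/json" \
//	  --data '{"jsonrpc":"2.0","method":"stream_getPeerSubscriptions","params":[],"id":1}' \
//	  http://localhost:8545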