github.com/oskarth/go-ethereum@v1.6.8-0.20191013093314-dac24a9d3494/swarm/network/stream/stream.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"context"
	"errors"
	"fmt"
	"math"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/protocols"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/network/stream/intervals"
	"github.com/ethereum/go-ethereum/swarm/pot"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

const (
	Low uint8 = iota
	Mid
	High
	Top
	PriorityQueue    = 4    // number of priority queues - Low, Mid, High, Top
	PriorityQueueCap = 4096 // queue capacity
	HashSize         = 32
)

// Registry is the registry for outgoing and incoming streamer constructors
type Registry struct {
	addr           enode.ID
	api            *API
	skipCheck      bool
	clientMu       sync.RWMutex
	serverMu       sync.RWMutex
	peersMu        sync.RWMutex
	serverFuncs    map[string]func(*Peer, string, bool) (Server, error)
	clientFuncs    map[string]func(*Peer, string, bool) (Client, error)
	peers          map[enode.ID]*Peer
	delivery       *Delivery
	intervalsStore state.Store
	doRetrieve     bool
	maxPeerServers int
}

// RegistryOptions holds optional values for the NewRegistry constructor.
type RegistryOptions struct {
	SkipCheck       bool
	DoSync          bool // Sets whether the server syncs with peers. Default is true, set to false by lightnode or nosync flags.
	DoRetrieve      bool // Sets whether the server issues Retrieve requests. Default is true.
	DoServeRetrieve bool // Sets whether the server serves Retrieve requests. Default is true, set to false by lightnode flag.
	SyncUpdateDelay time.Duration
	MaxPeerServers  int // The limit of servers for each peer in the registry
}

// NewRegistry is the Streamer constructor
func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.SyncChunkStore, intervalsStore state.Store, options *RegistryOptions) *Registry {
	if options == nil {
		options = &RegistryOptions{}
	}
	if options.SyncUpdateDelay <= 0 {
		options.SyncUpdateDelay = 15 * time.Second
	}
	streamer := &Registry{
		addr:           localID,
		skipCheck:      options.SkipCheck,
		serverFuncs:    make(map[string]func(*Peer, string, bool) (Server, error)),
		clientFuncs:    make(map[string]func(*Peer, string, bool) (Client, error)),
		peers:          make(map[enode.ID]*Peer),
		delivery:       delivery,
		intervalsStore: intervalsStore,
		doRetrieve:     options.DoRetrieve,
		maxPeerServers: options.MaxPeerServers,
	}
	streamer.api = NewAPI(streamer)
	delivery.getPeer = streamer.getPeer

	if options.DoServeRetrieve {
		streamer.RegisterServerFunc(swarmChunkServerStreamName, func(_ *Peer, _ string, live bool) (Server, error) {
			if !live {
				return nil, errors.New("only live retrieval requests supported")
			}
			return NewSwarmChunkServer(delivery.chunkStore), nil
		})
	}

	streamer.RegisterClientFunc(swarmChunkServerStreamName, func(p *Peer, t string, live bool) (Client, error) {
		return NewSwarmSyncerClient(p, syncChunkStore, NewStream(swarmChunkServerStreamName, t, live))
	})

	if options.DoSync {
		RegisterSwarmSyncerServer(streamer, syncChunkStore)
		RegisterSwarmSyncerClient(streamer, syncChunkStore)

		// latestIntC ensures that
		//   - receiving from the in chan is not blocked by processing inside the for loop
		//   - the latest int value is delivered to the loop after the processing is done
		// In the context of NeighbourhoodDepthC:
		// after syncing is done updating inside the loop, we do not need to react to
		// intermediate depth changes, only to the latest one
		latestIntC := func(in <-chan int) <-chan int {
			out := make(chan int, 1)

			go func() {
				defer close(out)

				for i := range in {
					select {
					case <-out:
					default:
					}
					out <- i
				}
			}()

			return out
		}

		go func() {
			// wait for the kademlia table to become healthy
			time.Sleep(options.SyncUpdateDelay)

			kad := streamer.delivery.kad
			depthC := latestIntC(kad.NeighbourhoodDepthC())
			addressBookSizeC := latestIntC(kad.AddrCountC())

			// initial requests for syncing subscriptions to peers
			streamer.updateSyncing()

			for depth := range depthC {
				log.Debug("Kademlia neighbourhood depth change", "depth", depth)

				// Prevent too early sync subscriptions by waiting until there are no
				// new peers connecting. Sync streams will be updated only after no
				// new peers have connected for at least the SyncUpdateDelay period.
				timer := time.NewTimer(options.SyncUpdateDelay)
				// Hard limit on the sync update delay, preventing long delays
				// on a very dynamic network
				maxTimer := time.NewTimer(3 * time.Minute)
			loop:
				for {
					select {
					case <-maxTimer.C:
						// force a syncing update when the hard timeout is reached
						log.Trace("Sync subscriptions update on hard timeout")
						// request syncing subscriptions to new peers
						streamer.updateSyncing()
						break loop
					case <-timer.C:
						// start syncing as no new peers have been added to kademlia
						// for some time
						log.Trace("Sync subscriptions update")
						// request syncing subscriptions to new peers
						streamer.updateSyncing()
						break loop
					case size := <-addressBookSizeC:
						log.Trace("Kademlia address book size changed on depth change", "size", size)
						// new peers have been added to kademlia,
						// reset the timer to prevent early sync subscriptions
						if !timer.Stop() {
							<-timer.C
						}
						timer.Reset(options.SyncUpdateDelay)
					}
				}
				timer.Stop()
				maxTimer.Stop()
			}
		}()
	}

	return streamer
}

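// Illustrative sketch (not part of the original file): wiring up a Registry.
// The localID, delivery, netStore and stateStore values are hypothetical
// placeholders for the caller's existing components.
//
//	opts := &RegistryOptions{
//		DoSync:          true,
//		DoRetrieve:      true,
//		DoServeRetrieve: true,
//		SyncUpdateDelay: 15 * time.Second,
//	}
//	registry := NewRegistry(localID, delivery, netStore, stateStore, opts)
//	defer registry.Close()
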
// RegisterClientFunc registers an incoming streamer constructor
func (r *Registry) RegisterClientFunc(stream string, f func(*Peer, string, bool) (Client, error)) {
	r.clientMu.Lock()
	defer r.clientMu.Unlock()

	r.clientFuncs[stream] = f
}

// RegisterServerFunc registers an outgoing streamer constructor
func (r *Registry) RegisterServerFunc(stream string, f func(*Peer, string, bool) (Server, error)) {
	r.serverMu.Lock()
	defer r.serverMu.Unlock()

	r.serverFuncs[stream] = f
}

// GetClientFunc is an accessor for incoming streamer constructors
func (r *Registry) GetClientFunc(stream string) (func(*Peer, string, bool) (Client, error), error) {
	r.clientMu.RLock()
	defer r.clientMu.RUnlock()

	f := r.clientFuncs[stream]
	if f == nil {
		return nil, fmt.Errorf("stream %v not registered", stream)
	}
	return f, nil
}

// GetServerFunc is an accessor for outgoing streamer constructors
func (r *Registry) GetServerFunc(stream string) (func(*Peer, string, bool) (Server, error), error) {
	r.serverMu.RLock()
	defer r.serverMu.RUnlock()

	f := r.serverFuncs[stream]
	if f == nil {
		return nil, fmt.Errorf("stream %v not registered", stream)
	}
	return f, nil
}

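// Illustrative sketch (not part of the original file): registering a server
// constructor under a custom stream name and looking it up again. The
// "MYSTREAM" name and newMyServer constructor are hypothetical.
//
//	registry.RegisterServerFunc("MYSTREAM", func(p *Peer, key string, live bool) (Server, error) {
//		return newMyServer(key, live), nil
//	})
//	serverFunc, err := registry.GetServerFunc("MYSTREAM")
//	if err != nil {
//		// "stream MYSTREAM not registered" when no constructor exists
//	}
//	_ = serverFunc
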
// RequestSubscription asks the peer to subscribe to the given stream by
// sending a RequestSubscriptionMsg, unless a server for that stream already exists.
func (r *Registry) RequestSubscription(peerId enode.ID, s Stream, h *Range, prio uint8) error {
	// check if the stream is registered
	if _, err := r.GetServerFunc(s.Name); err != nil {
		return err
	}

	peer := r.getPeer(peerId)
	if peer == nil {
		return fmt.Errorf("peer not found %v", peerId)
	}

	if _, err := peer.getServer(s); err != nil {
		if e, ok := err.(*notFoundError); ok && e.t == "server" {
			// request a subscription only if the server for this stream is not yet created
			log.Debug("RequestSubscription ", "peer", peerId, "stream", s, "history", h)
			return peer.Send(context.TODO(), &RequestSubscriptionMsg{
				Stream:   s,
				History:  h,
				Priority: prio,
			})
		}
		return err
	}
	log.Trace("RequestSubscription: already subscribed", "peer", peerId, "stream", s, "history", h)
	return nil
}

// Subscribe initiates a client stream subscription on the given peer
func (r *Registry) Subscribe(peerId enode.ID, s Stream, h *Range, priority uint8) error {
	// check if the stream is registered
	if _, err := r.GetClientFunc(s.Name); err != nil {
		return err
	}

	peer := r.getPeer(peerId)
	if peer == nil {
		return fmt.Errorf("peer not found %v", peerId)
	}

	var to uint64
	if !s.Live && h != nil {
		to = h.To
	}

	err := peer.setClientParams(s, newClientParams(priority, to))
	if err != nil {
		return err
	}
	if s.Live && h != nil {
		if err := peer.setClientParams(
			getHistoryStream(s),
			newClientParams(getHistoryPriority(priority), h.To),
		); err != nil {
			return err
		}
	}

	msg := &SubscribeMsg{
		Stream:   s,
		History:  h,
		Priority: priority,
	}
	log.Debug("Subscribe ", "peer", peerId, "stream", s, "history", h)

	return peer.SendPriority(context.TODO(), msg, priority)
}

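// Illustrative sketch (not part of the original file): subscribing to the live
// SYNC stream of bin 0 at High priority; for a live stream with a history
// range, Subscribe also prepares the matching history stream client one
// priority level lower. The peerID value is a hypothetical connected peer.
//
//	s := NewStream("SYNC", FormatSyncBinKey(0), true)
//	if err := registry.Subscribe(peerID, s, NewRange(0, 0), High); err != nil {
//		log.Error("subscribe failed", "err", err)
//	}
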
// Unsubscribe sends an UnsubscribeMsg to the peer and removes the stream's client.
func (r *Registry) Unsubscribe(peerId enode.ID, s Stream) error {
	peer := r.getPeer(peerId)
	if peer == nil {
		return fmt.Errorf("peer not found %v", peerId)
	}

	msg := &UnsubscribeMsg{
		Stream: s,
	}
	log.Debug("Unsubscribe ", "peer", peerId, "stream", s)

	if err := peer.Send(context.TODO(), msg); err != nil {
		return err
	}
	return peer.removeClient(s)
}

// Quit sends the QuitMsg to the peer to remove the
// stream peer client and terminate the streaming.
func (r *Registry) Quit(peerId enode.ID, s Stream) error {
	peer := r.getPeer(peerId)
	if peer == nil {
		log.Debug("stream quit: peer not found", "peer", peerId, "stream", s)
		// if the peer is not found, abort the request
		return nil
	}

	msg := &QuitMsg{
		Stream: s,
	}
	log.Debug("Quit ", "peer", peerId, "stream", s)

	return peer.Send(context.TODO(), msg)
}

func (r *Registry) NodeInfo() interface{} {
	return nil
}

func (r *Registry) PeerInfo(id enode.ID) interface{} {
	return nil
}

func (r *Registry) Close() error {
	return r.intervalsStore.Close()
}

func (r *Registry) getPeer(peerId enode.ID) *Peer {
	r.peersMu.RLock()
	defer r.peersMu.RUnlock()

	return r.peers[peerId]
}

func (r *Registry) setPeer(peer *Peer) {
	r.peersMu.Lock()
	r.peers[peer.ID()] = peer
	metrics.GetOrRegisterGauge("registry.peers", nil).Update(int64(len(r.peers)))
	r.peersMu.Unlock()
}

func (r *Registry) deletePeer(peer *Peer) {
	r.peersMu.Lock()
	delete(r.peers, peer.ID())
	metrics.GetOrRegisterGauge("registry.peers", nil).Update(int64(len(r.peers)))
	r.peersMu.Unlock()
}

func (r *Registry) peersCount() (c int) {
	r.peersMu.Lock()
	c = len(r.peers)
	r.peersMu.Unlock()
	return
}

// Run is the protocol run function
func (r *Registry) Run(p *network.BzzPeer) error {
	sp := NewPeer(p.Peer, r)
	r.setPeer(sp)
	defer r.deletePeer(sp)
	defer close(sp.quit)
	defer sp.close()

	if r.doRetrieve {
		err := r.Subscribe(p.ID(), NewStream(swarmChunkServerStreamName, "", true), nil, Top)
		if err != nil {
			return err
		}
	}

	return sp.Run(sp.HandleMsg)
}

// updateSyncing subscribes to SYNC streams by iterating over the
// kademlia connections and bins. If there are existing SYNC streams
// that are no longer required after the iteration, a request to Quit
// them will be sent to the appropriate peers.
func (r *Registry) updateSyncing() {
	kad := r.delivery.kad
	// map of all SYNC streams for all peers
	// used at the end of the function to remove servers
	// that are not needed anymore
	subs := make(map[enode.ID]map[Stream]struct{})
	r.peersMu.RLock()
	for id, peer := range r.peers {
		peer.serverMu.RLock()
		for stream := range peer.servers {
			if stream.Name == "SYNC" {
				if _, ok := subs[id]; !ok {
					subs[id] = make(map[Stream]struct{})
				}
				subs[id][stream] = struct{}{}
			}
		}
		peer.serverMu.RUnlock()
	}
	r.peersMu.RUnlock()

	// request subscriptions for all nodes and bins
	kad.EachBin(r.addr[:], pot.DefaultPof(256), 0, func(p *network.Peer, bin int) bool {
		log.Debug(fmt.Sprintf("Requesting subscription by: registry %s from peer %s for bin: %d", r.addr, p.ID(), bin))

		// bin is always less than 256 and it is safe to convert it to type uint8
		stream := NewStream("SYNC", FormatSyncBinKey(uint8(bin)), true)
		if streams, ok := subs[p.ID()]; ok {
			// delete live and history streams from the map, so that they won't be removed with a Quit request
			delete(streams, stream)
			delete(streams, getHistoryStream(stream))
		}
		err := r.RequestSubscription(p.ID(), stream, NewRange(0, 0), High)
		if err != nil {
			log.Debug("Request subscription", "err", err, "peer", p.ID(), "stream", stream)
			return false
		}
		return true
	})

	// remove SYNC servers that no longer need to be subscribed
	for id, streams := range subs {
		if len(streams) == 0 {
			continue
		}
		peer := r.getPeer(id)
		if peer == nil {
			continue
		}
		for stream := range streams {
			log.Debug("Remove sync server", "peer", id, "stream", stream)
			err := r.Quit(peer.ID(), stream)
			if err != nil && err != p2p.ErrShuttingDown {
				log.Error("quit", "err", err, "peer", peer.ID(), "stream", stream)
			}
		}
	}
}

func (r *Registry) runProtocol(p *p2p.Peer, rw p2p.MsgReadWriter) error {
	peer := protocols.NewPeer(p, rw, Spec)
	bp := network.NewBzzPeer(peer)
	np := network.NewPeer(bp, r.delivery.kad)
	r.delivery.kad.On(np)
	defer r.delivery.kad.Off(np)
	return r.Run(bp)
}

// HandleMsg is the message handler that delegates incoming messages
func (p *Peer) HandleMsg(ctx context.Context, msg interface{}) error {
	switch msg := msg.(type) {

	case *SubscribeMsg:
		return p.handleSubscribeMsg(ctx, msg)

	case *SubscribeErrorMsg:
		return p.handleSubscribeErrorMsg(msg)

	case *UnsubscribeMsg:
		return p.handleUnsubscribeMsg(msg)

	case *OfferedHashesMsg:
		return p.handleOfferedHashesMsg(ctx, msg)

	case *TakeoverProofMsg:
		return p.handleTakeoverProofMsg(ctx, msg)

	case *WantedHashesMsg:
		return p.handleWantedHashesMsg(ctx, msg)

	case *ChunkDeliveryMsgRetrieval:
		// handling chunk delivery is the same for retrieval and syncing, so cast the msg
		return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, (*ChunkDeliveryMsg)(msg))

	case *ChunkDeliveryMsgSyncing:
		// handling chunk delivery is the same for retrieval and syncing, so cast the msg
		return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, (*ChunkDeliveryMsg)(msg))

	case *RetrieveRequestMsg:
		return p.streamer.delivery.handleRetrieveRequestMsg(ctx, p, msg)

	case *RequestSubscriptionMsg:
		return p.handleRequestSubscription(ctx, msg)

	case *QuitMsg:
		return p.handleQuitMsg(msg)

	default:
		return fmt.Errorf("unknown message type: %T", msg)
	}
}

type server struct {
	Server
	stream       Stream
	priority     uint8
	currentBatch []byte
	sessionIndex uint64
}

// setNextBatch adjusts the passed interval based on the session index and whether
// the stream is live or history. It calls the Server's SetNextBatch with the
// adjusted interval and returns the batch hashes and their interval.
func (s *server) setNextBatch(from, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
	if s.stream.Live {
		if from == 0 {
			from = s.sessionIndex
		}
		if to <= from || from >= s.sessionIndex {
			to = math.MaxUint64
		}
	} else {
		if (to < from && to != 0) || from > s.sessionIndex {
			return nil, 0, 0, nil, nil
		}
		if to == 0 || to > s.sessionIndex {
			to = s.sessionIndex
		}
	}
	return s.SetNextBatch(from, to)
}

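// Illustrative worked example (not part of the original file), assuming a
// sessionIndex of 100:
//
//	live stream:    setNextBatch(0, 0)    -> SetNextBatch(100, math.MaxUint64)
//	live stream:    setNextBatch(100, 0)  -> SetNextBatch(100, math.MaxUint64)
//	history stream: setNextBatch(0, 0)    -> SetNextBatch(0, 100)
//	history stream: setNextBatch(50, 200) -> SetNextBatch(50, 100)
//	history stream: setNextBatch(150, 0)  -> nil batch, from is past the session index
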
// Server interface for outgoing peer Streamer
type Server interface {
	// SessionIndex is called when a server is initialized
	// to get the current cursor state of the stream data.
	// Based on this index, live and history stream intervals
	// will be adjusted before calling SetNextBatch.
	SessionIndex() (uint64, error)
	SetNextBatch(uint64, uint64) (hashes []byte, from uint64, to uint64, proof *HandoverProof, err error)
	GetData(context.Context, []byte) ([]byte, error)
	Close()
}

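// Illustrative sketch (not part of the original file): a minimal no-op Server
// implementation; the emptyServer type is an assumption, not part of this
// package.
//
//	type emptyServer struct{}
//
//	func (emptyServer) SessionIndex() (uint64, error) { return 0, nil }
//	func (emptyServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
//		// nothing to offer: an empty batch covering the requested interval
//		return nil, from, to, nil, nil
//	}
//	func (emptyServer) GetData(ctx context.Context, key []byte) ([]byte, error) { return nil, nil }
//	func (emptyServer) Close()                                                  {}
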
type client struct {
	Client
	stream    Stream
	priority  uint8
	sessionAt uint64
	to        uint64
	next      chan error
	quit      chan struct{}

	intervalsKey   string
	intervalsStore state.Store
}

func peerStreamIntervalsKey(p *Peer, s Stream) string {
	return p.ID().String() + s.String()
}

func (c client) AddInterval(start, end uint64) (err error) {
	i := &intervals.Intervals{}
	err = c.intervalsStore.Get(c.intervalsKey, i)
	if err != nil {
		return err
	}
	i.Add(start, end)
	return c.intervalsStore.Put(c.intervalsKey, i)
}

func (c client) NextInterval() (start, end uint64, err error) {
	i := &intervals.Intervals{}
	err = c.intervalsStore.Get(c.intervalsKey, i)
	if err != nil {
		return 0, 0, err
	}
	start, end = i.Next()
	return start, end, nil
}

// Client interface for incoming peer Streamer
type Client interface {
	NeedData(context.Context, []byte) func(context.Context) error
	BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error)
	Close()
}

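// Illustrative sketch (not part of the original file): a minimal Client that
// wants no data and yields no takeover proofs; the noopClient type is an
// assumption, not part of this package.
//
//	type noopClient struct{}
//
//	func (noopClient) NeedData(ctx context.Context, key []byte) func(context.Context) error {
//		return nil // nil signals the chunk is not needed
//	}
//	func (noopClient) BatchDone(s Stream, from uint64, hashes, root []byte) func() (*TakeoverProof, error) {
//		return nil // nil makes batchDone record the interval instead of sending a proof
//	}
//	func (noopClient) Close() {}
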
func (c *client) nextBatch(from uint64) (nextFrom uint64, nextTo uint64) {
	if c.to > 0 && from >= c.to {
		return 0, 0
	}
	if c.stream.Live {
		return from, 0
	} else if from >= c.sessionAt {
		if c.to > 0 {
			return from, c.to
		}
		return from, math.MaxUint64
	}
	nextFrom, nextTo, err := c.NextInterval()
	if err != nil {
		log.Error("next intervals", "stream", c.stream, "err", err)
		return
	}
	if nextTo > c.to {
		nextTo = c.to
	}
	if nextTo == 0 {
		nextTo = c.sessionAt
	}
	return
}

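// Illustrative worked example (not part of the original file), assuming a
// history stream client with sessionAt == 100, to == 0 and intervals up to 50
// already stored:
//
//	nextBatch(100) -> (100, math.MaxUint64)  // at or past the session start
//	nextBatch(10)  -> (51, 100)              // resume after stored intervals, capped at sessionAt
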
func (c *client) batchDone(p *Peer, req *OfferedHashesMsg, hashes []byte) error {
	if tf := c.BatchDone(req.Stream, req.From, hashes, req.Root); tf != nil {
		tp, err := tf()
		if err != nil {
			return err
		}
		if err := p.SendPriority(context.TODO(), tp, c.priority); err != nil {
			return err
		}
		if c.to > 0 && tp.Takeover.End >= c.to {
			return p.streamer.Unsubscribe(p.Peer.ID(), req.Stream)
		}
		return nil
	}
	// TODO: make a test case for testing if the interval is added when the batch is done
	if err := c.AddInterval(req.From, req.To); err != nil {
		return err
	}
	return nil
}

func (c *client) close() {
	select {
	case <-c.quit:
	default:
		close(c.quit)
	}
	c.Close()
}

// clientParams stores parameters for the new client
// between a subscription and the initial offered hashes request handling.
type clientParams struct {
	priority uint8
	to       uint64
	// signal when the client is created
	clientCreatedC chan struct{}
}

func newClientParams(priority uint8, to uint64) *clientParams {
	return &clientParams{
		priority:       priority,
		to:             to,
		clientCreatedC: make(chan struct{}),
	}
}

func (c *clientParams) waitClient(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-c.clientCreatedC:
		return nil
	}
}

func (c *clientParams) clientCreated() {
	close(c.clientCreatedC)
}

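// Illustrative sketch (not part of the original file): a caller waiting for
// the client to be created, bounded by a timeout.
//
//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//	defer cancel()
//	if err := params.waitClient(ctx); err != nil {
//		// context deadline exceeded: the client was not created in time
//	}
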
// Spec is the spec of the streamer protocol
var Spec = &protocols.Spec{
	Name:       "stream",
	Version:    8,
	MaxMsgSize: 10 * 1024 * 1024,
	Messages: []interface{}{
		UnsubscribeMsg{},
		OfferedHashesMsg{},
		WantedHashesMsg{},
		TakeoverProofMsg{},
		SubscribeMsg{},
		RetrieveRequestMsg{},
		ChunkDeliveryMsgRetrieval{},
		SubscribeErrorMsg{},
		RequestSubscriptionMsg{},
		QuitMsg{},
		ChunkDeliveryMsgSyncing{},
	},
}

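// Note (added; based on the go-ethereum p2p/protocols convention): a message
// type's wire code is its index in the Messages slice above, so reordering
// the slice is a breaking protocol change. A sketch of looking up a code:
//
//	code, ok := Spec.GetCode(SubscribeMsg{}) // code == 4, ok == true under the ordering above
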
func (r *Registry) Protocols() []p2p.Protocol {
	return []p2p.Protocol{
		{
			Name:    Spec.Name,
			Version: Spec.Version,
			Length:  Spec.Length(),
			Run:     r.runProtocol,
			// NodeInfo: ,
			// PeerInfo: ,
		},
	}
}

func (r *Registry) APIs() []rpc.API {
	return []rpc.API{
		{
			Namespace: "stream",
			Version:   "3.0",
			Service:   r.api,
			Public:    true,
		},
	}
}

func (r *Registry) Start(server *p2p.Server) error {
	log.Info("Streamer started")
	return nil
}

func (r *Registry) Stop() error {
	return nil
}

type Range struct {
	From, To uint64
}

func NewRange(from, to uint64) *Range {
	return &Range{
		From: from,
		To:   to,
	}
}

func (r *Range) String() string {
	return fmt.Sprintf("%v-%v", r.From, r.To)
}

func getHistoryPriority(priority uint8) uint8 {
	if priority == 0 {
		return 0
	}
	return priority - 1
}

func getHistoryStream(s Stream) Stream {
	return NewStream(s.Name, s.Key, false)
}

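// Illustrative note (not part of the original file): a history stream keeps
// the Name and Key of its live counterpart with Live set to false, and runs
// one priority level lower, floored at Low.
//
//	live := NewStream("SYNC", FormatSyncBinKey(3), true)
//	hist := getHistoryStream(live)   // same Name and Key, Live == false
//	prio := getHistoryPriority(High) // Mid
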
type API struct {
	streamer *Registry
}

func NewAPI(r *Registry) *API {
	return &API{
		streamer: r,
	}
}

func (api *API) SubscribeStream(peerId enode.ID, s Stream, history *Range, priority uint8) error {
	return api.streamer.Subscribe(peerId, s, history, priority)
}

func (api *API) UnsubscribeStream(peerId enode.ID, s Stream) error {
	return api.streamer.Unsubscribe(peerId, s)
}