github.com/divan/go-ethereum@v1.8.14-0.20180820134928-1de9ada4016d/swarm/network/stream/stream.go

     1  // Copyright 2018 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package stream
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"math"
    23  	"sync"
    24  	"time"
    25  
    26  	"github.com/ethereum/go-ethereum/metrics"
    27  	"github.com/ethereum/go-ethereum/p2p"
    28  	"github.com/ethereum/go-ethereum/p2p/discover"
    29  	"github.com/ethereum/go-ethereum/p2p/protocols"
    30  	"github.com/ethereum/go-ethereum/rpc"
    31  	"github.com/ethereum/go-ethereum/swarm/log"
    32  	"github.com/ethereum/go-ethereum/swarm/network"
    33  	"github.com/ethereum/go-ethereum/swarm/network/stream/intervals"
    34  	"github.com/ethereum/go-ethereum/swarm/pot"
    35  	"github.com/ethereum/go-ethereum/swarm/spancontext"
    36  	"github.com/ethereum/go-ethereum/swarm/state"
    37  	"github.com/ethereum/go-ethereum/swarm/storage"
    38  	opentracing "github.com/opentracing/opentracing-go"
    39  )
    40  
    41  const (
    42  	Low uint8 = iota
    43  	Mid
    44  	High
    45  	Top
    46  	PriorityQueue         // number of queues
    47  	PriorityQueueCap = 32 // queue capacity
    48  	HashSize         = 32
    49  )
    50  
    51  // Registry is a registry for outgoing and incoming streamer constructors
    52  type Registry struct {
    53  	api            *API
    54  	addr           *network.BzzAddr
    55  	skipCheck      bool
    56  	clientMu       sync.RWMutex
    57  	serverMu       sync.RWMutex
    58  	peersMu        sync.RWMutex
    59  	serverFuncs    map[string]func(*Peer, string, bool) (Server, error)
    60  	clientFuncs    map[string]func(*Peer, string, bool) (Client, error)
    61  	peers          map[discover.NodeID]*Peer
    62  	delivery       *Delivery
    63  	intervalsStore state.Store
    64  	doRetrieve     bool
    65  }
    66  
    67  // RegistryOptions holds optional values for the NewRegistry constructor.
    68  type RegistryOptions struct {
    69  	SkipCheck       bool
    70  	DoSync          bool
    71  	DoRetrieve      bool
    72  	SyncUpdateDelay time.Duration
    73  }
    74  
    75  // NewRegistry is the Streamer constructor
    76  func NewRegistry(addr *network.BzzAddr, delivery *Delivery, db *storage.DBAPI, intervalsStore state.Store, options *RegistryOptions) *Registry {
    77  	if options == nil {
    78  		options = &RegistryOptions{}
    79  	}
    80  	if options.SyncUpdateDelay <= 0 {
    81  		options.SyncUpdateDelay = 15 * time.Second
    82  	}
    83  	streamer := &Registry{
    84  		addr:           addr,
    85  		skipCheck:      options.SkipCheck,
    86  		serverFuncs:    make(map[string]func(*Peer, string, bool) (Server, error)),
    87  		clientFuncs:    make(map[string]func(*Peer, string, bool) (Client, error)),
    88  		peers:          make(map[discover.NodeID]*Peer),
    89  		delivery:       delivery,
    90  		intervalsStore: intervalsStore,
    91  		doRetrieve:     options.DoRetrieve,
    92  	}
    93  	streamer.api = NewAPI(streamer)
    94  	delivery.getPeer = streamer.getPeer
    95  	streamer.RegisterServerFunc(swarmChunkServerStreamName, func(_ *Peer, _ string, _ bool) (Server, error) {
    96  		return NewSwarmChunkServer(delivery.db), nil
    97  	})
    98  	streamer.RegisterClientFunc(swarmChunkServerStreamName, func(p *Peer, t string, live bool) (Client, error) {
    99  		return NewSwarmSyncerClient(p, delivery.db, false, NewStream(swarmChunkServerStreamName, t, live))
   100  	})
   101  	RegisterSwarmSyncerServer(streamer, db)
   102  	RegisterSwarmSyncerClient(streamer, db)
   103  
   104  	if options.DoSync {
   105  		// latestIntC ensures that:
   106  		//   - receiving from the in chan is not blocked by processing inside the for loop
   107  		//   - the latest int value is delivered to the loop after the processing is done
   108  		// In the context of NeighbourhoodDepthC:
   109  		// after syncing has finished updating inside the loop, we do not need to react to
   110  		// intermediate depth changes, only to the latest one
   111  		latestIntC := func(in <-chan int) <-chan int {
   112  			out := make(chan int, 1)
   113  
   114  			go func() {
   115  				defer close(out)
   116  
   117  				for i := range in {
   118  					select {
   119  					case <-out:
   120  					default:
   121  					}
   122  					out <- i
   123  				}
   124  			}()
   125  
   126  			return out
   127  		}
   128  
   129  		go func() {
   130  			// wait for kademlia table to be healthy
   131  			time.Sleep(options.SyncUpdateDelay)
   132  
   133  			kad := streamer.delivery.overlay.(*network.Kademlia)
   134  			depthC := latestIntC(kad.NeighbourhoodDepthC())
   135  			addressBookSizeC := latestIntC(kad.AddrCountC())
   136  
   137  			// initial requests for syncing subscriptions to peers
   138  			streamer.updateSyncing()
   139  
   140  			for depth := range depthC {
   141  				log.Debug("Kademlia neighbourhood depth change", "depth", depth)
   142  
   143  				// Prevent overly early sync subscriptions by waiting until no new peers
   144  				// are connecting. Sync streams will be updated only after no new peer has
   145  				// connected for at least the SyncUpdateDelay period.
   146  				timer := time.NewTimer(options.SyncUpdateDelay)
   147  				// Hard limit to sync update delay, preventing long delays
   148  				// on a very dynamic network
   149  				maxTimer := time.NewTimer(3 * time.Minute)
   150  			loop:
   151  				for {
   152  					select {
   153  					case <-maxTimer.C:
   154  						// force syncing update when a hard timeout is reached
   155  						log.Trace("Sync subscriptions update on hard timeout")
   156  						// request for syncing subscription to new peers
   157  						streamer.updateSyncing()
   158  						break loop
   159  					case <-timer.C:
   160  					// start syncing as no new peers have been added to kademlia
   161  					// for some time
   162  						log.Trace("Sync subscriptions update")
   163  						// request for syncing subscription to new peers
   164  						streamer.updateSyncing()
   165  						break loop
   166  					case size := <-addressBookSizeC:
   167  						log.Trace("Kademlia address book size changed on depth change", "size", size)
   168  					// new peers have been added to kademlia;
   169  					// reset the timer to prevent early sync subscriptions
   170  						if !timer.Stop() {
   171  							<-timer.C
   172  						}
   173  						timer.Reset(options.SyncUpdateDelay)
   174  					}
   175  				}
   176  				timer.Stop()
   177  				maxTimer.Stop()
   178  			}
   179  		}()
   180  	}
   181  
   182  	return streamer
   183  }
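
// Editor's illustrative sketch, not part of the original file: the latestIntC
// closure above is a "keep only the latest value" channel pattern. The producer
// is never blocked by a slow consumer, and the consumer only observes the most
// recent value. Lifted out as a standalone helper (name chosen for illustration),
// the same idea reads:
func latestInt(in <-chan int) <-chan int {
	out := make(chan int, 1)
	go func() {
		defer close(out)
		for v := range in {
			select {
			case <-out: // drop an unread stale value
			default:
			}
			out <- v // buffer the newest value
		}
	}()
	return out
}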
   184  
   185  // RegisterClientFunc registers an incoming streamer constructor
   186  func (r *Registry) RegisterClientFunc(stream string, f func(*Peer, string, bool) (Client, error)) {
   187  	r.clientMu.Lock()
   188  	defer r.clientMu.Unlock()
   189  
   190  	r.clientFuncs[stream] = f
   191  }
   192  
   193  // RegisterServerFunc registers an outgoing streamer constructor
   194  func (r *Registry) RegisterServerFunc(stream string, f func(*Peer, string, bool) (Server, error)) {
   195  	r.serverMu.Lock()
   196  	defer r.serverMu.Unlock()
   197  
   198  	r.serverFuncs[stream] = f
   199  }
   200  
   201  // GetClientFunc is an accessor for incoming streamer constructors
   202  func (r *Registry) GetClientFunc(stream string) (func(*Peer, string, bool) (Client, error), error) {
   203  	r.clientMu.RLock()
   204  	defer r.clientMu.RUnlock()
   205  
   206  	f := r.clientFuncs[stream]
   207  	if f == nil {
   208  		return nil, fmt.Errorf("stream %v not registered", stream)
   209  	}
   210  	return f, nil
   211  }
   212  
   213  // GetServerFunc is an accessor for outgoing streamer constructors
   214  func (r *Registry) GetServerFunc(stream string) (func(*Peer, string, bool) (Server, error), error) {
   215  	r.serverMu.RLock()
   216  	defer r.serverMu.RUnlock()
   217  
   218  	f := r.serverFuncs[stream]
   219  	if f == nil {
   220  		return nil, fmt.Errorf("stream %v not registered", stream)
   221  	}
   222  	return f, nil
   223  }
   224  
   225  func (r *Registry) RequestSubscription(peerId discover.NodeID, s Stream, h *Range, prio uint8) error {
   226  	// check if the stream is registered
   227  	if _, err := r.GetServerFunc(s.Name); err != nil {
   228  		return err
   229  	}
   230  
   231  	peer := r.getPeer(peerId)
   232  	if peer == nil {
   233  		return fmt.Errorf("peer not found %v", peerId)
   234  	}
   235  
   236  	if _, err := peer.getServer(s); err != nil {
   237  		if e, ok := err.(*notFoundError); ok && e.t == "server" {
   238  			// request subscription only if the server for this stream is not created
   239  			log.Debug("RequestSubscription ", "peer", peerId, "stream", s, "history", h)
   240  			return peer.Send(context.TODO(), &RequestSubscriptionMsg{
   241  				Stream:   s,
   242  				History:  h,
   243  				Priority: prio,
   244  			})
   245  		}
   246  		return err
   247  	}
   248  	log.Trace("RequestSubscription: already subscribed", "peer", peerId, "stream", s, "history", h)
   249  	return nil
   250  }
   251  
   252  // Subscribe initiates a client subscription to the given stream on the given peer
   253  func (r *Registry) Subscribe(peerId discover.NodeID, s Stream, h *Range, priority uint8) error {
   254  	// check if the stream is registered
   255  	if _, err := r.GetClientFunc(s.Name); err != nil {
   256  		return err
   257  	}
   258  
   259  	peer := r.getPeer(peerId)
   260  	if peer == nil {
   261  		return fmt.Errorf("peer not found %v", peerId)
   262  	}
   263  
   264  	var to uint64
   265  	if !s.Live && h != nil {
   266  		to = h.To
   267  	}
   268  
   269  	err := peer.setClientParams(s, newClientParams(priority, to))
   270  	if err != nil {
   271  		return err
   272  	}
   273  
   274  	if s.Live && h != nil {
   275  		if err := peer.setClientParams(
   276  			getHistoryStream(s),
   277  			newClientParams(getHistoryPriority(priority), h.To),
   278  		); err != nil {
   279  			return err
   280  		}
   281  	}
   282  
   283  	msg := &SubscribeMsg{
   284  		Stream:   s,
   285  		History:  h,
   286  		Priority: priority,
   287  	}
   288  	log.Debug("Subscribe ", "peer", peerId, "stream", s, "history", h)
   289  
   290  	return peer.SendPriority(context.TODO(), msg, priority)
   291  }
   292  
   293  func (r *Registry) Unsubscribe(peerId discover.NodeID, s Stream) error {
   294  	peer := r.getPeer(peerId)
   295  	if peer == nil {
   296  		return fmt.Errorf("peer not found %v", peerId)
   297  	}
   298  
   299  	msg := &UnsubscribeMsg{
   300  		Stream: s,
   301  	}
   302  	log.Debug("Unsubscribe ", "peer", peerId, "stream", s)
   303  
   304  	if err := peer.Send(context.TODO(), msg); err != nil {
   305  		return err
   306  	}
   307  	return peer.removeClient(s)
   308  }
   309  
   310  // Quit sends the QuitMsg to the peer to remove the
   311  // stream peer client and terminate the streaming.
   312  func (r *Registry) Quit(peerId discover.NodeID, s Stream) error {
   313  	peer := r.getPeer(peerId)
   314  	if peer == nil {
   315  		log.Debug("stream quit: peer not found", "peer", peerId, "stream", s)
   316  		// if the peer is not found, abort the request
   317  		return nil
   318  	}
   319  
   320  	msg := &QuitMsg{
   321  		Stream: s,
   322  	}
   323  	log.Debug("Quit ", "peer", peerId, "stream", s)
   324  
   325  	return peer.Send(context.TODO(), msg)
   326  }
   327  
   328  func (r *Registry) Retrieve(ctx context.Context, chunk *storage.Chunk) error {
   329  	var sp opentracing.Span
   330  	ctx, sp = spancontext.StartSpan(
   331  		ctx,
   332  		"registry.retrieve")
   333  	defer sp.Finish()
   334  
   335  	return r.delivery.RequestFromPeers(ctx, chunk.Addr[:], r.skipCheck)
   336  }
   337  
   338  func (r *Registry) NodeInfo() interface{} {
   339  	return nil
   340  }
   341  
   342  func (r *Registry) PeerInfo(id discover.NodeID) interface{} {
   343  	return nil
   344  }
   345  
   346  func (r *Registry) Close() error {
   347  	return r.intervalsStore.Close()
   348  }
   349  
   350  func (r *Registry) getPeer(peerId discover.NodeID) *Peer {
   351  	r.peersMu.RLock()
   352  	defer r.peersMu.RUnlock()
   353  
   354  	return r.peers[peerId]
   355  }
   356  
   357  func (r *Registry) setPeer(peer *Peer) {
   358  	r.peersMu.Lock()
   359  	r.peers[peer.ID()] = peer
   360  	metrics.GetOrRegisterGauge("registry.peers", nil).Update(int64(len(r.peers)))
   361  	r.peersMu.Unlock()
   362  }
   363  
   364  func (r *Registry) deletePeer(peer *Peer) {
   365  	r.peersMu.Lock()
   366  	delete(r.peers, peer.ID())
   367  	metrics.GetOrRegisterGauge("registry.peers", nil).Update(int64(len(r.peers)))
   368  	r.peersMu.Unlock()
   369  }
   370  
   371  func (r *Registry) peersCount() (c int) {
   372  	r.peersMu.Lock()
   373  	c = len(r.peers)
   374  	r.peersMu.Unlock()
   375  	return
   376  }
   377  
   378  // Run is the protocol run function executed for each peer
   379  func (r *Registry) Run(p *network.BzzPeer) error {
   380  	sp := NewPeer(p.Peer, r)
   381  	r.setPeer(sp)
   382  	defer r.deletePeer(sp)
   383  	defer close(sp.quit)
   384  	defer sp.close()
   385  
   386  	if r.doRetrieve {
   387  		err := r.Subscribe(p.ID(), NewStream(swarmChunkServerStreamName, "", false), nil, Top)
   388  		if err != nil {
   389  			return err
   390  		}
   391  	}
   392  
   393  	return sp.Run(sp.HandleMsg)
   394  }
   395  
   396  // updateSyncing subscribes to SYNC streams by iterating over the
   397  // kademlia connections and bins. If existing SYNC streams are no
   398  // longer required after the iteration, Quit requests are sent to
   399  // the appropriate peers.
   400  func (r *Registry) updateSyncing() {
   401  	// if the overlay is not a Kademlia, panic
   402  	kad := r.delivery.overlay.(*network.Kademlia)
   403  
   404  	// map of all SYNC streams for all peers,
   405  	// used at the end of the function to remove servers
   406  	// that are not needed anymore
   407  	subs := make(map[discover.NodeID]map[Stream]struct{})
   408  	r.peersMu.RLock()
   409  	for id, peer := range r.peers {
   410  		peer.serverMu.RLock()
   411  		for stream := range peer.servers {
   412  			if stream.Name == "SYNC" {
   413  				if _, ok := subs[id]; !ok {
   414  					subs[id] = make(map[Stream]struct{})
   415  				}
   416  				subs[id][stream] = struct{}{}
   417  			}
   418  		}
   419  		peer.serverMu.RUnlock()
   420  	}
   421  	r.peersMu.RUnlock()
   422  
   423  	// request subscriptions for all nodes and bins
   424  	kad.EachBin(r.addr.Over(), pot.DefaultPof(256), 0, func(conn network.OverlayConn, bin int) bool {
   425  		p := conn.(network.Peer)
   426  		log.Debug(fmt.Sprintf("Requesting subscription by: registry %s from peer %s for bin: %d", r.addr.ID(), p.ID(), bin))
   427  
   428  		// bin is always less than 256, so it is safe to convert it to type uint8
   429  		stream := NewStream("SYNC", FormatSyncBinKey(uint8(bin)), true)
   430  		if streams, ok := subs[p.ID()]; ok {
   431  			// delete live and history streams from the map, so that they won't be removed with a Quit request
   432  			delete(streams, stream)
   433  			delete(streams, getHistoryStream(stream))
   434  		}
   435  		err := r.RequestSubscription(p.ID(), stream, NewRange(0, 0), High)
   436  		if err != nil {
   437  			log.Debug("Request subscription", "err", err, "peer", p.ID(), "stream", stream)
   438  			return false
   439  		}
   440  		return true
   441  	})
   442  
   443  	// remove SYNC servers that do not need to be subscribed
   444  	for id, streams := range subs {
   445  		if len(streams) == 0 {
   446  			continue
   447  		}
   448  		peer := r.getPeer(id)
   449  		if peer == nil {
   450  			continue
   451  		}
   452  		for stream := range streams {
   453  			log.Debug("Remove sync server", "peer", id, "stream", stream)
   454  			err := r.Quit(peer.ID(), stream)
   455  			if err != nil && err != p2p.ErrShuttingDown {
   456  				log.Error("quit", "err", err, "peer", peer.ID(), "stream", stream)
   457  			}
   458  		}
   459  	}
   460  }
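
// Editor's illustrative sketch, not part of the original file: for each bin,
// updateSyncing above deals with two stream identities, the live SYNC stream and
// its history counterpart, and removes both from the bookkeeping map so that
// neither is quit while still needed. With a hypothetical bin index:
func syncStreamsForBin() (live, history Stream) {
	bin := 3 // hypothetical; bins are always < 256
	live = NewStream("SYNC", FormatSyncBinKey(uint8(bin)), true)
	history = getHistoryStream(live) // same name and key, Live == false
	return live, history
}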
   461  
   462  func (r *Registry) runProtocol(p *p2p.Peer, rw p2p.MsgReadWriter) error {
   463  	peer := protocols.NewPeer(p, rw, Spec)
   464  	bzzPeer := network.NewBzzTestPeer(peer, r.addr)
   465  	r.delivery.overlay.On(bzzPeer)
   466  	defer r.delivery.overlay.Off(bzzPeer)
   467  	return r.Run(bzzPeer)
   468  }
   469  
   470  // HandleMsg is the message handler that delegates incoming messages
   471  func (p *Peer) HandleMsg(ctx context.Context, msg interface{}) error {
   472  	switch msg := msg.(type) {
   473  
   474  	case *SubscribeMsg:
   475  		return p.handleSubscribeMsg(ctx, msg)
   476  
   477  	case *SubscribeErrorMsg:
   478  		return p.handleSubscribeErrorMsg(msg)
   479  
   480  	case *UnsubscribeMsg:
   481  		return p.handleUnsubscribeMsg(msg)
   482  
   483  	case *OfferedHashesMsg:
   484  		return p.handleOfferedHashesMsg(ctx, msg)
   485  
   486  	case *TakeoverProofMsg:
   487  		return p.handleTakeoverProofMsg(ctx, msg)
   488  
   489  	case *WantedHashesMsg:
   490  		return p.handleWantedHashesMsg(ctx, msg)
   491  
   492  	case *ChunkDeliveryMsg:
   493  		return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, msg)
   494  
   495  	case *RetrieveRequestMsg:
   496  		return p.streamer.delivery.handleRetrieveRequestMsg(ctx, p, msg)
   497  
   498  	case *RequestSubscriptionMsg:
   499  		return p.handleRequestSubscription(ctx, msg)
   500  
   501  	case *QuitMsg:
   502  		return p.handleQuitMsg(msg)
   503  
   504  	default:
   505  		return fmt.Errorf("unknown message type: %T", msg)
   506  	}
   507  }
   508  
   509  type server struct {
   510  	Server
   511  	stream       Stream
   512  	priority     uint8
   513  	currentBatch []byte
   514  }
   515  
   516  // Server is the interface for an outgoing peer Streamer
   517  type Server interface {
   518  	SetNextBatch(uint64, uint64) (hashes []byte, from uint64, to uint64, proof *HandoverProof, err error)
   519  	GetData(context.Context, []byte) ([]byte, error)
   520  	Close()
   521  }
   522  
   523  type client struct {
   524  	Client
   525  	stream    Stream
   526  	priority  uint8
   527  	sessionAt uint64
   528  	to        uint64
   529  	next      chan error
   530  	quit      chan struct{}
   531  
   532  	intervalsKey   string
   533  	intervalsStore state.Store
   534  }
   535  
   536  func peerStreamIntervalsKey(p *Peer, s Stream) string {
   537  	return p.ID().String() + s.String()
   538  }
   539  
   540  func (c client) AddInterval(start, end uint64) (err error) {
   541  	i := &intervals.Intervals{}
   542  	err = c.intervalsStore.Get(c.intervalsKey, i)
   543  	if err != nil {
   544  		return err
   545  	}
   546  	i.Add(start, end)
   547  	return c.intervalsStore.Put(c.intervalsKey, i)
   548  }
   549  
   550  func (c client) NextInterval() (start, end uint64, err error) {
   551  	i := &intervals.Intervals{}
   552  	err = c.intervalsStore.Get(c.intervalsKey, i)
   553  	if err != nil {
   554  		return 0, 0, err
   555  	}
   556  	start, end = i.Next()
   557  	return start, end, nil
   558  }
   559  
   560  // Client is the interface for an incoming peer Streamer
   561  type Client interface {
   562  	NeedData(context.Context, []byte) func()
   563  	BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error)
   564  	Close()
   565  }
   566  
   567  func (c *client) nextBatch(from uint64) (nextFrom uint64, nextTo uint64) {
   568  	if c.to > 0 && from >= c.to {
   569  		return 0, 0
   570  	}
   571  	if c.stream.Live {
   572  		return from, 0
   573  	} else if from >= c.sessionAt {
   574  		if c.to > 0 {
   575  			return from, c.to
   576  		}
   577  		return from, math.MaxUint64
   578  	}
   579  	nextFrom, nextTo, err := c.NextInterval()
   580  	if err != nil {
   581  		log.Error("next intervals", "stream", c.stream, "err", err)
   582  		return
   583  	}
   584  	if nextTo > c.to {
   585  		nextTo = c.to
   586  	}
   587  	if nextTo == 0 {
   588  		nextTo = c.sessionAt
   589  	}
   590  	return
   591  }
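
// Editor's illustrative walk-through, not part of the original file: hypothetical
// values showing how nextBatch picks the next range (only branches that do not
// touch the intervals store are exercised here).
func nextBatchExamples() {
	live := &client{stream: NewStream("SYNC", "01", true)}
	live.nextBatch(42) // -> (42, 0): live batches are open-ended

	hist := &client{stream: NewStream("SYNC", "01", false), sessionAt: 100}
	hist.nextBatch(150) // -> (150, math.MaxUint64): past the session start with no upper bound set
	// with from < sessionAt the stored intervals are consulted via NextInterval,
	// and the batch end is capped by c.to when set, otherwise by sessionAt

	bounded := &client{stream: NewStream("SYNC", "01", false), sessionAt: 100, to: 80}
	bounded.nextBatch(90) // -> (0, 0): the client already reached its configured end
}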
   592  
   593  func (c *client) batchDone(p *Peer, req *OfferedHashesMsg, hashes []byte) error {
   594  	if tf := c.BatchDone(req.Stream, req.From, hashes, req.Root); tf != nil {
   595  		tp, err := tf()
   596  		if err != nil {
   597  			return err
   598  		}
   599  		if err := p.SendPriority(context.TODO(), tp, c.priority); err != nil {
   600  			return err
   601  		}
   602  		if c.to > 0 && tp.Takeover.End >= c.to {
   603  			return p.streamer.Unsubscribe(p.Peer.ID(), req.Stream)
   604  		}
   605  		return nil
   606  	}
   607  	// TODO: make a test case for testing if the interval is added when the batch is done
   608  	if err := c.AddInterval(req.From, req.To); err != nil {
   609  		return err
   610  	}
   611  	return nil
   612  }
   613  
   614  func (c *client) close() {
   615  	select {
   616  	case <-c.quit:
   617  	default:
   618  		close(c.quit)
   619  	}
   620  	c.Close()
   621  }
   622  
   623  // clientParams stores parameters for the new client
   624  // between a subscription and the initial offered hashes request handling.
   625  type clientParams struct {
   626  	priority uint8
   627  	to       uint64
   628  	// signal when the client is created
   629  	clientCreatedC chan struct{}
   630  }
   631  
   632  func newClientParams(priority uint8, to uint64) *clientParams {
   633  	return &clientParams{
   634  		priority:       priority,
   635  		to:             to,
   636  		clientCreatedC: make(chan struct{}),
   637  	}
   638  }
   639  
   640  func (c *clientParams) waitClient(ctx context.Context) error {
   641  	select {
   642  	case <-ctx.Done():
   643  		return ctx.Err()
   644  	case <-c.clientCreatedC:
   645  		return nil
   646  	}
   647  }
   648  
   649  func (c *clientParams) clientCreated() {
   650  	close(c.clientCreatedC)
   651  }
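
// Editor's illustrative sketch, not part of the original file: waitClient and
// clientCreated form a one-shot handshake; the subscriber blocks until the client
// exists or the context expires. Hypothetical usage:
func waitForClientExample(ctx context.Context) error {
	params := newClientParams(High, 0)
	go func() {
		// ... the client is constructed elsewhere ...
		params.clientCreated() // closes clientCreatedC, releasing any waiter
	}()
	return params.waitClient(ctx) // nil once created, or ctx.Err() on cancellation
}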
   652  
   653  // Spec is the spec of the streamer protocol
   654  var Spec = &protocols.Spec{
   655  	Name:       "stream",
   656  	Version:    5,
   657  	MaxMsgSize: 10 * 1024 * 1024,
   658  	Messages: []interface{}{
   659  		UnsubscribeMsg{},
   660  		OfferedHashesMsg{},
   661  		WantedHashesMsg{},
   662  		TakeoverProofMsg{},
   663  		SubscribeMsg{},
   664  		RetrieveRequestMsg{},
   665  		ChunkDeliveryMsg{},
   666  		SubscribeErrorMsg{},
   667  		RequestSubscriptionMsg{},
   668  		QuitMsg{},
   669  	},
   670  }
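
// Editor's note, not part of the original file: in the p2p/protocols package the
// wire code of each message is, as far as the editor understands, derived from its
// position in the Messages slice, so reordering the slice above would change the
// wire protocol. Spec.Length(), used in Protocols() below, is the message count:
func specLengthExample() uint64 {
	return Spec.Length() // number of registered message types
}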
   671  
   672  func (r *Registry) Protocols() []p2p.Protocol {
   673  	return []p2p.Protocol{
   674  		{
   675  			Name:    Spec.Name,
   676  			Version: Spec.Version,
   677  			Length:  Spec.Length(),
   678  			Run:     r.runProtocol,
   679  			// NodeInfo: ,
   680  			// PeerInfo: ,
   681  		},
   682  	}
   683  }
   684  
   685  func (r *Registry) APIs() []rpc.API {
   686  	return []rpc.API{
   687  		{
   688  			Namespace: "stream",
   689  			Version:   "3.0",
   690  			Service:   r.api,
   691  			Public:    true,
   692  		},
   693  	}
   694  }
   695  
   696  func (r *Registry) Start(server *p2p.Server) error {
   697  	log.Info("Streamer started")
   698  	return nil
   699  }
   700  
   701  func (r *Registry) Stop() error {
   702  	return nil
   703  }
   704  
   705  type Range struct {
   706  	From, To uint64
   707  }
   708  
   709  func NewRange(from, to uint64) *Range {
   710  	return &Range{
   711  		From: from,
   712  		To:   to,
   713  	}
   714  }
   715  
   716  func (r *Range) String() string {
   717  	return fmt.Sprintf("%v-%v", r.From, r.To)
   718  }
   719  
   720  func getHistoryPriority(priority uint8) uint8 {
   721  	if priority == 0 {
   722  		return 0
   723  	}
   724  	return priority - 1
   725  }
   726  
   727  func getHistoryStream(s Stream) Stream {
   728  	return NewStream(s.Name, s.Key, false)
   729  }
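
// Editor's illustrative sketch, not part of the original file: a history stream
// shares the live stream's name and key with Live set to false, and runs one
// priority level below its live counterpart, bottoming out at Low. Hypothetical values:
func historyStreamExample() (Stream, uint8) {
	live := NewStream("SYNC", "02", true)                   // hypothetical live stream
	return getHistoryStream(live), getHistoryPriority(Top) // Live == false; Top maps to High
}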
   730  
   731  type API struct {
   732  	streamer *Registry
   733  }
   734  
   735  func NewAPI(r *Registry) *API {
   736  	return &API{
   737  		streamer: r,
   738  	}
   739  }
   740  
   741  func (api *API) SubscribeStream(peerId discover.NodeID, s Stream, history *Range, priority uint8) error {
   742  	return api.streamer.Subscribe(peerId, s, history, priority)
   743  }
   744  
   745  func (api *API) UnsubscribeStream(peerId discover.NodeID, s Stream) error {
   746  	return api.streamer.Unsubscribe(peerId, s)
   747  }