github.com/aaa256/atlantis@v0.0.0-20210707112435-42ee889287a2/swarm/network/stream/stream.go

     1  // Copyright 2018 The go-athereum Authors
     2  // This file is part of the go-athereum library.
     3  //
     4  // The go-athereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-athereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-athereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package stream
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"math"
    23  	"sync"
    24  	"time"
    25  
    26  	"github.com/athereum/go-athereum/metrics"
    27  	"github.com/athereum/go-athereum/p2p"
    28  	"github.com/athereum/go-athereum/p2p/discover"
    29  	"github.com/athereum/go-athereum/p2p/protocols"
    30  	"github.com/athereum/go-athereum/rpc"
    31  	"github.com/athereum/go-athereum/swarm/log"
    32  	"github.com/athereum/go-athereum/swarm/network"
    33  	"github.com/athereum/go-athereum/swarm/network/stream/intervals"
    34  	"github.com/athereum/go-athereum/swarm/pot"
    35  	"github.com/athereum/go-athereum/swarm/state"
    36  	"github.com/athereum/go-athereum/swarm/storage"
    37  )
    38  
    39  const (
    40  	Low uint8 = iota
    41  	Mid
    42  	High
    43  	Top
    44  	PriorityQueue         // number of queues
    45  	PriorityQueueCap = 32 // queue capacity
    46  	HashSize         = 32
    47  )
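
        // Priority levels select one of the PriorityQueue outgoing queues used by
        // Peer.SendPriority. A hedged, illustrative call (msg and peer assumed):
        //
        //	err := peer.SendPriority(msg, Top) // sent ahead of Low/Mid/High traffic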
    48  
    49  // Registry is a registry for outgoing and incoming streamer constructors
    50  type Registry struct {
    51  	api            *API
    52  	addr           *network.BzzAddr
    53  	skipCheck      bool
    54  	clientMu       sync.RWMutex
    55  	serverMu       sync.RWMutex
    56  	peersMu        sync.RWMutex
    57  	serverFuncs    map[string]func(*Peer, string, bool) (Server, error)
    58  	clientFuncs    map[string]func(*Peer, string, bool) (Client, error)
    59  	peers          map[discover.NodeID]*Peer
    60  	delivery       *Delivery
    61  	intervalsStore state.Store
    62  	doRetrieve     bool
    63  }
    64  
    65  // RegistryOptions holds optional values for NewRegistry constructor.
    66  type RegistryOptions struct {
    67  	SkipCheck       bool
    68  	DoSync          bool
    69  	DoRetrieve      bool
    70  	SyncUpdateDelay time.Duration
    71  }
    72  
    73  // NewRegistry is the Streamer constructor
    74  func NewRegistry(addr *network.BzzAddr, delivery *Delivery, db *storage.DBAPI, intervalsStore state.Store, options *RegistryOptions) *Registry {
    75  	if options == nil {
    76  		options = &RegistryOptions{}
    77  	}
    78  	if options.SyncUpdateDelay <= 0 {
    79  		options.SyncUpdateDelay = 15 * time.Second
    80  	}
    81  	streamer := &Registry{
    82  		addr:           addr,
    83  		skipCheck:      options.SkipCheck,
    84  		serverFuncs:    make(map[string]func(*Peer, string, bool) (Server, error)),
    85  		clientFuncs:    make(map[string]func(*Peer, string, bool) (Client, error)),
    86  		peers:          make(map[discover.NodeID]*Peer),
    87  		delivery:       delivery,
    88  		intervalsStore: intervalsStore,
    89  		doRetrieve:     options.DoRetrieve,
    90  	}
    91  	streamer.api = NewAPI(streamer)
    92  	delivery.getPeer = streamer.getPeer
    93  	streamer.RegisterServerFunc(swarmChunkServerStreamName, func(_ *Peer, _ string, _ bool) (Server, error) {
    94  		return NewSwarmChunkServer(delivery.db), nil
    95  	})
    96  	streamer.RegisterClientFunc(swarmChunkServerStreamName, func(p *Peer, t string, live bool) (Client, error) {
    97  		return NewSwarmSyncerClient(p, delivery.db, false, NewStream(swarmChunkServerStreamName, t, live))
    98  	})
    99  	RegisterSwarmSyncerServer(streamer, db)
   100  	RegisterSwarmSyncerClient(streamer, db)
   101  
   102  	if options.DoSync {
   103  		// the latestIntC function ensures that
   104  		//   - receiving from the in chan is not blocked by processing inside the for loop
   105  		//   - the latest int value is delivered to the loop after the processing is done
   106  		// In the context of NeighbourhoodDepthC: after the syncing is done updating
   107  		// inside the loop, we do not need to update on the intermediate depth changes,
   108  		// only on the latest one
   109  		latestIntC := func(in <-chan int) <-chan int {
   110  			out := make(chan int, 1)
   111  
   112  			go func() {
   113  				defer close(out)
   114  
   115  				for i := range in {
   116  					select {
   117  					case <-out:
   118  					default:
   119  					}
   120  					out <- i
   121  				}
   122  			}()
   123  
   124  			return out
   125  		}
   126  
   127  		go func() {
   128  			// wait for kademlia table to be healthy
   129  			time.Sleep(options.SyncUpdateDelay)
   130  
   131  			kad := streamer.delivery.overlay.(*network.Kademlia)
   132  			depthC := latestIntC(kad.NeighbourhoodDepthC())
   133  			addressBookSizeC := latestIntC(kad.AddrCountC())
   134  
   135  			// initial requests for syncing subscription to peers
   136  			streamer.updateSyncing()
   137  
   138  			for depth := range depthC {
   139  				log.Debug("Kademlia neighbourhood depth change", "depth", depth)
   140  
   141  				// Prevent too early sync subscriptions by waiting until there are no
   142  				// new peers connecting. Sync stream updates will be done after no new
   143  				// peers have connected for at least the SyncUpdateDelay period.
   144  				timer := time.NewTimer(options.SyncUpdateDelay)
   145  				// Hard limit to sync update delay, preventing long delays
   146  				// on a very dynamic network
   147  				maxTimer := time.NewTimer(3 * time.Minute)
   148  			loop:
   149  				for {
   150  					select {
   151  					case <-maxTimer.C:
   152  						// force syncing update when a hard timeout is reached
   153  						log.Trace("Sync subscriptions update on hard timeout")
   154  						// request for syncing subscription to new peers
   155  						streamer.updateSyncing()
   156  						break loop
   157  					case <-timer.C:
   158  						// start syncing as no new peers have been added to kademlia
   159  						// for some time
   160  						log.Trace("Sync subscriptions update")
   161  						// request for syncing subscription to new peers
   162  						streamer.updateSyncing()
   163  						break loop
   164  					case size := <-addressBookSizeC:
   165  						log.Trace("Kademlia address book size changed on depth change", "size", size)
   166  						// new peers have been added to kademlia,
   167  						// reset the timer to prevent early sync subscriptions
   168  						if !timer.Stop() {
   169  							<-timer.C
   170  						}
   171  						timer.Reset(options.SyncUpdateDelay)
   172  					}
   173  				}
   174  				timer.Stop()
   175  				maxTimer.Stop()
   176  			}
   177  		}()
   178  	}
   179  
   180  	return streamer
   181  }
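
        // A minimal construction sketch (not part of the original source), assuming an
        // already initialized Delivery, storage.DBAPI and state.Store:
        //
        //	registry := NewRegistry(addr, delivery, db, intervalsStore, &RegistryOptions{
        //		DoSync:     true,
        //		DoRetrieve: true,
        //	})
        //	defer registry.Close()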
   182  
   183  // RegisterClientFunc registers an incoming streamer constructor
   184  func (r *Registry) RegisterClientFunc(stream string, f func(*Peer, string, bool) (Client, error)) {
   185  	r.clientMu.Lock()
   186  	defer r.clientMu.Unlock()
   187  
   188  	r.clientFuncs[stream] = f
   189  }
   190  
   191  // RegisterServerFunc registers an outgoing streamer constructor
   192  func (r *Registry) RegisterServerFunc(stream string, f func(*Peer, string, bool) (Server, error)) {
   193  	r.serverMu.Lock()
   194  	defer r.serverMu.Unlock()
   195  
   196  	r.serverFuncs[stream] = f
   197  }
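
        // An illustrative registration of a custom stream (names are hypothetical),
        // mirroring how the retrieval and SYNC streams are wired up in NewRegistry:
        //
        //	registry.RegisterServerFunc("MY_STREAM", func(p *Peer, key string, live bool) (Server, error) {
        //		return newMyServer(key, live), nil // hypothetical constructor
        //	})
        //	registry.RegisterClientFunc("MY_STREAM", func(p *Peer, key string, live bool) (Client, error) {
        //		return newMyClient(p, key, live), nil // hypothetical constructor
        //	})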
   198  
   199  // GetClientFunc is the accessor for incoming streamer constructors
   200  func (r *Registry) GetClientFunc(stream string) (func(*Peer, string, bool) (Client, error), error) {
   201  	r.clientMu.RLock()
   202  	defer r.clientMu.RUnlock()
   203  
   204  	f := r.clientFuncs[stream]
   205  	if f == nil {
   206  		return nil, fmt.Errorf("stream %v not registered", stream)
   207  	}
   208  	return f, nil
   209  }
   210  
   211  // GetServerFunc is the accessor for outgoing streamer constructors
   212  func (r *Registry) GetServerFunc(stream string) (func(*Peer, string, bool) (Server, error), error) {
   213  	r.serverMu.RLock()
   214  	defer r.serverMu.RUnlock()
   215  
   216  	f := r.serverFuncs[stream]
   217  	if f == nil {
   218  		return nil, fmt.Errorf("stream %v not registered", stream)
   219  	}
   220  	return f, nil
   221  }
   222  
   223  func (r *Registry) RequestSubscription(peerId discover.NodeID, s Stream, h *Range, prio uint8) error {
   224  	// check if the stream is registered
   225  	if _, err := r.GetServerFunc(s.Name); err != nil {
   226  		return err
   227  	}
   228  
   229  	peer := r.getPeer(peerId)
   230  	if peer == nil {
   231  		return fmt.Errorf("peer not found %v", peerId)
   232  	}
   233  
   234  	if _, err := peer.getServer(s); err != nil {
   235  		if e, ok := err.(*notFoundError); ok && e.t == "server" {
   236  			// request subscription only if the server for this stream is not created
   237  			log.Debug("RequestSubscription ", "peer", peerId, "stream", s, "history", h)
   238  			return peer.Send(&RequestSubscriptionMsg{
   239  				Stream:   s,
   240  				History:  h,
   241  				Priority: prio,
   242  			})
   243  		}
   244  		return err
   245  	}
   246  	log.Trace("RequestSubscription: already subscribed", "peer", peerId, "stream", s, "history", h)
   247  	return nil
   248  }
   249  
   250  // Subscribe initiates the streamer
   251  func (r *Registry) Subscribe(peerId discover.NodeID, s Stream, h *Range, priority uint8) error {
   252  	// check if the stream is registered
   253  	if _, err := r.GetClientFunc(s.Name); err != nil {
   254  		return err
   255  	}
   256  
   257  	peer := r.getPeer(peerId)
   258  	if peer == nil {
   259  		return fmt.Errorf("peer not found %v", peerId)
   260  	}
   261  
   262  	var to uint64
   263  	if !s.Live && h != nil {
   264  		to = h.To
   265  	}
   266  
   267  	err := peer.setClientParams(s, newClientParams(priority, to))
   268  	if err != nil {
   269  		return err
   270  	}
   271  
   272  	if s.Live && h != nil {
   273  		if err := peer.setClientParams(
   274  			getHistoryStream(s),
   275  			newClientParams(getHistoryPriority(priority), h.To),
   276  		); err != nil {
   277  			return err
   278  		}
   279  	}
   280  
   281  	msg := &SubscribeMsg{
   282  		Stream:   s,
   283  		History:  h,
   284  		Priority: priority,
   285  	}
   286  	log.Debug("Subscribe ", "peer", peerId, "stream", s, "history", h)
   287  
   288  	return peer.SendPriority(msg, priority)
   289  }
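
        // A hedged usage sketch: subscribing an already connected peer to the live
        // SYNC stream for proximity bin 1 (peerID and the chosen values are illustrative):
        //
        //	stream := NewStream("SYNC", FormatSyncBinKey(1), true)
        //	if err := registry.Subscribe(peerID, stream, NewRange(0, 0), Top); err != nil {
        //		log.Error("subscribe", "peer", peerID, "stream", stream, "err", err)
        //	}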
   290  
   291  func (r *Registry) Unsubscribe(peerId discover.NodeID, s Stream) error {
   292  	peer := r.getPeer(peerId)
   293  	if peer == nil {
   294  		return fmt.Errorf("peer not found %v", peerId)
   295  	}
   296  
   297  	msg := &UnsubscribeMsg{
   298  		Stream: s,
   299  	}
   300  	log.Debug("Unsubscribe ", "peer", peerId, "stream", s)
   301  
   302  	if err := peer.Send(msg); err != nil {
   303  		return err
   304  	}
   305  	return peer.removeClient(s)
   306  }
   307  
   308  // Quit sends the QuitMsg to the peer to remove the
   309  // stream peer client and terminate the streaming.
   310  func (r *Registry) Quit(peerId discover.NodeID, s Stream) error {
   311  	peer := r.getPeer(peerId)
   312  	if peer == nil {
   313  		log.Debug("stream quit: peer not found", "peer", peerId, "stream", s)
   314  		// if the peer is not found, abort the request
   315  		return nil
   316  	}
   317  
   318  	msg := &QuitMsg{
   319  		Stream: s,
   320  	}
   321  	log.Debug("Quit ", "peer", peerId, "stream", s)
   322  
   323  	return peer.Send(msg)
   324  }
   325  
   326  func (r *Registry) Retrieve(chunk *storage.Chunk) error {
   327  	return r.delivery.RequestFromPeers(chunk.Addr[:], r.skipCheck)
   328  }
   329  
   330  func (r *Registry) NodeInfo() interface{} {
   331  	return nil
   332  }
   333  
   334  func (r *Registry) PeerInfo(id discover.NodeID) interface{} {
   335  	return nil
   336  }
   337  
   338  func (r *Registry) Close() error {
   339  	return r.intervalsStore.Close()
   340  }
   341  
   342  func (r *Registry) getPeer(peerId discover.NodeID) *Peer {
   343  	r.peersMu.RLock()
   344  	defer r.peersMu.RUnlock()
   345  
   346  	return r.peers[peerId]
   347  }
   348  
   349  func (r *Registry) setPeer(peer *Peer) {
   350  	r.peersMu.Lock()
   351  	r.peers[peer.ID()] = peer
   352  	metrics.GetOrRegisterGauge("registry.peers", nil).Update(int64(len(r.peers)))
   353  	r.peersMu.Unlock()
   354  }
   355  
   356  func (r *Registry) deletePeer(peer *Peer) {
   357  	r.peersMu.Lock()
   358  	delete(r.peers, peer.ID())
   359  	metrics.GetOrRegisterGauge("registry.peers", nil).Update(int64(len(r.peers)))
   360  	r.peersMu.Unlock()
   361  }
   362  
   363  func (r *Registry) peersCount() (c int) {
   364  	r.peersMu.Lock()
   365  	c = len(r.peers)
   366  	r.peersMu.Unlock()
   367  	return
   368  }
   369  
   370  // Run is the protocol run function
   371  func (r *Registry) Run(p *network.BzzPeer) error {
   372  	sp := NewPeer(p.Peer, r)
   373  	r.setPeer(sp)
   374  	defer r.deletePeer(sp)
   375  	defer close(sp.quit)
   376  	defer sp.close()
   377  
   378  	if r.doRetrieve {
   379  		err := r.Subscribe(p.ID(), NewStream(swarmChunkServerStreamName, "", false), nil, Top)
   380  		if err != nil {
   381  			return err
   382  		}
   383  	}
   384  
   385  	return sp.Run(sp.HandleMsg)
   386  }
   387  
   388  // updateSyncing subscribes to SYNC streams by iterating over the
   389  // kademlia connections and bins. If there are existing SYNC streams
   390  // and they are no longer required after iteration, a request to Quit
   391  // them will be sent to the appropriate peers.
   392  func (r *Registry) updateSyncing() {
   393  	// if the overlay is not Kademlia, panic
   394  	kad := r.delivery.overlay.(*network.Kademlia)
   395  
   396  	// map of all SYNC streams for all peers,
   397  	// used at the end of the function to remove servers
   398  	// that are not needed anymore
   399  	subs := make(map[discover.NodeID]map[Stream]struct{})
   400  	r.peersMu.RLock()
   401  	for id, peer := range r.peers {
   402  		peer.serverMu.RLock()
   403  		for stream := range peer.servers {
   404  			if stream.Name == "SYNC" {
   405  				if _, ok := subs[id]; !ok {
   406  					subs[id] = make(map[Stream]struct{})
   407  				}
   408  				subs[id][stream] = struct{}{}
   409  			}
   410  		}
   411  		peer.serverMu.RUnlock()
   412  	}
   413  	r.peersMu.RUnlock()
   414  
   415  	// request subscriptions for all nodes and bins
   416  	kad.EachBin(r.addr.Over(), pot.DefaultPof(256), 0, func(conn network.OverlayConn, bin int) bool {
   417  		p := conn.(network.Peer)
   418  		log.Debug(fmt.Sprintf("Requesting subscription by: registry %s from peer %s for bin: %d", r.addr.ID(), p.ID(), bin))
   419  
   420  		// bin is always less than 256 and it is safe to convert it to type uint8
   421  		stream := NewStream("SYNC", FormatSyncBinKey(uint8(bin)), true)
   422  		if streams, ok := subs[p.ID()]; ok {
   423  			// delete live and history streams from the map, so that they won't be removed with a Quit request
   424  			delete(streams, stream)
   425  			delete(streams, getHistoryStream(stream))
   426  		}
   427  		err := r.RequestSubscription(p.ID(), stream, NewRange(0, 0), High)
   428  		if err != nil {
   429  			log.Debug("Request subscription", "err", err, "peer", p.ID(), "stream", stream)
   430  			return false
   431  		}
   432  		return true
   433  	})
   434  
   435  	// remove SYNC servers that no longer need to be subscribed
   436  	for id, streams := range subs {
   437  		if len(streams) == 0 {
   438  			continue
   439  		}
   440  		peer := r.getPeer(id)
   441  		if peer == nil {
   442  			continue
   443  		}
   444  		for stream := range streams {
   445  			log.Debug("Remove sync server", "peer", id, "stream", stream)
   446  			err := r.Quit(peer.ID(), stream)
   447  			if err != nil && err != p2p.ErrShuttingDown {
   448  				log.Error("quit", "err", err, "peer", peer.ID(), "stream", stream)
   449  			}
   450  		}
   451  	}
   452  }
   453  
   454  func (r *Registry) runProtocol(p *p2p.Peer, rw p2p.MsgReadWriter) error {
   455  	peer := protocols.NewPeer(p, rw, Spec)
   456  	bzzPeer := network.NewBzzTestPeer(peer, r.addr)
   457  	r.delivery.overlay.On(bzzPeer)
   458  	defer r.delivery.overlay.Off(bzzPeer)
   459  	return r.Run(bzzPeer)
   460  }
   461  
   462  // HandleMsg is the message handler that delegates incoming messages
   463  func (p *Peer) HandleMsg(msg interface{}) error {
   464  	switch msg := msg.(type) {
   465  
   466  	case *SubscribeMsg:
   467  		return p.handleSubscribeMsg(msg)
   468  
   469  	case *SubscribeErrorMsg:
   470  		return p.handleSubscribeErrorMsg(msg)
   471  
   472  	case *UnsubscribeMsg:
   473  		return p.handleUnsubscribeMsg(msg)
   474  
   475  	case *OfferedHashesMsg:
   476  		return p.handleOfferedHashesMsg(msg)
   477  
   478  	case *TakeoverProofMsg:
   479  		return p.handleTakeoverProofMsg(msg)
   480  
   481  	case *WantedHashesMsg:
   482  		return p.handleWantedHashesMsg(msg)
   483  
   484  	case *ChunkDeliveryMsg:
   485  		return p.streamer.delivery.handleChunkDeliveryMsg(p, msg)
   486  
   487  	case *RetrieveRequestMsg:
   488  		return p.streamer.delivery.handleRetrieveRequestMsg(p, msg)
   489  
   490  	case *RequestSubscriptionMsg:
   491  		return p.handleRequestSubscription(msg)
   492  
   493  	case *QuitMsg:
   494  		return p.handleQuitMsg(msg)
   495  
   496  	default:
   497  		return fmt.Errorf("unknown message type: %T", msg)
   498  	}
   499  }
   500  
   501  type server struct {
   502  	Server
   503  	stream       Stream
   504  	priority     uint8
   505  	currentBatch []byte
   506  }
   507  
   508  // Server interface for outgoing peer Streamer
   509  type Server interface {
   510  	SetNextBatch(uint64, uint64) (hashes []byte, from uint64, to uint64, proof *HandoverProof, err error)
   511  	GetData([]byte) ([]byte, error)
   512  	Close()
   513  }
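
        // A sketch of a minimal Server implementation (hypothetical, for illustration
        // only); SwarmChunkServer and the swarm syncer server registered in NewRegistry
        // are the real implementations:
        //
        //	type noopServer struct{}
        //
        //	func (s *noopServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
        //		return nil, from, to, nil, nil // no hashes to offer
        //	}
        //	func (s *noopServer) GetData(key []byte) ([]byte, error) { return nil, nil }
        //	func (s *noopServer) Close()                             {}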
   514  
   515  type client struct {
   516  	Client
   517  	stream    Stream
   518  	priority  uint8
   519  	sessionAt uint64
   520  	to        uint64
   521  	next      chan error
   522  	quit      chan struct{}
   523  
   524  	intervalsKey   string
   525  	intervalsStore state.Store
   526  }
   527  
   528  func peerStreamIntervalsKey(p *Peer, s Stream) string {
   529  	return p.ID().String() + s.String()
   530  }
   531  
   532  func (c client) AddInterval(start, end uint64) (err error) {
   533  	i := &intervals.Intervals{}
   534  	err = c.intervalsStore.Get(c.intervalsKey, i)
   535  	if err != nil {
   536  		return err
   537  	}
   538  	i.Add(start, end)
   539  	return c.intervalsStore.Put(c.intervalsKey, i)
   540  }
   541  
   542  func (c client) NextInterval() (start, end uint64, err error) {
   543  	i := &intervals.Intervals{}
   544  	err = c.intervalsStore.Get(c.intervalsKey, i)
   545  	if err != nil {
   546  		return 0, 0, err
   547  	}
   548  	start, end = i.Next()
   549  	return start, end, nil
   550  }
   551  
   552  // Client interface for incoming peer Streamer
   553  type Client interface {
   554  	NeedData([]byte) func()
   555  	BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error)
   556  	Close()
   557  }
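
        // A sketch of a minimal Client implementation (hypothetical); the swarm syncer
        // client registered in NewRegistry is the real implementation:
        //
        //	type noopClient struct{}
        //
        //	func (c *noopClient) NeedData(key []byte) func() { return nil } // nothing to fetch
        //	func (c *noopClient) BatchDone(s Stream, from uint64, hashes []byte, root []byte) func() (*TakeoverProof, error) {
        //		return nil // no takeover proof; batchDone records the interval instead
        //	}
        //	func (c *noopClient) Close() {}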
   558  
   559  func (c *client) nextBatch(from uint64) (nextFrom uint64, nextTo uint64) {
   560  	if c.to > 0 && from >= c.to {
   561  		return 0, 0
   562  	}
   563  	if c.stream.Live {
   564  		return from, 0
   565  	} else if from >= c.sessionAt {
   566  		if c.to > 0 {
   567  			return from, c.to
   568  		}
   569  		return from, math.MaxUint64
   570  	}
   571  	nextFrom, nextTo, err := c.NextInterval()
   572  	if err != nil {
   573  		log.Error("next intervals", "stream", c.stream, "err", err)
   574  		return
   575  	}
   576  	if nextTo > c.to {
   577  		nextTo = c.to
   578  	}
   579  	if nextTo == 0 {
   580  		nextTo = c.sessionAt
   581  	}
   582  	return
   583  }
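
        // An illustration of nextBatch (values assumed): a history client with
        // sessionAt=100 and to=0 serves batches below 100 from the recorded intervals,
        // with an open-ended nextTo clamped to sessionAt; once from reaches 100 it
        // returns (from, math.MaxUint64). A live client simply returns (from, 0).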
   584  
   585  func (c *client) batchDone(p *Peer, req *OfferedHashesMsg, hashes []byte) error {
   586  	if tf := c.BatchDone(req.Stream, req.From, hashes, req.Root); tf != nil {
   587  		tp, err := tf()
   588  		if err != nil {
   589  			return err
   590  		}
   591  		if err := p.SendPriority(tp, c.priority); err != nil {
   592  			return err
   593  		}
   594  		if c.to > 0 && tp.Takeover.End >= c.to {
   595  			return p.streamer.Unsubscribe(p.Peer.ID(), req.Stream)
   596  		}
   597  		return nil
   598  	}
   599  	// TODO: make a test case for testing if the interval is added when the batch is done
   600  	if err := c.AddInterval(req.From, req.To); err != nil {
   601  		return err
   602  	}
   603  	return nil
   604  }
   605  
   606  func (c *client) close() {
   607  	select {
   608  	case <-c.quit:
   609  	default:
   610  		close(c.quit)
   611  	}
   612  	c.Close()
   613  }
   614  
   615  // clientParams stores parameters for the new client
   616  // between a subscription and initial offered hashes request handling.
   617  type clientParams struct {
   618  	priority uint8
   619  	to       uint64
   620  	// signal when the client is created
   621  	clientCreatedC chan struct{}
   622  }
   623  
   624  func newClientParams(priority uint8, to uint64) *clientParams {
   625  	return &clientParams{
   626  		priority:       priority,
   627  		to:             to,
   628  		clientCreatedC: make(chan struct{}),
   629  	}
   630  }
   631  
   632  func (c *clientParams) waitClient(ctx context.Context) error {
   633  	select {
   634  	case <-ctx.Done():
   635  		return ctx.Err()
   636  	case <-c.clientCreatedC:
   637  		return nil
   638  	}
   639  }
   640  
   641  func (c *clientParams) clientCreated() {
   642  	close(c.clientCreatedC)
   643  }
   644  
   645  // Spec is the spec of the streamer protocol
   646  var Spec = &protocols.Spec{
   647  	Name:       "stream",
   648  	Version:    4,
   649  	MaxMsgSize: 10 * 1024 * 1024,
   650  	Messages: []interface{}{
   651  		UnsubscribeMsg{},
   652  		OfferedHashesMsg{},
   653  		WantedHashesMsg{},
   654  		TakeoverProofMsg{},
   655  		SubscribeMsg{},
   656  		RetrieveRequestMsg{},
   657  		ChunkDeliveryMsg{},
   658  		SubscribeErrorMsg{},
   659  		RequestSubscriptionMsg{},
   660  		QuitMsg{},
   661  	},
   662  }
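
        // Note: the p2p/protocols package derives each message code from its position
        // in Spec.Messages above, so the ordering of this slice is part of the wire
        // protocol.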
   663  
   664  func (r *Registry) Protocols() []p2p.Protocol {
   665  	return []p2p.Protocol{
   666  		{
   667  			Name:    Spec.Name,
   668  			Version: Spec.Version,
   669  			Length:  Spec.Length(),
   670  			Run:     r.runProtocol,
   671  			// NodeInfo: ,
   672  			// PeerInfo: ,
   673  		},
   674  	}
   675  }
   676  
   677  func (r *Registry) APIs() []rpc.API {
   678  	return []rpc.API{
   679  		{
   680  			Namespace: "stream",
   681  			Version:   "3.0",
   682  			Service:   r.api,
   683  			Public:    true,
   684  		},
   685  	}
   686  }
   687  
   688  func (r *Registry) Start(server *p2p.Server) error {
   689  	log.Info("Streamer started")
   690  	return nil
   691  }
   692  
   693  func (r *Registry) Stop() error {
   694  	return nil
   695  }
   696  
   697  type Range struct {
   698  	From, To uint64
   699  }
   700  
   701  func NewRange(from, to uint64) *Range {
   702  	return &Range{
   703  		From: from,
   704  		To:   to,
   705  	}
   706  }
   707  
   708  func (r *Range) String() string {
   709  	return fmt.Sprintf("%v-%v", r.From, r.To)
   710  }
   711  
   712  func getHistoryPriority(priority uint8) uint8 {
   713  	if priority == 0 {
   714  		return 0
   715  	}
   716  	return priority - 1
   717  }
   718  
   719  func getHistoryStream(s Stream) Stream {
   720  	return NewStream(s.Name, s.Key, false)
   721  }
   722  
   723  type API struct {
   724  	streamer *Registry
   725  }
   726  
   727  func NewAPI(r *Registry) *API {
   728  	return &API{
   729  		streamer: r,
   730  	}
   731  }
   732  
   733  func (api *API) SubscribeStream(peerId discover.NodeID, s Stream, history *Range, priority uint8) error {
   734  	return api.streamer.Subscribe(peerId, s, history, priority)
   735  }
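
        // A hedged RPC usage sketch (endpoint and argument values assumed): with the
        // "stream" namespace registered via Registry.APIs, SubscribeStream and
        // UnsubscribeStream should be reachable as stream_subscribeStream and
        // stream_unsubscribeStream on the node's RPC interface:
        //
        //	cli, _ := rpc.Dial("ws://127.0.0.1:8546")
        //	err := cli.Call(nil, "stream_subscribeStream", peerID, stream, history, priority)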
   736  
   737  func (api *API) UnsubscribeStream(peerId discover.NodeID, s Stream) error {
   738  	return api.streamer.Unsubscribe(peerId, s)
   739  }