github.com/oskarth/go-ethereum@v1.6.8-0.20191013093314-dac24a9d3494/swarm/network/stream/messages.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"context"
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/swarm/log"
	bv "github.com/ethereum/go-ethereum/swarm/network/bitvector"
	"github.com/ethereum/go-ethereum/swarm/spancontext"
	"github.com/ethereum/go-ethereum/swarm/storage"
	"github.com/opentracing/opentracing-go"
)

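// syncBatchTimeout bounds how long handleOfferedHashesMsg waits for all requested
// chunks of a batch to arrive and be stored before the batch is abandoned.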
var syncBatchTimeout = 30 * time.Second

// Stream defines a unique stream identifier.
type Stream struct {
	// Name is used for Client and Server functions identification.
	Name string
	// Key is the name of the specific stream data.
	Key string
	// Live defines whether the stream delivers only new data
	// for the specific stream.
	Live bool
}

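// NewStream constructs a Stream identifier from its name, key and live flag.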
func NewStream(name string, key string, live bool) Stream {
	return Stream{
		Name: name,
		Key:  key,
		Live: live,
	}
}

// String returns a stream id based on all Stream fields.
func (s Stream) String() string {
	t := "h"
	if s.Live {
		t = "l"
	}
	return fmt.Sprintf("%s|%s|%s", s.Name, s.Key, t)
}

// SubscribeMsg is the protocol msg for requesting a stream (section)
type SubscribeMsg struct {
	Stream   Stream
	History  *Range `rlp:"nil"`
	Priority uint8  // delivered on priority channel
}

// RequestSubscriptionMsg is the protocol msg for a node to request subscription to a
// specific stream
type RequestSubscriptionMsg struct {
	Stream   Stream
	History  *Range `rlp:"nil"`
	Priority uint8  // delivered on priority channel
}

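// handleRequestSubscription handles a RequestSubscriptionMsg by subscribing to the
// requested stream from the remote peer; a failed subscription is reported back to the
// peer as a SubscribeErrorMsg rather than returned as an error.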
func (p *Peer) handleRequestSubscription(ctx context.Context, req *RequestSubscriptionMsg) (err error) {
	log.Debug(fmt.Sprintf("handleRequestSubscription: streamer %s to subscribe to %s with stream %s", p.streamer.addr, p.ID(), req.Stream))
	if err = p.streamer.Subscribe(p.ID(), req.Stream, req.History, req.Priority); err != nil {
		// The error will be sent as a subscribe error message
		// and will not be returned, as that would prevent any further message
		// exchange between the peers over p2p. Instead, an error is returned
		// only if sending the subscribe error message itself fails.
		err = p.Send(ctx, SubscribeErrorMsg{
			Error: err.Error(),
		})
	}
	return err
}

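// handleSubscribeMsg handles an incoming SubscribeMsg: it registers a server for the
// requested stream and starts sending offered hashes for the requested range (plus the
// history range for live streams). Errors are reported back to the peer as a
// SubscribeErrorMsg.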
func (p *Peer) handleSubscribeMsg(ctx context.Context, req *SubscribeMsg) (err error) {
	metrics.GetOrRegisterCounter("peer.handlesubscribemsg", nil).Inc(1)

	defer func() {
		if err != nil {
			// The error will be sent as a subscribe error message
			// and will not be returned, as that would prevent any further message
			// exchange between the peers over p2p. Instead, an error is returned
			// only if sending the subscribe error message itself fails.
			err = p.Send(context.TODO(), SubscribeErrorMsg{
				Error: err.Error(),
			})
		}
	}()

	log.Debug("received subscription", "from", p.streamer.addr, "peer", p.ID(), "stream", req.Stream, "history", req.History)

	f, err := p.streamer.GetServerFunc(req.Stream.Name)
	if err != nil {
		return err
	}

	s, err := f(p, req.Stream.Key, req.Stream.Live)
	if err != nil {
		return err
	}
	os, err := p.setServer(req.Stream, s, req.Priority)
	if err != nil {
		return err
	}

	var from uint64
	var to uint64
	if !req.Stream.Live && req.History != nil {
		from = req.History.From
		to = req.History.To
	}

	go func() {
		if err := p.SendOfferedHashes(os, from, to); err != nil {
			log.Warn("SendOfferedHashes error", "peer", p.ID().TerminalString(), "err", err)
		}
	}()

	if req.Stream.Live && req.History != nil {
		// subscribe to the history stream
		s, err := f(p, req.Stream.Key, false)
		if err != nil {
			return err
		}

		os, err := p.setServer(getHistoryStream(req.Stream), s, getHistoryPriority(req.Priority))
		if err != nil {
			return err
		}
		go func() {
			if err := p.SendOfferedHashes(os, req.History.From, req.History.To); err != nil {
				log.Warn("SendOfferedHashes error", "peer", p.ID().TerminalString(), "err", err)
			}
		}()
	}

	return nil
}

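// SubscribeErrorMsg is the protocol msg sent back when a subscription request fails.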
type SubscribeErrorMsg struct {
	Error string
}

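// handleSubscribeErrorMsg surfaces a peer's subscription failure as a local error.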
func (p *Peer) handleSubscribeErrorMsg(req *SubscribeErrorMsg) (err error) {
	//TODO the error should be channeled to whoever calls the subscribe
	return fmt.Errorf("subscribe to peer %s: %v", p.ID(), req.Error)
}

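// UnsubscribeMsg is the protocol msg for cancelling a subscription to a stream.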
type UnsubscribeMsg struct {
	Stream Stream
}

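// handleUnsubscribeMsg removes the server registered for the given stream.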
func (p *Peer) handleUnsubscribeMsg(req *UnsubscribeMsg) error {
	return p.removeServer(req.Stream)
}

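// QuitMsg is the protocol msg requesting the receiving peer to drop its client for the
// given stream.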
type QuitMsg struct {
	Stream Stream
}

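// handleQuitMsg removes the client registered for the given stream.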
func (p *Peer) handleQuitMsg(req *QuitMsg) error {
	return p.removeClient(req.Stream)
}

// OfferedHashesMsg is the protocol msg for offering to hand over a
// stream section
type OfferedHashesMsg struct {
	Stream         Stream // name of Stream
	From, To       uint64 // peer and db-specific entry count
	Hashes         []byte // stream of hashes (128)
	*HandoverProof        // HandoverProof
}

// String pretty prints OfferedHashesMsg
func (m OfferedHashesMsg) String() string {
	return fmt.Sprintf("Stream '%v' [%v-%v] (%v)", m.Stream, m.From, m.To, len(m.Hashes)/HashSize)
}

// handleOfferedHashesMsg protocol msg handler calls the incoming streamer interface
// NeedData method for each offered hash and requests the chunks that are still missing
func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg) error {
	metrics.GetOrRegisterCounter("peer.handleofferedhashes", nil).Inc(1)

	var sp opentracing.Span
	ctx, sp = spancontext.StartSpan(
		ctx,
		"handle.offered.hashes")
	defer sp.Finish()

	c, _, err := p.getOrSetClient(req.Stream, req.From, req.To)
	if err != nil {
		return err
	}

	hashes := req.Hashes
	lenHashes := len(hashes)
	if lenHashes%HashSize != 0 {
		return fmt.Errorf("error invalid hashes length (len: %v)", lenHashes)
	}

	want, err := bv.New(lenHashes / HashSize)
	if err != nil {
		return fmt.Errorf("error initialising bitvector of length %v: %v", lenHashes/HashSize, err)
	}

	ctr := 0
	errC := make(chan error)
	ctx, cancel := context.WithTimeout(ctx, syncBatchTimeout)

	ctx = context.WithValue(ctx, "source", p.ID().String())
	for i := 0; i < lenHashes; i += HashSize {
		hash := hashes[i : i+HashSize]

		if wait := c.NeedData(ctx, hash); wait != nil {
			ctr++
			want.Set(i/HashSize, true)
			// create request and wait until the chunk data arrives and is stored
			go func(w func(context.Context) error) {
				select {
				case errC <- w(ctx):
				case <-ctx.Done():
				}
			}(wait)
		}
	}

	go func() {
		defer cancel()
		for i := 0; i < ctr; i++ {
			select {
			case err := <-errC:
				if err != nil {
					log.Debug("client.handleOfferedHashesMsg() error waiting for chunk, dropping peer", "peer", p.ID(), "err", err)
					p.Drop(err)
					return
				}
			case <-ctx.Done():
				log.Debug("client.handleOfferedHashesMsg() context done", "ctx.Err()", ctx.Err())
				return
			case <-c.quit:
				log.Debug("client.handleOfferedHashesMsg() quit")
				return
			}
		}
		select {
		case c.next <- c.batchDone(p, req, hashes):
		case <-c.quit:
			log.Debug("client.handleOfferedHashesMsg() quit")
		case <-ctx.Done():
			log.Debug("client.handleOfferedHashesMsg() context done", "ctx.Err()", ctx.Err())
		}
	}()
	// only send WantedHashesMsg once all missing chunks of the previous batch have arrived
	if c.stream.Live {
		c.sessionAt = req.From
	}
	from, to := c.nextBatch(req.To + 1)
	log.Trace("set next batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To, "addr", p.streamer.addr)
	if from == to {
		return nil
	}

	msg := &WantedHashesMsg{
		Stream: req.Stream,
		Want:   want.Bytes(),
		From:   from,
		To:     to,
	}
	go func() {
		log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To)
		select {
		case err := <-c.next:
			if err != nil {
				log.Warn("c.next error dropping peer", "err", err)
				p.Drop(err)
				return
			}
		case <-c.quit:
			log.Debug("client.handleOfferedHashesMsg() quit")
			return
		case <-ctx.Done():
			log.Debug("client.handleOfferedHashesMsg() context done", "ctx.Err()", ctx.Err())
			return
		}
		log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To)
		err := p.SendPriority(ctx, msg, c.priority)
		if err != nil {
			log.Warn("SendPriority error", "err", err)
		}
	}()
	return nil
}

// WantedHashesMsg is the protocol msg data for signaling which of the hashes
// offered in OfferedHashesMsg the downstream peer actually wants sent over
type WantedHashesMsg struct {
	Stream   Stream
	Want     []byte // bitvector indicating which keys of the batch are needed
	From, To uint64 // next interval offset - empty if not to be continued
}

// String pretty prints WantedHashesMsg
func (m WantedHashesMsg) String() string {
	return fmt.Sprintf("Stream '%v', Want: %x, Next: [%v-%v]", m.Stream, m.Want, m.From, m.To)
}

// handleWantedHashesMsg protocol msg handler
// * sends the next batch of unsynced keys
// * sends the actual data chunks as per WantedHashesMsg
func (p *Peer) handleWantedHashesMsg(ctx context.Context, req *WantedHashesMsg) error {
	metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg", nil).Inc(1)

	log.Trace("received wanted batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To)
	s, err := p.getServer(req.Stream)
	if err != nil {
		return err
	}
	hashes := s.currentBatch
	// launch in go routine since GetBatch blocks until new hashes arrive
	go func() {
		if err := p.SendOfferedHashes(s, req.From, req.To); err != nil {
			log.Warn("SendOfferedHashes error", "err", err)
		}
	}()
	// go p.SendOfferedHashes(s, req.From, req.To)
	l := len(hashes) / HashSize

	log.Trace("wanted batch length", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To, "lenhashes", len(hashes), "l", l)
	want, err := bv.NewFromBytes(req.Want, l)
	if err != nil {
		return fmt.Errorf("error initialising bitvector of length %v: %v", l, err)
	}
	for i := 0; i < l; i++ {
		if want.Get(i) {
			metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg.actualget", nil).Inc(1)

			hash := hashes[i*HashSize : (i+1)*HashSize]
			data, err := s.GetData(ctx, hash)
			if err != nil {
				return fmt.Errorf("handleWantedHashesMsg get data %x: %v", hash, err)
			}
			chunk := storage.NewChunk(hash, data)
			syncing := true
			if err := p.Deliver(ctx, chunk, s.priority, syncing); err != nil {
				return err
			}
		}
	}
	return nil
}

// Handover represents a statement that the upstream peer hands over the stream section
type Handover struct {
	Stream     Stream // name of stream
	Start, End uint64 // index of hashes
	Root       []byte // Root hash for indexed segment inclusion proofs
}

// HandoverProof represents a signed statement that the upstream peer handed over the stream section
type HandoverProof struct {
	Sig []byte // Sign(Hash(Serialisation(Handover)))
	*Handover
}

// Takeover represents a statement that the downstream peer took over (stored all data)
// handed over
type Takeover Handover

// TakeoverProof represents a signed statement that the downstream peer took over
// the stream section
type TakeoverProof struct {
	Sig []byte // Sign(Hash(Serialisation(Takeover)))
	*Takeover
}

// TakeoverProofMsg is the protocol msg sent by the downstream peer
type TakeoverProofMsg TakeoverProof

// String pretty prints TakeoverProofMsg
func (m TakeoverProofMsg) String() string {
	return fmt.Sprintf("Stream: '%v' [%v-%v], Root: %x, Sig: %x", m.Stream, m.Start, m.End, m.Root, m.Sig)
}

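// handleTakeoverProofMsg handles an incoming TakeoverProofMsg; currently it only checks
// that a server is registered for the stream.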
func (p *Peer) handleTakeoverProofMsg(ctx context.Context, req *TakeoverProofMsg) error {
	_, err := p.getServer(req.Stream)
	// store the strongest takeoverproof for the stream in streamer
	return err
}