github.com/divan/go-ethereum@v1.8.14-0.20180820134928-1de9ada4016d/swarm/network/stream/messages.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/swarm/log"
	bv "github.com/ethereum/go-ethereum/swarm/network/bitvector"
	"github.com/ethereum/go-ethereum/swarm/spancontext"
	"github.com/ethereum/go-ethereum/swarm/storage"
	opentracing "github.com/opentracing/opentracing-go"
)

// Stream defines a unique stream identifier.
type Stream struct {
	// Name is used to identify the Client and Server functions.
	Name string
	// Key is the name of the specific stream data.
	Key string
	// Live defines whether the stream delivers only new data
	// for the specific stream.
	Live bool
}

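// NewStream constructs a Stream identifier from its name, key and live flag.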
func NewStream(name string, key string, live bool) Stream {
	return Stream{
		Name: name,
		Key:  key,
		Live: live,
	}
}

// String returns a stream id based on all Stream fields.
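// The trailing component encodes the Live flag: "l" for live streams, "h" for
// history, e.g. NewStream("SYNC", "03", true).String() == "SYNC|03|l".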
func (s Stream) String() string {
	t := "h"
	if s.Live {
		t = "l"
	}
	return fmt.Sprintf("%s|%s|%s", s.Name, s.Key, t)
}

// SubscribeMsg is the protocol msg for requesting a stream (section)
type SubscribeMsg struct {
	Stream   Stream
	History  *Range `rlp:"nil"`
	Priority uint8  // delivered on priority channel
}

// RequestSubscriptionMsg is the protocol msg for a node to request subscription to a
// specific stream
type RequestSubscriptionMsg struct {
	Stream   Stream
	History  *Range `rlp:"nil"`
	Priority uint8  // delivered on priority channel
}

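// handleRequestSubscription handles a RequestSubscriptionMsg by subscribing
// to the requested stream on behalf of the remote peer.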
func (p *Peer) handleRequestSubscription(ctx context.Context, req *RequestSubscriptionMsg) (err error) {
	log.Debug(fmt.Sprintf("handleRequestSubscription: streamer %s to subscribe to %s with stream %s", p.streamer.addr.ID(), p.ID(), req.Stream))
	return p.streamer.Subscribe(p.ID(), req.Stream, req.History, req.Priority)
}

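// handleSubscribeMsg sets up a server for the requested stream and starts
// offering hashes; any failure is reported back to the requesting peer via
// SubscribeErrorMsg (see the deferred handler below).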
func (p *Peer) handleSubscribeMsg(ctx context.Context, req *SubscribeMsg) (err error) {
	metrics.GetOrRegisterCounter("peer.handlesubscribemsg", nil).Inc(1)

	defer func() {
		if err != nil {
			if e := p.Send(context.TODO(), SubscribeErrorMsg{
				Error: err.Error(),
			}); e != nil {
				log.Error("send stream subscribe error message", "err", e)
			}
		}
	}()

	log.Debug("received subscription", "from", p.streamer.addr.ID(), "peer", p.ID(), "stream", req.Stream, "history", req.History)

	f, err := p.streamer.GetServerFunc(req.Stream.Name)
	if err != nil {
		return err
	}

	s, err := f(p, req.Stream.Key, req.Stream.Live)
	if err != nil {
		return err
	}
	os, err := p.setServer(req.Stream, s, req.Priority)
	if err != nil {
		return err
	}

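	// only history streams are bounded by the requested range; for live
	// streams from/to stay zero and the server decides where to start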
	var from uint64
	var to uint64
	if !req.Stream.Live && req.History != nil {
		from = req.History.From
		to = req.History.To
	}

	go func() {
		if err := p.SendOfferedHashes(os, from, to); err != nil {
			log.Warn("SendOfferedHashes dropping peer", "err", err)
			p.Drop(err)
		}
	}()

	if req.Stream.Live && req.History != nil {
		// subscribe to the history stream
		s, err := f(p, req.Stream.Key, false)
		if err != nil {
			return err
		}

		os, err := p.setServer(getHistoryStream(req.Stream), s, getHistoryPriority(req.Priority))
		if err != nil {
			return err
		}
		go func() {
			if err := p.SendOfferedHashes(os, req.History.From, req.History.To); err != nil {
				log.Warn("SendOfferedHashes dropping peer", "err", err)
				p.Drop(err)
			}
		}()
	}

	return nil
}

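// SubscribeErrorMsg is the protocol msg sent when a subscription request fails.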
type SubscribeErrorMsg struct {
	Error string
}

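// handleSubscribeErrorMsg surfaces a remote subscription failure as a local error.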
func (p *Peer) handleSubscribeErrorMsg(req *SubscribeErrorMsg) (err error) {
	return fmt.Errorf("subscribe to peer %s: %v", p.ID(), req.Error)
}

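// UnsubscribeMsg is the protocol msg for canceling a subscription to a stream.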
type UnsubscribeMsg struct {
	Stream Stream
}

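// handleUnsubscribeMsg removes the server serving the given stream to this peer.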
func (p *Peer) handleUnsubscribeMsg(req *UnsubscribeMsg) error {
	return p.removeServer(req.Stream)
}

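// QuitMsg is the protocol msg signaling that a stream is terminated; the
// receiving peer removes its client for that stream.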
type QuitMsg struct {
	Stream Stream
}

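// handleQuitMsg removes the client consuming the given stream from this peer.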
func (p *Peer) handleQuitMsg(req *QuitMsg) error {
	return p.removeClient(req.Stream)
}

// OfferedHashesMsg is the protocol msg for offering to hand over a
// stream section
type OfferedHashesMsg struct {
	Stream         Stream // name of Stream
	From, To       uint64 // peer and db-specific entry count
	Hashes         []byte // stream of hashes (128)
	*HandoverProof        // HandoverProof
}

// String pretty prints OfferedHashesMsg
func (m OfferedHashesMsg) String() string {
	return fmt.Sprintf("Stream '%v' [%v-%v] (%v)", m.Stream, m.From, m.To, len(m.Hashes)/HashSize)
}

// handleOfferedHashesMsg protocol msg handler: it checks which of the offered
// hashes are needed (via the client's NeedData), collects them in a want
// bitvector and replies with a WantedHashesMsg requesting the next batch
func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg) error {
	metrics.GetOrRegisterCounter("peer.handleofferedhashes", nil).Inc(1)

	var sp opentracing.Span
	ctx, sp = spancontext.StartSpan(
		ctx,
		"handle.offered.hashes")
	defer sp.Finish()

	c, _, err := p.getOrSetClient(req.Stream, req.From, req.To)
	if err != nil {
		return err
	}
	hashes := req.Hashes
	want, err := bv.New(len(hashes) / HashSize)
	if err != nil {
		return fmt.Errorf("error initialising bitvector of length %v: %v", len(hashes)/HashSize, err)
	}
	wg := sync.WaitGroup{}
	for i := 0; i < len(hashes); i += HashSize {
		hash := hashes[i : i+HashSize]

		if wait := c.NeedData(ctx, hash); wait != nil {
			want.Set(i/HashSize, true)
			wg.Add(1)
			// create request and wait until the chunk data arrives and is stored
			go func(w func()) {
				w()
				wg.Done()
			}(wait)
		}
	}
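	// once every wanted chunk of this batch has arrived and been stored,
	// report the batch result on c.next (unless the client is quitting)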
	go func() {
		wg.Wait()
		select {
		case c.next <- c.batchDone(p, req, hashes):
		case <-c.quit:
		}
	}()
	// only send WantedHashesMsg if all missing chunks of the previous batch arrived
	if c.stream.Live {
		c.sessionAt = req.From
	}
	from, to := c.nextBatch(req.To + 1)
	log.Trace("received offered batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To)
	if from == to {
		return nil
	}

	msg := &WantedHashesMsg{
		Stream: req.Stream,
		Want:   want.Bytes(),
		From:   from,
		To:     to,
	}
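	// gate the WantedHashesMsg on the completion signal from c.next, and
	// drop the peer on a batch error or after a 120s timeout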
	go func() {
		select {
		case <-time.After(120 * time.Second):
			log.Warn("handleOfferedHashesMsg timeout, so dropping peer")
			p.Drop(errors.New("handle offered hashes timeout"))
			return
		case err := <-c.next:
			if err != nil {
				log.Warn("c.next dropping peer", "err", err)
				p.Drop(err)
				return
			}
		case <-c.quit:
			return
		}
		log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To)
		err := p.SendPriority(ctx, msg, c.priority)
		if err != nil {
			log.Warn("SendPriority err, so dropping peer", "err", err)
			p.Drop(err)
		}
	}()
	return nil
}

// WantedHashesMsg is the protocol msg data for signaling which hashes
// offered in OfferedHashesMsg the downstream peer actually wants sent over
type WantedHashesMsg struct {
	Stream   Stream
	Want     []byte // bitvector indicating which keys of the batch needed
	From, To uint64 // next interval offset - empty if not to be continued
}

// String pretty prints WantedHashesMsg
func (m WantedHashesMsg) String() string {
	return fmt.Sprintf("Stream '%v', Want: %x, Next: [%v-%v]", m.Stream, m.Want, m.From, m.To)
}

// handleWantedHashesMsg protocol msg handler
// * sends the next batch of unsynced keys
// * sends the actual data chunks as per WantedHashesMsg
func (p *Peer) handleWantedHashesMsg(ctx context.Context, req *WantedHashesMsg) error {
	metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg", nil).Inc(1)

	log.Trace("received wanted batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To)
	s, err := p.getServer(req.Stream)
	if err != nil {
		return err
	}
	hashes := s.currentBatch
	// launch in a goroutine since GetBatch blocks until new hashes arrive
	go func() {
		if err := p.SendOfferedHashes(s, req.From, req.To); err != nil {
			log.Warn("SendOfferedHashes dropping peer", "err", err)
			p.Drop(err)
		}
	}()
	l := len(hashes) / HashSize

	log.Trace("wanted batch length", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To, "lenhashes", len(hashes), "l", l)
	want, err := bv.NewFromBytes(req.Want, l)
	if err != nil {
		return fmt.Errorf("error initialising bitvector of length %v: %v", l, err)
	}
	for i := 0; i < l; i++ {
		if want.Get(i) {
			metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg.actualget", nil).Inc(1)

			hash := hashes[i*HashSize : (i+1)*HashSize]
			data, err := s.GetData(ctx, hash)
			if err != nil {
				return fmt.Errorf("handleWantedHashesMsg get data %x: %v", hash, err)
			}
			chunk := storage.NewChunk(hash, nil)
			chunk.SData = data
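			// chunk data carries an 8-byte length prefix, so anything
			// shorter than 9 bytes has no payload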
			if length := len(chunk.SData); length < 9 {
				log.Error("Chunk.SData to sync is too short", "len(chunk.SData)", length, "address", chunk.Addr)
			}
			if err := p.Deliver(ctx, chunk, s.priority); err != nil {
				return err
			}
		}
	}
	return nil
}

// Handover represents a statement that the upstream peer hands over the stream section
type Handover struct {
	Stream     Stream // name of stream
	Start, End uint64 // index of hashes
	Root       []byte // Root hash for indexed segment inclusion proofs
}

// HandoverProof represents a signed statement that the upstream peer handed over the stream section
type HandoverProof struct {
	Sig []byte // Sign(Hash(Serialisation(Handover)))
	*Handover
}

// Takeover represents a statement that the downstream peer took over (stored all data)
// handed over
type Takeover Handover

// TakeoverProof represents a signed statement that the downstream peer took over
// the stream section
type TakeoverProof struct {
	Sig []byte // Sign(Hash(Serialisation(Takeover)))
	*Takeover
}

// TakeoverProofMsg is the protocol msg sent by the downstream peer
type TakeoverProofMsg TakeoverProof

// String pretty prints TakeoverProofMsg
func (m TakeoverProofMsg) String() string {
	return fmt.Sprintf("Stream: '%v' [%v-%v], Root: %x, Sig: %x", m.Stream, m.Start, m.End, m.Root, m.Sig)
}

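// handleTakeoverProofMsg checks that a server exists for the stream; storing
// the strongest takeover proof is still a TODO (see the comment below).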
func (p *Peer) handleTakeoverProofMsg(ctx context.Context, req *TakeoverProofMsg) error {
	_, err := p.getServer(req.Stream)
	// store the strongest takeoverproof for the stream in streamer
	return err
}