github.com/linapex/ethereum-dpos-chinese@v0.0.0-20190316121959-b78b3a4a1ece/swarm/network/stream/messages.go

//<developer>
//    <name>linapex 曹一峰</name>
//    <email>linapex@163.com</email>
//    <wx>superexc</wx>
//    <qqgroup>128148617</qqgroup>
//    <url>https://jsq.ink</url>
//    <role>pku engineer</role>
//    <date>2019-03-16 12:09:48</date>
//</624342675700584448>
package stream

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/swarm/log"
	bv "github.com/ethereum/go-ethereum/swarm/network/bitvector"
	"github.com/ethereum/go-ethereum/swarm/spancontext"
	"github.com/ethereum/go-ethereum/swarm/storage"
	opentracing "github.com/opentracing/opentracing-go"
)

// Stream defines a unique stream identifier.
type Stream struct {
	// Name identifies the matching client and server functions.
	Name string
	// Key is the name of the specific stream data.
	Key string
	// Live defines whether the stream delivers only new data
	// for the specific stream.
	Live bool
}

func NewStream(name string, key string, live bool) Stream {
	return Stream{
		Name: name,
		Key:  key,
		Live: live,
	}
}

// String returns a textual stream id composed of all Stream fields:
// the name, the key and an "l" (live) or "h" (history) suffix.
func (s Stream) String() string {
	t := "h"
	if s.Live {
		t = "l"
	}
	return fmt.Sprintf("%s|%s|%s", s.Name, s.Key, t)
}
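
// Illustrative example (not from the original sources): a live stream created
// with NewStream("SYNC", "6", true) has the id "SYNC|6|l"; its history
// counterpart (Live == false) prints as "SYNC|6|h". The same ids appear when
// streams are logged by the handlers below.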

// SubscribeMsg is the protocol msg for requesting a stream (section).
type SubscribeMsg struct {
	Stream   Stream
	History  *Range `rlp:"nil"`
	Priority uint8  // delivered on priority channel
}

// RequestSubscriptionMsg is the protocol msg a node sends to ask the remote
// peer to subscribe to the given stream.
type RequestSubscriptionMsg struct {
	Stream   Stream
	History  *Range `rlp:"nil"`
	Priority uint8  // delivered on priority channel
}

// handleRequestSubscription responds to a RequestSubscriptionMsg by subscribing
// the local streamer to the requested stream offered by the peer.
func (p *Peer) handleRequestSubscription(ctx context.Context, req *RequestSubscriptionMsg) (err error) {
	log.Debug(fmt.Sprintf("handleRequestSubscription: streamer %s to subscribe to %s with stream %s", p.streamer.addr.ID(), p.ID(), req.Stream))
	return p.streamer.Subscribe(p.ID(), req.Stream, req.History, req.Priority)
}

// handleSubscribeMsg sets up a server for the requested stream and starts
// offering hashes to the subscribing peer; on failure the error is reported
// back in a SubscribeErrorMsg.
func (p *Peer) handleSubscribeMsg(ctx context.Context, req *SubscribeMsg) (err error) {
	metrics.GetOrRegisterCounter("peer.handlesubscribemsg", nil).Inc(1)

	defer func() {
		if err != nil {
			if e := p.Send(context.TODO(), SubscribeErrorMsg{
				Error: err.Error(),
			}); e != nil {
				log.Error("send stream subscribe error message", "err", err)
			}
		}
	}()

	log.Debug("received subscription", "from", p.streamer.addr.ID(), "peer", p.ID(), "stream", req.Stream, "history", req.History)

	f, err := p.streamer.GetServerFunc(req.Stream.Name)
	if err != nil {
		return err
	}

	s, err := f(p, req.Stream.Key, req.Stream.Live)
	if err != nil {
		return err
	}
	os, err := p.setServer(req.Stream, s, req.Priority)
	if err != nil {
		return err
	}

	var from uint64
	var to uint64
	if !req.Stream.Live && req.History != nil {
		from = req.History.From
		to = req.History.To
	}

	go func() {
		if err := p.SendOfferedHashes(os, from, to); err != nil {
			log.Warn("SendOfferedHashes dropping peer", "err", err)
			p.Drop(err)
		}
	}()

	if req.Stream.Live && req.History != nil {
		// the peer subscribed to the live stream with a history range:
		// set up a separate server for the history stream as well
		s, err := f(p, req.Stream.Key, false)
		if err != nil {
			return err
		}

		os, err := p.setServer(getHistoryStream(req.Stream), s, getHistoryPriority(req.Priority))
		if err != nil {
			return err
		}
		go func() {
			if err := p.SendOfferedHashes(os, req.History.From, req.History.To); err != nil {
				log.Warn("SendOfferedHashes dropping peer", "err", err)
				p.Drop(err)
			}
		}()
	}

	return nil
}
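
// Illustrative example (not from the original sources): a SubscribeMsg for
// NewStream("SYNC", "6", true) with History = &Range{From: 0, To: 100} makes
// this handler set up two servers: the open-ended live stream itself and, via
// getHistoryStream/getHistoryPriority, a history stream limited to the
// [0, 100] range, each immediately offering its first batch of hashes.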

// SubscribeErrorMsg is the protocol msg for signaling a failed subscription.
type SubscribeErrorMsg struct {
	Error string
}

func (p *Peer) handleSubscribeErrorMsg(req *SubscribeErrorMsg) (err error) {
	return fmt.Errorf("subscribe to peer %s: %v", p.ID(), req.Error)
}

// UnsubscribeMsg is the protocol msg for canceling a stream subscription;
// the receiving peer removes its server for the stream.
type UnsubscribeMsg struct {
	Stream Stream
}

func (p *Peer) handleUnsubscribeMsg(req *UnsubscribeMsg) error {
	return p.removeServer(req.Stream)
}

// QuitMsg is the protocol msg for closing a stream; the receiving peer
// removes its client for the stream.
type QuitMsg struct {
	Stream Stream
}

func (p *Peer) handleQuitMsg(req *QuitMsg) error {
	return p.removeClient(req.Stream)
}

// OfferedHashesMsg is the protocol msg for offering to hand over a
// stream section.
type OfferedHashesMsg struct {
	Stream         Stream // name of Stream
	From, To       uint64 // index range covered by the offered batch
	Hashes         []byte // concatenation of chunk hashes, HashSize bytes each
	*HandoverProof        // proof of handover of the stream section
}

// String pretty prints OfferedHashesMsg.
func (m OfferedHashesMsg) String() string {
	return fmt.Sprintf("Stream '%v' [%v-%v] (%v)", m.Stream, m.From, m.To, len(m.Hashes)/HashSize)
}
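
// Illustrative sketch (not from the original sources): Hashes is a flat
// concatenation of fixed-size chunk hashes, so hash i of an offered batch is
// the slice Hashes[i*HashSize : (i+1)*HashSize] and the count printed by
// String is len(Hashes)/HashSize.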

// handleOfferedHashesMsg protocol msg handler: marks which of the offered
// hashes are actually needed, requests them via WantedHashesMsg and schedules
// the next batch.
func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg) error {
	metrics.GetOrRegisterCounter("peer.handleofferedhashes", nil).Inc(1)

	var sp opentracing.Span
	ctx, sp = spancontext.StartSpan(
		ctx,
		"handle.offered.hashes")
	defer sp.Finish()

	c, _, err := p.getOrSetClient(req.Stream, req.From, req.To)
	if err != nil {
		return err
	}
	hashes := req.Hashes
	want, err := bv.New(len(hashes) / HashSize)
	if err != nil {
		return fmt.Errorf("error initialising bitvector of length %v: %v", len(hashes)/HashSize, err)
	}
	wg := sync.WaitGroup{}
	for i := 0; i < len(hashes); i += HashSize {
		hash := hashes[i : i+HashSize]

		if wait := c.NeedData(ctx, hash); wait != nil {
			want.Set(i/HashSize, true)
			wg.Add(1)
			// the chunk is needed: wait (in a goroutine) until its data arrives and is stored
			go func(w func()) {
				w()
				wg.Done()
			}(wait)
		}
	}
	// once all needed chunks of this batch have arrived, signal batch completion
	// on c.next so that the want message for the following batch can be sent,
	// or give up if the client quits first
	go func() {
		wg.Wait()
		select {
		case c.next <- c.batchDone(p, req, hashes):
		case <-c.quit:
		}
	}()
	// for a live stream, record where the offered batch starts in this session
	if c.stream.Live {
		c.sessionAt = req.From
	}
	from, to := c.nextBatch(req.To + 1)
	log.Trace("received offered batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To)
	if from == to {
		return nil
	}

	msg := &WantedHashesMsg{
		Stream: req.Stream,
		Want:   want.Bytes(),
		From:   from,
		To:     to,
	}
	go func() {
		select {
		case <-time.After(120 * time.Second):
			log.Warn("handleOfferedHashesMsg timeout, so dropping peer")
			p.Drop(errors.New("handle offered hashes timeout"))
			return
		case err := <-c.next:
			if err != nil {
				log.Warn("c.next dropping peer", "err", err)
				p.Drop(err)
				return
			}
		case <-c.quit:
			return
		}
		log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To)
		err := p.SendPriority(ctx, msg, c.priority)
		if err != nil {
			log.Warn("SendPriority err, so dropping peer", "err", err)
			p.Drop(err)
		}
	}()
	return nil
}
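
// Illustrative sketch (not from the original sources), assuming a client that
// is missing the first and third of four offered hashes; the reply built by
// the handler above is equivalent to:
//
//	want, _ := bv.New(4)
//	want.Set(0, true) // Hashes[0:HashSize] is needed
//	want.Set(2, true) // Hashes[2*HashSize : 3*HashSize] is needed
//	msg := &WantedHashesMsg{Stream: req.Stream, Want: want.Bytes(), From: from, To: to}
//	// From/To name the next interval the client wants offered, not the current batch.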

// WantedHashesMsg is the protocol msg for signaling which of the offered
// hashes the downstream peer actually wants sent over.
type WantedHashesMsg struct {
	Stream   Stream
	Want     []byte // bitvector indicating which hashes of the batch are needed
	From, To uint64 // next interval to be offered
}

// String pretty prints WantedHashesMsg.
func (m WantedHashesMsg) String() string {
	return fmt.Sprintf("Stream '%v', Want: %x, Next: [%v-%v]", m.Stream, m.Want, m.From, m.To)
}

// handleWantedHashesMsg protocol msg handler:
// * delivers the chunks of the current batch that the peer marked as wanted
// * kicks off offering the next batch of hashes
func (p *Peer) handleWantedHashesMsg(ctx context.Context, req *WantedHashesMsg) error {
	metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg", nil).Inc(1)

	log.Trace("received wanted batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To)
	s, err := p.getServer(req.Stream)
	if err != nil {
		return err
	}
	hashes := s.currentBatch
	// offer the next batch in a goroutine, since fetching it may block until new hashes arrive
	go func() {
		if err := p.SendOfferedHashes(s, req.From, req.To); err != nil {
			log.Warn("SendOfferedHashes dropping peer", "err", err)
			p.Drop(err)
		}
	}()
	// deliver the chunks of the current batch that the peer marked as wanted
	l := len(hashes) / HashSize

	log.Trace("wanted batch length", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To, "lenhashes", len(hashes), "l", l)
	want, err := bv.NewFromBytes(req.Want, l)
	if err != nil {
		return fmt.Errorf("error initialising bitvector of length %v: %v", l, err)
	}
	for i := 0; i < l; i++ {
		if want.Get(i) {
			metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg.actualget", nil).Inc(1)

			hash := hashes[i*HashSize : (i+1)*HashSize]
			data, err := s.GetData(ctx, hash)
			if err != nil {
				return fmt.Errorf("handleWantedHashesMsg get data %x: %v", hash, err)
			}
			chunk := storage.NewChunk(hash, nil)
			chunk.SData = data
			if length := len(chunk.SData); length < 9 {
				log.Error("Chunk.SData to sync is too short", "len(chunk.SData)", length, "address", chunk.Addr)
			}
			if err := p.Deliver(ctx, chunk, s.priority); err != nil {
				return err
			}
		}
	}
	return nil
}
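
// Illustrative message flow (not from the original sources) for one batch of a
// subscription; chunk deliveries are handled elsewhere in this package and the
// next batch is offered concurrently with them:
//
//	client                              server
//	  SubscribeMsg             ------->
//	                           <------- OfferedHashesMsg [From, To]
//	  WantedHashesMsg          ------->
//	  (Want + next From, To)
//	                           <------- chunk deliveries for wanted hashes
//	                           <------- OfferedHashesMsg [next From, To]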

// Handover represents a statement that the upstream peer hands over the stream section.
type Handover struct {
	Stream     Stream // name of stream
	Start, End uint64 // index range of hashes handed over
	Root       []byte // root hash of the handed-over segment
}

// HandoverProof represents a signed statement that the upstream peer handed over the stream section.
type HandoverProof struct {
	Sig []byte // signature over the serialised Handover
	*Handover
}

// Takeover represents a statement that the downstream peer took over (stored all data of)
// the handed-over stream section.
type Takeover Handover

// TakeoverProof represents a signed statement that the downstream peer took over
// the stream section.
type TakeoverProof struct {
	Sig []byte // signature over the serialised Takeover
	*Takeover
}

// TakeoverProofMsg is the protocol msg sent by the downstream peer.
type TakeoverProofMsg TakeoverProof

// String pretty prints TakeoverProofMsg.
func (m TakeoverProofMsg) String() string {
	return fmt.Sprintf("Stream: '%v' [%v-%v], Root: %x, Sig: %x", m.Stream, m.Start, m.End, m.Root, m.Sig)
}

func (p *Peer) handleTakeoverProofMsg(ctx context.Context, req *TakeoverProofMsg) error {
	_, err := p.getServer(req.Stream)
	// the proof itself is not processed here; only verify that a server exists for the stream
	return err
}