github.com/aaa256/atlantis@v0.0.0-20210707112435-42ee889287a2/swarm/network/stream/messages.go

// Copyright 2018 The go-athereum Authors
// This file is part of the go-athereum library.
//
// The go-athereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-athereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-athereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/athereum/go-athereum/metrics"
	"github.com/athereum/go-athereum/swarm/log"
	bv "github.com/athereum/go-athereum/swarm/network/bitvector"
	"github.com/athereum/go-athereum/swarm/storage"
)

// Stream defines a unique stream identifier.
type Stream struct {
	// Name is used to identify the Client and Server functions.
	Name string
	// Key is the name of the specific stream data.
	Key string
	// Live defines whether the stream delivers only new data
	// for the specific stream.
	Live bool
}

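// NewStream creates a Stream identifier from its name, key and live flag.
// For illustration (the values are arbitrary), NewStream("SYNC", "6", true)
// renders via String as "SYNC|6|l", and the equivalent history (non-live)
// stream as "SYNC|6|h".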
func NewStream(name string, key string, live bool) Stream {
	return Stream{
		Name: name,
		Key:  key,
		Live: live,
	}
}

// String returns a stream id based on all Stream fields.
func (s Stream) String() string {
	t := "h"
	if s.Live {
		t = "l"
	}
	return fmt.Sprintf("%s|%s|%s", s.Name, s.Key, t)
}

// SubscribeMsg is the protocol msg for requesting a stream (section)
type SubscribeMsg struct {
	Stream   Stream
	History  *Range `rlp:"nil"`
	Priority uint8  // delivered on priority channel
}

// RequestSubscriptionMsg is the protocol msg for a node to request subscription to a
// specific stream
type RequestSubscriptionMsg struct {
	Stream   Stream
	History  *Range `rlp:"nil"`
	Priority uint8  // delivered on priority channel
}

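// handleRequestSubscription handles a RequestSubscriptionMsg by subscribing this
// node to the requested stream on the requesting peer.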
func (p *Peer) handleRequestSubscription(req *RequestSubscriptionMsg) (err error) {
	log.Debug(fmt.Sprintf("handleRequestSubscription: streamer %s to subscribe to %s with stream %s", p.streamer.addr.ID(), p.ID(), req.Stream))
	return p.streamer.Subscribe(p.ID(), req.Stream, req.History, req.Priority)
}

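// handleSubscribeMsg registers a server for the requested stream and starts
// offering hashes for the requested range. For a live subscription that also
// carries a history range, a second server is set up for the corresponding
// history stream. Any error is reported back to the peer via SubscribeErrorMsg.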
func (p *Peer) handleSubscribeMsg(req *SubscribeMsg) (err error) {
	metrics.GetOrRegisterCounter("peer.handlesubscribemsg", nil).Inc(1)

	defer func() {
		if err != nil {
			if e := p.Send(SubscribeErrorMsg{
				Error: err.Error(),
			}); e != nil {
				log.Error("send stream subscribe error message", "err", e)
			}
		}
	}()

	log.Debug("received subscription", "from", p.streamer.addr.ID(), "peer", p.ID(), "stream", req.Stream, "history", req.History)

	f, err := p.streamer.GetServerFunc(req.Stream.Name)
	if err != nil {
		return err
	}

	s, err := f(p, req.Stream.Key, req.Stream.Live)
	if err != nil {
		return err
	}
	os, err := p.setServer(req.Stream, s, req.Priority)
	if err != nil {
		return err
	}

	var from uint64
	var to uint64
	if !req.Stream.Live && req.History != nil {
		from = req.History.From
		to = req.History.To
	}

	go func() {
		if err := p.SendOfferedHashes(os, from, to); err != nil {
			log.Warn("SendOfferedHashes dropping peer", "err", err)
			p.Drop(err)
		}
	}()

	if req.Stream.Live && req.History != nil {
		// subscribe to the history stream
		s, err := f(p, req.Stream.Key, false)
		if err != nil {
			return err
		}

		os, err := p.setServer(getHistoryStream(req.Stream), s, getHistoryPriority(req.Priority))
		if err != nil {
			return err
		}
		go func() {
			if err := p.SendOfferedHashes(os, req.History.From, req.History.To); err != nil {
				log.Warn("SendOfferedHashes dropping peer", "err", err)
				p.Drop(err)
			}
		}()
	}

	return nil
}

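// SubscribeErrorMsg is the protocol msg sent back to the subscriber when a
// subscription request could not be served.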
type SubscribeErrorMsg struct {
	Error string
}

func (p *Peer) handleSubscribeErrorMsg(req *SubscribeErrorMsg) (err error) {
	return fmt.Errorf("subscribe to peer %s: %v", p.ID(), req.Error)
}

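// UnsubscribeMsg is the protocol msg for cancelling a subscription; the
// receiving peer removes its server for the stream.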
type UnsubscribeMsg struct {
	Stream Stream
}

func (p *Peer) handleUnsubscribeMsg(req *UnsubscribeMsg) error {
	return p.removeServer(req.Stream)
}

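// QuitMsg is the protocol msg signalling that the stream is being closed; the
// receiving peer removes its client for the stream.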
type QuitMsg struct {
	Stream Stream
}

func (p *Peer) handleQuitMsg(req *QuitMsg) error {
	return p.removeClient(req.Stream)
}

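// The offer/want handshake below works as follows: the upstream peer offers a
// batch of hashes in an OfferedHashesMsg, the downstream peer replies with a
// WantedHashesMsg whose bitvector marks the hashes it is missing, and the
// corresponding chunks are then delivered.
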
// OfferedHashesMsg is the protocol msg for offering to hand over a
// stream section
type OfferedHashesMsg struct {
	Stream         Stream // name of Stream
	From, To       uint64 // peer and db-specific entry count
	Hashes         []byte // stream of hashes (128)
	*HandoverProof        // HandoverProof
}

// String pretty prints OfferedHashesMsg
func (m OfferedHashesMsg) String() string {
	return fmt.Sprintf("Stream '%v' [%v-%v] (%v)", m.Stream, m.From, m.To, len(m.Hashes)/HashSize)
}

// handleOfferedHashesMsg protocol msg handler is called on the downstream peer;
// it marks which of the offered hashes are needed, schedules retrieval of the
// missing chunks and asks for the next batch
func (p *Peer) handleOfferedHashesMsg(req *OfferedHashesMsg) error {
	metrics.GetOrRegisterCounter("peer.handleofferedhashes", nil).Inc(1)

	c, _, err := p.getOrSetClient(req.Stream, req.From, req.To)
	if err != nil {
		return err
	}
	hashes := req.Hashes
	want, err := bv.New(len(hashes) / HashSize)
	if err != nil {
		return fmt.Errorf("error initialising bitvector of length %v: %v", len(hashes)/HashSize, err)
	}
	wg := sync.WaitGroup{}
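	// flag every offered hash that is still missing locally in the want
	// bitvector; each NeedData wait function blocks until the corresponding
	// chunk has arrived and been stored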
	for i := 0; i < len(hashes); i += HashSize {
		hash := hashes[i : i+HashSize]

		if wait := c.NeedData(hash); wait != nil {
			want.Set(i/HashSize, true)
			wg.Add(1)
			// create request and wait until the chunk data arrives and is stored
			go func(w func()) {
				w()
				wg.Done()
			}(wait)
		}
	}
	// done := make(chan bool)
	// go func() {
	// 	wg.Wait()
	// 	close(done)
	// }()
	// go func() {
	// 	select {
	// 	case <-done:
	// 		s.next <- s.batchDone(p, req, hashes)
	// 	case <-time.After(1 * time.Second):
	// 		p.Drop(errors.New("timeout waiting for batch to be delivered"))
	// 	}
	// }()
	go func() {
		wg.Wait()
		select {
		case c.next <- c.batchDone(p, req, hashes):
		case <-c.quit:
		}
	}()
	// only send wantedKeysMsg if all missing chunks of the previous batch arrived
	// except
	if c.stream.Live {
		c.sessionAt = req.From
	}
	from, to := c.nextBatch(req.To + 1)
	log.Trace("received offered batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To)
	if from == to {
		return nil
	}

	msg := &WantedHashesMsg{
		Stream: req.Stream,
		Want:   want.Bytes(),
		From:   from,
		To:     to,
	}
	go func() {
		select {
		case <-time.After(120 * time.Second):
			log.Warn("handleOfferedHashesMsg timeout, so dropping peer")
			p.Drop(errors.New("handle offered hashes timeout"))
			return
		case err := <-c.next:
			if err != nil {
				log.Warn("c.next dropping peer", "err", err)
				p.Drop(err)
				return
			}
		case <-c.quit:
			return
		}
		log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To)
		err := p.SendPriority(msg, c.priority)
		if err != nil {
			log.Warn("SendPriority err, so dropping peer", "err", err)
			p.Drop(err)
		}
	}()
	return nil
}

// WantedHashesMsg is the protocol msg data for signaling which of the hashes
// offered in OfferedHashesMsg the downstream peer actually wants sent over
type WantedHashesMsg struct {
	Stream   Stream
	Want     []byte // bitvector indicating which keys of the batch needed
	From, To uint64 // next interval offset - empty if not to be continued
}

// String pretty prints WantedHashesMsg
func (m WantedHashesMsg) String() string {
	return fmt.Sprintf("Stream '%v', Want: %x, Next: [%v-%v]", m.Stream, m.Want, m.From, m.To)
}

// handleWantedHashesMsg protocol msg handler
// * sends the next batch of unsynced keys
// * sends the actual data chunks as per WantedHashesMsg
func (p *Peer) handleWantedHashesMsg(req *WantedHashesMsg) error {
	metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg", nil).Inc(1)

	log.Trace("received wanted batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To)
	s, err := p.getServer(req.Stream)
	if err != nil {
		return err
	}
	hashes := s.currentBatch
	// launch in goroutine since GetBatch blocks until new hashes arrive
	go func() {
		if err := p.SendOfferedHashes(s, req.From, req.To); err != nil {
			log.Warn("SendOfferedHashes dropping peer", "err", err)
			p.Drop(err)
		}
	}()
	// go p.SendOfferedHashes(s, req.From, req.To)
	l := len(hashes) / HashSize

	log.Trace("wanted batch length", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To, "lenhashes", len(hashes), "l", l)
	want, err := bv.NewFromBytes(req.Want, l)
	if err != nil {
		return fmt.Errorf("error initialising bitvector of length %v: %v", l, err)
	}
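	// walk the want bitvector and deliver each chunk the downstream peer asked for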
	for i := 0; i < l; i++ {
		if want.Get(i) {
			metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg.actualget", nil).Inc(1)

			hash := hashes[i*HashSize : (i+1)*HashSize]
			data, err := s.GetData(hash)
			if err != nil {
				return fmt.Errorf("handleWantedHashesMsg get data %x: %v", hash, err)
			}
			chunk := storage.NewChunk(hash, nil)
			chunk.SData = data
			if length := len(chunk.SData); length < 9 {
				log.Error("Chunk.SData to sync is too short", "len(chunk.SData)", length, "address", chunk.Addr)
			}
			if err := p.Deliver(chunk, s.priority); err != nil {
				return err
			}
		}
	}
	return nil
}

// Handover represents a statement that the upstream peer hands over the stream section
type Handover struct {
	Stream     Stream // name of stream
	Start, End uint64 // index of hashes
	Root       []byte // Root hash for indexed segment inclusion proofs
}

// HandoverProof represents a signed statement that the upstream peer handed over the stream section
type HandoverProof struct {
	Sig []byte // Sign(Hash(Serialisation(Handover)))
	*Handover
}

// Takeover represents a statement that the downstream peer took over (stored all data)
// handed over
type Takeover Handover

// TakeoverProof represents a signed statement that the downstream peer took over
// the stream section
type TakeoverProof struct {
	Sig []byte // Sign(Hash(Serialisation(Takeover)))
	*Takeover
}

// TakeoverProofMsg is the protocol msg sent by downstream peer
type TakeoverProofMsg TakeoverProof

// String pretty prints TakeoverProofMsg
func (m TakeoverProofMsg) String() string {
	return fmt.Sprintf("Stream: '%v' [%v-%v], Root: %x, Sig: %x", m.Stream, m.Start, m.End, m.Root, m.Sig)
}

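// handleTakeoverProofMsg currently only checks that a server exists for the
// stream; storing the strongest takeover proof, as the comment in the body
// suggests, does not appear to be implemented yet.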
func (p *Peer) handleTakeoverProofMsg(req *TakeoverProofMsg) error {
	_, err := p.getServer(req.Stream)
	// store the strongest takeoverproof for the stream in streamer
	return err
}