github.com/oskarth/go-ethereum@v1.6.8-0.20191013093314-dac24a9d3494/swarm/network/stream/peer.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/p2p/protocols"
	"github.com/ethereum/go-ethereum/swarm/log"
	pq "github.com/ethereum/go-ethereum/swarm/network/priorityqueue"
	"github.com/ethereum/go-ethereum/swarm/network/stream/intervals"
	"github.com/ethereum/go-ethereum/swarm/spancontext"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
	opentracing "github.com/opentracing/opentracing-go"
)

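// notFoundError is returned by peer lookups when no server, client or
// client params entry is registered for the given stream.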
type notFoundError struct {
	t string
	s Stream
}

func newNotFoundError(t string, s Stream) *notFoundError {
	return &notFoundError{t: t, s: s}
}

func (e *notFoundError) Error() string {
	return fmt.Sprintf("%s not found for stream %q", e.t, e.s)
}

// ErrMaxPeerServers will be returned if the peer server limit is reached.
// It will be sent in the SubscribeErrorMsg.
var ErrMaxPeerServers = errors.New("max peer servers")

// Peer is the Peer extension for the streaming protocol.
type Peer struct {
	*protocols.Peer
	streamer *Registry
	pq       *pq.PriorityQueue
	serverMu sync.RWMutex
	clientMu sync.RWMutex // protects both clients and clientParams
	servers  map[Stream]*server
	clients  map[Stream]*client
	// clientParams map keeps required client arguments
	// that are set on Registry.Subscribe and used
	// on creating a new client in the offered hashes handler.
	clientParams map[Stream]*clientParams
	quit         chan struct{}
}

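// WrappedPriorityMsg bundles a message with the context it was submitted
// under, so that tracing and cancellation information is preserved while
// the message waits in the priority queue.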
type WrappedPriorityMsg struct {
	Context context.Context
	Msg     interface{}
}

// NewPeer is the constructor for Peer.
func NewPeer(peer *protocols.Peer, streamer *Registry) *Peer {
	p := &Peer{
		Peer:         peer,
		pq:           pq.New(int(PriorityQueue), PriorityQueueCap),
		streamer:     streamer,
		servers:      make(map[Stream]*server),
		clients:      make(map[Stream]*client),
		clientParams: make(map[Stream]*clientParams),
		quit:         make(chan struct{}),
	}
	ctx, cancel := context.WithCancel(context.Background())
	go p.pq.Run(ctx, func(i interface{}) {
		wmsg := i.(WrappedPriorityMsg)
		err := p.Send(wmsg.Context, wmsg.Msg)
		if err != nil {
			log.Error("Message send error, dropping peer", "peer", p.ID(), "err", err)
			p.Drop(err)
		}
	})

	// basic monitoring for pq contention
	go func(pq *pq.PriorityQueue) {
		ticker := time.NewTicker(5 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				var maxLen int
				var maxCap int
				for k := range pq.Queues {
					if maxLen < len(pq.Queues[k]) {
						maxLen = len(pq.Queues[k])
					}

					if maxCap < cap(pq.Queues[k]) {
						maxCap = cap(pq.Queues[k])
					}
				}

				metrics.GetOrRegisterGauge(fmt.Sprintf("pq_len_%s", p.ID().TerminalString()), nil).Update(int64(maxLen))
				metrics.GetOrRegisterGauge(fmt.Sprintf("pq_cap_%s", p.ID().TerminalString()), nil).Update(int64(maxCap))
			case <-p.quit:
				return
			}
		}
	}(p.pq)

	go func() {
		<-p.quit
		cancel()
	}()
	return p
}

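// A minimal usage sketch (illustrative only, not part of the original file;
// protoPeer and registry are hypothetical placeholders, and Top assumes the
// stream package's priority constants):
//
//	p := NewPeer(protoPeer, registry)
//	// messages are serialized through the peer's priority queue; on queue
//	// contention SendPriority drops the peer
//	err := p.SendPriority(context.Background(), msg, Top)
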
// Deliver sends a chunk delivery protocol message to the peer.
// Depending on the `syncing` parameter we send different message types.
func (p *Peer) Deliver(ctx context.Context, chunk storage.Chunk, priority uint8, syncing bool) error {
	var sp opentracing.Span
	var msg interface{}

	spanName := "send.chunk.delivery"

	// we send different types of messages if delivery is for syncing or retrievals,
	// even if handling and content of the message are the same,
	// because swap accounting decides which messages need accounting based on the message type
	if syncing {
		msg = &ChunkDeliveryMsgSyncing{
			Addr:  chunk.Address(),
			SData: chunk.Data(),
		}
		spanName += ".syncing"
	} else {
		msg = &ChunkDeliveryMsgRetrieval{
			Addr:  chunk.Address(),
			SData: chunk.Data(),
		}
		spanName += ".retrieval"
	}
	ctx, sp = spancontext.StartSpan(
		ctx,
		spanName)
	defer sp.Finish()

	return p.SendPriority(ctx, msg, priority)
}

// SendPriority sends a message to the peer using the outgoing priority queue.
func (p *Peer) SendPriority(ctx context.Context, msg interface{}, priority uint8) error {
	defer metrics.GetOrRegisterResettingTimer(fmt.Sprintf("peer.sendpriority_t.%d", priority), nil).UpdateSince(time.Now())
	metrics.GetOrRegisterCounter(fmt.Sprintf("peer.sendpriority.%d", priority), nil).Inc(1)
	wmsg := WrappedPriorityMsg{
		Context: ctx,
		Msg:     msg,
	}
	err := p.pq.Push(wmsg, int(priority))
	if err == pq.ErrContention {
		log.Warn("dropping peer on priority queue contention", "peer", p.ID())
		p.Drop(err)
	}
	return err
}

// SendOfferedHashes sends an OfferedHashesMsg protocol message to the peer.
func (p *Peer) SendOfferedHashes(s *server, f, t uint64) error {
	ctx, sp := spancontext.StartSpan(
		context.TODO(),
		"send.offered.hashes")
	defer sp.Finish()

	hashes, from, to, proof, err := s.setNextBatch(f, t)
	if err != nil {
		return err
	}
	// an empty batch is returned only when the server is quitting
	if len(hashes) == 0 {
		return nil
	}
	if proof == nil {
		proof = &HandoverProof{
			Handover: &Handover{},
		}
	}
	s.currentBatch = hashes
	msg := &OfferedHashesMsg{
		HandoverProof: proof,
		Hashes:        hashes,
		From:          from,
		To:            to,
		Stream:        s.stream,
	}
	log.Trace("Swarm syncer offer batch", "peer", p.ID(), "stream", s.stream, "len", len(hashes), "from", from, "to", to)
	return p.SendPriority(ctx, msg, s.priority)
}

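// getServer returns the server registered for the given stream,
// or a notFoundError if no such server exists.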
func (p *Peer) getServer(s Stream) (*server, error) {
	p.serverMu.RLock()
	defer p.serverMu.RUnlock()

	server := p.servers[s]
	if server == nil {
		return nil, newNotFoundError("server", s)
	}
	return server, nil
}

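// setServer registers a Server for the given stream with the given priority.
// It fails if a server is already registered for the stream or if the
// maxPeerServers limit is reached.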
func (p *Peer) setServer(s Stream, o Server, priority uint8) (*server, error) {
	p.serverMu.Lock()
	defer p.serverMu.Unlock()

	if p.servers[s] != nil {
		return nil, fmt.Errorf("server %s already registered", s)
	}

	if p.streamer.maxPeerServers > 0 && len(p.servers) >= p.streamer.maxPeerServers {
		return nil, ErrMaxPeerServers
	}

	sessionIndex, err := o.SessionIndex()
	if err != nil {
		return nil, err
	}
	os := &server{
		Server:       o,
		stream:       s,
		priority:     priority,
		sessionIndex: sessionIndex,
	}
	p.servers[s] = os
	return os, nil
}

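// removeServer closes and unregisters the server for the given stream.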
func (p *Peer) removeServer(s Stream) error {
	p.serverMu.Lock()
	defer p.serverMu.Unlock()

	server, ok := p.servers[s]
	if !ok {
		return newNotFoundError("server", s)
	}
	server.Close()
	delete(p.servers, s)
	return nil
}

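// getClient returns the client registered for the given stream. If the
// client is not yet created but its params are set, it waits until the
// client is created by the offered hashes handler or the context is done.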
func (p *Peer) getClient(ctx context.Context, s Stream) (c *client, err error) {
	var params *clientParams
	func() {
		p.clientMu.RLock()
		defer p.clientMu.RUnlock()

		c = p.clients[s]
		if c != nil {
			return
		}
		params = p.clientParams[s]
	}()
	if c != nil {
		return c, nil
	}

	if params != nil {
		if err := params.waitClient(ctx); err != nil {
			return nil, err
		}
	}

	p.clientMu.RLock()
	defer p.clientMu.RUnlock()

	c = p.clients[s]
	if c != nil {
		return c, nil
	}
	return nil, newNotFoundError("client", s)
}

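// getOrSetClient returns the client for the given stream, creating and
// registering it from the stored client params if it does not exist yet.
// For live streams, previously persisted live intervals are merged into
// the history intervals before a fresh interval entry is initialized.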
func (p *Peer) getOrSetClient(s Stream, from, to uint64) (c *client, created bool, err error) {
	p.clientMu.Lock()
	defer p.clientMu.Unlock()

	c = p.clients[s]
	if c != nil {
		return c, false, nil
	}

	f, err := p.streamer.GetClientFunc(s.Name)
	if err != nil {
		return nil, false, err
	}

	is, err := f(p, s.Key, s.Live)
	if err != nil {
		return nil, false, err
	}

	cp, err := p.getClientParams(s)
	if err != nil {
		return nil, false, err
	}
	defer func() {
		if err == nil {
			if err := p.removeClientParams(s); err != nil {
				log.Error("stream set client: remove client params", "stream", s, "peer", p, "err", err)
			}
		}
	}()

	intervalsKey := peerStreamIntervalsKey(p, s)
	if s.Live {
		// try to find previous history and live intervals and merge live into history
		historyKey := peerStreamIntervalsKey(p, NewStream(s.Name, s.Key, false))
		historyIntervals := &intervals.Intervals{}
		err := p.streamer.intervalsStore.Get(historyKey, historyIntervals)
		switch err {
		case nil:
			liveIntervals := &intervals.Intervals{}
			err := p.streamer.intervalsStore.Get(intervalsKey, liveIntervals)
			switch err {
			case nil:
				historyIntervals.Merge(liveIntervals)
				if err := p.streamer.intervalsStore.Put(historyKey, historyIntervals); err != nil {
					log.Error("stream set client: put history intervals", "stream", s, "peer", p, "err", err)
				}
			case state.ErrNotFound:
			default:
				log.Error("stream set client: get live intervals", "stream", s, "peer", p, "err", err)
			}
		case state.ErrNotFound:
		default:
			log.Error("stream set client: get history intervals", "stream", s, "peer", p, "err", err)
		}
	}

	if err := p.streamer.intervalsStore.Put(intervalsKey, intervals.NewIntervals(from)); err != nil {
		return nil, false, err
	}

	next := make(chan error, 1)
	c = &client{
		Client:         is,
		stream:         s,
		priority:       cp.priority,
		to:             cp.to,
		next:           next,
		quit:           make(chan struct{}),
		intervalsStore: p.streamer.intervalsStore,
		intervalsKey:   intervalsKey,
	}
	p.clients[s] = c
	cp.clientCreated() // unblock all possible getClient calls that are waiting
	next <- nil        // this is to allow wantedKeysMsg before first batch arrives
	return c, true, nil
}

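// removeClient closes and unregisters the client for the given stream.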
func (p *Peer) removeClient(s Stream) error {
	p.clientMu.Lock()
	defer p.clientMu.Unlock()

	client, ok := p.clients[s]
	if !ok {
		return newNotFoundError("client", s)
	}
	client.close()
	delete(p.clients, s)
	return nil
}

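// setClientParams stores the client params for the given stream so that a
// client can later be created from them in the offered hashes handler.
// It fails if a client or client params are already set for the stream.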
func (p *Peer) setClientParams(s Stream, params *clientParams) error {
	p.clientMu.Lock()
	defer p.clientMu.Unlock()

	if p.clients[s] != nil {
		return fmt.Errorf("client %s already exists", s)
	}
	if p.clientParams[s] != nil {
		return fmt.Errorf("client params %s already set", s)
	}
	p.clientParams[s] = params
	return nil
}

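// getClientParams returns the client params for the given stream.
// Callers must hold clientMu.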
func (p *Peer) getClientParams(s Stream) (*clientParams, error) {
	params := p.clientParams[s]
	if params == nil {
		return nil, fmt.Errorf("client params '%v' not provided to peer %v", s, p.ID())
	}
	return params, nil
}

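// removeClientParams deletes the client params for the given stream.
// Callers must hold clientMu.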
func (p *Peer) removeClientParams(s Stream) error {
	_, ok := p.clientParams[s]
	if !ok {
		return newNotFoundError("client params", s)
	}
	delete(p.clientParams, s)
	return nil
}

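// close shuts down all servers registered for the peer.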
func (p *Peer) close() {
	for _, s := range p.servers {
		s.Close()
	}
}