github.com/truechain/go-ethereum@v1.8.11/swarm/network/depo.go (about)

     1  // Copyright 2016 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package network
    18  
    19  import (
    20  	"bytes"
    21  	"encoding/binary"
    22  	"fmt"
    23  	"time"
    24  
    25  	"github.com/ethereum/go-ethereum/log"
    26  	"github.com/ethereum/go-ethereum/metrics"
    27  	"github.com/ethereum/go-ethereum/swarm/storage"
    28  )
    29  
// Metrics counters tracking sync traffic handled by Depo.
var (
	syncReceiveCount  = metrics.NewRegisteredCounter("network.sync.recv.count", nil)  // store requests for chunks not yet held locally
	syncReceiveIgnore = metrics.NewRegisteredCounter("network.sync.recv.ignore", nil) // store requests ignored because the chunk is already stored
	syncSendCount     = metrics.NewRegisteredCounter("network.sync.send.count", nil)  // chunks queued for delivery to a requesting peer
	syncSendRefused   = metrics.NewRegisteredCounter("network.sync.send.refused", nil) // deliveries refused (chunk exceeds requester's MaxSize)
	syncSendNotFound  = metrics.NewRegisteredCounter("network.sync.send.notfound", nil) // retrieve requests that could not be served locally
)
    38  
// Depo is the handler for storage/retrieval related protocol requests;
// it implements the StorageHandler interface used by the bzz protocol.
type Depo struct {
	hashfunc   storage.SwarmHasher // constructor for the hasher used to validate a chunk's data against its key
	localStore storage.ChunkStore  // local store, consulted first for lookups
	netStore   storage.ChunkStore  // network-backed store; entrypoint for cloud storage/retrieval
}
    46  
    47  func NewDepo(hash storage.SwarmHasher, localStore, remoteStore storage.ChunkStore) *Depo {
    48  	return &Depo{
    49  		hashfunc:   hash,
    50  		localStore: localStore,
    51  		netStore:   remoteStore, // entrypoint internal
    52  	}
    53  }
    54  
    55  // Handles UnsyncedKeysMsg after msg decoding - unsynced hashes upto sync state
    56  // * the remote sync state is just stored and handled in protocol
    57  // * filters through the new syncRequests and send the ones missing
    58  // * back immediately as a deliveryRequest message
    59  // * empty message just pings back for more (is this needed?)
    60  // * strict signed sync states may be needed.
    61  func (self *Depo) HandleUnsyncedKeysMsg(req *unsyncedKeysMsgData, p *peer) error {
    62  	unsynced := req.Unsynced
    63  	var missing []*syncRequest
    64  	var chunk *storage.Chunk
    65  	var err error
    66  	for _, req := range unsynced {
    67  		// skip keys that are found,
    68  		chunk, err = self.localStore.Get(req.Key[:])
    69  		if err != nil || chunk.SData == nil {
    70  			missing = append(missing, req)
    71  		}
    72  	}
    73  	log.Debug(fmt.Sprintf("Depo.HandleUnsyncedKeysMsg: received %v unsynced keys: %v missing. new state: %v", len(unsynced), len(missing), req.State))
    74  	log.Trace(fmt.Sprintf("Depo.HandleUnsyncedKeysMsg: received %v", unsynced))
    75  	// send delivery request with missing keys
    76  	err = p.deliveryRequest(missing)
    77  	if err != nil {
    78  		return err
    79  	}
    80  	// set peers state to persist
    81  	p.syncState = req.State
    82  	return nil
    83  }
    84  
    85  // Handles deliveryRequestMsg
    86  // * serves actual chunks asked by the remote peer
    87  // by pushing to the delivery queue (sync db) of the correct priority
    88  // (remote peer is free to reprioritize)
    89  // * the message implies remote peer wants more, so trigger for
    90  // * new outgoing unsynced keys message is fired
    91  func (self *Depo) HandleDeliveryRequestMsg(req *deliveryRequestMsgData, p *peer) error {
    92  	deliver := req.Deliver
    93  	// queue the actual delivery of a chunk ()
    94  	log.Trace(fmt.Sprintf("Depo.HandleDeliveryRequestMsg: received %v delivery requests: %v", len(deliver), deliver))
    95  	for _, sreq := range deliver {
    96  		// TODO: look up in cache here or in deliveries
    97  		// priorities are taken from the message so the remote party can
    98  		// reprioritise to at their leisure
    99  		// r = self.pullCached(sreq.Key) // pulls and deletes from cache
   100  		Push(p, sreq.Key, sreq.Priority)
   101  	}
   102  
   103  	// sends it out as unsyncedKeysMsg
   104  	p.syncer.sendUnsyncedKeys()
   105  	return nil
   106  }
   107  
// the entrypoint for store requests coming from the bzz wire protocol
// if key found locally, return. otherwise
// remote is untrusted, so hash is verified and chunk passed on to NetStore
func (self *Depo) HandleStoreRequestMsg(req *storeRequestMsgData, p *peer) {
	var islocal bool
	req.from = p
	// look up the key locally to decide whether this is a genuine new chunk,
	// a pending request entry awaiting data, or a duplicate of stored data
	chunk, err := self.localStore.Get(req.Key)
	switch {
	case err != nil:
		log.Trace(fmt.Sprintf("Depo.handleStoreRequest: %v not found locally. create new chunk/request", req.Key))
		// not found in memory cache, ie., a genuine store request
		// create chunk
		syncReceiveCount.Inc(1)
		chunk = storage.NewChunk(req.Key, nil)

	case chunk.SData == nil:
		// found chunk in memory store, needs the data, validate now
		log.Trace(fmt.Sprintf("Depo.HandleStoreRequest: %v. request entry found", req))

	default:
		// data is found, store request ignored
		// this should update access count?
		syncReceiveIgnore.Inc(1)
		log.Trace(fmt.Sprintf("Depo.HandleStoreRequest: %v found locally. ignore.", req))
		islocal = true
		//return
	}

	// remote peer is untrusted: re-hash the payload and verify it matches the
	// claimed key before accepting the chunk.
	// NOTE(review): this validation also runs when islocal is true, i.e. even
	// for chunks we already hold - presumably deliberate so an invalid request
	// is still detected and logged before the early return below; confirm.
	hasher := self.hashfunc()
	hasher.Write(req.SData)
	if !bytes.Equal(hasher.Sum(nil), req.Key) {
		// data does not validate, ignore
		// TODO: peer should be penalised/dropped?
		log.Warn(fmt.Sprintf("Depo.HandleStoreRequest: chunk invalid. store request ignored: %v", req))
		return
	}

	// duplicate of locally stored data: nothing more to do
	if islocal {
		return
	}
	// update chunk with size and data
	chunk.SData = req.SData // protocol validates that SData is minimum 9 bytes long (int64 size  + at least one byte of data)
	chunk.Size = int64(binary.LittleEndian.Uint64(req.SData[0:8]))
	log.Trace(fmt.Sprintf("delivery of %v from %v", chunk, p))
	chunk.Source = p
	// hand the validated chunk to the net store for persistence/propagation
	self.netStore.Put(chunk)
}
   155  
// entrypoint for retrieve requests coming from the bzz wire protocol
// checks swap balance - return if peer has no credit
func (self *Depo) HandleRetrieveRequestMsg(req *retrieveRequestMsgData, p *peer) {
	req.from = p
	// swap - record credit for 1 request
	// note that only charge actual reqsearches
	var err error
	if p.swap != nil {
		err = p.swap.Add(1)
	}
	if err != nil {
		// peer has no credit: refuse to serve the request
		log.Warn(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - cannot process request: %v", req.Key.Log(), err))
		return
	}

	// call storage.NetStore#Get which
	// blocks until local retrieval finished
	// launches cloud retrieval
	// NOTE(review): the error from Get is discarded and chunk.Req is
	// dereferenced unconditionally below - this assumes Get never returns a
	// nil chunk; confirm against NetStore.Get's contract.
	chunk, _ := self.netStore.Get(req.Key)
	req = self.strategyUpdateRequest(chunk.Req, req)
	// check if we can immediately deliver
	if chunk.SData != nil {
		log.Trace(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - content found, delivering...", req.Key.Log()))

		// honour the requester's size limit (MaxSize == 0 means unlimited)
		if req.MaxSize == 0 || int64(req.MaxSize) >= chunk.Size {
			sreq := &storeRequestMsgData{
				Id:             req.Id,
				Key:            chunk.Key,
				SData:          chunk.SData,
				requestTimeout: req.timeout, //
			}
			syncSendCount.Inc(1)
			p.syncer.addRequest(sreq, DeliverReq)
		} else {
			syncSendRefused.Inc(1)
			log.Trace(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - content found, not wanted", req.Key.Log()))
		}
	} else {
		// not available locally; NetStore has launched a cloud retrieval and
		// the response (if any) is delivered asynchronously
		syncSendNotFound.Inc(1)
		log.Trace(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - content not found locally. asked swarm for help. will get back", req.Key.Log()))
	}
}
   198  
   199  // add peer request the chunk and decides the timeout for the response if still searching
   200  func (self *Depo) strategyUpdateRequest(rs *storage.RequestStatus, origReq *retrieveRequestMsgData) (req *retrieveRequestMsgData) {
   201  	log.Trace(fmt.Sprintf("Depo.strategyUpdateRequest: key %v", origReq.Key.Log()))
   202  	// we do not create an alternative one
   203  	req = origReq
   204  	if rs != nil {
   205  		self.addRequester(rs, req)
   206  		req.setTimeout(self.searchTimeout(rs, req))
   207  	}
   208  	return
   209  }
   210  
   211  // decides the timeout promise sent with the immediate peers response to a retrieve request
   212  // if timeout is explicitly set and expired
   213  func (self *Depo) searchTimeout(rs *storage.RequestStatus, req *retrieveRequestMsgData) (timeout *time.Time) {
   214  	reqt := req.getTimeout()
   215  	t := time.Now().Add(searchTimeout)
   216  	if reqt != nil && reqt.Before(t) {
   217  		return reqt
   218  	} else {
   219  		return &t
   220  	}
   221  }
   222  
   223  /*
   224  adds a new peer to an existing open request
   225  only add if less than requesterCount peers forwarded the same request id so far
   226  note this is done irrespective of status (searching or found)
   227  */
   228  func (self *Depo) addRequester(rs *storage.RequestStatus, req *retrieveRequestMsgData) {
   229  	log.Trace(fmt.Sprintf("Depo.addRequester: key %v - add peer to req.Id %v", req.Key.Log(), req.Id))
   230  	list := rs.Requesters[req.Id]
   231  	rs.Requesters[req.Id] = append(list, req)
   232  }