github.com/SmartMeshFoundation/Spectrum@v0.0.0-20220621030607-452a266fee1e/swarm/network/depo.go

// Copyright 2016 The Spectrum Authors
// This file is part of the Spectrum library.
//
// The Spectrum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Spectrum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Spectrum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"time"

	"github.com/SmartMeshFoundation/Spectrum/log"
	"github.com/SmartMeshFoundation/Spectrum/swarm/storage"
)

// Depo is the handler for storage/retrieval related protocol requests.
// It implements the StorageHandler interface used by the bzz protocol.
type Depo struct {
	hashfunc   storage.SwarmHasher
	localStore storage.ChunkStore
	netStore   storage.ChunkStore
}

func NewDepo(hash storage.SwarmHasher, localStore, remoteStore storage.ChunkStore) *Depo {
	return &Depo{
		hashfunc:   hash,
		localStore: localStore,
		netStore:   remoteStore, // entrypoint internal
	}
}
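
// Editorial sketch (not part of the original source): one way the Depo could be
// wired up by the node. It assumes the upstream swarm helper
// storage.MakeHashFunc("SHA3") is available for obtaining a SwarmHasher, and that
// localStore and netStore are the node's configured storage.ChunkStore
// implementations; any other hasher or store wiring works the same way.
//
//	hasher := storage.MakeHashFunc("SHA3") // assumption: as in upstream swarm storage
//	depo := NewDepo(hasher, localStore, netStore)
//	// depo is then handed to the bzz protocol as its StorageHandler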

// HandleUnsyncedKeysMsg handles an UnsyncedKeysMsg after message decoding - unsynced hashes up to the sync state
// * the remote sync state is just stored and handled in the protocol
// * filters through the new syncRequests and sends the missing ones
//   back immediately as a deliveryRequest message
// * an empty message just pings back for more (is this needed?)
// * strict signed sync states may be needed.
func (self *Depo) HandleUnsyncedKeysMsg(req *unsyncedKeysMsgData, p *peer) error {
	unsynced := req.Unsynced
	var missing []*syncRequest
	var chunk *storage.Chunk
	var err error
	for _, sreq := range unsynced {
		// skip keys that are already stored locally with data
		chunk, err = self.localStore.Get(sreq.Key[:])
		if err != nil || chunk.SData == nil {
			missing = append(missing, sreq)
		}
	}
	log.Debug(fmt.Sprintf("Depo.HandleUnsyncedKeysMsg: received %v unsynced keys: %v missing. new state: %v", len(unsynced), len(missing), req.State))
	log.Trace(fmt.Sprintf("Depo.HandleUnsyncedKeysMsg: received %v", unsynced))
	// send a delivery request with the missing keys
	err = p.deliveryRequest(missing)
	if err != nil {
		return err
	}
	// set the peer's sync state to persist
	p.syncState = req.State
	return nil
}

// HandleDeliveryRequestMsg handles a deliveryRequestMsg
// * serves the actual chunks asked for by the remote peer
//   by pushing them to the delivery queue (sync db) of the correct priority
//   (the remote peer is free to reprioritise)
// * the message implies the remote peer wants more, so a new outgoing
//   unsynced keys message is triggered
func (self *Depo) HandleDeliveryRequestMsg(req *deliveryRequestMsgData, p *peer) error {
	deliver := req.Deliver
	// queue the actual delivery of the requested chunks
	log.Trace(fmt.Sprintf("Depo.HandleDeliveryRequestMsg: received %v delivery requests: %v", len(deliver), deliver))
	for _, sreq := range deliver {
		// TODO: look up in cache here or in deliveries
		// priorities are taken from the message so the remote party can
		// reprioritise at their leisure
		// r = self.pullCached(sreq.Key) // pulls and deletes from cache
		Push(p, sreq.Key, sreq.Priority)
	}

	// this sends out a new unsyncedKeysMsg
	p.syncer.sendUnsyncedKeys()
	return nil
}

// HandleStoreRequestMsg is the entrypoint for store requests coming from the bzz wire protocol.
// If the key is already stored locally with its data, the request is ignored; otherwise,
// since the remote peer is untrusted, the hash is verified and the chunk is passed on to the NetStore.
func (self *Depo) HandleStoreRequestMsg(req *storeRequestMsgData, p *peer) {
	var islocal bool
	req.from = p
	chunk, err := self.localStore.Get(req.Key)
	switch {
	case err != nil:
		log.Trace(fmt.Sprintf("Depo.handleStoreRequest: %v not found locally. create new chunk/request", req.Key))
		// not found in the memory cache, i.e., a genuine store request
		// create chunk
		chunk = storage.NewChunk(req.Key, nil)

	case chunk.SData == nil:
		// found the chunk in the memory store, but it needs the data; validate now
		log.Trace(fmt.Sprintf("Depo.HandleStoreRequest: %v. request entry found", req))

	default:
		// data is found, store request ignored
		// this should update access count?
		log.Trace(fmt.Sprintf("Depo.HandleStoreRequest: %v found locally. ignore.", req))
		islocal = true
		//return
	}

	hasher := self.hashfunc()
	hasher.Write(req.SData)
	if !bytes.Equal(hasher.Sum(nil), req.Key) {
		// data does not validate, ignore
		// TODO: peer should be penalised/dropped?
		log.Warn(fmt.Sprintf("Depo.HandleStoreRequest: chunk invalid. store request ignored: %v", req))
		return
	}

	if islocal {
		return
	}
	// update chunk with size and data
	chunk.SData = req.SData // protocol validates that SData is at least 9 bytes long (int64 size + at least one byte of data)
	chunk.Size = int64(binary.LittleEndian.Uint64(req.SData[0:8]))
	log.Trace(fmt.Sprintf("delivery of %v from %v", chunk, p))
	chunk.Source = p
	self.netStore.Put(chunk)
}
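
// verifyStoredChunk is an editorial sketch, not part of the original protocol code:
// it restates the two checks HandleStoreRequestMsg applies to untrusted chunk data,
// namely that the payload hashes to the claimed key and that it carries the 8-byte
// little-endian size prefix the protocol guarantees (at least 9 bytes in total).
// The helper name and signature are illustrative only.
func (self *Depo) verifyStoredChunk(key storage.Key, data []byte) (size int64, ok bool) {
	if len(data) < 9 {
		// too short to hold the int64 size prefix plus one byte of payload
		return 0, false
	}
	hasher := self.hashfunc()
	hasher.Write(data)
	if !bytes.Equal(hasher.Sum(nil), key) {
		// content hash does not match the claimed key
		return 0, false
	}
	return int64(binary.LittleEndian.Uint64(data[:8])), true
}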

// HandleRetrieveRequestMsg is the entrypoint for retrieve requests coming from the bzz wire protocol.
// It checks the swap balance and returns early if the peer has no credit.
func (self *Depo) HandleRetrieveRequestMsg(req *retrieveRequestMsgData, p *peer) {
	req.from = p
	// swap - record credit for 1 request
	// note that we only charge for actual reqsearches
	var err error
	if p.swap != nil {
		err = p.swap.Add(1)
	}
	if err != nil {
		log.Warn(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - cannot process request: %v", req.Key.Log(), err))
		return
	}

	// call storage.NetStore#Get which
	// blocks until local retrieval is finished and
	// launches cloud retrieval
	chunk, _ := self.netStore.Get(req.Key)
	req = self.strategyUpdateRequest(chunk.Req, req)
	// check if we can deliver immediately
	if chunk.SData != nil {
		log.Trace(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - content found, delivering...", req.Key.Log()))

		if req.MaxSize == 0 || int64(req.MaxSize) >= chunk.Size {
			sreq := &storeRequestMsgData{
				Id:             req.Id,
				Key:            chunk.Key,
				SData:          chunk.SData,
				requestTimeout: req.timeout,
			}
			p.syncer.addRequest(sreq, DeliverReq)
		} else {
			log.Trace(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - content found, not wanted", req.Key.Log()))
		}
	} else {
		log.Trace(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - content not found locally. asked swarm for help. will get back", req.Key.Log()))
	}
}

// strategyUpdateRequest adds the requesting peer to the chunk's open request and
// decides the timeout for the response if the chunk is still being searched for
func (self *Depo) strategyUpdateRequest(rs *storage.RequestStatus, origReq *retrieveRequestMsgData) (req *retrieveRequestMsgData) {
	log.Trace(fmt.Sprintf("Depo.strategyUpdateRequest: key %v", origReq.Key.Log()))
	// we do not create an alternative request, we reuse the original one
	req = origReq
	if rs != nil {
		self.addRequester(rs, req)
		req.setTimeout(self.searchTimeout(rs, req))
	}
	return
}

// searchTimeout decides the timeout promise sent with the immediate peer's response to a
// retrieve request: if the request's own timeout is explicitly set and falls before the
// default search window (now + searchTimeout), that deadline is promised, otherwise the
// default one is
func (self *Depo) searchTimeout(rs *storage.RequestStatus, req *retrieveRequestMsgData) (timeout *time.Time) {
	reqt := req.getTimeout()
	t := time.Now().Add(searchTimeout)
	if reqt != nil && reqt.Before(t) {
		return reqt
	}
	return &t
}
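
// Editorial sketch (not part of the original source): the rule above boils down to
// promising the earlier of two deadlines, the requester's own and the node's default
// search window. The standalone helper below restates that rule; its name and its
// non-pointer return type are illustrative only.
func earliestDeadline(requestDeadline *time.Time, defaultWindow time.Duration) time.Time {
	fallback := time.Now().Add(defaultWindow)
	if requestDeadline != nil && requestDeadline.Before(fallback) {
		// the requester asked for an earlier (possibly already expired) deadline
		return *requestDeadline
	}
	// otherwise promise the default search window
	return fallback
}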

/*
adds a new peer to an existing open request
only add if fewer than requesterCount peers have forwarded the same request id so far
note this is done irrespective of status (searching or found)
*/
func (self *Depo) addRequester(rs *storage.RequestStatus, req *retrieveRequestMsgData) {
	log.Trace(fmt.Sprintf("Depo.addRequester: key %v - add peer to req.Id %v", req.Key.Log(), req.Id))
	list := rs.Requesters[req.Id]
	rs.Requesters[req.Id] = append(list, req)
}