github.com/sberex/go-sberex@v1.8.2-0.20181113200658-ed96ac38f7d7/swarm/network/forwarding.go (about)

// This file is part of the go-sberex library. The go-sberex library is
// free software: you can redistribute it and/or modify it under the terms
// of the GNU Lesser General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// The go-sberex library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
// General Public License <http://www.gnu.org/licenses/> for more details.

package network

import (
	"fmt"
	"math/rand"
	"time"

	"github.com/Sberex/go-sberex/log"
	"github.com/Sberex/go-sberex/swarm/storage"
)

// requesterCount caps how many requesters a found chunk is delivered to
// per request id (see forwarder.Deliver below).
const requesterCount = 3

/*
forwarder implements the CloudStore interface (used by storage.NetStore)
and serves as the cloud store backend orchestrating storage/retrieval/delivery
via the native bzz protocol,
which uses an MSB logarithmic distance-based semi-permanent Kademlia table for
* recursive forwarding-style routing for retrieval
* smart synchronisation
*/

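// For orientation, the interface satisfied by forwarder has the shape
// sketched below. This declaration is illustrative only, inferred from the
// three methods implemented in this file; the canonical CloudStore
// definition lives in the swarm/storage package and may differ in detail.
type cloudStoreSketch interface {
	Store(*storage.Chunk)    // fan a store request out to nearby peers
	Retrieve(*storage.Chunk) // propagate a retrieve request into the network
	Deliver(*storage.Chunk)  // deliver a found chunk back to its requesters
}
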
type forwarder struct {
	hive *Hive
}

// NewForwarder constructs a forwarder backed by the given Kademlia hive.
func NewForwarder(hive *Hive) *forwarder {
	return &forwarder{hive: hive}
}

// generateId produces a pseudo-random uint64 used as a request id.
// Note that it seeds a fresh source from the wall clock on every call,
// so ids are random rather than guaranteed unique.
func generateId() uint64 {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	return uint64(r.Int63())
}

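// An alternative worth noting (illustrative sketch, not part of the original
// code): reusing one package-level source avoids re-seeding on every call,
// which can yield duplicate ids when two calls observe the same nanosecond.
// A shared *rand.Rand would still need external locking for concurrent use.
var idSource = rand.New(rand.NewSource(time.Now().UnixNano()))

func generateIdSketch() uint64 {
	return uint64(idSource.Int63())
}
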
var searchTimeout = 3 * time.Second

// Retrieve propagates a retrieve request for a chunk to peers suggested by
// the Kademlia hive, skipping any peer the request was already received from.
func (self *forwarder) Retrieve(chunk *storage.Chunk) {
	peers := self.hive.getPeers(chunk.Key, 0)
	log.Trace(fmt.Sprintf("forwarder.Retrieve: %v - received %d peers from KΛÐΞMLIΛ...", chunk.Key.Log(), len(peers)))
OUT:
	for _, p := range peers {
		log.Trace(fmt.Sprintf("forwarder.Retrieve: sending retrieveRequest %v to peer [%v]", chunk.Key.Log(), p))
		// skip peers that are themselves requesters of this chunk
		for _, recipients := range chunk.Req.Requesters {
			for _, recipient := range recipients {
				req := recipient.(*retrieveRequestMsgData)
				if req.from.Addr() == p.Addr() {
					continue OUT
				}
			}
		}
		req := &retrieveRequestMsgData{
			Key: chunk.Key,
			Id:  generateId(),
		}
		var err error
		if p.swap != nil {
			err = p.swap.Add(-1)
		}
		if err == nil {
			p.retrieve(req)
			break OUT
		}
		log.Warn(fmt.Sprintf("forwarder.Retrieve: unable to send retrieveRequest for %v to peer [%v]: %v", chunk.Key.Log(), p, err))
	}
}

// Store forwards a store request for a chunk to peers given by the Kademlia
// hive, except for the peer the store request came from (if any).
// Delivery queueing is taken care of by the syncer.
func (self *forwarder) Store(chunk *storage.Chunk) {
	var n int
	msg := &storeRequestMsgData{
		Key:   chunk.Key,
		SData: chunk.SData,
	}
	var source *peer
	if chunk.Source != nil {
		source = chunk.Source.(*peer)
	}
	for _, p := range self.hive.getPeers(chunk.Key, 0) {
		log.Trace(fmt.Sprintf("forwarder.Store: %v %v", p, chunk))

		if p.syncer != nil && (source == nil || p.Addr() != source.Addr()) {
			n++
			Deliver(p, msg, PropagateReq)
		}
	}
	log.Trace(fmt.Sprintf("forwarder.Store: sent to %v peers (chunk = %v)", n, chunk))
}

// Deliver sends a found chunk to its requesters, unless their requests have
// timed out, delivering to at most requesterCount requesters per request id.
func (self *forwarder) Deliver(chunk *storage.Chunk) {
	// iterate over request entries
	for id, requesters := range chunk.Req.Requesters {
		counter := requesterCount
		msg := &storeRequestMsgData{
			Key:   chunk.Key,
			SData: chunk.SData,
		}
		var n int
		var req *retrieveRequestMsgData
		// iterate over requesters with the same id
		// (note: id here is the index within requesters and shadows the
		// request id from the outer loop)
		for id, r := range requesters {
			req = r.(*retrieveRequestMsgData)
			if req.timeout == nil || req.timeout.After(time.Now()) {
				log.Trace(fmt.Sprintf("forwarder.Deliver: %v -> %v", req.Id, req.from))
				msg.Id = uint64(id)
				Deliver(req.from, msg, DeliverReq)
				n++
				counter--
				if counter <= 0 {
					break
				}
			}
		}
		log.Trace(fmt.Sprintf("forwarder.Deliver: submit chunk %v (request id %v) for delivery to %v peers", chunk.Key.Log(), id, n))
	}
}

// Deliver initiates delivery of a chunk to a particular peer via the syncer's
// addRequest. Depending on the syncer mode, priority settings and the sync
// request type, the request either goes through a confirmation roundtrip, is
// queued, or is pushed directly.
func Deliver(p *peer, req interface{}, ty int) {
	p.syncer.addRequest(req, ty)
}

// Push pushes the chunk with the given key over to the peer directly via the
// syncer's doDelivery, at the given priority.
func Push(p *peer, key storage.Key, priority uint) {
	p.syncer.doDelivery(key, priority, p.syncer.quit)
}