github.com/sberex/go-sberex@v1.8.2-0.20181113200658-ed96ac38f7d7/swarm/network/hive.go (about)

     1  // This file is part of the go-sberex library. The go-sberex library is 
     2  // free software: you can redistribute it and/or modify it under the terms 
     3  // of the GNU Lesser General Public License as published by the Free 
     4  // Software Foundation, either version 3 of the License, or (at your option)
     5  // any later version.
     6  //
     7  // The go-sberex library is distributed in the hope that it will be useful, 
     8  // but WITHOUT ANY WARRANTY; without even the implied warranty of
     9  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser 
    10  // General Public License <http://www.gnu.org/licenses/> for more details.
    11  
    12  package network
    13  
    14  import (
    15  	"fmt"
    16  	"math/rand"
    17  	"path/filepath"
    18  	"time"
    19  
    20  	"github.com/Sberex/go-sberex/common"
    21  	"github.com/Sberex/go-sberex/log"
    22  	"github.com/Sberex/go-sberex/metrics"
    23  	"github.com/Sberex/go-sberex/p2p/discover"
    24  	"github.com/Sberex/go-sberex/p2p/netutil"
    25  	"github.com/Sberex/go-sberex/swarm/network/kademlia"
    26  	"github.com/Sberex/go-sberex/swarm/storage"
    27  )
    28  
// Hive is the logistic manager of the swarm.
// It uses a generic kademlia nodetable to find the best peer list
// for any target.
// This is used by the netstore to search for content in the swarm.
// The bzz protocol peersMsgData exchange is relayed to Kademlia
// for db storage and filtering.
// Connections and disconnections are reported and relayed
// to keep the nodetable up to date.
    37  
// Metrics instrumenting the hive's peer table.
var (
	peersNumGauge     = metrics.NewRegisteredGauge("network.peers.num", nil)          // current number of live kademlia entries (updated in keepAlive)
	addPeerCounter    = metrics.NewRegisteredCounter("network.addpeer.count", nil)    // total peers added after successful handshakes
	removePeerCounter = metrics.NewRegisteredCounter("network.removepeer.count", nil) // total peers removed on disconnect
)
    43  
// Hive is the logistic manager of the swarm: it wraps a kademlia peer
// table and runs the connection maintenance loops started in Start.
type Hive struct {
	listenAddr   func() string      // returns the listening address to advertise to peers (set in Start)
	callInterval uint64             // keepAlive ticker period, in nanoseconds (used via time.Duration)
	id           discover.NodeID    // our own node id (set in Start)
	addr         kademlia.Address   // hive base address, derived from the kademlia table
	kad          *kademlia.Kademlia // the kademlia node table
	path         string             // file path the kademlia table is loaded from / saved to
	quit         chan bool          // closed in Stop to terminate keepAlive and the suggestion loop
	toggle       chan bool          // suggestion loop -> keepAlive: whether more peers are still needed
	more         chan bool          // wakes the suggestion loop to ask kademlia for a peer

	// for testing only
	swapEnabled bool
	syncEnabled bool
	blockRead   bool
	blockWrite  bool
}
    61  
const (
	// callInterval is the default period between connection attempts,
	// in nanoseconds (interpreted via time.Duration, i.e. 3 seconds).
	callInterval = 3000000000
	// bucketSize   = 3
	// maxProx      = 8
	// proxBinSize  = 4
)
    68  
// HiveParams holds the hive configuration: the connection-attempt
// interval, the peer db path and the embedded kademlia parameters.
type HiveParams struct {
	CallInterval uint64 // period between connection attempts, in nanoseconds
	KadDbPath    string // path of the persisted peer db (set by Init)
	*kademlia.KadParams
}
    74  
    75  //create default params
    76  func NewDefaultHiveParams() *HiveParams {
    77  	kad := kademlia.NewDefaultKadParams()
    78  	// kad.BucketSize = bucketSize
    79  	// kad.MaxProx = maxProx
    80  	// kad.ProxBinSize = proxBinSize
    81  
    82  	return &HiveParams{
    83  		CallInterval: callInterval,
    84  		KadParams:    kad,
    85  	}
    86  }
    87  
    88  //this can only finally be set after all config options (file, cmd line, env vars)
    89  //have been evaluated
    90  func (self *HiveParams) Init(path string) {
    91  	self.KadDbPath = filepath.Join(path, "bzz-peers.json")
    92  }
    93  
    94  func NewHive(addr common.Hash, params *HiveParams, swapEnabled, syncEnabled bool) *Hive {
    95  	kad := kademlia.New(kademlia.Address(addr), params.KadParams)
    96  	return &Hive{
    97  		callInterval: params.CallInterval,
    98  		kad:          kad,
    99  		addr:         kad.Addr(),
   100  		path:         params.KadDbPath,
   101  		swapEnabled:  swapEnabled,
   102  		syncEnabled:  syncEnabled,
   103  	}
   104  }
   105  
// SyncEnabled sets the syncEnabled flag.
func (self *Hive) SyncEnabled(on bool) {
	self.syncEnabled = on
}
   109  
// SwapEnabled sets the swapEnabled flag.
func (self *Hive) SwapEnabled(on bool) {
	self.swapEnabled = on
}
   113  
// BlockNetworkRead sets the blockRead flag (testing hook per the struct doc).
func (self *Hive) BlockNetworkRead(on bool) {
	self.blockRead = on
}
   117  
// BlockNetworkWrite sets the blockWrite flag (testing hook per the struct doc).
func (self *Hive) BlockNetworkWrite(on bool) {
	self.blockWrite = on
}
   121  
// Addr is the public accessor to the hive base address.
func (self *Hive) Addr() kademlia.Address {
	return self.addr
}
   126  
// Start receives network info only at startup.
// listenAddr is a function to retrieve the listening address to advertise to peers.
// connectPeer is a function to connect to a peer based on its NodeID or enode URL.
// These are called on the p2p.Server which runs on the node.
// Start launches keepAlive and the peer-suggestion loop; both run until
// Stop closes the quit channel.
func (self *Hive) Start(id discover.NodeID, listenAddr func() string, connectPeer func(string) error) (err error) {
	self.toggle = make(chan bool)
	self.more = make(chan bool)
	self.quit = make(chan bool)
	self.id = id
	self.listenAddr = listenAddr
	// load the persisted kademlia table; a read error is only a warning —
	// the hive then starts from whatever state Load left behind
	err = self.kad.Load(self.path, nil)
	if err != nil {
		log.Warn(fmt.Sprintf("Warning: error reading kaddb '%s' (skipping): %v", self.path, err))
		err = nil
	}
	// this loop is doing bootstrapping and maintains a healthy table
	go self.keepAlive()
	go func() {
		// whenever toggled ask kademlia about most preferred peer
		for alive := range self.more {
			if !alive {
				// receiving false closes the loop while allowing parallel routines
				// to attempt to write to more (remove Peer when shutting down)
				return
			}
			node, need, proxLimit := self.kad.Suggest()

			if node != nil && len(node.Url) > 0 {
				log.Trace(fmt.Sprintf("call known bee %v", node.Url))
				// enode or any lower level connection address is unnecessary in future
				// discovery table is used to look it up.
				// NOTE(review): the connectPeer error is discarded — presumably a
				// failed dial is simply retried on a later wakeup; confirm.
				connectPeer(node.Url)
			}
			if need {
				// a random peer is taken from the table
				peers := self.kad.FindClosest(kademlia.RandomAddressAt(self.addr, rand.Intn(self.kad.MaxProx)), 1)
				if len(peers) > 0 {
					// a random address at prox bin 0 is sent for lookup
					randAddr := kademlia.RandomAddressAt(self.addr, proxLimit)
					req := &retrieveRequestMsgData{
						Key: storage.Key(randAddr[:]),
					}
					log.Trace(fmt.Sprintf("call any bee near %v (PO%03d) - messenger bee: %v", randAddr, proxLimit, peers[0]))
					peers[0].(*peer).retrieve(req)
				} else {
					log.Warn(fmt.Sprintf("no peer"))
				}
				log.Trace(fmt.Sprintf("buzz kept alive"))
			} else {
				log.Info(fmt.Sprintf("no need for more bees"))
			}
			// report back to keepAlive whether more peers are needed;
			// also watch quit so this send cannot block shutdown
			select {
			case self.toggle <- need:
			case <-self.quit:
				return
			}
			log.Debug(fmt.Sprintf("queen's address: %v, population: %d (%d)", self.addr, self.kad.Count(), self.kad.DBCount()))
		}
	}()
	return
}
   188  
   189  // keepAlive is a forever loop
   190  // in its awake state it periodically triggers connection attempts
   191  // by writing to self.more until Kademlia Table is saturated
   192  // wake state is toggled by writing to self.toggle
   193  // it restarts if the table becomes non-full again due to disconnections
   194  func (self *Hive) keepAlive() {
   195  	alarm := time.NewTicker(time.Duration(self.callInterval)).C
   196  	for {
   197  		peersNumGauge.Update(int64(self.kad.Count()))
   198  		select {
   199  		case <-alarm:
   200  			if self.kad.DBCount() > 0 {
   201  				select {
   202  				case self.more <- true:
   203  					log.Debug(fmt.Sprintf("buzz wakeup"))
   204  				default:
   205  				}
   206  			}
   207  		case need := <-self.toggle:
   208  			if alarm == nil && need {
   209  				alarm = time.NewTicker(time.Duration(self.callInterval)).C
   210  			}
   211  			if alarm != nil && !need {
   212  				alarm = nil
   213  
   214  			}
   215  		case <-self.quit:
   216  			return
   217  		}
   218  	}
   219  }
   220  
// Stop terminates the maintenance loops started in Start and persists
// the kademlia table (including peer sync states via saveSync).
func (self *Hive) Stop() error {
	// closing the quit channel stops keepAlive and the suggestion loop
	// (the original comment said "toggle channel", but quit is what is closed)
	close(self.quit)
	return self.kad.Save(self.path, saveSync)
}
   226  
   227  // called at the end of a successful protocol handshake
   228  func (self *Hive) addPeer(p *peer) error {
   229  	addPeerCounter.Inc(1)
   230  	defer func() {
   231  		select {
   232  		case self.more <- true:
   233  		default:
   234  		}
   235  	}()
   236  	log.Trace(fmt.Sprintf("hi new bee %v", p))
   237  	err := self.kad.On(p, loadSync)
   238  	if err != nil {
   239  		return err
   240  	}
   241  	// self lookup (can be encoded as nil/zero key since peers addr known) + no id ()
   242  	// the most common way of saying hi in bzz is initiation of gossip
   243  	// let me know about anyone new from my hood , here is the storageradius
   244  	// to send the 6 byte self lookup
   245  	// we do not record as request or forward it, just reply with peers
   246  	p.retrieve(&retrieveRequestMsgData{})
   247  	log.Trace(fmt.Sprintf("'whatsup wheresdaparty' sent to %v", p))
   248  
   249  	return nil
   250  }
   251  
// removePeer is called after a peer disconnected: it takes the peer
// offline in the kademlia table and nudges the suggestion loop so a
// replacement connection can be attempted.
func (self *Hive) removePeer(p *peer) {
	removePeerCounter.Inc(1)
	log.Debug(fmt.Sprintf("bee %v removed", p))
	// Off persists the peer's sync state via the saveSync callback
	self.kad.Off(p, saveSync)
	// non-blocking wakeup of the suggestion loop
	select {
	case self.more <- true:
	default:
	}
	if self.kad.Count() == 0 {
		log.Debug(fmt.Sprintf("empty, all bees gone"))
	}
}
   265  
   266  // Retrieve a list of live peers that are closer to target than us
   267  func (self *Hive) getPeers(target storage.Key, max int) (peers []*peer) {
   268  	var addr kademlia.Address
   269  	copy(addr[:], target[:])
   270  	for _, node := range self.kad.FindClosest(addr, max) {
   271  		peers = append(peers, node.(*peer))
   272  	}
   273  	return
   274  }
   275  
   276  // disconnects all the peers
   277  func (self *Hive) DropAll() {
   278  	log.Info(fmt.Sprintf("dropping all bees"))
   279  	for _, node := range self.kad.FindClosest(kademlia.Address{}, 0) {
   280  		node.Drop()
   281  	}
   282  }
   283  
   284  // contructor for kademlia.NodeRecord based on peer address alone
   285  // TODO: should go away and only addr passed to kademlia
   286  func newNodeRecord(addr *peerAddr) *kademlia.NodeRecord {
   287  	now := time.Now()
   288  	return &kademlia.NodeRecord{
   289  		Addr:  addr.Addr,
   290  		Url:   addr.String(),
   291  		Seen:  now,
   292  		After: now,
   293  	}
   294  }
   295  
   296  // called by the protocol when receiving peerset (for target address)
   297  // peersMsgData is converted to a slice of NodeRecords for Kademlia
   298  // this is to store all thats needed
   299  func (self *Hive) HandlePeersMsg(req *peersMsgData, from *peer) {
   300  	var nrs []*kademlia.NodeRecord
   301  	for _, p := range req.Peers {
   302  		if err := netutil.CheckRelayIP(from.remoteAddr.IP, p.IP); err != nil {
   303  			log.Trace(fmt.Sprintf("invalid peer IP %v from %v: %v", from.remoteAddr.IP, p.IP, err))
   304  			continue
   305  		}
   306  		nrs = append(nrs, newNodeRecord(p))
   307  	}
   308  	self.kad.Add(nrs)
   309  }
   310  
// peer wraps the protocol instance to represent a connected peer;
// it implements the kademlia.Node interface.
type peer struct {
	*bzz // protocol instance running on peer connection
}
   316  
// Addr returns the peer's kademlia address; part of the kademlia.Node
// interface implemented via the embedded protocol instance.
func (self *peer) Addr() kademlia.Address {
	return self.remoteAddr.Addr
}
   321  
// Url returns the peer's connection URL (string form of its remote address);
// the name is fixed by the kademlia.Node interface.
func (self *peer) Url() string {
	return self.remoteAddr.String()
}
   325  
// LastActive returns the time of the peer's last recorded activity.
// TODO take into account traffic
func (self *peer) LastActive() time.Time {
	return self.lastActive
}
   330  
   331  // reads the serialised form of sync state persisted as the 'Meta' attribute
   332  // and sets the decoded syncState on the online node
   333  func loadSync(record *kademlia.NodeRecord, node kademlia.Node) error {
   334  	p, ok := node.(*peer)
   335  	if !ok {
   336  		return fmt.Errorf("invalid type")
   337  	}
   338  	if record.Meta == nil {
   339  		log.Debug(fmt.Sprintf("no sync state for node record %v setting default", record))
   340  		p.syncState = &syncState{DbSyncState: &storage.DbSyncState{}}
   341  		return nil
   342  	}
   343  	state, err := decodeSync(record.Meta)
   344  	if err != nil {
   345  		return fmt.Errorf("error decoding kddb record meta info into a sync state: %v", err)
   346  	}
   347  	log.Trace(fmt.Sprintf("sync state for node record %v read from Meta: %s", record, string(*(record.Meta))))
   348  	p.syncState = state
   349  	return err
   350  }
   351  
   352  // callback when saving a sync state
   353  func saveSync(record *kademlia.NodeRecord, node kademlia.Node) {
   354  	if p, ok := node.(*peer); ok {
   355  		meta, err := encodeSync(p.syncState)
   356  		if err != nil {
   357  			log.Warn(fmt.Sprintf("error saving sync state for %v: %v", node, err))
   358  			return
   359  		}
   360  		log.Trace(fmt.Sprintf("saved sync state for %v: %s", node, string(*meta)))
   361  		record.Meta = meta
   362  	}
   363  }
   364  
   365  // the immediate response to a retrieve request,
   366  // sends relevant peer data given by the kademlia hive to the requester
   367  // TODO: remember peers sent for duration of the session, only new peers sent
   368  func (self *Hive) peers(req *retrieveRequestMsgData) {
   369  	if req != nil {
   370  		var addrs []*peerAddr
   371  		if req.timeout == nil || time.Now().Before(*(req.timeout)) {
   372  			key := req.Key
   373  			// self lookup from remote peer
   374  			if storage.IsZeroKey(key) {
   375  				addr := req.from.Addr()
   376  				key = storage.Key(addr[:])
   377  				req.Key = nil
   378  			}
   379  			// get peer addresses from hive
   380  			for _, peer := range self.getPeers(key, int(req.MaxPeers)) {
   381  				addrs = append(addrs, peer.remoteAddr)
   382  			}
   383  			log.Debug(fmt.Sprintf("Hive sending %d peer addresses to %v. req.Id: %v, req.Key: %v", len(addrs), req.from, req.Id, req.Key.Log()))
   384  
   385  			peersData := &peersMsgData{
   386  				Peers: addrs,
   387  				Key:   req.Key,
   388  				Id:    req.Id,
   389  			}
   390  			peersData.setTimeout(req.timeout)
   391  			req.from.peers(peersData)
   392  		}
   393  	}
   394  }
   395  
// String renders the hive as its kademlia table's string form.
func (self *Hive) String() string {
	return self.kad.String()
}