github.com/digdeepmining/go-atheios@v1.5.13-0.20180902133602-d5687a2e6f43/swarm/network/hive.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"fmt"
	"math/rand"
	"path/filepath"
	"time"

	"github.com/atheioschain/go-atheios/common"
	"github.com/atheioschain/go-atheios/logger"
	"github.com/atheioschain/go-atheios/logger/glog"
	"github.com/atheioschain/go-atheios/p2p/discover"
	"github.com/atheioschain/go-atheios/p2p/netutil"
	"github.com/atheioschain/go-atheios/swarm/network/kademlia"
	"github.com/atheioschain/go-atheios/swarm/storage"
)

// Hive is the logistic manager of the swarm.
// It uses a generic kademlia node table to find the best peer list
// for any target. This is used by the netstore to search for content
// in the swarm. The bzz protocol peersMsgData exchange is relayed to
// Kademlia for db storage and filtering; connections and disconnections
// are reported and relayed to keep the node table up to date.
type Hive struct {
	listenAddr   func() string
	callInterval uint64
	id           discover.NodeID
	addr         kademlia.Address
	kad          *kademlia.Kademlia
	path         string
	quit         chan bool
	toggle       chan bool
	more         chan bool

	// for testing only
	swapEnabled bool
	syncEnabled bool
	blockRead   bool
	blockWrite  bool
}

const (
	callInterval = 3000000000 // default interval between connection attempts: 3s in nanoseconds
	// bucketSize   = 3
	// maxProx      = 8
	// proxBinSize  = 4
)

// HiveParams holds the hive configuration: the connection call interval,
// the path of the kad db file and the embedded kademlia parameters.
type HiveParams struct {
	CallInterval uint64
	KadDbPath    string
	*kademlia.KadParams
}

// NewHiveParams returns hive parameters with the default call interval,
// persisting the kad db as bzz-peers.json under the given path.
func NewHiveParams(path string) *HiveParams {
	kad := kademlia.NewKadParams()
	// kad.BucketSize = bucketSize
	// kad.MaxProx = maxProx
	// kad.ProxBinSize = proxBinSize

	return &HiveParams{
		CallInterval: callInterval,
		KadDbPath:    filepath.Join(path, "bzz-peers.json"),
		KadParams:    kad,
	}
}

// NewHive constructs a new hive with a kademlia table keyed by addr.
func NewHive(addr common.Hash, params *HiveParams, swapEnabled, syncEnabled bool) *Hive {
	kad := kademlia.New(kademlia.Address(addr), params.KadParams)
	return &Hive{
		callInterval: params.CallInterval,
		kad:          kad,
		addr:         kad.Addr(),
		path:         params.KadDbPath,
		swapEnabled:  swapEnabled,
		syncEnabled:  syncEnabled,
	}
}

// SyncEnabled sets the syncEnabled flag (testing hook).
func (self *Hive) SyncEnabled(on bool) {
	self.syncEnabled = on
}

// SwapEnabled sets the swapEnabled flag (testing hook).
func (self *Hive) SwapEnabled(on bool) {
	self.swapEnabled = on
}

// BlockNetworkRead sets the blockRead flag (testing hook).
func (self *Hive) BlockNetworkRead(on bool) {
	self.blockRead = on
}

// BlockNetworkWrite sets the blockWrite flag (testing hook).
func (self *Hive) BlockNetworkWrite(on bool) {
	self.blockWrite = on
}

// Addr is the public accessor to the hive's base address.
func (self *Hive) Addr() kademlia.Address {
	return self.addr
}

// Start receives network info only at startup.
// listenAddr is a function to retrieve the listening address to advertise to peers.
// connectPeer is a function to connect to a peer based on its NodeID or enode URL.
// These are called on the p2p.Server which runs on the node.
func (self *Hive) Start(id discover.NodeID, listenAddr func() string, connectPeer func(string) error) (err error) {
	self.toggle = make(chan bool)
	self.more = make(chan bool)
	self.quit = make(chan bool)
	self.id = id
	self.listenAddr = listenAddr
	err = self.kad.Load(self.path, nil)
	if err != nil {
		glog.V(logger.Warn).Infof("Warning: error reading kaddb '%s' (skipping): %v", self.path, err)
		err = nil
	}
	// this loop does the bootstrapping and maintains a healthy table
	go self.keepAlive()
	go func() {
		// whenever toggled, ask kademlia about the most preferred peer
		for alive := range self.more {
			if !alive {
				// receiving false closes the loop while allowing parallel routines
				// to attempt to write to more (removePeer when shutting down)
				return
			}
			node, need, proxLimit := self.kad.Suggest()

			if node != nil && len(node.Url) > 0 {
				glog.V(logger.Detail).Infof("call known bee %v", node.Url)
				// the enode or any lower-level connection address will be unnecessary in the future;
				// the discovery table is used to look it up.
				connectPeer(node.Url)
			}
			if need {
				// a random peer is taken from the table
				peers := self.kad.FindClosest(kademlia.RandomAddressAt(self.addr, rand.Intn(self.kad.MaxProx)), 1)
				if len(peers) > 0 {
					// a random address at proximity order proxLimit is sent for lookup
					randAddr := kademlia.RandomAddressAt(self.addr, proxLimit)
					req := &retrieveRequestMsgData{
						Key: storage.Key(randAddr[:]),
					}
					glog.V(logger.Detail).Infof("call any bee near %v (PO%03d) - messenger bee: %v", randAddr, proxLimit, peers[0])
					peers[0].(*peer).retrieve(req)
				} else {
					glog.V(logger.Warn).Infof("no peer")
				}
				glog.V(logger.Detail).Infof("buzz kept alive")
			} else {
				glog.V(logger.Info).Infof("no need for more bees")
			}
			select {
			case self.toggle <- need:
			case <-self.quit:
				return
			}
			glog.V(logger.Debug).Infof("queen's address: %v, population: %d (%d)", self.addr, self.kad.Count(), self.kad.DBCount())
		}
	}()
	return
}
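
// startHiveSketch is a minimal usage sketch, not part of the original file:
// it shows how a Hive is typically constructed and started. The datadir, node
// id and connect callback are placeholder assumptions standing in for what the
// bzz service normally supplies from the p2p.Server.
func startHiveSketch(datadir string, id discover.NodeID, connect func(string) error) (*Hive, error) {
	params := NewHiveParams(datadir)                    // kad db persisted at <datadir>/bzz-peers.json
	hive := NewHive(common.Hash{}, params, false, true) // swap disabled, sync enabled
	listen := func() string { return "" }               // would normally return the advertised listen address
	err := hive.Start(id, listen, connect)
	return hive, err
}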

// keepAlive is a forever loop.
// In its awake state it periodically triggers connection attempts
// by writing to self.more until the Kademlia table is saturated.
// The wake state is toggled by writing to self.toggle;
// it restarts if the table becomes non-full again due to disconnections.
func (self *Hive) keepAlive() {
	alarm := time.NewTicker(time.Duration(self.callInterval)).C
	for {
		select {
		case <-alarm:
			if self.kad.DBCount() > 0 {
				select {
				case self.more <- true:
					glog.V(logger.Debug).Infof("buzz wakeup")
				default:
				}
			}
		case need := <-self.toggle:
			if alarm == nil && need {
				alarm = time.NewTicker(time.Duration(self.callInterval)).C
			}
			if alarm != nil && !need {
				alarm = nil
			}
		case <-self.quit:
			return
		}
	}
}

// Stop terminates the update loops and persists the kad table.
func (self *Hive) Stop() error {
	// closing the quit channel stops the update loops
	close(self.quit)
	return self.kad.Save(self.path, saveSync)
}

// addPeer is called at the end of a successful protocol handshake
func (self *Hive) addPeer(p *peer) error {
	defer func() {
		select {
		case self.more <- true:
		default:
		}
	}()
	glog.V(logger.Detail).Infof("hi new bee %v", p)
	err := self.kad.On(p, loadSync)
	if err != nil {
		return err
	}
	// self lookup (can be encoded as nil/zero key since the peer's address is known) + no id
	// the most common way of saying hi in bzz is initiation of gossip:
	// let me know about anyone new from my hood, here is the storage radius.
	// the 6 byte self lookup is not recorded as a request or forwarded, we just reply with peers
	p.retrieve(&retrieveRequestMsgData{})
	glog.V(logger.Detail).Infof("'whatsup wheresdaparty' sent to %v", p)

	return nil
}

// removePeer is called after a peer disconnected
func (self *Hive) removePeer(p *peer) {
	glog.V(logger.Debug).Infof("bee %v removed", p)
	self.kad.Off(p, saveSync)
	select {
	case self.more <- true:
	default:
	}
	if self.kad.Count() == 0 {
		glog.V(logger.Debug).Infof("empty, all bees gone")
	}
}

// getPeers retrieves a list of live peers that are closer to the target than us
func (self *Hive) getPeers(target storage.Key, max int) (peers []*peer) {
	var addr kademlia.Address
	copy(addr[:], target[:])
	for _, node := range self.kad.FindClosest(addr, max) {
		peers = append(peers, node.(*peer))
	}
	return
}
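
// forwardToNearest is an illustrative sketch, not part of the original file:
// it shows the typical use of getPeers, relaying a retrieve request to the
// live peers closer to the key than this node. The fan-out of 3 is an
// arbitrary example value.
func (self *Hive) forwardToNearest(key storage.Key, req *retrieveRequestMsgData) {
	for _, p := range self.getPeers(key, 3) {
		p.retrieve(req)
	}
}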

// DropAll disconnects all peers
func (self *Hive) DropAll() {
	glog.V(logger.Info).Infof("dropping all bees")
	for _, node := range self.kad.FindClosest(kademlia.Address{}, 0) {
		node.Drop()
	}
}

// newNodeRecord is a constructor for kademlia.NodeRecord based on the peer address alone
// TODO: should go away and only addr passed to kademlia
func newNodeRecord(addr *peerAddr) *kademlia.NodeRecord {
	now := time.Now()
	return &kademlia.NodeRecord{
		Addr:  addr.Addr,
		Url:   addr.String(),
		Seen:  now,
		After: now,
	}
}

// HandlePeersMsg is called by the protocol when receiving a peerset (for a target address).
// The peersMsgData is converted to a slice of NodeRecords for Kademlia;
// this is to store all that's needed.
func (self *Hive) HandlePeersMsg(req *peersMsgData, from *peer) {
	var nrs []*kademlia.NodeRecord
	for _, p := range req.Peers {
		if err := netutil.CheckRelayIP(from.remoteAddr.IP, p.IP); err != nil {
			glog.V(logger.Detail).Infof("invalid peer IP %v from %v: %v", p.IP, from.remoteAddr.IP, err)
			continue
		}
		nrs = append(nrs, newNodeRecord(p))
	}
	self.kad.Add(nrs)
}

// peer wraps the protocol instance to represent a connected peer
// it implements the kademlia.Node interface
type peer struct {
	*bzz // protocol instance running on the peer connection
}

// the protocol instance implements the kademlia.Node interface (embedded peer)
func (self *peer) Addr() kademlia.Address {
	return self.remoteAddr.Addr
}

func (self *peer) Url() string {
	return self.remoteAddr.String()
}

// TODO: take into account traffic
func (self *peer) LastActive() time.Time {
	return self.lastActive
}

// loadSync reads the serialised form of the sync state persisted as the 'Meta' attribute
// and sets the decoded syncState on the online node
func loadSync(record *kademlia.NodeRecord, node kademlia.Node) error {
	p, ok := node.(*peer)
	if !ok {
		return fmt.Errorf("invalid type")
	}
	if record.Meta == nil {
		glog.V(logger.Debug).Infof("no sync state for node record %v, setting default", record)
		p.syncState = &syncState{DbSyncState: &storage.DbSyncState{}}
		return nil
	}
	state, err := decodeSync(record.Meta)
	if err != nil {
		return fmt.Errorf("error decoding kaddb record meta info into a sync state: %v", err)
	}
	glog.V(logger.Detail).Infof("sync state for node record %v read from Meta: %s", record, string(*(record.Meta)))
	p.syncState = state
	return nil
}

// saveSync is the callback used when saving a node record:
// it serialises the peer's sync state into the record's Meta attribute
func saveSync(record *kademlia.NodeRecord, node kademlia.Node) {
	if p, ok := node.(*peer); ok {
		meta, err := encodeSync(p.syncState)
		if err != nil {
			glog.V(logger.Warn).Infof("error saving sync state for %v: %v", node, err)
			return
		}
		glog.V(logger.Detail).Infof("saved sync state for %v: %s", node, string(*meta))
		record.Meta = meta
	}
}
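
// syncMetaRoundTrip is a small sketch, not part of the original file, showing
// the Meta round trip implemented by saveSync and loadSync: the sync state is
// serialised into the node record on save and decoded from it again when the
// node comes back online.
func syncMetaRoundTrip(record *kademlia.NodeRecord, state *syncState) (*syncState, error) {
	meta, err := encodeSync(state)
	if err != nil {
		return nil, err
	}
	record.Meta = meta             // what saveSync stores on the record
	return decodeSync(record.Meta) // what loadSync reads back for the online node
}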

// peers is the immediate response to a retrieve request:
// it sends relevant peer data given by the kademlia hive to the requester
// TODO: remember peers sent for the duration of the session, only send new peers
func (self *Hive) peers(req *retrieveRequestMsgData) {
	if req != nil && req.MaxPeers >= 0 {
		var addrs []*peerAddr
		if req.timeout == nil || time.Now().Before(*(req.timeout)) {
			key := req.Key
			// self lookup from remote peer
			if storage.IsZeroKey(key) {
				addr := req.from.Addr()
				key = storage.Key(addr[:])
				req.Key = nil
			}
			// get peer addresses from hive
			for _, peer := range self.getPeers(key, int(req.MaxPeers)) {
				addrs = append(addrs, peer.remoteAddr)
			}
			glog.V(logger.Debug).Infof("Hive sending %d peer addresses to %v. req.Id: %v, req.Key: %v", len(addrs), req.from, req.Id, req.Key.Log())

			peersData := &peersMsgData{
				Peers: addrs,
				Key:   req.Key,
				Id:    req.Id,
			}
			peersData.setTimeout(req.timeout)
			req.from.peers(peersData)
		}
	}
}

// String returns a string representation of the hive's kademlia table
func (self *Hive) String() string {
	return self.kad.String()
}