github.com/cuiweixie/go-ethereum@v1.8.2-0.20180303084001-66cd41af1e38/swarm/network/hive.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"fmt"
	"math/rand"
	"path/filepath"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/netutil"
	"github.com/ethereum/go-ethereum/swarm/network/kademlia"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

// Hive is the logistic manager of the swarm.
// It uses a generic kademlia nodetable to find the best peer list
// for any target.
// This is used by the netstore to search for content in the swarm.
// The bzz protocol peersMsgData exchange is relayed to Kademlia
// for db storage and filtering.
// Connections and disconnections are reported and relayed
// to keep the nodetable up to date.

var (
	peersNumGauge     = metrics.NewRegisteredGauge("network.peers.num", nil)
	addPeerCounter    = metrics.NewRegisteredCounter("network.addpeer.count", nil)
	removePeerCounter = metrics.NewRegisteredCounter("network.removepeer.count", nil)
)

type Hive struct {
	listenAddr   func() string
	callInterval uint64
	id           discover.NodeID
	addr         kademlia.Address
	kad          *kademlia.Kademlia
	path         string
	quit         chan bool // closed on Stop; shuts down the connection and keepAlive loops
	toggle       chan bool // connection loop reports whether more peers are still needed
	more         chan bool // triggers the connection loop to ask kademlia for a peer suggestion

	// for testing only
	swapEnabled bool
	syncEnabled bool
	blockRead   bool
	blockWrite  bool
}

const (
	callInterval = 3000000000 // 3 seconds, in nanoseconds (used via time.Duration)
	// bucketSize   = 3
	// maxProx      = 8
	// proxBinSize  = 4
)

type HiveParams struct {
	CallInterval uint64
	KadDbPath    string
	*kademlia.KadParams
}

// NewDefaultHiveParams creates default params
func NewDefaultHiveParams() *HiveParams {
	kad := kademlia.NewDefaultKadParams()
	// kad.BucketSize = bucketSize
	// kad.MaxProx = maxProx
	// kad.ProxBinSize = proxBinSize

	return &HiveParams{
		CallInterval: callInterval,
		KadParams:    kad,
	}
}

// Init sets the kademlia db path; it can only be set once all config options
// (file, cmd line, env vars) have been evaluated.
func (self *HiveParams) Init(path string) {
	self.KadDbPath = filepath.Join(path, "bzz-peers.json")
}

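// NewHive constructs a hive around a fresh kademlia table for the given base
// address. A minimal wiring sketch follows; baseAddr, datadirPath, nodeID,
// listenAddr and connectPeer are placeholders assumed to be supplied by the
// node configuration and its p2p.Server:
//
//	params := NewDefaultHiveParams()
//	params.Init(datadirPath)
//	hive := NewHive(baseAddr, params, true /* swapEnabled */, true /* syncEnabled */)
//	if err := hive.Start(nodeID, listenAddr, connectPeer); err != nil {
//		// handle startup error
//	}
//	defer hive.Stop()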
func NewHive(addr common.Hash, params *HiveParams, swapEnabled, syncEnabled bool) *Hive {
	kad := kademlia.New(kademlia.Address(addr), params.KadParams)
	return &Hive{
		callInterval: params.CallInterval,
		kad:          kad,
		addr:         kad.Addr(),
		path:         params.KadDbPath,
		swapEnabled:  swapEnabled,
		syncEnabled:  syncEnabled,
	}
}

func (self *Hive) SyncEnabled(on bool) {
	self.syncEnabled = on
}

func (self *Hive) SwapEnabled(on bool) {
	self.swapEnabled = on
}

func (self *Hive) BlockNetworkRead(on bool) {
	self.blockRead = on
}

func (self *Hive) BlockNetworkWrite(on bool) {
	self.blockWrite = on
}

// public accessor to the hive base address
func (self *Hive) Addr() kademlia.Address {
	return self.addr
}

// Start receives network info only at startup.
// listenAddr is a function to retrieve the listening address to advertise to peers.
// connectPeer is a function to connect to a peer based on its NodeID or enode URL.
// These are called on the p2p.Server which runs on the node.
func (self *Hive) Start(id discover.NodeID, listenAddr func() string, connectPeer func(string) error) (err error) {
	self.toggle = make(chan bool)
	self.more = make(chan bool)
	self.quit = make(chan bool)
	self.id = id
	self.listenAddr = listenAddr
	err = self.kad.Load(self.path, nil)
	if err != nil {
		log.Warn(fmt.Sprintf("Warning: error reading kaddb '%s' (skipping): %v", self.path, err))
		err = nil
	}
	// this loop does the bootstrapping and maintains a healthy table
	go self.keepAlive()
	go func() {
		// whenever toggled, ask kademlia for the most preferred peer
		for alive := range self.more {
			if !alive {
				// receiving false closes the loop while allowing parallel routines
				// to attempt to write to more (removePeer when shutting down)
				return
			}
			node, need, proxLimit := self.kad.Suggest()

			if node != nil && len(node.Url) > 0 {
				log.Trace(fmt.Sprintf("call known bee %v", node.Url))
				// in the future the enode or any lower level connection address will be
				// unnecessary; the discovery table will be used to look it up.
				connectPeer(node.Url)
			}
			if need {
				// a random peer is taken from the table
				peers := self.kad.FindClosest(kademlia.RandomAddressAt(self.addr, rand.Intn(self.kad.MaxProx)), 1)
				if len(peers) > 0 {
					// a random address at the prox limit bin is sent for lookup
					randAddr := kademlia.RandomAddressAt(self.addr, proxLimit)
					req := &retrieveRequestMsgData{
						Key: storage.Key(randAddr[:]),
					}
					log.Trace(fmt.Sprintf("call any bee near %v (PO%03d) - messenger bee: %v", randAddr, proxLimit, peers[0]))
					peers[0].(*peer).retrieve(req)
				} else {
					log.Warn(fmt.Sprintf("no peer"))
				}
				log.Trace(fmt.Sprintf("buzz kept alive"))
			} else {
				log.Info(fmt.Sprintf("no need for more bees"))
			}
			select {
			case self.toggle <- need:
			case <-self.quit:
				return
			}
			log.Debug(fmt.Sprintf("queen's address: %v, population: %d (%d)", self.addr, self.kad.Count(), self.kad.DBCount()))
		}
	}()
	return
}
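
// The connection loop above and keepAlive below cooperate over the three
// channels in the Hive struct (a descriptive sketch of the existing flow,
// not new behaviour):
//
//	keepAlive   --true--> more    ask kademlia for another peer suggestion
//	conn loop   --need--> toggle  report whether more peers are still needed
//	close(quit)                   shuts both loops down
//
// addPeer and removePeer also nudge more (non-blocking) after table changes.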

// keepAlive is a forever loop.
// In its awake state it periodically triggers connection attempts
// by writing to self.more until the kademlia table is saturated.
// The wake state is toggled by writing to self.toggle.
// It restarts if the table becomes non-full again due to disconnections.
func (self *Hive) keepAlive() {
	alarm := time.NewTicker(time.Duration(self.callInterval)).C
	for {
		peersNumGauge.Update(int64(self.kad.Count()))
		select {
		case <-alarm:
			if self.kad.DBCount() > 0 {
				select {
				case self.more <- true:
					log.Debug(fmt.Sprintf("buzz wakeup"))
				default:
				}
			}
		case need := <-self.toggle:
			if alarm == nil && need {
				alarm = time.NewTicker(time.Duration(self.callInterval)).C
			}
			if alarm != nil && !need {
				// a nil channel is never ready in a select, so this suspends
				// the periodic wakeups until more peers are needed again
				alarm = nil
			}
		case <-self.quit:
			return
		}
	}
}

func (self *Hive) Stop() error {
	// closing the quit channel stops the connection and keepAlive loops,
	// then the kademlia table is persisted (with per-peer sync state via saveSync)
	close(self.quit)
	return self.kad.Save(self.path, saveSync)
}

// called at the end of a successful protocol handshake
func (self *Hive) addPeer(p *peer) error {
	addPeerCounter.Inc(1)
	defer func() {
		select {
		case self.more <- true:
		default:
		}
	}()
	log.Trace(fmt.Sprintf("hi new bee %v", p))
	err := self.kad.On(p, loadSync)
	if err != nil {
		return err
	}
	// a self lookup is encoded as a nil/zero key (the peer's address is known) with no id.
	// the most common way of saying hi in bzz is the initiation of gossip:
	// "let me know about anyone new from my hood, here is my storage radius".
	// the empty retrieveRequestMsgData sends this 6 byte self lookup;
	// it is not recorded as a request or forwarded, the peer just replies with peers
	p.retrieve(&retrieveRequestMsgData{})
	log.Trace(fmt.Sprintf("'whatsup wheresdaparty' sent to %v", p))

	return nil
}

// called after peer disconnected
func (self *Hive) removePeer(p *peer) {
	removePeerCounter.Inc(1)
	log.Debug(fmt.Sprintf("bee %v removed", p))
	self.kad.Off(p, saveSync)
	select {
	case self.more <- true:
	default:
	}
	if self.kad.Count() == 0 {
		log.Debug(fmt.Sprintf("empty, all bees gone"))
	}
}

// Retrieve a list of live peers that are closer to target than us
func (self *Hive) getPeers(target storage.Key, max int) (peers []*peer) {
	var addr kademlia.Address
	copy(addr[:], target[:])
	for _, node := range self.kad.FindClosest(addr, max) {
		peers = append(peers, node.(*peer))
	}
	return
}

// disconnects all the peers
func (self *Hive) DropAll() {
	log.Info(fmt.Sprintf("dropping all bees"))
	for _, node := range self.kad.FindClosest(kademlia.Address{}, 0) {
		node.Drop()
	}
}

// constructor for kademlia.NodeRecord based on peer address alone
// TODO: should go away and only addr passed to kademlia
func newNodeRecord(addr *peerAddr) *kademlia.NodeRecord {
	now := time.Now()
	return &kademlia.NodeRecord{
		Addr:  addr.Addr,
		Url:   addr.String(),
		Seen:  now,
		After: now,
	}
}

// HandlePeersMsg is called by the protocol when receiving a peerset (for a target address)
// peersMsgData is converted to a slice of NodeRecords for Kademlia
// this is to store all that's needed
func (self *Hive) HandlePeersMsg(req *peersMsgData, from *peer) {
	var nrs []*kademlia.NodeRecord
	for _, p := range req.Peers {
		if err := netutil.CheckRelayIP(from.remoteAddr.IP, p.IP); err != nil {
			log.Trace(fmt.Sprintf("invalid peer IP %v from %v: %v", p.IP, from.remoteAddr.IP, err))
			continue
		}
		nrs = append(nrs, newNodeRecord(p))
	}
	self.kad.Add(nrs)
}
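
// A note on the gossip round trip implemented in this file (a descriptive
// sketch of the existing flow, not new behaviour): a node sends a
// retrieveRequestMsgData (an empty self lookup on handshake, see addPeer);
// the remote hive replies via peers() below with a peersMsgData listing its
// closest known addresses; the receiving hive then feeds those records into
// its kademlia table here in HandlePeersMsg.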

// peer wraps the protocol instance to represent a connected peer
// it implements kademlia.Node interface
type peer struct {
	*bzz // protocol instance running on peer connection
}

// protocol instance implements kademlia.Node interface (embedded peer)
func (self *peer) Addr() kademlia.Address {
	return self.remoteAddr.Addr
}

func (self *peer) Url() string {
	return self.remoteAddr.String()
}

// TODO take into account traffic
func (self *peer) LastActive() time.Time {
	return self.lastActive
}

// loadSync reads the serialised form of the sync state persisted as the 'Meta' attribute
// and sets the decoded syncState on the online node
func loadSync(record *kademlia.NodeRecord, node kademlia.Node) error {
	p, ok := node.(*peer)
	if !ok {
		return fmt.Errorf("invalid type")
	}
	if record.Meta == nil {
		log.Debug(fmt.Sprintf("no sync state for node record %v, setting default", record))
		p.syncState = &syncState{DbSyncState: &storage.DbSyncState{}}
		return nil
	}
	state, err := decodeSync(record.Meta)
	if err != nil {
		return fmt.Errorf("error decoding kaddb record meta info into a sync state: %v", err)
	}
	log.Trace(fmt.Sprintf("sync state for node record %v read from Meta: %s", record, string(*(record.Meta))))
	p.syncState = state
	return nil
}

// callback when saving a sync state
func saveSync(record *kademlia.NodeRecord, node kademlia.Node) {
	if p, ok := node.(*peer); ok {
		meta, err := encodeSync(p.syncState)
		if err != nil {
			log.Warn(fmt.Sprintf("error saving sync state for %v: %v", node, err))
			return
		}
		log.Trace(fmt.Sprintf("saved sync state for %v: %s", node, string(*meta)))
		record.Meta = meta
	}
}
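
// loadSync and saveSync are the callbacks handed to kad.On, kad.Off and
// kad.Save above: together they round-trip a peer's syncState through the
// NodeRecord Meta field so that synchronisation can resume across sessions
// (a descriptive note on the existing flow, not new behaviour).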

// peers is the immediate response to a retrieve request:
// it sends the relevant peer data given by the kademlia hive to the requester
// TODO: remember peers sent for the duration of the session, only send new peers
func (self *Hive) peers(req *retrieveRequestMsgData) {
	if req != nil {
		var addrs []*peerAddr
		if req.timeout == nil || time.Now().Before(*(req.timeout)) {
			key := req.Key
			// self lookup from remote peer
			if storage.IsZeroKey(key) {
				addr := req.from.Addr()
				key = storage.Key(addr[:])
				req.Key = nil
			}
			// get peer addresses from hive
			for _, peer := range self.getPeers(key, int(req.MaxPeers)) {
				addrs = append(addrs, peer.remoteAddr)
			}
			log.Debug(fmt.Sprintf("Hive sending %d peer addresses to %v. req.Id: %v, req.Key: %v", len(addrs), req.from, req.Id, req.Key.Log()))

			peersData := &peersMsgData{
				Peers: addrs,
				Key:   req.Key,
				Id:    req.Id,
			}
			peersData.setTimeout(req.timeout)
			req.from.peers(peersData)
		}
	}
}

func (self *Hive) String() string {
	return self.kad.String()
}