github.com/linapex/ethereum-go-chinese@v0.0.0-20190316121929-f8b7a73c3fa1/swarm/network/hive.go

//<developer>
//    <name>linapex 曹一峰</name>
//    <email>linapex@163.com</email>
//    <wx>superexc</wx>
//    <qqgroup>128148617</qqgroup>
//    <url>https://jsq.ink</url>
//    <role>pku engineer</role>
//    <date>2019-03-16 19:16:43</date>
//</624450113716097024>

package network

import (
	"fmt"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/state"
)

/*
Hive is the logistic manager of the swarm.

When the hive is started, a forever loop is launched that asks the kademlia
to suggest peers to bootstrap connectivity.
*/

// HiveParams holds the configuration options for the hive.
type HiveParams struct {
	Discovery             bool  // if true, the node takes part in peer discovery
	PeersBroadcastSetSize uint8 // how many peers to use when relaying
	MaxPeersPerRequest    uint8 // max size for a batch of peer addresses
	KeepAliveInterval     time.Duration
}

// NewHiveParams returns a hive config populated with the default values.
func NewHiveParams() *HiveParams {
	return &HiveParams{
		Discovery:             true,
		PeersBroadcastSetSize: 3,
		MaxPeersPerRequest:    5,
		KeepAliveInterval:     500 * time.Millisecond,
	}
}
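
// A minimal usage sketch (illustrative, not part of the original file): the
// defaults above can be overridden before the params are handed to NewHive.
// The values shown here are assumptions for the example only:
//
//	params := NewHiveParams()
//	params.Discovery = false           // opt out of peer discovery
//	params.KeepAliveInterval = 2 * time.Second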

// Hive manages the network connections of the swarm node.
type Hive struct {
	*HiveParams                   // settings
	*Kademlia                     // the overlay connectivity driver
	Store       state.Store       // storage interface to save peers across sessions
	addPeer     func(*enode.Node) // server callback to connect to a peer
	// bookkeeping
	lock   sync.Mutex
	peers  map[enode.ID]*BzzPeer
	ticker *time.Ticker
}

// NewHive constructs a new hive.
// HiveParams: configuration parameters
// Kademlia: connectivity driver using an overlay network topology
// StateStore: to save peers across sessions
func NewHive(params *HiveParams, kad *Kademlia, store state.Store) *Hive {
	return &Hive{
		HiveParams: params,
		Kademlia:   kad,
		Store:      store,
		peers:      make(map[enode.ID]*BzzPeer),
	}
}
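
// A hedged wiring sketch (not part of the original file) of how a hive is
// typically constructed and started; kad, stateStore and srv stand in for
// values created elsewhere (e.g. by the bzz service during node startup):
//
//	kad := NewKademlia(baseAddr, NewKadParams())
//	hive := NewHive(NewHiveParams(), kad, stateStore)
//	if err := hive.Start(srv); err != nil { // srv is the node's *p2p.Server
//		return err
//	}
//	defer hive.Stop()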

// Start starts the hive; it receives the p2p.Server only at startup.
// The server is used to connect to a peer based on its NodeID or enode URL;
// these are called on the p2p.Server which runs on the node.
func (h *Hive) Start(server *p2p.Server) error {
	log.Info("Starting hive", "baseaddr", fmt.Sprintf("%x", h.BaseAddr()[:4]))
	// if a state store is specified, load peers to prepopulate the overlay address book
	if h.Store != nil {
		log.Info("Detected an existing store. trying to load peers")
		if err := h.loadPeers(); err != nil {
			log.Error(fmt.Sprintf("%08x hive encountered an error trying to load peers", h.BaseAddr()[:4]))
			return err
		}
	}
	// assign the p2p.Server's AddPeer function to connect to peers
	h.addPeer = server.AddPeer
	// ticker to keep the hive alive
	h.ticker = time.NewTicker(h.KeepAliveInterval)
	// this loop does the bootstrapping and maintains a healthy table
	go h.connect()
	return nil
}

// Stop terminates the update loop and saves the peers.
func (h *Hive) Stop() error {
	log.Info(fmt.Sprintf("%08x hive stopping, saving peers", h.BaseAddr()[:4]))
	h.ticker.Stop()
	if h.Store != nil {
		if err := h.savePeers(); err != nil {
			return fmt.Errorf("could not save peers to persistence store: %v", err)
		}
		if err := h.Store.Close(); err != nil {
			return fmt.Errorf("could not close file handle to persistence store: %v", err)
		}
	}
	log.Info(fmt.Sprintf("%08x hive stopped, dropping peers", h.BaseAddr()[:4]))
	h.EachConn(nil, 255, func(p *Peer, _ int) bool {
		log.Info(fmt.Sprintf("%08x dropping peer %08x", h.BaseAddr()[:4], p.Address()[:4]))
		p.Drop(nil)
		return true
	})

	log.Info(fmt.Sprintf("%08x all peers dropped", h.BaseAddr()[:4]))
	return nil
}

// connect is a forever loop.
// On each iteration it asks the overlay driver to suggest the most preferred
// peer to connect to, and advertises the saturation depth if it changed.
func (h *Hive) connect() {
	for range h.ticker.C {

		addr, depth, changed := h.SuggestPeer()
		if h.Discovery && changed {
			NotifyDepth(uint8(depth), h.Kademlia)
		}
		if addr == nil {
			continue
		}

		log.Trace(fmt.Sprintf("%08x hive connect() suggested %08x", h.BaseAddr()[:4], addr.Address()[:4]))
		under, err := enode.ParseV4(string(addr.Under()))
		if err != nil {
			log.Warn(fmt.Sprintf("%08x unable to connect to bee %08x: invalid node URL: %v", h.BaseAddr()[:4], addr.Address()[:4], err))
			continue
		}
		log.Trace(fmt.Sprintf("%08x attempt to connect to bee %08x", h.BaseAddr()[:4], addr.Address()[:4]))
		h.addPeer(under)
	}
}

// Run is the protocol run function.
func (h *Hive) Run(p *BzzPeer) error {
	h.trackPeer(p)
	defer h.untrackPeer(p)

	dp := NewPeer(p, h.Kademlia)
	depth, changed := h.On(dp)
	// if we want discovery, advertise changes in depth
	if h.Discovery {
		if changed {
			// if the depth changed, send it to all peers
			NotifyDepth(depth, h.Kademlia)
		} else {
			// otherwise, just send the depth to the new peer
			dp.NotifyDepth(depth)
		}
		NotifyPeer(p.BzzAddr, h.Kademlia)
	}
	defer h.Off(dp)
	return dp.Run(dp.HandleMsg)
}

// trackPeer records the connected peer in the hive's peer map.
func (h *Hive) trackPeer(p *BzzPeer) {
	h.lock.Lock()
	h.peers[p.ID()] = p
	h.lock.Unlock()
}

// untrackPeer removes the peer from the hive's peer map.
func (h *Hive) untrackPeer(p *BzzPeer) {
	h.lock.Lock()
	delete(h.peers, p.ID())
	h.lock.Unlock()
}

// NodeInfo is used by the p2p.Server RPC interface to display
// protocol specific node information.
func (h *Hive) NodeInfo() interface{} {
	return h.String()
}

// PeerInfo is used by the p2p.Server RPC interface to display
// protocol specific information about any connected peer referred to by its node ID.
func (h *Hive) PeerInfo(id enode.ID) interface{} {
	h.lock.Lock()
	p := h.peers[id]
	h.lock.Unlock()

	if p == nil {
		return nil
	}
	addr := NewAddr(p.Node())
	return struct {
		OAddr hexutil.Bytes
		UAddr hexutil.Bytes
	}{
		OAddr: addr.OAddr,
		UAddr: addr.UAddr,
	}
}
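
// For illustration (not part of the original file): when exposed through the
// node's admin RPC, the anonymous struct above marshals the overlay and
// underlay addresses as 0x-prefixed hex strings, roughly:
//
//	{"OAddr": "0x45cf8a...", "UAddr": "0x656e6f6465..."}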

// loadPeers and savePeers implement the persistence callbacks.
func (h *Hive) loadPeers() error {
	var as []*BzzAddr
	err := h.Store.Get("peers", &as)
	if err != nil {
		if err == state.ErrNotFound {
			log.Info(fmt.Sprintf("hive %08x: no persisted peers found", h.BaseAddr()[:4]))
			return nil
		}
		return err
	}
	log.Info(fmt.Sprintf("hive %08x: peers loaded", h.BaseAddr()[:4]))

	return h.Register(as...)
}

// savePeers persists the known peer addresses to the state store.
func (h *Hive) savePeers() error {
	var peers []*BzzAddr
	h.Kademlia.EachAddr(nil, 256, func(pa *BzzAddr, i int) bool {
		if pa == nil {
			log.Warn(fmt.Sprintf("empty addr: %v", i))
			return true
		}
		log.Trace("saving peer", "peer", pa)
		peers = append(peers, pa)
		return true
	})
	if err := h.Store.Put("peers", peers); err != nil {
		return fmt.Errorf("could not save peers: %v", err)
	}
	return nil
}
   242