github.com/alexdevranger/node-1.8.27@v0.0.0-20221128213301-aa5841e41d2d/swarm/network/hive.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-dubxcoin library.
//
// The go-dubxcoin library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-dubxcoin library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-dubxcoin library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"fmt"
	"sync"
	"time"

	"github.com/alexdevranger/node-1.8.27/common/hexutil"
	"github.com/alexdevranger/node-1.8.27/p2p"
	"github.com/alexdevranger/node-1.8.27/p2p/enode"
	"github.com/alexdevranger/node-1.8.27/swarm/log"
	"github.com/alexdevranger/node-1.8.27/swarm/state"
)

/*
Hive is the logistic manager of the swarm

When the hive is started, a forever loop is launched that
asks the kademlia nodetable
to suggest peers to bootstrap connectivity
*/

// HiveParams holds the config options to hive
type HiveParams struct {
	Discovery             bool  // whether to enable peer discovery or not
	PeersBroadcastSetSize uint8 // how many peers to use when relaying
	MaxPeersPerRequest    uint8 // max size for peer address batches
	KeepAliveInterval     time.Duration
}

// NewHiveParams returns hive config with the default values
func NewHiveParams() *HiveParams {
	return &HiveParams{
		Discovery:             true,
		PeersBroadcastSetSize: 3,
		MaxPeersPerRequest:    5,
		KeepAliveInterval:     500 * time.Millisecond,
	}
}

// Hive manages network connections of the swarm node
type Hive struct {
	*HiveParams                   // settings
	*Kademlia                     // the overlay connectivity driver
	Store       state.Store       // storage interface to save peers across sessions
	addPeer     func(*enode.Node) // server callback to connect to a peer
	// bookkeeping
	lock   sync.Mutex
	peers  map[enode.ID]*BzzPeer
	ticker *time.Ticker
}

// NewHive constructs a new hive
// HiveParams: config parameters
// Kademlia: connectivity driver using a network topology
// StateStore: to save peers across sessions
func NewHive(params *HiveParams, kad *Kademlia, store state.Store) *Hive {
	return &Hive{
		HiveParams: params,
		Kademlia:   kad,
		Store:      store,
		peers:      make(map[enode.ID]*BzzPeer),
	}
}

// Start starts the hive, receives p2p.Server only at startup
// server is used to connect to a peer based on its NodeID or enode URL
// these are called on the p2p.Server which runs on the node
func (h *Hive) Start(server *p2p.Server) error {
	log.Info("Starting hive", "baseaddr", fmt.Sprintf("%x", h.BaseAddr()[:4]))
	// if state store is specified, load peers to prepopulate the overlay address book
	if h.Store != nil {
		log.Info("Detected an existing store. trying to load peers")
		if err := h.loadPeers(); err != nil {
			log.Error(fmt.Sprintf("%08x hive encountered an error trying to load peers", h.BaseAddr()[:4]))
			return err
		}
	}
	// assigns the p2p.Server#AddPeer function to connect to peers
	h.addPeer = server.AddPeer
	// ticker to keep the hive alive
	h.ticker = time.NewTicker(h.KeepAliveInterval)
	// this loop is doing bootstrapping and maintains a healthy table
	go h.connect()
	return nil
}
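// startHiveSketch is NOT part of the original file. It is a hypothetical,
// minimal sketch of how a caller might wire a Hive together and start it,
// assuming a base overlay address and a running p2p.Server are available.
// NewKademlia and NewKadParams come from this package; NewInmemoryStore
// comes from the swarm/state package.
func startHiveSketch(server *p2p.Server, baseAddr []byte) (*Hive, error) {
	params := NewHiveParams()                    // defaults: discovery on, 500ms keep-alive
	kad := NewKademlia(baseAddr, NewKadParams()) // overlay connectivity driver
	// in-memory store: peers are not persisted across sessions
	hive := NewHive(params, kad, state.NewInmemoryStore())
	if err := hive.Start(server); err != nil {
		return nil, err
	}
	return hive, nil
}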
// Stop terminates the update loop and saves the peers
func (h *Hive) Stop() error {
	log.Info(fmt.Sprintf("%08x hive stopping, saving peers", h.BaseAddr()[:4]))
	h.ticker.Stop()
	if h.Store != nil {
		if err := h.savePeers(); err != nil {
			return fmt.Errorf("could not save peers to persistence store: %v", err)
		}
		if err := h.Store.Close(); err != nil {
			return fmt.Errorf("could not close file handle to persistence store: %v", err)
		}
	}
	log.Info(fmt.Sprintf("%08x hive stopped, dropping peers", h.BaseAddr()[:4]))
	h.EachConn(nil, 255, func(p *Peer, _ int) bool {
		log.Info(fmt.Sprintf("%08x dropping peer %08x", h.BaseAddr()[:4], p.Address()[:4]))
		p.Drop(nil)
		return true
	})

	log.Info(fmt.Sprintf("%08x all peers dropped", h.BaseAddr()[:4]))
	return nil
}

// connect is a forever loop
// at each iteration it asks the overlay driver to suggest the most preferred peer to connect to
// and advertises the saturation depth if it changed
func (h *Hive) connect() {
	for range h.ticker.C {

		addr, depth, changed := h.SuggestPeer()
		if h.Discovery && changed {
			NotifyDepth(uint8(depth), h.Kademlia)
		}
		if addr == nil {
			continue
		}

		log.Trace(fmt.Sprintf("%08x hive connect() suggested %08x", h.BaseAddr()[:4], addr.Address()[:4]))
		under, err := enode.ParseV4(string(addr.Under()))
		if err != nil {
			log.Warn(fmt.Sprintf("%08x unable to connect to bee %08x: invalid node URL: %v", h.BaseAddr()[:4], addr.Address()[:4], err))
			continue
		}
		log.Trace(fmt.Sprintf("%08x attempt to connect to bee %08x", h.BaseAddr()[:4], addr.Address()[:4]))
		h.addPeer(under)
	}
}

// Run is the protocol run function for a connected peer
func (h *Hive) Run(p *BzzPeer) error {
	h.trackPeer(p)
	defer h.untrackPeer(p)

	dp := NewPeer(p, h.Kademlia)
	depth, changed := h.On(dp)
	// if we want discovery, advertise change of depth
	if h.Discovery {
		if changed {
			// if depth changed, send to all peers
			NotifyDepth(depth, h.Kademlia)
		} else {
			// otherwise just send depth to new peer
			dp.NotifyDepth(depth)
		}
		NotifyPeer(p.BzzAddr, h.Kademlia)
	}
	defer h.Off(dp)
	return dp.Run(dp.HandleMsg)
}

func (h *Hive) trackPeer(p *BzzPeer) {
	h.lock.Lock()
	h.peers[p.ID()] = p
	h.lock.Unlock()
}

func (h *Hive) untrackPeer(p *BzzPeer) {
	h.lock.Lock()
	delete(h.peers, p.ID())
	h.lock.Unlock()
}

// NodeInfo function is used by the p2p.server RPC interface to display
// protocol specific node information
func (h *Hive) NodeInfo() interface{} {
	return h.String()
}

// PeerInfo function is used by the p2p.server RPC interface to display
// protocol specific information about any connected peer referred to by its NodeID
func (h *Hive) PeerInfo(id enode.ID) interface{} {
	h.lock.Lock()
	p := h.peers[id]
	h.lock.Unlock()

	if p == nil {
		return nil
	}
	addr := NewAddr(p.Node())
	return struct {
		OAddr hexutil.Bytes
		UAddr hexutil.Bytes
	}{
		OAddr: addr.OAddr,
		UAddr: addr.UAddr,
	}
}
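// connectedPeerIDs is NOT part of the original file. It is a hypothetical
// sketch showing how the bookkeeping map maintained by trackPeer/untrackPeer
// could be read under the same lock, e.g. to enumerate the peers the hive is
// currently tracking.
func (h *Hive) connectedPeerIDs() []enode.ID {
	h.lock.Lock()
	defer h.lock.Unlock()
	ids := make([]enode.ID, 0, len(h.peers))
	for id := range h.peers {
		ids = append(ids, id)
	}
	return ids
}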
// loadPeers loads the persisted peer addresses from the state store and
// registers them with the Kademlia
func (h *Hive) loadPeers() error {
	var as []*BzzAddr
	err := h.Store.Get("peers", &as)
	if err != nil {
		if err == state.ErrNotFound {
			log.Info(fmt.Sprintf("hive %08x: no persisted peers found", h.BaseAddr()[:4]))
			return nil
		}
		return err
	}
	log.Info(fmt.Sprintf("hive %08x: peers loaded", h.BaseAddr()[:4]))

	return h.Register(as...)
}

// savePeers collects the known peer addresses from the Kademlia and persists
// them to the state store
func (h *Hive) savePeers() error {
	var peers []*BzzAddr
	h.Kademlia.EachAddr(nil, 256, func(pa *BzzAddr, i int) bool {
		if pa == nil {
			log.Warn(fmt.Sprintf("empty addr: %v", i))
			return true
		}
		log.Trace("saving peer", "peer", pa)
		peers = append(peers, pa)
		return true
	})
	if err := h.Store.Put("peers", peers); err != nil {
		return fmt.Errorf("could not save peers: %v", err)
	}
	return nil
}
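// persistenceRoundTripSketch is NOT part of the original file. It is a
// hypothetical illustration of the round trip implemented by savePeers and
// loadPeers: on Stop the known overlay addresses are written to the store
// under the "peers" key, and on the next Start they are read back and
// re-registered with the Kademlia. It assumes the hive was constructed with
// a non-nil state.Store.
func persistenceRoundTripSketch(h *Hive) error {
	if err := h.savePeers(); err != nil { // write the current address book
		return err
	}
	return h.loadPeers() // read it back and Register the entries
}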