github.com/DxChainNetwork/dxc@v0.8.1-0.20220824085222-1162e304b6e7/les/vflux/server/clientpool.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package server

import (
	"errors"
	"sync"
	"time"

	"github.com/DxChainNetwork/dxc/common/mclock"
	"github.com/DxChainNetwork/dxc/ethdb"
	"github.com/DxChainNetwork/dxc/les/utils"
	"github.com/DxChainNetwork/dxc/les/vflux"
	"github.com/DxChainNetwork/dxc/log"
	"github.com/DxChainNetwork/dxc/p2p/enode"
	"github.com/DxChainNetwork/dxc/p2p/nodestate"
	"github.com/DxChainNetwork/dxc/rlp"
)

var (
	ErrNotConnected    = errors.New("client not connected")
	ErrNoPriority      = errors.New("priority too low to raise capacity")
	ErrCantFindMaximum = errors.New("Unable to find maximum allowed capacity")
)

// ClientPool implements a client database that assigns a priority to each client
// based on a positive and negative balance. Positive balance is externally assigned
// to prioritized clients and is decreased with connection time and processed
// requests (unless the price factors are zero). If the positive balance is zero
// then negative balance is accumulated.
//
// Balance tracking and priority calculation for connected clients is done by
// balanceTracker. priorityPool ensures that clients with the lowest positive or
// highest negative balance get evicted when the total capacity allowance is full
// and new clients with a better balance want to connect.
//
// Already connected nodes receive a small bias in their favor in order to avoid
// accepting and instantly kicking out clients. In theory, we try to ensure that
// each client can have several minutes of connection time.
//
// Balances of disconnected clients are stored in nodeDB, including both the positive
// and the negative balance. Both positive and negative balances decay exponentially.
// If a balance gets low enough, the record is dropped.
type ClientPool struct {
	*priorityPool
	*balanceTracker

	setup  *serverSetup
	clock  mclock.Clock
	closed bool
	ns     *nodestate.NodeStateMachine
	synced func() bool

	lock          sync.RWMutex
	connectedBias time.Duration

	minCap     uint64      // the minimal capacity value allowed for any client
	capReqNode *enode.Node // node that is requesting capacity change; only used inside NSM operation
}

// clientPeer represents a peer in the client pool. None of the callbacks should block.
type clientPeer interface {
	Node() *enode.Node
	FreeClientId() string                         // unique id for non-priority clients (typically a prefix of the network address)
	InactiveAllowance() time.Duration             // disconnection timeout for inactive non-priority peers
	UpdateCapacity(newCap uint64, requested bool) // signals a capacity update (requested is true if it is the result of a SetCapacity call on the given peer)
	Disconnect()                                  // initiates disconnection (Unregister should always be called)
}
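// examplePeer is a hedged illustration added for clarity and is not part of the original
// file: a minimal clientPeer implementation sketch. A real peer would wrap a live network
// connection and propagate capacity updates and disconnections to it; the field names and
// the constant timeout used here are assumptions made purely for demonstration.
type examplePeer struct {
	node   *enode.Node
	freeID string
}

func (p *examplePeer) Node() *enode.Node                { return p.node }
func (p *examplePeer) FreeClientId() string             { return p.freeID }
func (p *examplePeer) InactiveAllowance() time.Duration { return 10 * time.Second }

// UpdateCapacity would normally adjust the peer's advertised flow control limits.
func (p *examplePeer) UpdateCapacity(newCap uint64, requested bool) {}

// Disconnect would normally close the underlying connection; Unregister should still be
// called by the caller afterwards.
func (p *examplePeer) Disconnect() {}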
// NewClientPool creates a new client pool
func NewClientPool(balanceDb ethdb.KeyValueStore, minCap uint64, connectedBias time.Duration, clock mclock.Clock, synced func() bool) *ClientPool {
	setup := newServerSetup()
	ns := nodestate.NewNodeStateMachine(nil, nil, clock, setup.setup)
	cp := &ClientPool{
		priorityPool:   newPriorityPool(ns, setup, clock, minCap, connectedBias, 4, 100),
		balanceTracker: newBalanceTracker(ns, setup, balanceDb, clock, &utils.Expirer{}, &utils.Expirer{}),
		setup:          setup,
		ns:             ns,
		clock:          clock,
		minCap:         minCap,
		connectedBias:  connectedBias,
		synced:         synced,
	}

	ns.SubscribeState(nodestate.MergeFlags(setup.activeFlag, setup.inactiveFlag, setup.priorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
		if newState.Equals(setup.inactiveFlag) {
			// set timeout for non-priority inactive client
			var timeout time.Duration
			if c, ok := ns.GetField(node, setup.clientField).(clientPeer); ok {
				timeout = c.InactiveAllowance()
			}
			ns.AddTimeout(node, setup.inactiveFlag, timeout)
		}
		if oldState.Equals(setup.inactiveFlag) && newState.Equals(setup.inactiveFlag.Or(setup.priorityFlag)) {
			ns.SetStateSub(node, setup.inactiveFlag, nodestate.Flags{}, 0) // priority gained; remove timeout
		}
		if newState.Equals(setup.activeFlag) {
			// active with no priority; limit capacity to minCap
			cap, _ := ns.GetField(node, setup.capacityField).(uint64)
			if cap > minCap {
				cp.requestCapacity(node, minCap, minCap, 0)
			}
		}
		if newState.Equals(nodestate.Flags{}) {
			if c, ok := ns.GetField(node, setup.clientField).(clientPeer); ok {
				c.Disconnect()
			}
		}
	})

	ns.SubscribeField(setup.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
		if c, ok := ns.GetField(node, setup.clientField).(clientPeer); ok {
			newCap, _ := newValue.(uint64)
			c.UpdateCapacity(newCap, node == cp.capReqNode)
		}
	})

	// add metrics
	cp.ns.SubscribeState(nodestate.MergeFlags(cp.setup.activeFlag, cp.setup.inactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
		if oldState.IsEmpty() && !newState.IsEmpty() {
			clientConnectedMeter.Mark(1)
		}
		if !oldState.IsEmpty() && newState.IsEmpty() {
			clientDisconnectedMeter.Mark(1)
		}
		if oldState.HasNone(cp.setup.activeFlag) && newState.HasAll(cp.setup.activeFlag) {
			clientActivatedMeter.Mark(1)
		}
		if oldState.HasAll(cp.setup.activeFlag) && newState.HasNone(cp.setup.activeFlag) {
			clientDeactivatedMeter.Mark(1)
		}
		_, connected := cp.Active()
		totalConnectedGauge.Update(int64(connected))
	})
	return cp
}

// Start starts the client pool. Should be called before Register/Unregister.
func (cp *ClientPool) Start() {
	cp.ns.Start()
}
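// exampleClientPoolLifecycle is a hedged usage sketch, not part of the original file: it
// shows the construction and registration flow described above. The peer argument is
// assumed to be any clientPeer implementation (for instance the examplePeer sketch above)
// and the minCap/bias values are arbitrary placeholders.
func exampleClientPoolLifecycle(db ethdb.KeyValueStore, peer clientPeer, synced func() bool) {
	pool := NewClientPool(db, 100, 10*time.Second, mclock.System{}, synced)
	pool.Start()
	defer pool.Stop()

	// Register attaches the peer to the pool; the returned balance handle is nil if the
	// pool has already been stopped.
	if balance := pool.Register(peer); balance != nil {
		// the ConnectedBalance handle could be used here to inspect or top up the
		// peer's positive balance
		_ = balance
	}
	pool.Unregister(peer)
}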
// Stop shuts the client pool down. The clientPeer interface callbacks will not be called
// after Stop. Register calls will return nil.
func (cp *ClientPool) Stop() {
	cp.balanceTracker.stop()
	cp.ns.Stop()
}

// Register registers the peer into the client pool. If the peer has insufficient
// priority and remains inactive for longer than the allowed timeout then it will be
// disconnected by calling the Disconnect function of the clientPeer interface.
func (cp *ClientPool) Register(peer clientPeer) ConnectedBalance {
	cp.ns.SetField(peer.Node(), cp.setup.clientField, peerWrapper{peer})
	balance, _ := cp.ns.GetField(peer.Node(), cp.setup.balanceField).(*nodeBalance)
	return balance
}

// Unregister removes the peer from the client pool
func (cp *ClientPool) Unregister(peer clientPeer) {
	cp.ns.SetField(peer.Node(), cp.setup.clientField, nil)
}

// SetConnectedBias sets the connection bias, which is applied to already connected clients
// so that they are not kicked out too soon and every connected client has enough time to
// request or sync some data.
func (cp *ClientPool) SetConnectedBias(bias time.Duration) {
	cp.lock.Lock()
	cp.connectedBias = bias
	cp.setActiveBias(bias)
	cp.lock.Unlock()
}

// SetCapacity sets the assigned capacity of a connected client
func (cp *ClientPool) SetCapacity(node *enode.Node, reqCap uint64, bias time.Duration, requested bool) (capacity uint64, err error) {
	cp.lock.RLock()
	if cp.connectedBias > bias {
		bias = cp.connectedBias
	}
	cp.lock.RUnlock()

	cp.ns.Operation(func() {
		balance, _ := cp.ns.GetField(node, cp.setup.balanceField).(*nodeBalance)
		if balance == nil {
			err = ErrNotConnected
			return
		}
		capacity, _ = cp.ns.GetField(node, cp.setup.capacityField).(uint64)
		if capacity == 0 {
			// if the client is inactive then it has insufficient priority for the minimal capacity
			// (will be activated automatically with minCap when possible)
			return
		}
		if reqCap < cp.minCap {
			// can't request less than minCap; switching between 0 (inactive state) and minCap is
			// performed by the server automatically as soon as necessary/possible
			reqCap = cp.minCap
		}
		if reqCap > cp.minCap && cp.ns.GetState(node).HasNone(cp.setup.priorityFlag) {
			err = ErrNoPriority
			return
		}
		if reqCap == capacity {
			return
		}
		if requested {
			// mark the requested node so that the UpdateCapacity callback can signal
			// whether the update is the direct result of a SetCapacity call on the given node
			cp.capReqNode = node
			defer func() {
				cp.capReqNode = nil
			}()
		}

		var minTarget, maxTarget uint64
		if reqCap > capacity {
			// Estimate maximum available capacity at the current priority level and request
			// the estimated amount.
			// Note: requestCapacity could find the highest available capacity between the
			// current and the requested capacity but it could cost a lot of iterations with
			// fine step adjustment if the requested capacity is very high. By doing a quick
			// estimation of the maximum available capacity based on the capacity curve we
			// can limit the number of required iterations.
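			// Worked illustration (hedged, added for clarity, not in the original): if the
			// curve estimates that roughly 1000 capacity units are sustainable at the current
			// balance and reqCap is 800, maxTarget is clamped to 800 and minTarget becomes
			// 800 - 800/20 = 760, so requestCapacity only has to fine-step through a narrow
			// 760..800 window instead of the whole range.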
			curve := cp.getCapacityCurve().exclude(node.ID())
			maxTarget = curve.maxCapacity(func(capacity uint64) int64 {
				return balance.estimatePriority(capacity, 0, 0, bias, false)
			})
			if maxTarget < reqCap {
				return
			}
			maxTarget = reqCap

			// Specify a narrow target range that allows a limited number of fine step
			// iterations
			minTarget = maxTarget - maxTarget/20
			if minTarget < capacity {
				minTarget = capacity
			}
		} else {
			minTarget, maxTarget = reqCap, reqCap
		}
		if newCap := cp.requestCapacity(node, minTarget, maxTarget, bias); newCap >= minTarget && newCap <= maxTarget {
			capacity = newCap
			return
		}
		// we should be able to find the maximum allowed capacity in a few iterations
		log.Error("Unable to find maximum allowed capacity")
		err = ErrCantFindMaximum
	})
	return
}

// serveCapQuery serves a vflux capacity query. It receives multiple token amount values
// and a bias time value. For each given token amount it calculates the maximum achievable
// capacity in case the amount is added to the balance.
func (cp *ClientPool) serveCapQuery(id enode.ID, freeID string, data []byte) []byte {
	var req vflux.CapacityQueryReq
	if rlp.DecodeBytes(data, &req) != nil {
		return nil
	}
	if l := len(req.AddTokens); l == 0 || l > vflux.CapacityQueryMaxLen {
		return nil
	}
	result := make(vflux.CapacityQueryReply, len(req.AddTokens))
	if !cp.synced() {
		capacityQueryZeroMeter.Mark(1)
		reply, _ := rlp.EncodeToBytes(&result)
		return reply
	}

	bias := time.Second * time.Duration(req.Bias)
	cp.lock.RLock()
	if cp.connectedBias > bias {
		bias = cp.connectedBias
	}
	cp.lock.RUnlock()

	// use capacityCurve to answer request for multiple newly bought token amounts
	curve := cp.getCapacityCurve().exclude(id)
	cp.BalanceOperation(id, freeID, func(balance AtomicBalanceOperator) {
		pb, _ := balance.GetBalance()
		for i, addTokens := range req.AddTokens {
			add := addTokens.Int64()
			result[i] = curve.maxCapacity(func(capacity uint64) int64 {
				return balance.estimatePriority(capacity, add, 0, bias, false) / int64(capacity)
			})
			if add <= 0 && uint64(-add) >= pb && result[i] > cp.minCap {
				result[i] = cp.minCap
			}
			if result[i] < cp.minCap {
				result[i] = 0
			}
		}
	})
	// add first result to metrics (don't care about priority client multi-queries yet)
	if result[0] == 0 {
		capacityQueryZeroMeter.Mark(1)
	} else {
		capacityQueryNonZeroMeter.Mark(1)
	}
	reply, _ := rlp.EncodeToBytes(&result)
	return reply
}

// Handle implements Service
func (cp *ClientPool) Handle(id enode.ID, address string, name string, data []byte) []byte {
	switch name {
	case vflux.CapacityQueryName:
		return cp.serveCapQuery(id, address, data)
	default:
		return nil
	}
}
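// exampleCapacityQuery is a hedged sketch, not part of the original file: it shows how a
// capacity query for the current balance (no added tokens) could be encoded and answered
// through Handle. It assumes the AddTokens entries are vflux.IntOrInf values as in upstream
// go-ethereum; error handling is omitted for brevity.
func exampleCapacityQuery(cp *ClientPool, id enode.ID, freeID string) []uint64 {
	req := vflux.CapacityQueryReq{
		Bias:      5,                    // request a 5 second connection bias
		AddTokens: []vflux.IntOrInf{{}}, // zero tokens added: query the current balance only
	}
	data, _ := rlp.EncodeToBytes(&req)
	reply := cp.Handle(id, freeID, vflux.CapacityQueryName, data)

	// reply is nil if the request was malformed; otherwise it is an RLP encoded
	// CapacityQueryReply with one maximum capacity value per requested token amount.
	var result vflux.CapacityQueryReply
	if err := rlp.DecodeBytes(reply, &result); err != nil {
		return nil
	}
	return result
}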