github.com/jimmyx0x/go-ethereum@v1.10.28/les/vflux/server/clientpool.go

// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package server

import (
	"errors"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/les/utils"
	"github.com/ethereum/go-ethereum/les/vflux"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/nodestate"
	"github.com/ethereum/go-ethereum/rlp"
)

var (
	ErrNotConnected    = errors.New("client not connected")
	ErrNoPriority      = errors.New("priority too low to raise capacity")
	ErrCantFindMaximum = errors.New("unable to find maximum allowed capacity")
)

// ClientPool implements a client database that assigns a priority to each client
// based on a positive and negative balance. Positive balance is externally assigned
// to prioritized clients and is decreased with connection time and processed
// requests (unless the price factors are zero). If the positive balance is zero
// then negative balance is accumulated.
//
// Balance tracking and priority calculation for connected clients is done by
// balanceTracker. PriorityQueue ensures that clients with the lowest positive or
// highest negative balance get evicted when the total capacity allowance is full
// and new clients with a better balance want to connect.
//
// Already connected nodes receive a small bias in their favor in order to avoid
// accepting and instantly kicking out clients. In theory, we try to ensure that
// each client can have several minutes of connection time.
//
// Balances of disconnected clients are stored in nodeDB including positive balance
// and negative balance. Both positive balance and negative balance will decrease
// exponentially. If the balance is low enough, then the record will be dropped.
type ClientPool struct {
	*priorityPool
	*balanceTracker

	setup  *serverSetup
	clock  mclock.Clock
	ns     *nodestate.NodeStateMachine
	synced func() bool

	lock          sync.RWMutex
	connectedBias time.Duration

	minCap     uint64      // the minimal capacity value allowed for any client
	capReqNode *enode.Node // node that is requesting capacity change; only used inside NSM operation
}

// clientPeer represents a peer in the client pool. None of the callbacks should block.
type clientPeer interface {
	Node() *enode.Node
	FreeClientId() string                         // unique id for non-priority clients (typically a prefix of the network address)
	InactiveAllowance() time.Duration             // disconnection timeout for inactive non-priority peers
	UpdateCapacity(newCap uint64, requested bool) // signals a capacity update (requested is true if it is a result of a SetCapacity call on the given peer)
	Disconnect()                                  // initiates disconnection (Unregister should always be called)
}
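
// Illustrative sketch only (not part of the pool's API): a minimal clientPeer
// implementation showing what each callback is expected to provide. The type
// name, fields and the 10 second inactivity allowance are assumptions chosen
// for the example; a real server-side peer type would wrap an actual network
// connection.
type sketchClientPeer struct {
	node   *enode.Node
	freeID string // typically derived from the remote network address
}

// compile-time check that the sketch satisfies clientPeer
var _ clientPeer = (*sketchClientPeer)(nil)

func (p *sketchClientPeer) Node() *enode.Node { return p.node }

// FreeClientId groups non-priority clients (e.g. by an address prefix) for fair sharing.
func (p *sketchClientPeer) FreeClientId() string { return p.freeID }

// InactiveAllowance is how long a non-priority peer may stay inactive before
// the pool disconnects it.
func (p *sketchClientPeer) InactiveAllowance() time.Duration { return 10 * time.Second }

// UpdateCapacity would propagate the new capacity to the peer; requested is
// true if the change is the result of a SetCapacity call on this peer.
func (p *sketchClientPeer) UpdateCapacity(newCap uint64, requested bool) {}

// Disconnect would tear down the connection; Unregister should still be
// called on the pool afterwards.
func (p *sketchClientPeer) Disconnect() {}
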
// NewClientPool creates a new client pool
func NewClientPool(balanceDb ethdb.KeyValueStore, minCap uint64, connectedBias time.Duration, clock mclock.Clock, synced func() bool) *ClientPool {
	setup := newServerSetup()
	ns := nodestate.NewNodeStateMachine(nil, nil, clock, setup.setup)
	cp := &ClientPool{
		priorityPool:   newPriorityPool(ns, setup, clock, minCap, connectedBias, 4, 100),
		balanceTracker: newBalanceTracker(ns, setup, balanceDb, clock, &utils.Expirer{}, &utils.Expirer{}),
		setup:          setup,
		ns:             ns,
		clock:          clock,
		minCap:         minCap,
		connectedBias:  connectedBias,
		synced:         synced,
	}

	ns.SubscribeState(nodestate.MergeFlags(setup.activeFlag, setup.inactiveFlag, setup.priorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
		if newState.Equals(setup.inactiveFlag) {
			// set timeout for non-priority inactive client
			var timeout time.Duration
			if c, ok := ns.GetField(node, setup.clientField).(clientPeer); ok {
				timeout = c.InactiveAllowance()
			}
			ns.AddTimeout(node, setup.inactiveFlag, timeout)
		}
		if oldState.Equals(setup.inactiveFlag) && newState.Equals(setup.inactiveFlag.Or(setup.priorityFlag)) {
			ns.SetStateSub(node, setup.inactiveFlag, nodestate.Flags{}, 0) // priority gained; remove timeout
		}
		if newState.Equals(setup.activeFlag) {
			// active with no priority; limit capacity to minCap
			cap, _ := ns.GetField(node, setup.capacityField).(uint64)
			if cap > minCap {
				cp.requestCapacity(node, minCap, minCap, 0)
			}
		}
		if newState.Equals(nodestate.Flags{}) {
			if c, ok := ns.GetField(node, setup.clientField).(clientPeer); ok {
				c.Disconnect()
			}
		}
	})

	ns.SubscribeField(setup.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
		if c, ok := ns.GetField(node, setup.clientField).(clientPeer); ok {
			newCap, _ := newValue.(uint64)
			c.UpdateCapacity(newCap, node == cp.capReqNode)
		}
	})

	// add metrics
	cp.ns.SubscribeState(nodestate.MergeFlags(cp.setup.activeFlag, cp.setup.inactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
		if oldState.IsEmpty() && !newState.IsEmpty() {
			clientConnectedMeter.Mark(1)
		}
		if !oldState.IsEmpty() && newState.IsEmpty() {
			clientDisconnectedMeter.Mark(1)
		}
		if oldState.HasNone(cp.setup.activeFlag) && newState.HasAll(cp.setup.activeFlag) {
			clientActivatedMeter.Mark(1)
		}
		if oldState.HasAll(cp.setup.activeFlag) && newState.HasNone(cp.setup.activeFlag) {
			clientDeactivatedMeter.Mark(1)
		}
		activeCount, activeCap := cp.Active()
		totalActiveCountGauge.Update(int64(activeCount))
		totalActiveCapacityGauge.Update(int64(activeCap))
		totalInactiveCountGauge.Update(int64(cp.Inactive()))
	})
	return cp
}

// Start starts the client pool. Should be called before Register/Unregister.
func (cp *ClientPool) Start() {
	cp.ns.Start()
}
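
// Illustrative lifecycle sketch (not part of the pool's API): it only chains
// the exported calls defined in this file. The backing key-value store, the
// peer implementation and the numeric values are assumptions supplied by the
// caller; mclock.System is the wall-clock implementation of mclock.Clock.
func sketchPoolLifecycle(db ethdb.KeyValueStore, peer clientPeer, synced func() bool) {
	pool := NewClientPool(db, 100000, 3*time.Second, mclock.System{}, synced)
	pool.Start()
	defer pool.Stop()

	// Register the peer; the returned balance handle is nil if registration
	// did not succeed (e.g. the pool has already been stopped).
	if balance := pool.Register(peer); balance == nil {
		return
	}
	defer pool.Unregister(peer)

	// Try to raise the peer's capacity; this only succeeds for priority clients.
	if cap, err := pool.SetCapacity(peer.Node(), 500000, 0, true); err == nil {
		log.Info("Capacity raised", "capacity", cap)
	}
}
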
// Stop shuts the client pool down. The clientPeer interface callbacks will not be called
// after Stop. Register calls will return nil.
func (cp *ClientPool) Stop() {
	cp.balanceTracker.stop()
	cp.ns.Stop()
}

// Register registers the peer into the client pool. If the peer has insufficient
// priority and remains inactive for longer than the allowed timeout then it will be
// disconnected by calling the Disconnect function of the clientPeer interface.
func (cp *ClientPool) Register(peer clientPeer) ConnectedBalance {
	cp.ns.SetField(peer.Node(), cp.setup.clientField, peerWrapper{peer})
	balance, _ := cp.ns.GetField(peer.Node(), cp.setup.balanceField).(*nodeBalance)
	return balance
}

// Unregister removes the peer from the client pool
func (cp *ClientPool) Unregister(peer clientPeer) {
	cp.ns.SetField(peer.Node(), cp.setup.clientField, nil)
}

// SetConnectedBias sets the connection bias, which is applied to already connected clients
// so that they are not kicked out too soon and have enough time to request or sync some data.
func (cp *ClientPool) SetConnectedBias(bias time.Duration) {
	cp.lock.Lock()
	cp.connectedBias = bias
	cp.setActiveBias(bias)
	cp.lock.Unlock()
}
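
// Sketch (not part of the pool's API): crediting positive balance, which is
// what gives a client the priority flag required to raise its capacity above
// minCap via SetCapacity below. BalanceOperation and GetBalance appear in
// this file; the AddBalance call and its three return values are assumptions
// based on how the operator-facing admin API uses AtomicBalanceOperator.
func sketchCreditBalance(cp *ClientPool, node *enode.Node, freeID string, amount int64) (pos, neg uint64, err error) {
	cp.BalanceOperation(node.ID(), freeID, func(balance AtomicBalanceOperator) {
		// the returned before/after balance values are ignored here
		if _, _, err = balance.AddBalance(amount); err != nil {
			return
		}
		pos, neg = balance.GetBalance() // current positive and negative balance
	})
	return
}
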
// SetCapacity sets the assigned capacity of a connected client
func (cp *ClientPool) SetCapacity(node *enode.Node, reqCap uint64, bias time.Duration, requested bool) (capacity uint64, err error) {
	cp.lock.RLock()
	if cp.connectedBias > bias {
		bias = cp.connectedBias
	}
	cp.lock.RUnlock()

	cp.ns.Operation(func() {
		balance, _ := cp.ns.GetField(node, cp.setup.balanceField).(*nodeBalance)
		if balance == nil {
			err = ErrNotConnected
			return
		}
		capacity, _ = cp.ns.GetField(node, cp.setup.capacityField).(uint64)
		if capacity == 0 {
			// if the client is inactive then it has insufficient priority for the minimal capacity
			// (will be activated automatically with minCap when possible)
			return
		}
		if reqCap < cp.minCap {
			// can't request less than minCap; switching between 0 (inactive state) and minCap is
			// performed by the server automatically as soon as necessary/possible
			reqCap = cp.minCap
		}
		if reqCap > cp.minCap && cp.ns.GetState(node).HasNone(cp.setup.priorityFlag) {
			err = ErrNoPriority
			return
		}
		if reqCap == capacity {
			return
		}
		if requested {
			// mark the requested node so that the UpdateCapacity callback can signal
			// whether the update is the direct result of a SetCapacity call on the given node
			cp.capReqNode = node
			defer func() {
				cp.capReqNode = nil
			}()
		}

		var minTarget, maxTarget uint64
		if reqCap > capacity {
			// Estimate maximum available capacity at the current priority level and request
			// the estimated amount.
			// Note: requestCapacity could find the highest available capacity between the
			// current and the requested capacity but it could cost a lot of iterations with
			// fine step adjustment if the requested capacity is very high. By doing a quick
			// estimation of the maximum available capacity based on the capacity curve we
			// can limit the number of required iterations.
			curve := cp.getCapacityCurve().exclude(node.ID())
			maxTarget = curve.maxCapacity(func(capacity uint64) int64 {
				return balance.estimatePriority(capacity, 0, 0, bias, false)
			})
			if maxTarget < reqCap {
				return
			}
			maxTarget = reqCap

			// Specify a narrow target range that allows a limited number of fine step
			// iterations
			minTarget = maxTarget - maxTarget/20
			if minTarget < capacity {
				minTarget = capacity
			}
		} else {
			minTarget, maxTarget = reqCap, reqCap
		}
		if newCap := cp.requestCapacity(node, minTarget, maxTarget, bias); newCap >= minTarget && newCap <= maxTarget {
			capacity = newCap
			return
		}
		// we should be able to find the maximum allowed capacity in a few iterations
		log.Error("Unable to find maximum allowed capacity")
		err = ErrCantFindMaximum
	})
	return
}

// serveCapQuery serves a vflux capacity query. It receives multiple token amount values
// and a bias time value. For each given token amount it calculates the maximum achievable
// capacity in case the amount is added to the balance.
func (cp *ClientPool) serveCapQuery(id enode.ID, freeID string, data []byte) []byte {
	var req vflux.CapacityQueryReq
	if rlp.DecodeBytes(data, &req) != nil {
		return nil
	}
	if l := len(req.AddTokens); l == 0 || l > vflux.CapacityQueryMaxLen {
		return nil
	}
	result := make(vflux.CapacityQueryReply, len(req.AddTokens))
	if !cp.synced() {
		capacityQueryZeroMeter.Mark(1)
		reply, _ := rlp.EncodeToBytes(&result)
		return reply
	}

	bias := time.Second * time.Duration(req.Bias)
	cp.lock.RLock()
	if cp.connectedBias > bias {
		bias = cp.connectedBias
	}
	cp.lock.RUnlock()

	// use capacityCurve to answer request for multiple newly bought token amounts
	curve := cp.getCapacityCurve().exclude(id)
	cp.BalanceOperation(id, freeID, func(balance AtomicBalanceOperator) {
		pb, _ := balance.GetBalance()
		for i, addTokens := range req.AddTokens {
			add := addTokens.Int64()
			result[i] = curve.maxCapacity(func(capacity uint64) int64 {
				return balance.estimatePriority(capacity, add, 0, bias, false) / int64(capacity)
			})
			if add <= 0 && uint64(-add) >= pb && result[i] > cp.minCap {
				result[i] = cp.minCap
			}
			if result[i] < cp.minCap {
				result[i] = 0
			}
		}
	})
	// add first result to metrics (don't care about priority client multi-queries yet)
	if result[0] == 0 {
		capacityQueryZeroMeter.Mark(1)
	} else {
		capacityQueryNonZeroMeter.Mark(1)
	}
	reply, _ := rlp.EncodeToBytes(&result)
	return reply
}

// Handle implements Service
func (cp *ClientPool) Handle(id enode.ID, address string, name string, data []byte) []byte {
	switch name {
	case vflux.CapacityQueryName:
		return cp.serveCapQuery(id, address, data)
	default:
		return nil
	}
}
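
// Sketch of the requester side of a capacity query (not part of the pool's
// API), showing the wire format Handle/serveCapQuery expect: an RLP encoded
// vflux.CapacityQueryReq in, an RLP encoded vflux.CapacityQueryReply out. The
// vflux.IntOrInf element type and its SetInt64 setter are assumptions about
// the les/vflux package; id, freeID and the token amount come from the caller.
func sketchCapacityQuery(cp *ClientPool, id enode.ID, freeID string, addTokens int64) uint64 {
	var req vflux.CapacityQueryReq
	req.Bias = 2 // bias in seconds; serveCapQuery converts it to a time.Duration
	req.AddTokens = make([]vflux.IntOrInf, 1)
	req.AddTokens[0].SetInt64(addTokens) // capacity achievable if this amount were added
	data, err := rlp.EncodeToBytes(&req)
	if err != nil {
		return 0
	}
	reply := cp.Handle(id, freeID, vflux.CapacityQueryName, data)
	var result vflux.CapacityQueryReply
	if err := rlp.DecodeBytes(reply, &result); err != nil || len(result) != 1 {
		return 0
	}
	return result[0] // 0 means no capacity is achievable with the given amount
}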