github.com/kisexp/xdchain@v0.0.0-20211206025815-490d6b732aa7/les/clientpool.go (about)

     1  // Copyright 2019 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package les
    18  
    19  import (
    20  	"fmt"
    21  	"sync"
    22  	"time"
    23  
    24  	"github.com/kisexp/xdchain/common/mclock"
    25  	"github.com/kisexp/xdchain/ethdb"
    26  	lps "github.com/kisexp/xdchain/les/lespay/server"
    27  	"github.com/kisexp/xdchain/les/utils"
    28  	"github.com/kisexp/xdchain/log"
    29  	"github.com/kisexp/xdchain/p2p/enode"
    30  	"github.com/kisexp/xdchain/p2p/enr"
    31  	"github.com/kisexp/xdchain/p2p/nodestate"
    32  )
    33  
const (
	// defaultNegExpTC is the default time constant (in seconds) used when
	// exponentially decaying accumulated negative balance (see
	// SetExpirationTCs in newClientPool).
	defaultNegExpTC = 3600 // default time constant (in seconds) for exponentially reducing negative balance

	// defaultConnectedBias is applied to already connected clients So that
	// already connected client won't be kicked out very soon and we
	// can ensure all connected clients can have enough time to request
	// or sync some data.
	//
	// todo(rjl493456442) make it configurable. It can be the option of
	// free trial time!
	defaultConnectedBias = time.Minute * 3
	// inactiveTimeout is how long a non-priority client may linger in the
	// inactive state before its InactiveFlag times out (see the state
	// subscription in newClientPool).
	inactiveTimeout      = time.Second * 10
)
    47  
// clientPool implements a client database that assigns a priority to each client
// based on a positive and negative balance. Positive balance is externally assigned
// to prioritized clients and is decreased with connection time and processed
// requests (unless the price factors are zero). If the positive balance is zero
// then negative balance is accumulated.
//
// Balance tracking and priority calculation for connected clients is done by
// balanceTracker. activeQueue ensures that clients with the lowest positive or
// highest negative balance get evicted when the total capacity allowance is full
// and new clients with a better balance want to connect.
//
// Already connected nodes receive a small bias in their favor in order to avoid
// accepting and instantly kicking out clients. In theory, we try to ensure that
// each client can have several minutes of connection time.
//
// Balances of disconnected clients are stored in nodeDB including positive balance
// and negative balance. Both positive balance and negative balance will decrease
// exponentially. If the balance is low enough, then the record will be dropped.
type clientPool struct {
	lps.BalanceTrackerSetup
	lps.PriorityPoolSetup
	lock       sync.Mutex     // protects the mutable fields below
	clock      mclock.Clock   // time source (mockable in tests)
	closed     bool           // set by stop; connect rejects peers afterwards
	removePeer func(enode.ID) // callback that drops a peer from the server
	ns         *nodestate.NodeStateMachine
	pp         *lps.PriorityPool
	bt         *lps.BalanceTracker

	// defaults applied to every newly connected client's NodeBalance
	defaultPosFactors, defaultNegFactors lps.PriceFactors
	posExpTC, negExpTC                   uint64
	minCap                               uint64 // The minimal capacity value allowed for any client
	connectedBias                        time.Duration
	capLimit                             uint64 // total capacity allowance reported by capacityInfo
}
    83  
// clientPoolPeer represents a client peer in the pool.
// Positive balances are assigned to node key while negative balances are assigned
// to freeClientId. Currently network IP address without port is used because
// clients have a limited access to IP addresses while new node keys can be easily
// generated so it would be useless to assign a negative value to them.
type clientPoolPeer interface {
	// Node returns the peer's node record.
	Node() *enode.Node
	// freeClientId returns the identifier negative balances are keyed by
	// (an IP-derived string per the comment above).
	freeClientId() string
	// updateCapacity notifies the peer of its newly assigned capacity
	// (invoked from the CapacityField subscription in newClientPool).
	updateCapacity(uint64)
	// freeze is a throttling hook; not called in this file — see callers
	// elsewhere for its exact semantics.
	freeze()
	// allowInactive reports whether the peer tolerates staying connected
	// without active capacity; if false, deactivation removes the peer.
	allowInactive() bool
}
    96  
// clientInfo defines all information required by clientpool.
type clientInfo struct {
	node                *enode.Node    // the client's node record
	address             string         // freeClientId of the peer (negative balance key)
	peer                clientPoolPeer // the connected peer, nil-peer temporaries are used by setCapacity/forClients
	connected, priority bool           // priority mirrors the PriorityFlag state
	connectedAt         mclock.AbsTime // connection timestamp
	balance             *lps.NodeBalance
}
   106  
// newClientPool creates a new client pool
func newClientPool(ns *nodestate.NodeStateMachine, lespayDb ethdb.Database, minCap uint64, connectedBias time.Duration, clock mclock.Clock, removePeer func(enode.ID)) *clientPool {
	pool := &clientPool{
		ns:                  ns,
		BalanceTrackerSetup: balanceTrackerSetup,
		PriorityPoolSetup:   priorityPoolSetup,
		clock:               clock,
		minCap:              minCap,
		connectedBias:       connectedBias,
		removePeer:          removePeer,
	}
	pool.bt = lps.NewBalanceTracker(ns, balanceTrackerSetup, lespayDb, clock, &utils.Expirer{}, &utils.Expirer{})
	pool.pp = lps.NewPriorityPool(ns, priorityPoolSetup, clock, minCap, connectedBias, 4)

	// set default expiration constants used by tests
	// Note: server overwrites this if token sale is active
	pool.bt.SetExpirationTCs(0, defaultNegExpTC)

	// Inactive, non-priority nodes are only tolerated for inactiveTimeout;
	// gaining priority while inactive cancels the pending timeout.
	ns.SubscribeState(pool.InactiveFlag.Or(pool.PriorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
		if newState.Equals(pool.InactiveFlag) {
			ns.AddTimeout(node, pool.InactiveFlag, inactiveTimeout)
		}
		if oldState.Equals(pool.InactiveFlag) && newState.Equals(pool.InactiveFlag.Or(pool.PriorityFlag)) {
			ns.SetStateSub(node, pool.InactiveFlag, nodestate.Flags{}, 0) // remove timeout
		}
	})

	// Track priority transitions and shrink a client that loses priority
	// while holding more than the minimal capacity.
	ns.SubscribeState(pool.ActiveFlag.Or(pool.PriorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
		c, _ := ns.GetField(node, clientInfoField).(*clientInfo)
		if c == nil {
			return
		}
		c.priority = newState.HasAll(pool.PriorityFlag)
		if newState.Equals(pool.ActiveFlag) {
			// active but no longer priority: demote capacity back to minCap
			cap, _ := ns.GetField(node, pool.CapacityField).(uint64)
			if cap > minCap {
				pool.pp.RequestCapacity(node, minCap, 0, true)
			}
		}
	})

	// Metering/logging of connection lifecycle; also removes peers that get
	// deactivated and do not allow the inactive state, or disconnect fully.
	ns.SubscribeState(pool.InactiveFlag.Or(pool.ActiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
		if oldState.IsEmpty() {
			clientConnectedMeter.Mark(1)
			log.Debug("Client connected", "id", node.ID())
		}
		if oldState.Equals(pool.InactiveFlag) && newState.Equals(pool.ActiveFlag) {
			clientActivatedMeter.Mark(1)
			log.Debug("Client activated", "id", node.ID())
		}
		if oldState.Equals(pool.ActiveFlag) && newState.Equals(pool.InactiveFlag) {
			clientDeactivatedMeter.Mark(1)
			log.Debug("Client deactivated", "id", node.ID())
			c, _ := ns.GetField(node, clientInfoField).(*clientInfo)
			if c == nil || !c.peer.allowInactive() {
				pool.removePeer(node.ID())
			}
		}
		if newState.IsEmpty() {
			clientDisconnectedMeter.Mark(1)
			log.Debug("Client disconnected", "id", node.ID())
			pool.removePeer(node.ID())
		}
	})

	// Keep the total-connected-capacity gauge up to date and propagate
	// capacity changes to the peer itself.
	var totalConnected uint64
	ns.SubscribeField(pool.CapacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
		oldCap, _ := oldValue.(uint64)
		newCap, _ := newValue.(uint64)
		// uint64 wraparound makes the decrement case (newCap < oldCap) work
		totalConnected += newCap - oldCap
		totalConnectedGauge.Update(int64(totalConnected))
		c, _ := ns.GetField(node, clientInfoField).(*clientInfo)
		if c != nil {
			c.peer.updateCapacity(newCap)
		}
	})
	return pool
}
   185  
   186  // stop shuts the client pool down
   187  func (f *clientPool) stop() {
   188  	f.lock.Lock()
   189  	f.closed = true
   190  	f.lock.Unlock()
   191  	f.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
   192  		// enforces saving all balances in BalanceTracker
   193  		f.disconnectNode(node)
   194  	})
   195  	f.bt.Stop()
   196  }
   197  
   198  // connect should be called after a successful handshake. If the connection was
   199  // rejected, there is no need to call disconnect.
   200  func (f *clientPool) connect(peer clientPoolPeer) (uint64, error) {
   201  	f.lock.Lock()
   202  	defer f.lock.Unlock()
   203  
   204  	// Short circuit if clientPool is already closed.
   205  	if f.closed {
   206  		return 0, fmt.Errorf("Client pool is already closed")
   207  	}
   208  	// Dedup connected peers.
   209  	node, freeID := peer.Node(), peer.freeClientId()
   210  	if f.ns.GetField(node, clientInfoField) != nil {
   211  		log.Debug("Client already connected", "address", freeID, "id", node.ID().String())
   212  		return 0, fmt.Errorf("Client already connected address=%s id=%s", freeID, node.ID().String())
   213  	}
   214  	now := f.clock.Now()
   215  	c := &clientInfo{
   216  		node:        node,
   217  		address:     freeID,
   218  		peer:        peer,
   219  		connected:   true,
   220  		connectedAt: now,
   221  	}
   222  	f.ns.SetField(node, clientInfoField, c)
   223  	f.ns.SetField(node, connAddressField, freeID)
   224  	if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*lps.NodeBalance); c.balance == nil {
   225  		f.disconnect(peer)
   226  		return 0, nil
   227  	}
   228  	c.balance.SetPriceFactors(f.defaultPosFactors, f.defaultNegFactors)
   229  
   230  	f.ns.SetState(node, f.InactiveFlag, nodestate.Flags{}, 0)
   231  	var allowed bool
   232  	f.ns.Operation(func() {
   233  		_, allowed = f.pp.RequestCapacity(node, f.minCap, f.connectedBias, true)
   234  	})
   235  	if allowed {
   236  		return f.minCap, nil
   237  	}
   238  	if !peer.allowInactive() {
   239  		f.disconnect(peer)
   240  	}
   241  	return 0, nil
   242  }
   243  
   244  // setConnectedBias sets the connection bias, which is applied to already connected clients
   245  // So that already connected client won't be kicked out very soon and we can ensure all
   246  // connected clients can have enough time to request or sync some data.
   247  func (f *clientPool) setConnectedBias(bias time.Duration) {
   248  	f.lock.Lock()
   249  	defer f.lock.Unlock()
   250  
   251  	f.connectedBias = bias
   252  	f.pp.SetActiveBias(bias)
   253  }
   254  
   255  // disconnect should be called when a connection is terminated. If the disconnection
   256  // was initiated by the pool itself using disconnectFn then calling disconnect is
   257  // not necessary but permitted.
   258  func (f *clientPool) disconnect(p clientPoolPeer) {
   259  	f.disconnectNode(p.Node())
   260  }
   261  
// disconnectNode removes node fields and flags related to connected status
func (f *clientPool) disconnectNode(node *enode.Node) {
	// Clearing these fields fires the node state machine subscriptions set up
	// in newClientPool. NOTE(review): the order (address first, then client
	// info) is preserved as-is — callbacks may depend on it; confirm before
	// reordering.
	f.ns.SetField(node, connAddressField, nil)
	f.ns.SetField(node, clientInfoField, nil)
}
   267  
   268  // setDefaultFactors sets the default price factors applied to subsequently connected clients
   269  func (f *clientPool) setDefaultFactors(posFactors, negFactors lps.PriceFactors) {
   270  	f.lock.Lock()
   271  	defer f.lock.Unlock()
   272  
   273  	f.defaultPosFactors = posFactors
   274  	f.defaultNegFactors = negFactors
   275  }
   276  
   277  // capacityInfo returns the total capacity allowance, the total capacity of connected
   278  // clients and the total capacity of connected and prioritized clients
   279  func (f *clientPool) capacityInfo() (uint64, uint64, uint64) {
   280  	f.lock.Lock()
   281  	defer f.lock.Unlock()
   282  
   283  	// total priority active cap will be supported when the token issuer module is added
   284  	_, activeCap := f.pp.Active()
   285  	return f.capLimit, activeCap, 0
   286  }
   287  
   288  // setLimits sets the maximum number and total capacity of connected clients,
   289  // dropping some of them if necessary.
   290  func (f *clientPool) setLimits(totalConn int, totalCap uint64) {
   291  	f.lock.Lock()
   292  	defer f.lock.Unlock()
   293  
   294  	f.capLimit = totalCap
   295  	f.pp.SetLimits(uint64(totalConn), totalCap)
   296  }
   297  
// setCapacity sets the assigned capacity of a connected client
//
// When setCap is false the call is a dry run: it only computes whether the
// requested capacity would be granted and, if not, how much positive balance
// is missing. In that case a disconnected node is temporarily registered
// (fields set and cleared via defer) so BalanceTracker/PriorityPool can
// evaluate it. Returns (0, nil) on success, or (missing balance, errNoPriority)
// when the client's priority is insufficient. The caller must hold f.lock.
func (f *clientPool) setCapacity(node *enode.Node, freeID string, capacity uint64, bias time.Duration, setCap bool) (uint64, error) {
	c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo)
	if c == nil {
		if setCap {
			// Capacity can only actually be set for connected clients.
			return 0, fmt.Errorf("client %064x is not connected", node.ID())
		}
		// Dry run for a disconnected node: register it temporarily.
		c = &clientInfo{node: node}
		f.ns.SetField(node, clientInfoField, c)
		f.ns.SetField(node, connAddressField, freeID)
		if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*lps.NodeBalance); c.balance == nil {
			log.Error("BalanceField is missing", "node", node.ID())
			return 0, fmt.Errorf("BalanceField of %064x is missing", node.ID())
		}
		// Undo the temporary registration when done.
		defer func() {
			f.ns.SetField(node, connAddressField, nil)
			f.ns.SetField(node, clientInfoField, nil)
		}()
	}
	var (
		minPriority int64
		allowed     bool
	)
	f.ns.Operation(func() {
		if !setCap || c.priority {
			// check clientInfo.priority inside Operation to ensure thread safety
			minPriority, allowed = f.pp.RequestCapacity(node, capacity, bias, setCap)
		}
	})
	if allowed {
		return 0, nil
	}
	// Not allowed: report how much positive balance would be needed.
	missing := c.balance.PosBalanceMissing(minPriority, capacity, bias)
	if missing < 1 {
		// ensure that we never return 0 missing and insufficient priority error
		missing = 1
	}
	return missing, errNoPriority
}
   337  
// setCapacityLocked is the locking wrapper around setCapacity for callers that
// do NOT already hold f.lock: it acquires the lock and then delegates.
// NOTE(review): the previous comment claimed the opposite ("used when f.lock
// is already locked"), which contradicts the body below.
func (f *clientPool) setCapacityLocked(node *enode.Node, freeID string, capacity uint64, minConnTime time.Duration, setCap bool) (uint64, error) {
	f.lock.Lock()
	defer f.lock.Unlock()

	return f.setCapacity(node, freeID, capacity, minConnTime, setCap)
}
   345  
// forClients calls the supplied callback for either the listed node IDs or all connected
// nodes. It passes a valid clientInfo to the callback and ensures that the necessary
// fields and flags are set in order for BalanceTracker and PriorityPool to work even if
// the node is not connected.
func (f *clientPool) forClients(ids []enode.ID, cb func(client *clientInfo)) {
	f.lock.Lock()
	defer f.lock.Unlock()

	if len(ids) == 0 {
		// No explicit IDs: iterate every node the state machine tracks and
		// invoke the callback for those that are actually connected.
		f.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
			c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo)
			if c != nil {
				cb(c)
			}
		})
	} else {
		for _, id := range ids {
			node := f.ns.GetNode(id)
			if node == nil {
				// Unknown node: synthesize a record signed with the null
				// scheme so the state machine can still address it by ID.
				node = enode.SignNull(&enr.Record{}, id)
			}
			c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo)
			if c != nil {
				cb(c)
			} else {
				// Disconnected node: register it temporarily (empty conn
				// address) so BalanceTracker attaches a NodeBalance, run the
				// callback, then tear the temporary fields down again.
				c = &clientInfo{node: node}
				f.ns.SetField(node, clientInfoField, c)
				f.ns.SetField(node, connAddressField, "")
				if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*lps.NodeBalance); c.balance != nil {
					cb(c)
				} else {
					log.Error("BalanceField is missing")
				}
				f.ns.SetField(node, connAddressField, nil)
				f.ns.SetField(node, clientInfoField, nil)
			}
		}
	}
}