github.com/core-coin/go-core/v2@v2.1.9/les/clientpool.go (about)

     1  // Copyright 2019 by the Authors
     2  // This file is part of the go-core library.
     3  //
     4  // The go-core library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-core library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package les
    18  
    19  import (
    20  	"fmt"
    21  	"sync"
    22  	"time"
    23  
    24  	"github.com/core-coin/go-core/v2/xcbdb"
    25  
    26  	"github.com/core-coin/go-core/v2/common/mclock"
    27  	lps "github.com/core-coin/go-core/v2/les/lespay/server"
    28  	"github.com/core-coin/go-core/v2/les/utils"
    29  	"github.com/core-coin/go-core/v2/log"
    30  	"github.com/core-coin/go-core/v2/p2p/enode"
    31  	"github.com/core-coin/go-core/v2/p2p/enr"
    32  	"github.com/core-coin/go-core/v2/p2p/nodestate"
    33  )
    34  
const (
	// defaultNegExpTC is the default time constant (in seconds) for
	// exponentially reducing the negative balance of disconnected clients.
	defaultNegExpTC = 3600 // default time constant (in seconds) for exponentially reducing negative balance

	// defaultConnectedBias is applied to already connected clients So that
	// already connected client won't be kicked out very soon and we
	// can ensure all connected clients can have enough time to request
	// or sync some data.
	//
	// todo(raisty) make it configurable. It can be the option of
	// free trial time!
	defaultConnectedBias = time.Minute * 3

	// inactiveTimeout is the grace period an inactive, non-priority client
	// is kept around before being dropped (see the InactiveFlag timeout
	// installed in newClientPool).
	inactiveTimeout = time.Second * 10
)
    48  
// clientPool implements a client database that assigns a priority to each client
// based on a positive and negative balance. Positive balance is externally assigned
// to prioritized clients and is decreased with connection time and processed
// requests (unless the price factors are zero). If the positive balance is zero
// then negative balance is accumulated.
//
// Balance tracking and priority calculation for connected clients is done by
// balanceTracker. activeQueue ensures that clients with the lowest positive or
// highest negative balance get evicted when the total capacity allowance is full
// and new clients with a better balance want to connect.
//
// Already connected nodes receive a small bias in their favor in order to avoid
// accepting and instantly kicking out clients. In theory, we try to ensure that
// each client can have several minutes of connection time.
//
// Balances of disconnected clients are stored in nodeDB including positive balance
// and negative balance. Both positive balance and negative balance will decrease
// exponentially. If the balance is low enough, then the record will be dropped.
type clientPool struct {
	lps.BalanceTrackerSetup
	lps.PriorityPoolSetup
	lock       sync.Mutex // protects the mutable fields below
	clock      mclock.Clock
	closed     bool             // set by stop; connect rejects new peers afterwards
	removePeer func(enode.ID)   // callback used to drop a peer from the server
	ns         *nodestate.NodeStateMachine
	pp         *lps.PriorityPool
	bt         *lps.BalanceTracker

	defaultPosFactors, defaultNegFactors lps.PriceFactors // applied to newly connected clients
	posExpTC, negExpTC                   uint64           // balance expiration time constants
	minCap                               uint64           // The minimal capacity value allowed for any client
	connectedBias                        time.Duration    // eviction bias favoring already connected clients
	capLimit                             uint64           // total capacity allowance (see setLimits)
}
    84  
// clientPoolPeer represents a client peer in the pool.
// Positive balances are assigned to node key while negative balances are assigned
// to freeClientId. Currently network IP address without port is used because
// clients have a limited access to IP addresses while new node keys can be easily
// generated so it would be useless to assign a negative value to them.
type clientPoolPeer interface {
	// Node returns the peer's node record.
	Node() *enode.Node
	// freeClientId returns the identifier negative balances are keyed on
	// (per the type comment, the peer's IP address without port).
	freeClientId() string
	// updateCapacity notifies the peer about its newly assigned capacity.
	updateCapacity(uint64)
	// freeze signals the peer that its service is temporarily suspended.
	freeze()
	// allowInactive reports whether the peer accepts being kept in inactive
	// mode instead of being dropped when no active capacity is available.
	allowInactive() bool
}
    97  
// clientInfo defines all information required by clientpool.
type clientInfo struct {
	node                *enode.Node
	address             string         // free client identifier (see clientPoolPeer.freeClientId)
	peer                clientPoolPeer
	connected, priority bool           // priority is kept in sync by the ActiveFlag/PriorityFlag subscription
	connectedAt         mclock.AbsTime // time of the successful connect call
	balance             *lps.NodeBalance
}
   107  
// newClientPool creates a new client pool.
//
// It wires up a BalanceTracker and a PriorityPool on the shared node state
// machine and installs the state/field subscriptions that drive the client
// life cycle (inactive grace timeout, capacity downscaling on priority loss,
// connection metrics and peer removal, capacity change notifications).
func newClientPool(ns *nodestate.NodeStateMachine, lespayDb xcbdb.Database, minCap uint64, connectedBias time.Duration, clock mclock.Clock, removePeer func(enode.ID)) *clientPool {
	pool := &clientPool{
		ns:                  ns,
		BalanceTrackerSetup: balanceTrackerSetup,
		PriorityPoolSetup:   priorityPoolSetup,
		clock:               clock,
		minCap:              minCap,
		connectedBias:       connectedBias,
		removePeer:          removePeer,
	}
	pool.bt = lps.NewBalanceTracker(ns, balanceTrackerSetup, lespayDb, clock, &utils.Expirer{}, &utils.Expirer{})
	pool.pp = lps.NewPriorityPool(ns, priorityPoolSetup, clock, minCap, connectedBias, 4)

	// set default expiration constants used by tests
	// Note: server overwrites this if token sale is active
	pool.bt.SetExpirationTCs(0, defaultNegExpTC)

	// A node that becomes inactive without priority status only gets a grace
	// period of inactiveTimeout; gaining priority while inactive cancels the
	// pending timeout so the node is kept around.
	ns.SubscribeState(pool.InactiveFlag.Or(pool.PriorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
		if newState.Equals(pool.InactiveFlag) {
			ns.AddTimeout(node, pool.InactiveFlag, inactiveTimeout)
		}
		if oldState.Equals(pool.InactiveFlag) && newState.Equals(pool.InactiveFlag.Or(pool.PriorityFlag)) {
			ns.SetStateSub(node, pool.InactiveFlag, nodestate.Flags{}, 0) // remove timeout
		}
	})

	// Mirror the priority flag into clientInfo and, when a client is active
	// but lost priority, request its capacity back down to minCap since any
	// extra allowance is no longer justified.
	ns.SubscribeState(pool.ActiveFlag.Or(pool.PriorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
		c, _ := ns.GetField(node, clientInfoField).(*clientInfo)
		if c == nil {
			return
		}
		c.priority = newState.HasAll(pool.PriorityFlag)
		if newState.Equals(pool.ActiveFlag) {
			cap, _ := ns.GetField(node, pool.CapacityField).(uint64)
			if cap > minCap {
				pool.pp.RequestCapacity(node, minCap, 0, true)
			}
		}
	})

	// Update connection life cycle metrics and remove peers that were
	// deactivated (unless they tolerate inactive mode) or fully disconnected.
	ns.SubscribeState(pool.InactiveFlag.Or(pool.ActiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
		if oldState.IsEmpty() {
			clientConnectedMeter.Mark(1)
			log.Debug("Client connected", "id", node.ID())
		}
		if oldState.Equals(pool.InactiveFlag) && newState.Equals(pool.ActiveFlag) {
			clientActivatedMeter.Mark(1)
			log.Debug("Client activated", "id", node.ID())
		}
		if oldState.Equals(pool.ActiveFlag) && newState.Equals(pool.InactiveFlag) {
			clientDeactivatedMeter.Mark(1)
			log.Debug("Client deactivated", "id", node.ID())
			c, _ := ns.GetField(node, clientInfoField).(*clientInfo)
			if c == nil || !c.peer.allowInactive() {
				pool.removePeer(node.ID())
			}
		}
		if newState.IsEmpty() {
			clientDisconnectedMeter.Mark(1)
			log.Debug("Client disconnected", "id", node.ID())
			pool.removePeer(node.ID())
		}
	})

	// Keep the total connected capacity gauge up to date and notify each peer
	// about changes to its own assigned capacity.
	var totalConnected uint64
	ns.SubscribeField(pool.CapacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
		oldCap, _ := oldValue.(uint64)
		newCap, _ := newValue.(uint64)
		totalConnected += newCap - oldCap // uint64 arithmetic; a decrease wraps and cancels out correctly
		totalConnectedGauge.Update(int64(totalConnected))
		c, _ := ns.GetField(node, clientInfoField).(*clientInfo)
		if c != nil {
			c.peer.updateCapacity(newCap)
		}
	})
	return pool
}
   186  
   187  // stop shuts the client pool down
   188  func (f *clientPool) stop() {
   189  	f.lock.Lock()
   190  	f.closed = true
   191  	f.lock.Unlock()
   192  	f.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
   193  		// enforces saving all balances in BalanceTracker
   194  		f.disconnectNode(node)
   195  	})
   196  	f.bt.Stop()
   197  }
   198  
   199  // connect should be called after a successful handshake. If the connection was
   200  // rejected, there is no need to call disconnect.
   201  func (f *clientPool) connect(peer clientPoolPeer) (uint64, error) {
   202  	f.lock.Lock()
   203  	defer f.lock.Unlock()
   204  
   205  	// Short circuit if clientPool is already closed.
   206  	if f.closed {
   207  		return 0, fmt.Errorf("Client pool is already closed")
   208  	}
   209  	// Dedup connected peers.
   210  	node, freeID := peer.Node(), peer.freeClientId()
   211  	if f.ns.GetField(node, clientInfoField) != nil {
   212  		log.Debug("Client already connected", "address", freeID, "id", node.ID().String())
   213  		return 0, fmt.Errorf("Client already connected address=%s id=%s", freeID, node.ID().String())
   214  	}
   215  	now := f.clock.Now()
   216  	c := &clientInfo{
   217  		node:        node,
   218  		address:     freeID,
   219  		peer:        peer,
   220  		connected:   true,
   221  		connectedAt: now,
   222  	}
   223  	f.ns.SetField(node, clientInfoField, c)
   224  	f.ns.SetField(node, connAddressField, freeID)
   225  	if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*lps.NodeBalance); c.balance == nil {
   226  		f.disconnect(peer)
   227  		return 0, nil
   228  	}
   229  	c.balance.SetPriceFactors(f.defaultPosFactors, f.defaultNegFactors)
   230  
   231  	f.ns.SetState(node, f.InactiveFlag, nodestate.Flags{}, 0)
   232  	var allowed bool
   233  	f.ns.Operation(func() {
   234  		_, allowed = f.pp.RequestCapacity(node, f.minCap, f.connectedBias, true)
   235  	})
   236  	if allowed {
   237  		return f.minCap, nil
   238  	}
   239  	if !peer.allowInactive() {
   240  		f.disconnect(peer)
   241  	}
   242  	return 0, nil
   243  }
   244  
   245  // setConnectedBias sets the connection bias, which is applied to already connected clients
   246  // So that already connected client won't be kicked out very soon and we can ensure all
   247  // connected clients can have enough time to request or sync some data.
   248  func (f *clientPool) setConnectedBias(bias time.Duration) {
   249  	f.lock.Lock()
   250  	defer f.lock.Unlock()
   251  
   252  	f.connectedBias = bias
   253  	f.pp.SetActiveBias(bias)
   254  }
   255  
// disconnect should be called when a connection is terminated. If the disconnection
// was initiated by the pool itself using disconnectFn then calling disconnect is
// not necessary but permitted. It simply detaches the peer's node from the
// pool via disconnectNode.
func (f *clientPool) disconnect(p clientPoolPeer) {
	f.disconnectNode(p.Node())
}
   262  
// disconnectNode removes node fields and flags related to connected status.
// Clearing the fields triggers the nodestate subscriptions installed in
// newClientPool, which take care of metrics and peer removal.
func (f *clientPool) disconnectNode(node *enode.Node) {
	f.ns.SetField(node, connAddressField, nil)
	f.ns.SetField(node, clientInfoField, nil)
}
   268  
   269  // setDefaultFactors sets the default price factors applied to subsequently connected clients
   270  func (f *clientPool) setDefaultFactors(posFactors, negFactors lps.PriceFactors) {
   271  	f.lock.Lock()
   272  	defer f.lock.Unlock()
   273  
   274  	f.defaultPosFactors = posFactors
   275  	f.defaultNegFactors = negFactors
   276  }
   277  
   278  // capacityInfo returns the total capacity allowance, the total capacity of connected
   279  // clients and the total capacity of connected and prioritized clients
   280  func (f *clientPool) capacityInfo() (uint64, uint64, uint64) {
   281  	f.lock.Lock()
   282  	defer f.lock.Unlock()
   283  
   284  	// total priority active cap will be supported when the token issuer module is added
   285  	_, activeCap := f.pp.Active()
   286  	return f.capLimit, activeCap, 0
   287  }
   288  
   289  // setLimits sets the maximum number and total capacity of connected clients,
   290  // dropping some of them if necessary.
   291  func (f *clientPool) setLimits(totalConn int, totalCap uint64) {
   292  	f.lock.Lock()
   293  	defer f.lock.Unlock()
   294  
   295  	f.capLimit = totalCap
   296  	f.pp.SetLimits(uint64(totalConn), totalCap)
   297  }
   298  
// setCapacity sets the assigned capacity of a connected client, or — when
// setCap is false — only queries how much positive balance would be missing
// for the given capacity. Callers are expected to manage f.lock themselves
// (setCapacityLocked is the variant that acquires it).
//
// For a node that is not connected, a query (setCap == false) temporarily
// installs the clientInfo and connAddress fields so BalanceTracker can
// evaluate it, and removes them again before returning. On insufficient
// priority the returned value is the missing positive balance (at least 1)
// together with errNoPriority.
func (f *clientPool) setCapacity(node *enode.Node, freeID string, capacity uint64, bias time.Duration, setCap bool) (uint64, error) {
	c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo)
	if c == nil {
		if setCap {
			// Capacity can only be assigned to connected clients.
			return 0, fmt.Errorf("client %064x is not connected", node.ID())
		}
		// Not connected: set up temporary fields for the balance query.
		c = &clientInfo{node: node}
		f.ns.SetField(node, clientInfoField, c)
		f.ns.SetField(node, connAddressField, freeID)
		if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*lps.NodeBalance); c.balance == nil {
			log.Error("BalanceField is missing", "node", node.ID())
			return 0, fmt.Errorf("BalanceField of %064x is missing", node.ID())
		}
		// Tear the temporary fields down again on any return path.
		defer func() {
			f.ns.SetField(node, connAddressField, nil)
			f.ns.SetField(node, clientInfoField, nil)
		}()
	}
	var (
		minPriority int64
		allowed     bool
	)
	f.ns.Operation(func() {
		if !setCap || c.priority {
			// check clientInfo.priority inside Operation to ensure thread safety
			minPriority, allowed = f.pp.RequestCapacity(node, capacity, bias, setCap)
		}
	})
	if allowed {
		return 0, nil
	}
	missing := c.balance.PosBalanceMissing(minPriority, capacity, bias)
	if missing < 1 {
		// ensure that we never return 0 missing and insufficient priority error
		missing = 1
	}
	return missing, errNoPriority
}
   338  
// setCapacityLocked is the locking variant of setCapacity: it acquires f.lock
// itself before delegating. NOTE(review): despite what the name and the old
// comment suggested, this is the function to call when f.lock is NOT yet
// held; setCapacity itself performs no locking.
func (f *clientPool) setCapacityLocked(node *enode.Node, freeID string, capacity uint64, minConnTime time.Duration, setCap bool) (uint64, error) {
	f.lock.Lock()
	defer f.lock.Unlock()

	return f.setCapacity(node, freeID, capacity, minConnTime, setCap)
}
   346  
// forClients calls the supplied callback for either the listed node IDs or all connected
// nodes. It passes a valid clientInfo to the callback and ensures that the necessary
// fields and flags are set in order for BalanceTracker and PriorityPool to work even if
// the node is not connected.
func (f *clientPool) forClients(ids []enode.ID, cb func(client *clientInfo)) {
	f.lock.Lock()
	defer f.lock.Unlock()

	if len(ids) == 0 {
		// No explicit IDs given: visit every node tracked by the state
		// machine and invoke the callback for those that are connected.
		f.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
			c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo)
			if c != nil {
				cb(c)
			}
		})
	} else {
		for _, id := range ids {
			node := f.ns.GetNode(id)
			if node == nil {
				// Unknown node: fabricate a null-signed record so the state
				// machine can still address it by ID.
				node = enode.SignNull(&enr.Record{}, id)
			}
			c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo)
			if c != nil {
				cb(c)
			} else {
				// Not connected: temporarily install the fields required by
				// BalanceTracker, run the callback, then tear them down.
				c = &clientInfo{node: node}
				f.ns.SetField(node, clientInfoField, c)
				f.ns.SetField(node, connAddressField, "")
				if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*lps.NodeBalance); c.balance != nil {
					cb(c)
				} else {
					log.Error("BalanceField is missing")
				}
				f.ns.SetField(node, connAddressField, nil)
				f.ns.SetField(node, clientInfoField, nil)
			}
		}
	}
}