github.com/aswedchain/aswed@v1.0.1/les/clientpool.go (about)

     1  // Copyright 2019 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package les
    18  
import (
	"errors"
	"fmt"
	"reflect"
	"sync"
	"time"

	"github.com/aswedchain/aswed/common/mclock"
	"github.com/aswedchain/aswed/ethdb"
	lps "github.com/aswedchain/aswed/les/lespay/server"
	"github.com/aswedchain/aswed/les/utils"
	"github.com/aswedchain/aswed/log"
	"github.com/aswedchain/aswed/p2p/enode"
	"github.com/aswedchain/aswed/p2p/enr"
	"github.com/aswedchain/aswed/p2p/nodestate"
)
    34  
const (
	defaultNegExpTC = 3600 // default time constant (in seconds) for exponentially reducing negative balance

	// defaultConnectedBias is applied to already connected clients so that
	// already connected clients won't be kicked out very soon and we
	// can ensure all connected clients can have enough time to request
	// or sync some data.
	//
	// todo(rjl493456442) make it configurable. It can be the option of
	// free trial time!
	defaultConnectedBias = time.Minute * 3
	// inactiveTimeout is how long a client may stay in the inactive state
	// (without the priority flag) before it is disconnected.
	inactiveTimeout = time.Second * 10
)
    48  
var (
	// clientPoolSetup is the shared flag/field registry used by the pool's
	// node state machine; the balance tracker and priority pool register
	// their own flags and fields on top of it.
	clientPoolSetup     = &nodestate.Setup{}
	clientField         = clientPoolSetup.NewField("clientInfo", reflect.TypeOf(&clientInfo{})) // per-node *clientInfo
	connAddressField    = clientPoolSetup.NewField("connAddr", reflect.TypeOf(""))              // per-node free client ID string
	balanceTrackerSetup = lps.NewBalanceTrackerSetup(clientPoolSetup)
	priorityPoolSetup   = lps.NewPriorityPoolSetup(clientPoolSetup)
)
    56  
// init cross-wires the two sub-setups: the balance tracker is connected to the
// connection address and capacity fields, and the priority pool is connected to
// the balance field and update flag so that node balances drive priorities.
func init() {
	balanceTrackerSetup.Connect(connAddressField, priorityPoolSetup.CapacityField)
	priorityPoolSetup.Connect(balanceTrackerSetup.BalanceField, balanceTrackerSetup.UpdateFlag) // NodeBalance implements nodePriority
}
    61  
// clientPool implements a client database that assigns a priority to each client
// based on a positive and negative balance. Positive balance is externally assigned
// to prioritized clients and is decreased with connection time and processed
// requests (unless the price factors are zero). If the positive balance is zero
// then negative balance is accumulated.
//
// Balance tracking and priority calculation for connected clients is done by
// balanceTracker. activeQueue ensures that clients with the lowest positive or
// highest negative balance get evicted when the total capacity allowance is full
// and new clients with a better balance want to connect.
//
// Already connected nodes receive a small bias in their favor in order to avoid
// accepting and instantly kicking out clients. In theory, we try to ensure that
// each client can have several minutes of connection time.
//
// Balances of disconnected clients are stored in nodeDB including positive balance
// and negative balance. Both positive balance and negative balance will decrease
// exponentially. If the balance is low enough, then the record will be dropped.
type clientPool struct {
	lps.BalanceTrackerSetup
	lps.PriorityPoolSetup
	lock       sync.Mutex   // protects the mutable fields below
	clock      mclock.Clock // time source (mockable for tests)
	closed     bool         // set by stop; connect rejects new peers afterwards
	removePeer func(enode.ID)
	ns         *nodestate.NodeStateMachine
	pp         *lps.PriorityPool
	bt         *lps.BalanceTracker

	defaultPosFactors, defaultNegFactors lps.PriceFactors // price factors applied to newly connected clients
	posExpTC, negExpTC                   uint64
	minCap                               uint64 // The minimal capacity value allowed for any client
	connectedBias                        time.Duration
	capLimit                             uint64 // total capacity allowance reported by capacityInfo
}
    97  
// clientPoolPeer represents a client peer in the pool.
// Positive balances are assigned to node key while negative balances are assigned
// to freeClientId. Currently network IP address without port is used because
// clients have a limited access to IP addresses while new node keys can be easily
// generated so it would be useless to assign a negative value to them.
type clientPoolPeer interface {
	// Node returns the peer's node record.
	Node() *enode.Node
	// freeClientId returns the identifier used for negative balance accounting.
	freeClientId() string
	// updateCapacity notifies the peer about its newly assigned capacity.
	updateCapacity(uint64)
	// freeze notifies the peer of a service suspension — see the peer
	// implementation for the exact semantics.
	freeze()
	// allowInactive reports whether the peer tolerates staying connected in
	// the inactive (zero capacity) state.
	allowInactive() bool
}
   110  
// clientInfo defines all information required by clientpool.
type clientInfo struct {
	node                *enode.Node      // the client's node record
	address             string           // free client ID (set from peer.freeClientId on connect)
	peer                clientPoolPeer   // handle to the underlying peer
	connected, priority bool             // connected is set on connect; priority mirrors the PriorityFlag
	connectedAt         mclock.AbsTime   // time the connection was established
	balance             *lps.NodeBalance // balance object attached by the BalanceTracker
}
   120  
// newClientPool creates a new client pool.
//
// It wires up the node state machine, the balance tracker and the priority pool,
// and registers the state/field subscriptions that implement the pool's
// lifecycle: activation, deactivation, eviction timeouts, metrics and capacity
// propagation. removePeer is invoked (possibly from state machine callbacks)
// when a peer must be dropped from the server.
func newClientPool(lespayDb ethdb.Database, minCap uint64, connectedBias time.Duration, clock mclock.Clock, removePeer func(enode.ID)) *clientPool {
	ns := nodestate.NewNodeStateMachine(nil, nil, clock, clientPoolSetup)
	pool := &clientPool{
		ns:                  ns,
		BalanceTrackerSetup: balanceTrackerSetup,
		PriorityPoolSetup:   priorityPoolSetup,
		clock:               clock,
		minCap:              minCap,
		connectedBias:       connectedBias,
		removePeer:          removePeer,
	}
	pool.bt = lps.NewBalanceTracker(ns, balanceTrackerSetup, lespayDb, clock, &utils.Expirer{}, &utils.Expirer{})
	pool.pp = lps.NewPriorityPool(ns, priorityPoolSetup, clock, minCap, connectedBias, 4)

	// set default expiration constants used by tests
	// Note: server overwrites this if token sale is active
	pool.bt.SetExpirationTCs(0, defaultNegExpTC)

	// Inactive non-priority clients get an eviction timeout; gaining the
	// priority flag while inactive cancels the pending timeout again.
	ns.SubscribeState(pool.InactiveFlag.Or(pool.PriorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
		if newState.Equals(pool.InactiveFlag) {
			ns.AddTimeout(node, pool.InactiveFlag, inactiveTimeout)
		}
		if oldState.Equals(pool.InactiveFlag) && newState.Equals(pool.InactiveFlag.Or(pool.PriorityFlag)) {
			ns.SetStateSub(node, pool.InactiveFlag, nodestate.Flags{}, 0) // remove timeout
		}
	})

	// Mirror the priority flag onto clientInfo and, when an active client has
	// lost priority, shrink any capacity above the minimum back to minCap.
	ns.SubscribeState(pool.ActiveFlag.Or(pool.PriorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
		c, _ := ns.GetField(node, clientField).(*clientInfo)
		if c == nil {
			return
		}
		c.priority = newState.HasAll(pool.PriorityFlag)
		if newState.Equals(pool.ActiveFlag) {
			cap, _ := ns.GetField(node, pool.CapacityField).(uint64)
			if cap > minCap {
				pool.pp.RequestCapacity(node, minCap, 0, true)
			}
		}
	})

	// Maintain connection lifecycle metrics and drop peers that fully
	// disconnect or are deactivated without tolerating the inactive state.
	ns.SubscribeState(pool.InactiveFlag.Or(pool.ActiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
		if oldState.IsEmpty() {
			clientConnectedMeter.Mark(1)
			log.Debug("Client connected", "id", node.ID())
		}
		if oldState.Equals(pool.InactiveFlag) && newState.Equals(pool.ActiveFlag) {
			clientActivatedMeter.Mark(1)
			log.Debug("Client activated", "id", node.ID())
		}
		if oldState.Equals(pool.ActiveFlag) && newState.Equals(pool.InactiveFlag) {
			clientDeactivatedMeter.Mark(1)
			log.Debug("Client deactivated", "id", node.ID())
			c, _ := ns.GetField(node, clientField).(*clientInfo)
			if c == nil || !c.peer.allowInactive() {
				pool.removePeer(node.ID())
			}
		}
		if newState.IsEmpty() {
			clientDisconnectedMeter.Mark(1)
			log.Debug("Client disconnected", "id", node.ID())
			pool.removePeer(node.ID())
		}
	})

	// Keep the aggregate connected-capacity gauge up to date and propagate
	// capacity changes to the peer object.
	var totalConnected uint64
	ns.SubscribeField(pool.CapacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
		oldCap, _ := oldValue.(uint64)
		newCap, _ := newValue.(uint64)
		totalConnected += newCap - oldCap
		totalConnectedGauge.Update(int64(totalConnected))
		c, _ := ns.GetField(node, clientField).(*clientInfo)
		if c != nil {
			c.peer.updateCapacity(newCap)
		}
	})

	ns.Start()
	return pool
}
   202  
   203  // stop shuts the client pool down
   204  func (f *clientPool) stop() {
   205  	f.lock.Lock()
   206  	f.closed = true
   207  	f.lock.Unlock()
   208  	f.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
   209  		// enforces saving all balances in BalanceTracker
   210  		f.disconnectNode(node)
   211  	})
   212  	f.bt.Stop()
   213  	f.ns.Stop()
   214  }
   215  
   216  // connect should be called after a successful handshake. If the connection was
   217  // rejected, there is no need to call disconnect.
   218  func (f *clientPool) connect(peer clientPoolPeer) (uint64, error) {
   219  	f.lock.Lock()
   220  	defer f.lock.Unlock()
   221  
   222  	// Short circuit if clientPool is already closed.
   223  	if f.closed {
   224  		return 0, fmt.Errorf("Client pool is already closed")
   225  	}
   226  	// Dedup connected peers.
   227  	node, freeID := peer.Node(), peer.freeClientId()
   228  	if f.ns.GetField(node, clientField) != nil {
   229  		log.Debug("Client already connected", "address", freeID, "id", node.ID().String())
   230  		return 0, fmt.Errorf("Client already connected address=%s id=%s", freeID, node.ID().String())
   231  	}
   232  	now := f.clock.Now()
   233  	c := &clientInfo{
   234  		node:        node,
   235  		address:     freeID,
   236  		peer:        peer,
   237  		connected:   true,
   238  		connectedAt: now,
   239  	}
   240  	f.ns.SetField(node, clientField, c)
   241  	f.ns.SetField(node, connAddressField, freeID)
   242  	if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*lps.NodeBalance); c.balance == nil {
   243  		f.disconnect(peer)
   244  		return 0, nil
   245  	}
   246  	c.balance.SetPriceFactors(f.defaultPosFactors, f.defaultNegFactors)
   247  
   248  	f.ns.SetState(node, f.InactiveFlag, nodestate.Flags{}, 0)
   249  	var allowed bool
   250  	f.ns.Operation(func() {
   251  		_, allowed = f.pp.RequestCapacity(node, f.minCap, f.connectedBias, true)
   252  	})
   253  	if allowed {
   254  		return f.minCap, nil
   255  	}
   256  	if !peer.allowInactive() {
   257  		f.disconnect(peer)
   258  	}
   259  	return 0, nil
   260  }
   261  
   262  // setConnectedBias sets the connection bias, which is applied to already connected clients
   263  // So that already connected client won't be kicked out very soon and we can ensure all
   264  // connected clients can have enough time to request or sync some data.
   265  func (f *clientPool) setConnectedBias(bias time.Duration) {
   266  	f.lock.Lock()
   267  	defer f.lock.Unlock()
   268  
   269  	f.connectedBias = bias
   270  	f.pp.SetActiveBias(bias)
   271  }
   272  
   273  // disconnect should be called when a connection is terminated. If the disconnection
   274  // was initiated by the pool itself using disconnectFn then calling disconnect is
   275  // not necessary but permitted.
   276  func (f *clientPool) disconnect(p clientPoolPeer) {
   277  	f.disconnectNode(p.Node())
   278  }
   279  
// disconnectNode removes node fields and flags related to connected status.
// Clearing connAddressField first detaches the balance tracking for the
// connection address; clearing clientField then releases the client record.
func (f *clientPool) disconnectNode(node *enode.Node) {
	f.ns.SetField(node, connAddressField, nil)
	f.ns.SetField(node, clientField, nil)
}
   285  
   286  // setDefaultFactors sets the default price factors applied to subsequently connected clients
   287  func (f *clientPool) setDefaultFactors(posFactors, negFactors lps.PriceFactors) {
   288  	f.lock.Lock()
   289  	defer f.lock.Unlock()
   290  
   291  	f.defaultPosFactors = posFactors
   292  	f.defaultNegFactors = negFactors
   293  }
   294  
   295  // capacityInfo returns the total capacity allowance, the total capacity of connected
   296  // clients and the total capacity of connected and prioritized clients
   297  func (f *clientPool) capacityInfo() (uint64, uint64, uint64) {
   298  	f.lock.Lock()
   299  	defer f.lock.Unlock()
   300  
   301  	// total priority active cap will be supported when the token issuer module is added
   302  	return f.capLimit, f.pp.ActiveCapacity(), 0
   303  }
   304  
   305  // setLimits sets the maximum number and total capacity of connected clients,
   306  // dropping some of them if necessary.
   307  func (f *clientPool) setLimits(totalConn int, totalCap uint64) {
   308  	f.lock.Lock()
   309  	defer f.lock.Unlock()
   310  
   311  	f.capLimit = totalCap
   312  	f.pp.SetLimits(uint64(totalConn), totalCap)
   313  }
   314  
// setCapacity sets the assigned capacity of a connected client. If setCap is
// false the capacity is only evaluated, not applied (RequestCapacity is called
// with setCap=false).
//
// If the node is not connected and setCap is false, temporary client fields are
// installed so that the balance tracker and priority pool can evaluate the
// request; they are removed again before returning.
//
// Returns (0, nil) when the request is allowed; otherwise the amount of
// positive balance missing (at least 1) together with errNoPriority.
func (f *clientPool) setCapacity(node *enode.Node, freeID string, capacity uint64, bias time.Duration, setCap bool) (uint64, error) {
	c, _ := f.ns.GetField(node, clientField).(*clientInfo)
	if c == nil {
		if setCap {
			return 0, fmt.Errorf("client %064x is not connected", node.ID())
		}
		// Install temporary fields for the evaluation; the deferred func
		// removes them again on any return path below.
		c = &clientInfo{node: node}
		f.ns.SetField(node, clientField, c)
		f.ns.SetField(node, connAddressField, freeID)
		if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*lps.NodeBalance); c.balance == nil {
			log.Error("BalanceField is missing", "node", node.ID())
			return 0, fmt.Errorf("BalanceField of %064x is missing", node.ID())
		}
		defer func() {
			f.ns.SetField(node, connAddressField, nil)
			f.ns.SetField(node, clientField, nil)
		}()
	}
	var (
		minPriority int64
		allowed     bool
	)
	f.ns.Operation(func() {
		if !setCap || c.priority {
			// check clientInfo.priority inside Operation to ensure thread safety
			minPriority, allowed = f.pp.RequestCapacity(node, capacity, bias, setCap)
		}
	})
	if allowed {
		return 0, nil
	}
	missing := c.balance.PosBalanceMissing(minPriority, capacity, bias)
	if missing < 1 {
		// ensure that we never return 0 missing and insufficient priority error
		missing = 1
	}
	return missing, errNoPriority
}
   354  
// setCapacityLocked is the locking variant of setCapacity: it acquires f.lock
// before delegating, so it must be used when the caller does NOT already hold
// the lock. (NOTE(review): the previous comment had this inverted — despite
// the name, it is setCapacity that expects the lock to be held already.)
func (f *clientPool) setCapacityLocked(node *enode.Node, freeID string, capacity uint64, minConnTime time.Duration, setCap bool) (uint64, error) {
	f.lock.Lock()
	defer f.lock.Unlock()

	return f.setCapacity(node, freeID, capacity, minConnTime, setCap)
}
   362  
// forClients calls the supplied callback for either the listed node IDs or all connected
// nodes. It passes a valid clientInfo to the callback and ensures that the necessary
// fields and flags are set in order for BalanceTracker and PriorityPool to work even if
// the node is not connected.
func (f *clientPool) forClients(ids []enode.ID, cb func(client *clientInfo)) {
	f.lock.Lock()
	defer f.lock.Unlock()

	if len(ids) == 0 {
		// No explicit IDs: visit every node currently tracked by the state machine.
		f.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
			c, _ := f.ns.GetField(node, clientField).(*clientInfo)
			if c != nil {
				cb(c)
			}
		})
	} else {
		for _, id := range ids {
			node := f.ns.GetNode(id)
			if node == nil {
				// Unknown node: create a placeholder record signed with a null
				// key so that per-node fields can still be attached.
				node = enode.SignNull(&enr.Record{}, id)
			}
			c, _ := f.ns.GetField(node, clientField).(*clientInfo)
			if c != nil {
				cb(c)
			} else {
				// Not connected: install temporary fields so the balance
				// tracker attaches a NodeBalance, run the callback, then
				// remove the fields again.
				c = &clientInfo{node: node}
				f.ns.SetField(node, clientField, c)
				f.ns.SetField(node, connAddressField, "")
				if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*lps.NodeBalance); c.balance != nil {
					cb(c)
				} else {
					log.Error("BalanceField is missing")
				}
				f.ns.SetField(node, connAddressField, nil)
				f.ns.SetField(node, clientField, nil)
			}
		}
	}
}