github.com/phillinzzz/newBsc@v1.1.6/les/vflux/server/clientpool_test.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package server

import (
	"fmt"
	"math/rand"
	"testing"
	"time"

	"github.com/phillinzzz/newBsc/common/mclock"
	"github.com/phillinzzz/newBsc/core/rawdb"
	"github.com/phillinzzz/newBsc/p2p/enode"
	"github.com/phillinzzz/newBsc/p2p/enr"
	"github.com/phillinzzz/newBsc/p2p/nodestate"
)

const defaultConnectedBias = time.Minute * 3

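// The test names encode the parameters passed to testClientPool: L is the active peer
// limit, C is the number of simulated clients and P is the number of clients that are
// given a positive (paid) balance; Free means no paid clients.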
func TestClientPoolL10C100Free(t *testing.T) {
	testClientPool(t, 10, 100, 0, true)
}

func TestClientPoolL40C200Free(t *testing.T) {
	testClientPool(t, 40, 200, 0, true)
}

func TestClientPoolL100C300Free(t *testing.T) {
	testClientPool(t, 100, 300, 0, true)
}

func TestClientPoolL10C100P4(t *testing.T) {
	testClientPool(t, 10, 100, 4, false)
}

func TestClientPoolL40C200P30(t *testing.T) {
	testClientPool(t, 40, 200, 30, false)
}

func TestClientPoolL100C300P20(t *testing.T) {
	testClientPool(t, 100, 300, 20, false)
}

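// testClientPoolTicks is the number of simulated one-second ticks per testClientPool run.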
const testClientPoolTicks = 100000

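// poolTestPeer is a minimal peer implementation used to exercise the client pool in tests.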
type poolTestPeer struct {
	node            *enode.Node
	index           int
	disconnCh       chan int
	cap             uint64
	inactiveAllowed bool
}

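// newPoolTestPeer creates a test peer whose node ID encodes the peer index in its first two bytes.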
func newPoolTestPeer(i int, disconnCh chan int) *poolTestPeer {
	return &poolTestPeer{
		index:     i,
		disconnCh: disconnCh,
		node:      enode.SignNull(&enr.Record{}, enode.ID{byte(i % 256), byte(i >> 8)}),
	}
}

func (i *poolTestPeer) Node() *enode.Node {
	return i.node
}

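// FreeClientId returns a unique pseudo network address identifying the peer as a free client.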
func (i *poolTestPeer) FreeClientId() string {
	return fmt.Sprintf("addr #%d", i.index)
}

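// InactiveAllowance returns how long the peer may stay inactive before the pool drops it.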
func (i *poolTestPeer) InactiveAllowance() time.Duration {
	if i.inactiveAllowed {
		return time.Second * 10
	}
	return 0
}

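// UpdateCapacity records the latest capacity assigned by the pool; the requested flag is ignored.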
func (i *poolTestPeer) UpdateCapacity(capacity uint64, requested bool) {
	i.cap = capacity
}

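// Disconnect reports a pool-initiated disconnection by sending the peer index on disconnCh, if set.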
func (i *poolTestPeer) Disconnect() {
	if i.disconnCh == nil {
		return
	}
	id := i.node.ID()
	i.disconnCh <- int(id[0]) + int(id[1])<<8
}

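// getBalance returns the current positive and negative balance of the given peer.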
func getBalance(pool *ClientPool, p *poolTestPeer) (pos, neg uint64) {
	pool.BalanceOperation(p.node.ID(), p.FreeClientId(), func(nb AtomicBalanceOperator) {
		pos, neg = nb.GetBalance()
	})
	return
}

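// addBalance adjusts the token balance of the given node by amount; a negative amount removes balance.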
func addBalance(pool *ClientPool, id enode.ID, amount int64) {
	pool.BalanceOperation(id, "", func(nb AtomicBalanceOperator) {
		nb.AddBalance(amount)
	})
}

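// checkDiff reports whether a and b differ by more than roughly 0.1% of their average (at least 1).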
func checkDiff(a, b uint64) bool {
	maxDiff := (a + b) / 2000
	if maxDiff < 1 {
		maxDiff = 1
	}
	return a > b+maxDiff || b > a+maxDiff
}

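// connect registers the peer with the pool and returns the granted capacity; zero means it was rejected.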
func connect(pool *ClientPool, peer *poolTestPeer) uint64 {
	pool.Register(peer)
	return peer.cap
}

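// disconnect unregisters the peer from the pool.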
func disconnect(pool *ClientPool, peer *poolTestPeer) {
	pool.Unregister(peer)
}

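// alwaysTrueFn always returns true; the tests pass it as the last argument to NewClientPool.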
func alwaysTrueFn() bool {
	return true
}

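// testClientPool simulates testClientPoolTicks seconds of pool operation with clientCount
// clients competing for activeLimit active slots. If paidCount > 0, that many clients are
// given a positive balance partway through the run; if randomDisconnect is true, connected
// peers are also disconnected at random. At the end the total connected time of each client
// is checked against the expected range.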
func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, randomDisconnect bool) {
	rand.Seed(time.Now().UnixNano())
	var (
		clock     mclock.Simulated
		db        = rawdb.NewMemoryDatabase()
		connected = make([]bool, clientCount)
		connTicks = make([]int, clientCount)
		disconnCh = make(chan int, clientCount)
		pool      = NewClientPool(db, 1, 0, &clock, alwaysTrueFn)
	)
	pool.Start()
	pool.SetExpirationTCs(0, 1000)

	pool.SetLimits(uint64(activeLimit), uint64(activeLimit))
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	// pool should accept new peers up to its connected limit
	for i := 0; i < activeLimit; i++ {
		if cap := connect(pool, newPoolTestPeer(i, disconnCh)); cap != 0 {
			connected[i] = true
		} else {
			t.Fatalf("Test peer #%d rejected", i)
		}
	}
	// randomly connect and disconnect peers; expect similar total connection times at the end
	for tickCounter := 0; tickCounter < testClientPoolTicks; tickCounter++ {
		clock.Run(1 * time.Second)

		if tickCounter == testClientPoolTicks/4 {
			// give a positive balance to some of the peers
			amount := testClientPoolTicks / 2 * int64(time.Second) // enough for half of the simulation period
			for i := 0; i < paidCount; i++ {
				addBalance(pool, newPoolTestPeer(i, disconnCh).node.ID(), amount)
			}
		}

		i := rand.Intn(clientCount)
		if connected[i] {
			if randomDisconnect {
				disconnect(pool, newPoolTestPeer(i, disconnCh))
				connected[i] = false
				connTicks[i] += tickCounter
			}
		} else {
			if cap := connect(pool, newPoolTestPeer(i, disconnCh)); cap != 0 {
				connected[i] = true
				connTicks[i] -= tickCounter
			} else {
				disconnect(pool, newPoolTestPeer(i, disconnCh))
			}
		}
	pollDisconnects:
		for {
			select {
			case i := <-disconnCh:
				disconnect(pool, newPoolTestPeer(i, disconnCh))
				if connected[i] {
					connTicks[i] += tickCounter
					connected[i] = false
				}
			default:
				break pollDisconnects
			}
		}
	}

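	// Expected connection time: during the first half of the simulation all clients compete
	// equally for the active slots; during the second half the paid clients stay connected,
	// so free clients only share the remaining slots.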
	expTicks := testClientPoolTicks/2*activeLimit/clientCount + testClientPoolTicks/2*(activeLimit-paidCount)/(clientCount-paidCount)
	expMin := expTicks - expTicks/5
	expMax := expTicks + expTicks/5
	paidTicks := testClientPoolTicks/2*activeLimit/clientCount + testClientPoolTicks/2
	paidMin := paidTicks - paidTicks/5
	paidMax := paidTicks + paidTicks/5

	// check that the total connected time of each peer is within the expected range
	for i, c := range connected {
		if c {
			connTicks[i] += testClientPoolTicks
		}
		min, max := expMin, expMax
		if i < paidCount {
			// expect a higher amount for clients with a positive balance
			min, max = paidMin, paidMax
		}
		if connTicks[i] < min || connTicks[i] > max {
			t.Errorf("Total connected time of test node #%d (%d) outside expected range (%d to %d)", i, connTicks[i], min, max)
		}
	}
	pool.Stop()
}

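// testPriorityConnect connects a peer and tries to raise its capacity to cap, asserting
// that the operation succeeds or fails according to expSuccess.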
func testPriorityConnect(t *testing.T, pool *ClientPool, p *poolTestPeer, cap uint64, expSuccess bool) {
	if cap := connect(pool, p); cap == 0 {
		if expSuccess {
			t.Fatalf("Failed to connect paid client")
		} else {
			return
		}
	}
	if newCap, _ := pool.SetCapacity(p.node, cap, defaultConnectedBias, true); newCap != cap {
		if expSuccess {
			t.Fatalf("Failed to raise capacity of paid client")
		} else {
			return
		}
	}
	if !expSuccess {
		t.Fatalf("Should reject high capacity paid client")
	}
}

func TestConnectPaidClient(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10))
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	// Add balance for an external client and mark it as a paid client
	addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))
	testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 10, true)
}

func TestConnectPaidClientToSmallPool(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	// Add balance for an external client and mark it as a paid client
	addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))

	// connect a paid client whose requested capacity exceeds the pool's total capacity; it should be rejected.
	testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 100, false)
}

func TestConnectPaidClientToFullPool(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	for i := 0; i < 10; i++ {
		addBalance(pool, newPoolTestPeer(i, nil).node.ID(), int64(time.Second*20))
		connect(pool, newPoolTestPeer(i, nil))
	}
	addBalance(pool, newPoolTestPeer(11, nil).node.ID(), int64(time.Second*2)) // Add low balance to new paid client
	if cap := connect(pool, newPoolTestPeer(11, nil)); cap != 0 {
		t.Fatalf("Low balance paid client should be rejected")
	}
	clock.Run(time.Second)
	addBalance(pool, newPoolTestPeer(12, nil).node.ID(), int64(time.Minute*5)) // Add high balance to new paid client
	if cap := connect(pool, newPoolTestPeer(12, nil)); cap == 0 {
		t.Fatalf("High balance paid client should be accepted")
	}
}

func TestPaidClientKickedOut(t *testing.T) {
	var (
		clock    mclock.Simulated
		db       = rawdb.NewMemoryDatabase()
		kickedCh = make(chan int, 100)
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	pool.SetExpirationTCs(0, 0)
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	for i := 0; i < 10; i++ {
		addBalance(pool, newPoolTestPeer(i, kickedCh).node.ID(), 10000000000) // 10 second allowance
		connect(pool, newPoolTestPeer(i, kickedCh))
		clock.Run(time.Millisecond)
	}
	clock.Run(defaultConnectedBias + time.Second*11)
	if cap := connect(pool, newPoolTestPeer(11, kickedCh)); cap == 0 {
		t.Fatalf("Free client should be accepted")
	}
	clock.Run(0)
	select {
	case id := <-kickedCh:
		if id != 0 {
			t.Fatalf("Kicked client mismatch, want %v, got %v", 0, id)
		}
	default:
		t.Fatalf("timeout")
	}
}

func TestConnectFreeClient(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10))
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
	if cap := connect(pool, newPoolTestPeer(0, nil)); cap == 0 {
		t.Fatalf("Failed to connect free client")
	}
	testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 2, false)
}

func TestConnectFreeClientToFullPool(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	for i := 0; i < 10; i++ {
		connect(pool, newPoolTestPeer(i, nil))
	}
	if cap := connect(pool, newPoolTestPeer(11, nil)); cap != 0 {
		t.Fatalf("New free client should be rejected")
	}
	clock.Run(time.Minute)
	if cap := connect(pool, newPoolTestPeer(12, nil)); cap != 0 {
		t.Fatalf("New free client should be rejected")
	}
	clock.Run(time.Millisecond)
	clock.Run(4 * time.Minute)
	if cap := connect(pool, newPoolTestPeer(13, nil)); cap == 0 {
		t.Fatalf("Old client connected for more than 5 minutes should have been kicked")
	}
}

func TestFreeClientKickedOut(t *testing.T) {
	var (
		clock  mclock.Simulated
		db     = rawdb.NewMemoryDatabase()
		kicked = make(chan int, 100)
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	for i := 0; i < 10; i++ {
		connect(pool, newPoolTestPeer(i, kicked))
		clock.Run(time.Millisecond)
	}
	if cap := connect(pool, newPoolTestPeer(10, kicked)); cap != 0 {
		t.Fatalf("New free client should be rejected")
	}
	clock.Run(0)
	select {
	case <-kicked:
	default:
		t.Fatalf("timeout")
	}
	disconnect(pool, newPoolTestPeer(10, kicked))
	clock.Run(5 * time.Minute)
	for i := 0; i < 10; i++ {
		connect(pool, newPoolTestPeer(i+10, kicked))
	}
	clock.Run(0)

	for i := 0; i < 10; i++ {
		select {
		case id := <-kicked:
			if id >= 10 {
				t.Fatalf("Old client should be kicked, now got: %d", id)
			}
		default:
			t.Fatalf("timeout")
		}
	}
}

func TestPositiveBalanceCalculation(t *testing.T) {
	var (
		clock  mclock.Simulated
		db     = rawdb.NewMemoryDatabase()
		kicked = make(chan int, 10)
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute*3))
	testPriorityConnect(t, pool, newPoolTestPeer(0, kicked), 10, true)
	clock.Run(time.Minute)

	disconnect(pool, newPoolTestPeer(0, kicked))
	pb, _ := getBalance(pool, newPoolTestPeer(0, kicked))
	if checkDiff(pb, uint64(time.Minute*2)) {
		t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute*2), pb)
	}
}

func TestDowngradePriorityClient(t *testing.T) {
	var (
		clock  mclock.Simulated
		db     = rawdb.NewMemoryDatabase()
		kicked = make(chan int, 10)
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	p := newPoolTestPeer(0, kicked)
	addBalance(pool, p.node.ID(), int64(time.Minute))
	testPriorityConnect(t, pool, p, 10, true)
	if p.cap != 10 {
		t.Fatalf("The capacity of priority peer hasn't been updated, got: %d", p.cap)
	}

	clock.Run(time.Minute)             // All positive balance should be used up.
	time.Sleep(300 * time.Millisecond) // Ensure the callback is called
	if p.cap != 1 {
		t.Fatalf("The capacity of peer should be downgraded, got: %d", p.cap)
	}
	pb, _ := getBalance(pool, newPoolTestPeer(0, kicked))
	if pb != 0 {
		t.Fatalf("Positive balance mismatch, want %v, got %v", 0, pb)
	}

	addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute))
	pb, _ = getBalance(pool, newPoolTestPeer(0, kicked))
	if checkDiff(pb, uint64(time.Minute)) {
		t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute), pb)
	}
}

func TestNegativeBalanceCalculation(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetExpirationTCs(0, 3600)
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1})

	for i := 0; i < 10; i++ {
		connect(pool, newPoolTestPeer(i, nil))
	}
	clock.Run(time.Second)

	for i := 0; i < 10; i++ {
		disconnect(pool, newPoolTestPeer(i, nil))
		_, nb := getBalance(pool, newPoolTestPeer(i, nil))
		if nb != 0 {
			t.Fatalf("Short connection shouldn't be recorded")
		}
	}
	for i := 0; i < 10; i++ {
		connect(pool, newPoolTestPeer(i, nil))
	}
	clock.Run(time.Minute)
	for i := 0; i < 10; i++ {
		disconnect(pool, newPoolTestPeer(i, nil))
		_, nb := getBalance(pool, newPoolTestPeer(i, nil))
		exp := uint64(time.Minute) / 1000
		exp -= exp / 120 // correct for negative balance expiration
		if checkDiff(nb, exp) {
			t.Fatalf("Negative balance mismatch, want %v, got %v", exp, nb)
		}
	}
}

func TestInactiveClient(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(2, uint64(2))

	p1 := newPoolTestPeer(1, nil)
	p1.inactiveAllowed = true
	p2 := newPoolTestPeer(2, nil)
	p2.inactiveAllowed = true
	p3 := newPoolTestPeer(3, nil)
	p3.inactiveAllowed = true
	addBalance(pool, p1.node.ID(), 1000*int64(time.Second))
	addBalance(pool, p3.node.ID(), 2000*int64(time.Second))
	// p1: 1000  p2: 0  p3: 2000
	p1.cap = connect(pool, p1)
	if p1.cap != 1 {
		t.Fatalf("Failed to connect peer #1")
	}
	p2.cap = connect(pool, p2)
	if p2.cap != 1 {
		t.Fatalf("Failed to connect peer #2")
	}
	p3.cap = connect(pool, p3)
	if p3.cap != 1 {
		t.Fatalf("Failed to connect peer #3")
	}
	if p2.cap != 0 {
		t.Fatalf("Failed to deactivate peer #2")
	}
	addBalance(pool, p2.node.ID(), 3000*int64(time.Second))
	// p1: 1000  p2: 3000  p3: 2000
	if p2.cap != 1 {
		t.Fatalf("Failed to activate peer #2")
	}
	if p1.cap != 0 {
		t.Fatalf("Failed to deactivate peer #1")
	}
	addBalance(pool, p2.node.ID(), -2500*int64(time.Second))
	// p1: 1000  p2: 500  p3: 2000
	if p1.cap != 1 {
		t.Fatalf("Failed to activate peer #1")
	}
	if p2.cap != 0 {
		t.Fatalf("Failed to deactivate peer #2")
	}
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0})
	p4 := newPoolTestPeer(4, nil)
	addBalance(pool, p4.node.ID(), 1500*int64(time.Second))
	// p1: 1000  p2: 500  p3: 2000  p4: 1500
	p4.cap = connect(pool, p4)
	if p4.cap != 1 {
		t.Fatalf("Failed to activate peer #4")
	}
	if p1.cap != 0 {
		t.Fatalf("Failed to deactivate peer #1")
	}
	clock.Run(time.Second * 600)
	// manually trigger a check to avoid a long real-time wait
	pool.ns.SetState(p1.node, pool.setup.updateFlag, nodestate.Flags{}, 0)
	pool.ns.SetState(p1.node, nodestate.Flags{}, pool.setup.updateFlag, 0)
	// p1: 1000  p2: 500  p3: 2000  p4: 900
	if p1.cap != 1 {
		t.Fatalf("Failed to activate peer #1")
	}
	if p4.cap != 0 {
		t.Fatalf("Failed to deactivate peer #4")
	}
	disconnect(pool, p2)
	disconnect(pool, p4)
	addBalance(pool, p1.node.ID(), -1000*int64(time.Second))
	if p1.cap != 1 {
		t.Fatalf("Should not deactivate peer #1")
	}
	if p2.cap != 0 {
		t.Fatalf("Should not activate peer #2")
	}
}