github.com/aswedchain/aswed@v1.0.1/les/clientpool_test.go (about)

     1  // Copyright 2019 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package les
    18  
    19  import (
    20  	"fmt"
    21  	"math/rand"
    22  	"testing"
    23  	"time"
    24  
    25  	"github.com/aswedchain/aswed/common/mclock"
    26  	"github.com/aswedchain/aswed/core/rawdb"
    27  	lps "github.com/aswedchain/aswed/les/lespay/server"
    28  	"github.com/aswedchain/aswed/p2p/enode"
    29  	"github.com/aswedchain/aswed/p2p/enr"
    30  	"github.com/aswedchain/aswed/p2p/nodestate"
    31  )
    32  
// TestClientPoolL10C100Free: 10 active slots, 100 clients, no paid clients,
// random disconnects enabled.
func TestClientPoolL10C100Free(t *testing.T) {
	testClientPool(t, 10, 100, 0, true)
}
    36  
// TestClientPoolL40C200Free: 40 active slots, 200 clients, no paid clients,
// random disconnects enabled.
func TestClientPoolL40C200Free(t *testing.T) {
	testClientPool(t, 40, 200, 0, true)
}
    40  
// TestClientPoolL100C300Free: 100 active slots, 300 clients, no paid clients,
// random disconnects enabled.
func TestClientPoolL100C300Free(t *testing.T) {
	testClientPool(t, 100, 300, 0, true)
}
    44  
// TestClientPoolL10C100P4: 10 active slots, 100 clients, 4 paid clients,
// no random disconnects (only pool-initiated kicks).
func TestClientPoolL10C100P4(t *testing.T) {
	testClientPool(t, 10, 100, 4, false)
}
    48  
// TestClientPoolL40C200P30: 40 active slots, 200 clients, 30 paid clients,
// no random disconnects (only pool-initiated kicks).
func TestClientPoolL40C200P30(t *testing.T) {
	testClientPool(t, 40, 200, 30, false)
}
    52  
// TestClientPoolL100C300P20: 100 active slots, 300 clients, 20 paid clients,
// no random disconnects (only pool-initiated kicks).
func TestClientPoolL100C300P20(t *testing.T) {
	testClientPool(t, 100, 300, 20, false)
}
    56  
    57  const testClientPoolTicks = 100000
    58  
// poolTestPeer is a minimal test implementation of the peer interface
// expected by clientPool.
type poolTestPeer struct {
	node            *enode.Node
	index           int      // peer index; also encoded into the node ID (see newPoolTestPeer)
	disconnCh       chan int // receives the peer index on pool-initiated disconnect; may be nil
	cap             uint64   // last capacity reported via updateCapacity
	inactiveAllowed bool     // value returned by allowInactive
}
    66  
// newPoolTestPeer creates a test peer whose node ID encodes the index i in
// its first two bytes (low byte first), so that a disconnect hook can map an
// enode.ID back to the peer index.
func newPoolTestPeer(i int, disconnCh chan int) *poolTestPeer {
	return &poolTestPeer{
		index:     i,
		disconnCh: disconnCh,
		node:      enode.SignNull(&enr.Record{}, enode.ID{byte(i % 256), byte(i >> 8)}),
	}
}
    74  
// Node returns the peer's enode representation.
func (i *poolTestPeer) Node() *enode.Node {
	return i.node
}
    78  
// freeClientId returns a unique pseudo network address for the peer, used as
// the free-client identifier by the pool.
func (i *poolTestPeer) freeClientId() string {
	return fmt.Sprintf("addr #%d", i.index)
}
    82  
// updateCapacity records the capacity assigned by the pool so tests can
// assert on it.
func (i *poolTestPeer) updateCapacity(cap uint64) {
	i.cap = cap
}
    86  
    87  func (i *poolTestPeer) freeze() {}
    88  
// allowInactive reports whether the peer accepts being kept in the pool in
// deactivated (zero-capacity) state instead of being dropped.
func (i *poolTestPeer) allowInactive() bool {
	return i.inactiveAllowed
}
    92  
// getBalance returns the current positive and negative balance of the given
// test peer. If the peer is not currently connected (no clientField set on
// its node), the connection address field is temporarily set so the balance
// object becomes available, and cleared again before returning.
func getBalance(pool *clientPool, p *poolTestPeer) (pos, neg uint64) {
	temp := pool.ns.GetField(p.node, clientField) == nil
	if temp {
		// presumably setting connAddressField triggers creation of the
		// balance field — NOTE(review): confirm against nodestate setup
		pool.ns.SetField(p.node, connAddressField, p.freeClientId())
	}
	n, _ := pool.ns.GetField(p.node, pool.BalanceField).(*lps.NodeBalance)
	pos, neg = n.GetBalance()
	if temp {
		pool.ns.SetField(p.node, connAddressField, nil)
	}
	return
}
   105  
// addBalance adds amount (may be negative; callers express it in time units,
// e.g. int64(time.Minute)) to the balance of the client with the given ID.
func addBalance(pool *clientPool, id enode.ID, amount int64) {
	pool.forClients([]enode.ID{id}, func(c *clientInfo) {
		c.balance.AddBalance(amount)
	})
}
   111  
   112  func checkDiff(a, b uint64) bool {
   113  	maxDiff := (a + b) / 2000
   114  	if maxDiff < 1 {
   115  		maxDiff = 1
   116  	}
   117  	return a > b+maxDiff || b > a+maxDiff
   118  }
   119  
// testClientPool runs a randomized connect/disconnect simulation against a
// clientPool with activeLimit active slots and clientCount distinct peers.
// At the quarter mark the first paidCount peers receive a positive balance
// large enough for half the simulation period. At the end, every peer's
// total connected time must fall within the statistically expected range;
// paid peers are expected to stay connected through the second half.
func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, randomDisconnect bool) {
	rand.Seed(time.Now().UnixNano())
	var (
		clock     mclock.Simulated
		db        = rawdb.NewMemoryDatabase()
		connected = make([]bool, clientCount)
		connTicks = make([]int, clientCount)
		disconnCh = make(chan int, clientCount)
		disconnFn = func(id enode.ID) {
			// recover the peer index from the first two ID bytes
			// (inverse of the encoding in newPoolTestPeer)
			disconnCh <- int(id[0]) + int(id[1])<<8
		}
		pool = newClientPool(db, 1, 0, &clock, disconnFn)
	)

	pool.setLimits(activeLimit, uint64(activeLimit))
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	// pool should accept new peers up to its connected limit
	for i := 0; i < activeLimit; i++ {
		if cap, _ := pool.connect(newPoolTestPeer(i, disconnCh)); cap != 0 {
			connected[i] = true
		} else {
			t.Fatalf("Test peer #%d rejected", i)
		}
	}
	// randomly connect and disconnect peers, expect to have a similar total connection time at the end
	for tickCounter := 0; tickCounter < testClientPoolTicks; tickCounter++ {
		clock.Run(1 * time.Second)

		if tickCounter == testClientPoolTicks/4 {
			// give a positive balance to some of the peers
			amount := testClientPoolTicks / 2 * int64(time.Second) // enough for half of the simulation period
			for i := 0; i < paidCount; i++ {
				addBalance(pool, newPoolTestPeer(i, disconnCh).node.ID(), amount)
			}
		}

		i := rand.Intn(clientCount)
		if connected[i] {
			if randomDisconnect {
				pool.disconnect(newPoolTestPeer(i, disconnCh))
				connected[i] = false
				connTicks[i] += tickCounter
			}
		} else {
			// connTicks accumulates (disconnect tick - connect tick) pairs;
			// subtract the connect tick now, add the disconnect tick later.
			if cap, _ := pool.connect(newPoolTestPeer(i, disconnCh)); cap != 0 {
				connected[i] = true
				connTicks[i] -= tickCounter
			} else {
				pool.disconnect(newPoolTestPeer(i, disconnCh))
			}
		}
		// drain pool-initiated disconnects (peers kicked to make room)
	pollDisconnects:
		for {
			select {
			case i := <-disconnCh:
				pool.disconnect(newPoolTestPeer(i, disconnCh))
				if connected[i] {
					connTicks[i] += tickCounter
					connected[i] = false
				}
			default:
				break pollDisconnects
			}
		}
	}

	expTicks := testClientPoolTicks/2*activeLimit/clientCount + testClientPoolTicks/2*(activeLimit-paidCount)/(clientCount-paidCount)
	expMin := expTicks - expTicks/5
	expMax := expTicks + expTicks/5
	paidTicks := testClientPoolTicks/2*activeLimit/clientCount + testClientPoolTicks/2
	paidMin := paidTicks - paidTicks/5
	paidMax := paidTicks + paidTicks/5

	// check if the total connected time of peers are all in the expected range
	for i, c := range connected {
		if c {
			// still connected at the end: close the open interval
			connTicks[i] += testClientPoolTicks
		}
		min, max := expMin, expMax
		if i < paidCount {
			// expect a higher amount for clients with a positive balance
			min, max = paidMin, paidMax
		}
		if connTicks[i] < min || connTicks[i] > max {
			t.Errorf("Total connected time of test node #%d (%d) outside expected range (%d to %d)", i, connTicks[i], min, max)
		}
	}
	pool.stop()
}
   210  
// testPriorityConnect connects p to the pool and then tries to raise its
// capacity to cap. If expSuccess is true both steps must succeed; otherwise
// at least one of them must fail.
func testPriorityConnect(t *testing.T, pool *clientPool, p *poolTestPeer, cap uint64, expSuccess bool) {
	// note: the if-scoped cap below shadows the parameter only inside the
	// condition; setCapacity further down still receives the requested cap.
	if cap, _ := pool.connect(p); cap == 0 {
		if expSuccess {
			t.Fatalf("Failed to connect paid client")
		} else {
			return
		}
	}
	if _, err := pool.setCapacity(p.node, "", cap, defaultConnectedBias, true); err != nil {
		if expSuccess {
			t.Fatalf("Failed to raise capacity of paid client")
		} else {
			return
		}
	}
	if !expSuccess {
		t.Fatalf("Should reject high capacity paid client")
	}
}
   230  
   231  func TestConnectPaidClient(t *testing.T) {
   232  	var (
   233  		clock mclock.Simulated
   234  		db    = rawdb.NewMemoryDatabase()
   235  	)
   236  	pool := newClientPool(db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
   237  	defer pool.stop()
   238  	pool.setLimits(10, uint64(10))
   239  	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
   240  
   241  	// Add balance for an external client and mark it as paid client
   242  	addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))
   243  	testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 10, true)
   244  }
   245  
   246  func TestConnectPaidClientToSmallPool(t *testing.T) {
   247  	var (
   248  		clock mclock.Simulated
   249  		db    = rawdb.NewMemoryDatabase()
   250  	)
   251  	pool := newClientPool(db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
   252  	defer pool.stop()
   253  	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
   254  	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
   255  
   256  	// Add balance for an external client and mark it as paid client
   257  	addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))
   258  
   259  	// Connect a fat paid client to pool, should reject it.
   260  	testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 100, false)
   261  }
   262  
// TestConnectPaidClientToFullPool checks that a full pool of paid clients
// rejects a new paid client with a lower balance but accepts one with a
// higher balance than the current members.
func TestConnectPaidClientToFullPool(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	removeFn := func(enode.ID) {} // Noop
	pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
	defer pool.stop()
	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	// fill all 10 slots with paid clients holding 20s of balance each
	for i := 0; i < 10; i++ {
		addBalance(pool, newPoolTestPeer(i, nil).node.ID(), int64(time.Second*20))
		pool.connect(newPoolTestPeer(i, nil))
	}
	addBalance(pool, newPoolTestPeer(11, nil).node.ID(), int64(time.Second*2)) // Add low balance to new paid client
	if cap, _ := pool.connect(newPoolTestPeer(11, nil)); cap != 0 {
		t.Fatalf("Low balance paid client should be rejected")
	}
	clock.Run(time.Second)
	addBalance(pool, newPoolTestPeer(12, nil).node.ID(), int64(time.Minute*5)) // Add high balance to new paid client
	if cap, _ := pool.connect(newPoolTestPeer(12, nil)); cap == 0 {
		t.Fatalf("High balance paid client should be accepted")
	}
}
   288  
// TestPaidClientKickedOut checks that when all paid clients' balances have
// been used up, a newly connecting free client kicks out the peer that
// connected first (peer #0).
func TestPaidClientKickedOut(t *testing.T) {
	var (
		clock    mclock.Simulated
		db       = rawdb.NewMemoryDatabase()
		kickedCh = make(chan int, 100)
	)
	removeFn := func(id enode.ID) {
		// first ID byte is the peer index (see newPoolTestPeer)
		kickedCh <- int(id[0])
	}
	pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
	// zero time constants — NOTE(review): presumably disables exponential
	// balance expiration for deterministic behavior; confirm semantics
	pool.bt.SetExpirationTCs(0, 0)
	defer pool.stop()
	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	for i := 0; i < 10; i++ {
		addBalance(pool, newPoolTestPeer(i, kickedCh).node.ID(), 10000000000) // 10 second allowance
		pool.connect(newPoolTestPeer(i, kickedCh))
		clock.Run(time.Millisecond)
	}
	// advance past all allowances plus the connected bias
	clock.Run(defaultConnectedBias + time.Second*11)
	if cap, _ := pool.connect(newPoolTestPeer(11, kickedCh)); cap == 0 {
		t.Fatalf("Free client should be accepted")
	}
	select {
	case id := <-kickedCh:
		if id != 0 {
			t.Fatalf("Kicked client mismatch, want %v, got %v", 0, id)
		}
	case <-time.NewTimer(time.Second).C:
		t.Fatalf("timeout")
	}
}
   322  
   323  func TestConnectFreeClient(t *testing.T) {
   324  	var (
   325  		clock mclock.Simulated
   326  		db    = rawdb.NewMemoryDatabase()
   327  	)
   328  	pool := newClientPool(db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
   329  	defer pool.stop()
   330  	pool.setLimits(10, uint64(10))
   331  	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
   332  	if cap, _ := pool.connect(newPoolTestPeer(0, nil)); cap == 0 {
   333  		t.Fatalf("Failed to connect free client")
   334  	}
   335  	testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 2, false)
   336  }
   337  
// TestConnectFreeClientToFullPool checks that a full pool rejects new free
// clients until the existing ones have been connected long enough (over five
// minutes) to become replaceable.
func TestConnectFreeClientToFullPool(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	removeFn := func(enode.ID) {} // Noop
	pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
	defer pool.stop()
	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	for i := 0; i < 10; i++ {
		pool.connect(newPoolTestPeer(i, nil))
	}
	// immediately after filling up, new free clients must be rejected
	if cap, _ := pool.connect(newPoolTestPeer(11, nil)); cap != 0 {
		t.Fatalf("New free client should be rejected")
	}
	// one minute of connection time is still not enough to be replaced
	clock.Run(time.Minute)
	if cap, _ := pool.connect(newPoolTestPeer(12, nil)); cap != 0 {
		t.Fatalf("New free client should be rejected")
	}
	clock.Run(time.Millisecond)
	clock.Run(4 * time.Minute)
	if cap, _ := pool.connect(newPoolTestPeer(13, nil)); cap == 0 {
		t.Fatalf("Old client connects more than 5min should be kicked")
	}
}
   365  
// TestFreeClientKickedOut checks that when newer free clients connect after
// enough simulated time has passed, it is the older free clients that get
// kicked out, never the newcomers.
func TestFreeClientKickedOut(t *testing.T) {
	var (
		clock  mclock.Simulated
		db     = rawdb.NewMemoryDatabase()
		kicked = make(chan int, 100)
	)
	removeFn := func(id enode.ID) { kicked <- int(id[0]) }
	pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
	defer pool.stop()
	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	for i := 0; i < 10; i++ {
		pool.connect(newPoolTestPeer(i, kicked))
		clock.Run(time.Millisecond)
	}
	// no time has passed since filling up: a new free client must be rejected
	if cap, _ := pool.connect(newPoolTestPeer(10, kicked)); cap != 0 {
		t.Fatalf("New free client should be rejected")
	}
	select {
	case <-kicked:
	case <-time.NewTimer(time.Second).C:
		t.Fatalf("timeout")
	}
	pool.disconnect(newPoolTestPeer(10, kicked))
	// after five minutes the original clients are old enough to be replaced
	clock.Run(5 * time.Minute)
	for i := 0; i < 10; i++ {
		pool.connect(newPoolTestPeer(i+10, kicked))
	}
	// all ten kicked peers must be from the original batch (index < 10)
	for i := 0; i < 10; i++ {
		select {
		case id := <-kicked:
			if id >= 10 {
				t.Fatalf("Old client should be kicked, now got: %d", id)
			}
		case <-time.NewTimer(time.Second).C:
			t.Fatalf("timeout")
		}
	}
}
   406  
// TestPositiveBalanceCalculation checks that a priority client's positive
// balance is consumed at the expected rate while it is connected (3 minutes
// of balance minus 1 minute of connection leaves ~2 minutes).
func TestPositiveBalanceCalculation(t *testing.T) {
	var (
		clock  mclock.Simulated
		db     = rawdb.NewMemoryDatabase()
		kicked = make(chan int, 10)
	)
	removeFn := func(id enode.ID) { kicked <- int(id[0]) } // records kicked peer indexes
	pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
	defer pool.stop()
	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute*3))
	testPriorityConnect(t, pool, newPoolTestPeer(0, kicked), 10, true)
	clock.Run(time.Minute)

	pool.disconnect(newPoolTestPeer(0, kicked))
	pb, _ := getBalance(pool, newPoolTestPeer(0, kicked))
	if checkDiff(pb, uint64(time.Minute*2)) {
		t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute*2), pb)
	}
}
   429  
   430  func TestDowngradePriorityClient(t *testing.T) {
   431  	var (
   432  		clock  mclock.Simulated
   433  		db     = rawdb.NewMemoryDatabase()
   434  		kicked = make(chan int, 10)
   435  	)
   436  	removeFn := func(id enode.ID) { kicked <- int(id[0]) } // Noop
   437  	pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
   438  	defer pool.stop()
   439  	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
   440  	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
   441  
   442  	p := newPoolTestPeer(0, kicked)
   443  	addBalance(pool, p.node.ID(), int64(time.Minute))
   444  	testPriorityConnect(t, pool, p, 10, true)
   445  	if p.cap != 10 {
   446  		t.Fatalf("The capacity of priority peer hasn't been updated, got: %d", p.cap)
   447  	}
   448  
   449  	clock.Run(time.Minute)             // All positive balance should be used up.
   450  	time.Sleep(300 * time.Millisecond) // Ensure the callback is called
   451  	if p.cap != 1 {
   452  		t.Fatalf("The capcacity of peer should be downgraded, got: %d", p.cap)
   453  	}
   454  	pb, _ := getBalance(pool, newPoolTestPeer(0, kicked))
   455  	if pb != 0 {
   456  		t.Fatalf("Positive balance mismatch, want %v, got %v", 0, pb)
   457  	}
   458  
   459  	addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute))
   460  	pb, _ = getBalance(pool, newPoolTestPeer(0, kicked))
   461  	if checkDiff(pb, uint64(time.Minute)) {
   462  		t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute), pb)
   463  	}
   464  }
   465  
// TestNegativeBalanceCalculation checks that negative balance (accumulated
// service cost) is not recorded for very short connections, and that for a
// one-minute connection it matches the connection time scaled by the 1e-3
// time factor.
func TestNegativeBalanceCalculation(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := newClientPool(db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
	defer pool.stop()
	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1})

	for i := 0; i < 10; i++ {
		pool.connect(newPoolTestPeer(i, nil))
	}
	// one second of connection: too short to leave a negative balance
	clock.Run(time.Second)

	for i := 0; i < 10; i++ {
		pool.disconnect(newPoolTestPeer(i, nil))
		_, nb := getBalance(pool, newPoolTestPeer(i, nil))
		if nb != 0 {
			t.Fatalf("Short connection shouldn't be recorded")
		}
	}
	// one minute of connection: expect nb ≈ time.Minute * 1e-3
	for i := 0; i < 10; i++ {
		pool.connect(newPoolTestPeer(i, nil))
	}
	clock.Run(time.Minute)
	for i := 0; i < 10; i++ {
		pool.disconnect(newPoolTestPeer(i, nil))
		_, nb := getBalance(pool, newPoolTestPeer(i, nil))
		if checkDiff(nb, uint64(time.Minute)/1000) {
			t.Fatalf("Negative balance mismatch, want %v, got %v", uint64(time.Minute)/1000, nb)
		}
	}
}
   500  
// TestInactiveClient checks activation/deactivation of peers that allow
// staying connected in inactive (zero-capacity) state. With only 2 active
// slots, the two peers with the highest priority (balance) should always be
// the active ones; balance changes must trigger the corresponding
// activations and deactivations.
func TestInactiveClient(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := newClientPool(db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
	defer pool.stop()
	pool.setLimits(2, uint64(2))

	p1 := newPoolTestPeer(1, nil)
	p1.inactiveAllowed = true
	p2 := newPoolTestPeer(2, nil)
	p2.inactiveAllowed = true
	p3 := newPoolTestPeer(3, nil)
	p3.inactiveAllowed = true
	addBalance(pool, p1.node.ID(), 1000*int64(time.Second))
	addBalance(pool, p3.node.ID(), 2000*int64(time.Second))
	// p1: 1000  p2: 0  p3: 2000
	p1.cap, _ = pool.connect(p1)
	if p1.cap != 1 {
		t.Fatalf("Failed to connect peer #1")
	}
	p2.cap, _ = pool.connect(p2)
	if p2.cap != 1 {
		t.Fatalf("Failed to connect peer #2")
	}
	// connecting p3 (balance 2000) should displace the lowest-priority
	// active peer, which is p2 (balance 0)
	p3.cap, _ = pool.connect(p3)
	if p3.cap != 1 {
		t.Fatalf("Failed to connect peer #3")
	}
	if p2.cap != 0 {
		t.Fatalf("Failed to deactivate peer #2")
	}
	addBalance(pool, p2.node.ID(), 3000*int64(time.Second))
	// p1: 1000  p2: 3000  p3: 2000
	if p2.cap != 1 {
		t.Fatalf("Failed to activate peer #2")
	}
	if p1.cap != 0 {
		t.Fatalf("Failed to deactivate peer #1")
	}
	addBalance(pool, p2.node.ID(), -2500*int64(time.Second))
	// p1: 1000  p2: 500  p3: 2000
	if p1.cap != 1 {
		t.Fatalf("Failed to activate peer #1")
	}
	if p2.cap != 0 {
		t.Fatalf("Failed to deactivate peer #2")
	}
	// from here on connected time also consumes balance (TimeFactor 1)
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0})
	p4 := newPoolTestPeer(4, nil)
	addBalance(pool, p4.node.ID(), 1500*int64(time.Second))
	// p1: 1000  p2: 500  p3: 2000  p4: 1500
	p4.cap, _ = pool.connect(p4)
	if p4.cap != 1 {
		t.Fatalf("Failed to activate peer #4")
	}
	if p1.cap != 0 {
		t.Fatalf("Failed to deactivate peer #1")
	}
	// 600s of connected time drains p4's balance below p1's
	clock.Run(time.Second * 600)
	// manually trigger a check to avoid a long real-time wait
	pool.ns.SetState(p1.node, pool.UpdateFlag, nodestate.Flags{}, 0)
	pool.ns.SetState(p1.node, nodestate.Flags{}, pool.UpdateFlag, 0)
	// p1: 1000  p2: 500  p3: 2000  p4: 900
	if p1.cap != 1 {
		t.Fatalf("Failed to activate peer #1")
	}
	if p4.cap != 0 {
		t.Fatalf("Failed to deactivate peer #4")
	}
	pool.disconnect(p2)
	pool.disconnect(p4)
	addBalance(pool, p1.node.ID(), -1000*int64(time.Second))
	if p1.cap != 1 {
		t.Fatalf("Should not deactivate peer #1")
	}
	if p2.cap != 0 {
		t.Fatalf("Should not activate peer #2")
	}
}