github.com/cryptogateway/go-paymex@v0.0.0-20210204174735-96277fb1e602/les/clientpool_test.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
	"fmt"
	"math/rand"
	"testing"
	"time"

	"github.com/cryptogateway/go-paymex/common/mclock"
	"github.com/cryptogateway/go-paymex/core/rawdb"
	lps "github.com/cryptogateway/go-paymex/les/lespay/server"
	"github.com/cryptogateway/go-paymex/p2p/enode"
	"github.com/cryptogateway/go-paymex/p2p/enr"
	"github.com/cryptogateway/go-paymex/p2p/nodestate"
)

func TestClientPoolL10C100Free(t *testing.T) {
	testClientPool(t, 10, 100, 0, true)
}

func TestClientPoolL40C200Free(t *testing.T) {
	testClientPool(t, 40, 200, 0, true)
}

func TestClientPoolL100C300Free(t *testing.T) {
	testClientPool(t, 100, 300, 0, true)
}

func TestClientPoolL10C100P4(t *testing.T) {
	testClientPool(t, 10, 100, 4, false)
}

func TestClientPoolL40C200P30(t *testing.T) {
	testClientPool(t, 40, 200, 30, false)
}

func TestClientPoolL100C300P20(t *testing.T) {
	testClientPool(t, 100, 300, 20, false)
}

const testClientPoolTicks = 100000

type poolTestPeer struct {
	node            *enode.Node
	index           int
	disconnCh       chan int
	cap             uint64
	inactiveAllowed bool
}

func testStateMachine() *nodestate.NodeStateMachine {
	return nodestate.NewNodeStateMachine(nil, nil, mclock.System{}, serverSetup)
}

func newPoolTestPeer(i int, disconnCh chan int) *poolTestPeer {
	return &poolTestPeer{
		index:     i,
		disconnCh: disconnCh,
		node:      enode.SignNull(&enr.Record{}, enode.ID{byte(i % 256), byte(i >> 8)}),
	}
}

func (i *poolTestPeer) Node() *enode.Node {
	return i.node
}

func (i *poolTestPeer) freeClientId() string {
	return fmt.Sprintf("addr #%d", i.index)
}

func (i *poolTestPeer) updateCapacity(cap uint64) {
	i.cap = cap
}

func (i *poolTestPeer) freeze() {}

func (i *poolTestPeer) allowInactive() bool {
	return i.inactiveAllowed
}

func getBalance(pool *clientPool, p *poolTestPeer) (pos, neg uint64) {
	temp := pool.ns.GetField(p.node, clientInfoField) == nil
	if temp {
		pool.ns.SetField(p.node, connAddressField, p.freeClientId())
	}
	n, _ := pool.ns.GetField(p.node, pool.BalanceField).(*lps.NodeBalance)
	pos, neg = n.GetBalance()
	if temp {
		pool.ns.SetField(p.node, connAddressField, nil)
	}
	return
}

func addBalance(pool *clientPool, id enode.ID, amount int64) {
	pool.forClients([]enode.ID{id}, func(c *clientInfo) {
		c.balance.AddBalance(amount)
	})
}
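
// checkDiff reports whether a and b differ by more than the test tolerance:
// 1/2000th of their sum, with a minimum tolerance of 1. It is used below to
// compare measured balances against expected values.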
func checkDiff(a, b uint64) bool {
	maxDiff := (a + b) / 2000
	if maxDiff < 1 {
		maxDiff = 1
	}
	return a > b+maxDiff || b > a+maxDiff
}

func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, randomDisconnect bool) {
	rand.Seed(time.Now().UnixNano())
	var (
		clock     mclock.Simulated
		db        = rawdb.NewMemoryDatabase()
		connected = make([]bool, clientCount)
		connTicks = make([]int, clientCount)
		disconnCh = make(chan int, clientCount)
		disconnFn = func(id enode.ID) {
			disconnCh <- int(id[0]) + int(id[1])<<8
		}
		pool = newClientPool(testStateMachine(), db, 1, 0, &clock, disconnFn)
	)
	pool.ns.Start()

	pool.setLimits(activeLimit, uint64(activeLimit))
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	// pool should accept new peers up to its connected limit
	for i := 0; i < activeLimit; i++ {
		if cap, _ := pool.connect(newPoolTestPeer(i, disconnCh)); cap != 0 {
			connected[i] = true
		} else {
			t.Fatalf("Test peer #%d rejected", i)
		}
	}
	// randomly connect and disconnect peers; expect similar total connection times at the end
	for tickCounter := 0; tickCounter < testClientPoolTicks; tickCounter++ {
		clock.Run(1 * time.Second)

		if tickCounter == testClientPoolTicks/4 {
			// give a positive balance to some of the peers
			amount := testClientPoolTicks / 2 * int64(time.Second) // enough for half of the simulation period
			for i := 0; i < paidCount; i++ {
				addBalance(pool, newPoolTestPeer(i, disconnCh).node.ID(), amount)
			}
		}

		i := rand.Intn(clientCount)
		if connected[i] {
			if randomDisconnect {
				pool.disconnect(newPoolTestPeer(i, disconnCh))
				connected[i] = false
				connTicks[i] += tickCounter
			}
		} else {
			if cap, _ := pool.connect(newPoolTestPeer(i, disconnCh)); cap != 0 {
				connected[i] = true
				connTicks[i] -= tickCounter
			} else {
				pool.disconnect(newPoolTestPeer(i, disconnCh))
			}
		}
	pollDisconnects:
		for {
			select {
			case i := <-disconnCh:
				pool.disconnect(newPoolTestPeer(i, disconnCh))
				if connected[i] {
					connTicks[i] += tickCounter
					connected[i] = false
				}
			default:
				break pollDisconnects
			}
		}
	}

	expTicks := testClientPoolTicks/2*activeLimit/clientCount + testClientPoolTicks/2*(activeLimit-paidCount)/(clientCount-paidCount)
	expMin := expTicks - expTicks/5
	expMax := expTicks + expTicks/5
	paidTicks := testClientPoolTicks/2*activeLimit/clientCount + testClientPoolTicks/2
	paidMin := paidTicks - paidTicks/5
	paidMax := paidTicks + paidTicks/5

	// check that the total connected time of each peer is in the expected range
	for i, c := range connected {
		if c {
			connTicks[i] += testClientPoolTicks
		}
		min, max := expMin, expMax
		if i < paidCount {
			// expect a higher amount for clients with a positive balance
			min, max = paidMin, paidMax
		}
		if connTicks[i] < min || connTicks[i] > max {
			t.Errorf("Total connected time of test node #%d (%d) outside expected range (%d to %d)", i, connTicks[i], min, max)
		}
	}
	pool.stop()
}
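
// testPriorityConnect connects the given peer and then tries to raise its
// capacity to cap via setCapacity; expSuccess tells whether both steps are
// expected to succeed. The test fails on any unexpected outcome.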
func testPriorityConnect(t *testing.T, pool *clientPool, p *poolTestPeer, cap uint64, expSuccess bool) {
	if cap, _ := pool.connect(p); cap == 0 {
		if expSuccess {
			t.Fatalf("Failed to connect paid client")
		} else {
			return
		}
	}
	if _, err := pool.setCapacity(p.node, "", cap, defaultConnectedBias, true); err != nil {
		if expSuccess {
			t.Fatalf("Failed to raise capacity of paid client")
		} else {
			return
		}
	}
	if !expSuccess {
		t.Fatalf("Should reject high capacity paid client")
	}
}

func TestConnectPaidClient(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
	pool.ns.Start()
	defer pool.stop()
	pool.setLimits(10, uint64(10))
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	// Add balance for an external client and mark it as a paid client
	addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))
	testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 10, true)
}

func TestConnectPaidClientToSmallPool(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
	pool.ns.Start()
	defer pool.stop()
	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	// Add balance for an external client and mark it as a paid client
	addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))

	// Connect a paid client whose requested capacity exceeds the pool limit; it should be rejected.
	testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 100, false)
}

func TestConnectPaidClientToFullPool(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	removeFn := func(enode.ID) {} // Noop
	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn)
	pool.ns.Start()
	defer pool.stop()
	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	for i := 0; i < 10; i++ {
		addBalance(pool, newPoolTestPeer(i, nil).node.ID(), int64(time.Second*20))
		pool.connect(newPoolTestPeer(i, nil))
	}
	addBalance(pool, newPoolTestPeer(11, nil).node.ID(), int64(time.Second*2)) // Add low balance to new paid client
	if cap, _ := pool.connect(newPoolTestPeer(11, nil)); cap != 0 {
		t.Fatalf("Low balance paid client should be rejected")
	}
	clock.Run(time.Second)
	addBalance(pool, newPoolTestPeer(12, nil).node.ID(), int64(time.Minute*5)) // Add high balance to new paid client
	if cap, _ := pool.connect(newPoolTestPeer(12, nil)); cap == 0 {
		t.Fatalf("High balance paid client should be accepted")
	}
}
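
// TestPaidClientKickedOut fills the pool with paid clients whose balance
// covers about ten seconds each, lets the balances run out, and expects the
// first connected client (id 0) to be kicked once a free client connects.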
func TestPaidClientKickedOut(t *testing.T) {
	var (
		clock    mclock.Simulated
		db       = rawdb.NewMemoryDatabase()
		kickedCh = make(chan int, 100)
	)
	removeFn := func(id enode.ID) {
		kickedCh <- int(id[0])
	}
	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn)
	pool.ns.Start()
	pool.bt.SetExpirationTCs(0, 0)
	defer pool.stop()
	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	for i := 0; i < 10; i++ {
		addBalance(pool, newPoolTestPeer(i, kickedCh).node.ID(), 10000000000) // 10 second allowance
		pool.connect(newPoolTestPeer(i, kickedCh))
		clock.Run(time.Millisecond)
	}
	clock.Run(defaultConnectedBias + time.Second*11)
	if cap, _ := pool.connect(newPoolTestPeer(11, kickedCh)); cap == 0 {
		t.Fatalf("Free client should be accepted")
	}
	select {
	case id := <-kickedCh:
		if id != 0 {
			t.Fatalf("Kicked client mismatch, want %v, got %v", 0, id)
		}
	case <-time.NewTimer(time.Second).C:
		t.Fatalf("timeout")
	}
}

func TestConnectFreeClient(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
	pool.ns.Start()
	defer pool.stop()
	pool.setLimits(10, uint64(10))
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
	if cap, _ := pool.connect(newPoolTestPeer(0, nil)); cap == 0 {
		t.Fatalf("Failed to connect free client")
	}
	testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 2, false)
}

func TestConnectFreeClientToFullPool(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	removeFn := func(enode.ID) {} // Noop
	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn)
	pool.ns.Start()
	defer pool.stop()
	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	for i := 0; i < 10; i++ {
		pool.connect(newPoolTestPeer(i, nil))
	}
	if cap, _ := pool.connect(newPoolTestPeer(11, nil)); cap != 0 {
		t.Fatalf("New free client should be rejected")
	}
	clock.Run(time.Minute)
	if cap, _ := pool.connect(newPoolTestPeer(12, nil)); cap != 0 {
		t.Fatalf("New free client should be rejected")
	}
	clock.Run(time.Millisecond)
	clock.Run(4 * time.Minute)
	if cap, _ := pool.connect(newPoolTestPeer(13, nil)); cap == 0 {
		t.Fatalf("Old client connected for more than 5 minutes should be kicked")
	}
}
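
// TestFreeClientKickedOut fills the pool with free clients and checks that,
// once enough simulated time has passed, newly connecting free clients evict
// only the oldest connected ones.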
func TestFreeClientKickedOut(t *testing.T) {
	var (
		clock  mclock.Simulated
		db     = rawdb.NewMemoryDatabase()
		kicked = make(chan int, 100)
	)
	removeFn := func(id enode.ID) { kicked <- int(id[0]) }
	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn)
	pool.ns.Start()
	defer pool.stop()
	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	for i := 0; i < 10; i++ {
		pool.connect(newPoolTestPeer(i, kicked))
		clock.Run(time.Millisecond)
	}
	if cap, _ := pool.connect(newPoolTestPeer(10, kicked)); cap != 0 {
		t.Fatalf("New free client should be rejected")
	}
	select {
	case <-kicked:
	case <-time.NewTimer(time.Second).C:
		t.Fatalf("timeout")
	}
	pool.disconnect(newPoolTestPeer(10, kicked))
	clock.Run(5 * time.Minute)
	for i := 0; i < 10; i++ {
		pool.connect(newPoolTestPeer(i+10, kicked))
	}
	for i := 0; i < 10; i++ {
		select {
		case id := <-kicked:
			if id >= 10 {
				t.Fatalf("Old client should be kicked, now got: %d", id)
			}
		case <-time.NewTimer(time.Second).C:
			t.Fatalf("timeout")
		}
	}
}

func TestPositiveBalanceCalculation(t *testing.T) {
	var (
		clock  mclock.Simulated
		db     = rawdb.NewMemoryDatabase()
		kicked = make(chan int, 10)
	)
	removeFn := func(id enode.ID) { kicked <- int(id[0]) }
	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn)
	pool.ns.Start()
	defer pool.stop()
	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute*3))
	testPriorityConnect(t, pool, newPoolTestPeer(0, kicked), 10, true)
	clock.Run(time.Minute)

	pool.disconnect(newPoolTestPeer(0, kicked))
	pb, _ := getBalance(pool, newPoolTestPeer(0, kicked))
	if checkDiff(pb, uint64(time.Minute*2)) {
		t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute*2), pb)
	}
}
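
// TestDowngradePriorityClient checks that a priority client is downgraded to
// the free capacity of 1 once its positive balance is used up, and that a
// subsequent top-up is credited to the balance again.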
func TestDowngradePriorityClient(t *testing.T) {
	var (
		clock  mclock.Simulated
		db     = rawdb.NewMemoryDatabase()
		kicked = make(chan int, 10)
	)
	removeFn := func(id enode.ID) { kicked <- int(id[0]) }
	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn)
	pool.ns.Start()
	defer pool.stop()
	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	p := newPoolTestPeer(0, kicked)
	addBalance(pool, p.node.ID(), int64(time.Minute))
	testPriorityConnect(t, pool, p, 10, true)
	if p.cap != 10 {
		t.Fatalf("The capacity of the priority peer hasn't been updated, got: %d", p.cap)
	}

	clock.Run(time.Minute)             // All positive balance should be used up.
	time.Sleep(300 * time.Millisecond) // Ensure the callback is called
	if p.cap != 1 {
		t.Fatalf("The capacity of the peer should be downgraded, got: %d", p.cap)
	}
	pb, _ := getBalance(pool, newPoolTestPeer(0, kicked))
	if pb != 0 {
		t.Fatalf("Positive balance mismatch, want %v, got %v", 0, pb)
	}

	addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute))
	pb, _ = getBalance(pool, newPoolTestPeer(0, kicked))
	if checkDiff(pb, uint64(time.Minute)) {
		t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute), pb)
	}
}

func TestNegativeBalanceCalculation(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
	pool.ns.Start()
	defer pool.stop()
	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1})

	for i := 0; i < 10; i++ {
		pool.connect(newPoolTestPeer(i, nil))
	}
	clock.Run(time.Second)

	for i := 0; i < 10; i++ {
		pool.disconnect(newPoolTestPeer(i, nil))
		_, nb := getBalance(pool, newPoolTestPeer(i, nil))
		if nb != 0 {
			t.Fatalf("Short connection shouldn't be recorded")
		}
	}
	for i := 0; i < 10; i++ {
		pool.connect(newPoolTestPeer(i, nil))
	}
	clock.Run(time.Minute)
	for i := 0; i < 10; i++ {
		pool.disconnect(newPoolTestPeer(i, nil))
		_, nb := getBalance(pool, newPoolTestPeer(i, nil))
		if checkDiff(nb, uint64(time.Minute)/1000) {
			t.Fatalf("Negative balance mismatch, want %v, got %v", uint64(time.Minute)/1000, nb)
		}
	}
}
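
// TestInactiveClient checks pool behaviour for peers that allow inactive
// mode: when outprioritized they stay in the pool with capacity 0 and are
// reactivated as soon as their balance ranking allows it again.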
func TestInactiveClient(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
	pool.ns.Start()
	defer pool.stop()
	pool.setLimits(2, uint64(2))

	p1 := newPoolTestPeer(1, nil)
	p1.inactiveAllowed = true
	p2 := newPoolTestPeer(2, nil)
	p2.inactiveAllowed = true
	p3 := newPoolTestPeer(3, nil)
	p3.inactiveAllowed = true
	addBalance(pool, p1.node.ID(), 1000*int64(time.Second))
	addBalance(pool, p3.node.ID(), 2000*int64(time.Second))
	// p1: 1000  p2: 0  p3: 2000
	p1.cap, _ = pool.connect(p1)
	if p1.cap != 1 {
		t.Fatalf("Failed to connect peer #1")
	}
	p2.cap, _ = pool.connect(p2)
	if p2.cap != 1 {
		t.Fatalf("Failed to connect peer #2")
	}
	p3.cap, _ = pool.connect(p3)
	if p3.cap != 1 {
		t.Fatalf("Failed to connect peer #3")
	}
	if p2.cap != 0 {
		t.Fatalf("Failed to deactivate peer #2")
	}
	addBalance(pool, p2.node.ID(), 3000*int64(time.Second))
	// p1: 1000  p2: 3000  p3: 2000
	if p2.cap != 1 {
		t.Fatalf("Failed to activate peer #2")
	}
	if p1.cap != 0 {
		t.Fatalf("Failed to deactivate peer #1")
	}
	addBalance(pool, p2.node.ID(), -2500*int64(time.Second))
	// p1: 1000  p2: 500  p3: 2000
	if p1.cap != 1 {
		t.Fatalf("Failed to activate peer #1")
	}
	if p2.cap != 0 {
		t.Fatalf("Failed to deactivate peer #2")
	}
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0})
	p4 := newPoolTestPeer(4, nil)
	addBalance(pool, p4.node.ID(), 1500*int64(time.Second))
	// p1: 1000  p2: 500  p3: 2000  p4: 1500
	p4.cap, _ = pool.connect(p4)
	if p4.cap != 1 {
		t.Fatalf("Failed to activate peer #4")
	}
	if p1.cap != 0 {
		t.Fatalf("Failed to deactivate peer #1")
	}
	clock.Run(time.Second * 600)
	// manually trigger a check to avoid a long real-time wait
	pool.ns.SetState(p1.node, pool.UpdateFlag, nodestate.Flags{}, 0)
	pool.ns.SetState(p1.node, nodestate.Flags{}, pool.UpdateFlag, 0)
	// p1: 1000  p2: 500  p3: 2000  p4: 900
	if p1.cap != 1 {
		t.Fatalf("Failed to activate peer #1")
	}
	if p4.cap != 0 {
		t.Fatalf("Failed to deactivate peer #4")
	}
	pool.disconnect(p2)
	pool.disconnect(p4)
	addBalance(pool, p1.node.ID(), -1000*int64(time.Second))
	if p1.cap != 1 {
		t.Fatalf("Should not deactivate peer #1")
	}
	if p2.cap != 0 {
		t.Fatalf("Should not activate peer #2")
	}
}