github.com/theQRL/go-zond@v0.1.1/les/vflux/server/clientpool_test.go

// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package server

import (
	"fmt"
	"math/rand"
	"testing"
	"time"

	"github.com/theQRL/go-zond/common/mclock"
	"github.com/theQRL/go-zond/core/rawdb"
	"github.com/theQRL/go-zond/p2p/enode"
	"github.com/theQRL/go-zond/p2p/enr"
	"github.com/theQRL/go-zond/p2p/nodestate"
)

const defaultConnectedBias = time.Minute * 3

func TestClientPoolL10C100Free(t *testing.T) {
	testClientPool(t, 10, 100, 0, true)
}

func TestClientPoolL40C200Free(t *testing.T) {
	testClientPool(t, 40, 200, 0, true)
}

func TestClientPoolL100C300Free(t *testing.T) {
	testClientPool(t, 100, 300, 0, true)
}

func TestClientPoolL10C100P4(t *testing.T) {
	testClientPool(t, 10, 100, 4, false)
}

func TestClientPoolL40C200P30(t *testing.T) {
	testClientPool(t, 40, 200, 30, false)
}

func TestClientPoolL100C300P20(t *testing.T) {
	testClientPool(t, 100, 300, 20, false)
}

const testClientPoolTicks = 100000

type poolTestPeer struct {
	node            *enode.Node
	index           int
	disconnCh       chan int
	cap             uint64
	inactiveAllowed bool
}

func newPoolTestPeer(i int, disconnCh chan int) *poolTestPeer {
	return &poolTestPeer{
		index:     i,
		disconnCh: disconnCh,
		node:      enode.SignNull(&enr.Record{}, enode.ID{byte(i % 256), byte(i >> 8)}),
	}
}

func (i *poolTestPeer) Node() *enode.Node {
	return i.node
}

func (i *poolTestPeer) FreeClientId() string {
	return fmt.Sprintf("addr #%d", i.index)
}

func (i *poolTestPeer) InactiveAllowance() time.Duration {
	if i.inactiveAllowed {
		return time.Second * 10
	}
	return 0
}

func (i *poolTestPeer) UpdateCapacity(capacity uint64, requested bool) {
	i.cap = capacity
}

func (i *poolTestPeer) Disconnect() {
	if i.disconnCh == nil {
		return
	}
	id := i.node.ID()
	i.disconnCh <- int(id[0]) + int(id[1])<<8
}

func getBalance(pool *ClientPool, p *poolTestPeer) (pos, neg uint64) {
	pool.BalanceOperation(p.node.ID(), p.FreeClientId(), func(nb AtomicBalanceOperator) {
		pos, neg = nb.GetBalance()
	})
	return
}

func addBalance(pool *ClientPool, id enode.ID, amount int64) {
	pool.BalanceOperation(id, "", func(nb AtomicBalanceOperator) {
		nb.AddBalance(amount)
	})
}

// checkDiff returns true if a and b differ by more than 0.1% of their average
// (with a minimum tolerance of 1).
func checkDiff(a, b uint64) bool {
	maxDiff := (a + b) / 2000
	if maxDiff < 1 {
		maxDiff = 1
	}
	return a > b+maxDiff || b > a+maxDiff
}

func connect(pool *ClientPool, peer *poolTestPeer) uint64 {
	pool.Register(peer)
	return peer.cap
}

func disconnect(pool *ClientPool, peer *poolTestPeer) {
	pool.Unregister(peer)
}

func alwaysTrueFn() bool {
	return true
}

// testClientPool runs a randomized connect/disconnect simulation against a pool
// with the given active-peer limit, total client count and number of paid clients,
// and checks that every client's total connected time falls within the expected range.
func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, randomDisconnect bool) {
	var (
		clock     mclock.Simulated
		db        = rawdb.NewMemoryDatabase()
		connected = make([]bool, clientCount)
		connTicks = make([]int, clientCount)
		disconnCh = make(chan int, clientCount)
		pool      = NewClientPool(db, 1, 0, &clock, alwaysTrueFn)
	)
	pool.Start()
	pool.SetExpirationTCs(0, 1000)

	pool.SetLimits(uint64(activeLimit), uint64(activeLimit))
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	// pool should accept new peers up to its connected limit
	for i := 0; i < activeLimit; i++ {
		if cap := connect(pool, newPoolTestPeer(i, disconnCh)); cap != 0 {
			connected[i] = true
		} else {
			t.Fatalf("Test peer #%d rejected", i)
		}
	}
	// randomly connect and disconnect peers, expect to have a similar total connection time at the end
	for tickCounter := 0; tickCounter < testClientPoolTicks; tickCounter++ {
		clock.Run(1 * time.Second)

		if tickCounter == testClientPoolTicks/4 {
			// give a positive balance to some of the peers
			amount := testClientPoolTicks / 2 * int64(time.Second) // enough for half of the simulation period
			for i := 0; i < paidCount; i++ {
				addBalance(pool, newPoolTestPeer(i, disconnCh).node.ID(), amount)
			}
		}

		i := rand.Intn(clientCount)
		if connected[i] {
			if randomDisconnect {
				disconnect(pool, newPoolTestPeer(i, disconnCh))
				connected[i] = false
				connTicks[i] += tickCounter
			}
		} else {
			if cap := connect(pool, newPoolTestPeer(i, disconnCh)); cap != 0 {
				connected[i] = true
				connTicks[i] -= tickCounter
			} else {
				disconnect(pool, newPoolTestPeer(i, disconnCh))
			}
		}
	pollDisconnects:
		for {
			select {
			case i := <-disconnCh:
				disconnect(pool, newPoolTestPeer(i, disconnCh))
				if connected[i] {
					connTicks[i] += tickCounter
					connected[i] = false
				}
			default:
				break pollDisconnects
			}
		}
	}

	expTicks := testClientPoolTicks/2*activeLimit/clientCount + testClientPoolTicks/2*(activeLimit-paidCount)/(clientCount-paidCount)
	expMin := expTicks - expTicks/5
	expMax := expTicks + expTicks/5
	paidTicks := testClientPoolTicks/2*activeLimit/clientCount + testClientPoolTicks/2
	paidMin := paidTicks - paidTicks/5
	paidMax := paidTicks + paidTicks/5

	// check that the total connected time of each peer is within the expected range
	for i, c := range connected {
		if c {
			connTicks[i] += testClientPoolTicks
		}
		min, max := expMin, expMax
		if i < paidCount {
			// expect a higher amount for clients with a positive balance
			min, max = paidMin, paidMax
		}
		if connTicks[i] < min || connTicks[i] > max {
			t.Errorf("Total connected time of test node #%d (%d) outside expected range (%d to %d)", i, connTicks[i], min, max)
		}
	}
	pool.Stop()
}

// testPriorityConnect connects p and tries to raise its capacity to cap,
// failing the test if the outcome does not match expSuccess.
func testPriorityConnect(t *testing.T, pool *ClientPool, p *poolTestPeer, cap uint64, expSuccess bool) {
	if cap := connect(pool, p); cap == 0 {
		if expSuccess {
			t.Fatalf("Failed to connect paid client")
		} else {
			return
		}
	}
	if newCap, _ := pool.SetCapacity(p.node, cap, defaultConnectedBias, true); newCap != cap {
		if expSuccess {
			t.Fatalf("Failed to raise capacity of paid client")
		} else {
			return
		}
	}
	if !expSuccess {
		t.Fatalf("Should reject high capacity paid client")
	}
}

func TestConnectPaidClient(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10))
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	// Add balance for an external client and mark it as a paid client
	addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))
	testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 10, true)
}

func TestConnectPaidClientToSmallPool(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	// Add balance for an external client and mark it as a paid client
	addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))

	// connect a paid client requesting more capacity than the pool provides; it should be rejected
	testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 100, false)
}

func TestConnectPaidClientToFullPool(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	for i := 0; i < 10; i++ {
		addBalance(pool, newPoolTestPeer(i, nil).node.ID(), int64(time.Second*20))
		connect(pool, newPoolTestPeer(i, nil))
	}
	addBalance(pool, newPoolTestPeer(11, nil).node.ID(), int64(time.Second*2)) // Add low balance to new paid client
	if cap := connect(pool, newPoolTestPeer(11, nil)); cap != 0 {
		t.Fatalf("Low balance paid client should be rejected")
	}
	clock.Run(time.Second)
	addBalance(pool, newPoolTestPeer(12, nil).node.ID(), int64(time.Minute*5)) // Add high balance to new paid client
	if cap := connect(pool, newPoolTestPeer(12, nil)); cap == 0 {
		t.Fatalf("High balance paid client should be accepted")
	}
}

func TestPaidClientKickedOut(t *testing.T) {
	var (
		clock    mclock.Simulated
		db       = rawdb.NewMemoryDatabase()
		kickedCh = make(chan int, 100)
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	pool.SetExpirationTCs(0, 0)
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	for i := 0; i < 10; i++ {
		addBalance(pool, newPoolTestPeer(i, kickedCh).node.ID(), 10000000000) // 10 second allowance
		connect(pool, newPoolTestPeer(i, kickedCh))
		clock.Run(time.Millisecond)
	}
	clock.Run(defaultConnectedBias + time.Second*11)
	if cap := connect(pool, newPoolTestPeer(11, kickedCh)); cap == 0 {
		t.Fatalf("Free client should be accepted")
	}
	clock.Run(0)
	select {
	case id := <-kickedCh:
		if id != 0 {
			t.Fatalf("Kicked client mismatch, want %v, got %v", 0, id)
		}
	default:
		t.Fatalf("timeout")
	}
}

func TestConnectFreeClient(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10))
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
	if cap := connect(pool, newPoolTestPeer(0, nil)); cap == 0 {
		t.Fatalf("Failed to connect free client")
	}
	testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 2, false)
}

func TestConnectFreeClientToFullPool(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	for i := 0; i < 10; i++ {
		connect(pool, newPoolTestPeer(i, nil))
	}
	if cap := connect(pool, newPoolTestPeer(11, nil)); cap != 0 {
		t.Fatalf("New free client should be rejected")
	}
	clock.Run(time.Minute)
	if cap := connect(pool, newPoolTestPeer(12, nil)); cap != 0 {
		t.Fatalf("New free client should be rejected")
	}
	clock.Run(time.Millisecond)
	clock.Run(4 * time.Minute)
	if cap := connect(pool, newPoolTestPeer(13, nil)); cap == 0 {
		t.Fatalf("Old client connected for more than 5 minutes should be kicked")
	}
}

func TestFreeClientKickedOut(t *testing.T) {
	var (
		clock  mclock.Simulated
		db     = rawdb.NewMemoryDatabase()
		kicked = make(chan int, 100)
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	for i := 0; i < 10; i++ {
		connect(pool, newPoolTestPeer(i, kicked))
		clock.Run(time.Millisecond)
	}
	if cap := connect(pool, newPoolTestPeer(10, kicked)); cap != 0 {
		t.Fatalf("New free client should be rejected")
	}
	clock.Run(0)
	select {
	case <-kicked:
	default:
		t.Fatalf("timeout")
	}
	disconnect(pool, newPoolTestPeer(10, kicked))
	clock.Run(5 * time.Minute)
	for i := 0; i < 10; i++ {
		connect(pool, newPoolTestPeer(i+10, kicked))
	}
	clock.Run(0)

	for i := 0; i < 10; i++ {
		select {
		case id := <-kicked:
			if id >= 10 {
				t.Fatalf("Old client should be kicked, now got: %d", id)
			}
		default:
			t.Fatalf("timeout")
		}
	}
}

func TestPositiveBalanceCalculation(t *testing.T) {
	var (
		clock  mclock.Simulated
		db     = rawdb.NewMemoryDatabase()
		kicked = make(chan int, 10)
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute*3))
	testPriorityConnect(t, pool, newPoolTestPeer(0, kicked), 10, true)
	clock.Run(time.Minute)

	disconnect(pool, newPoolTestPeer(0, kicked))
	pb, _ := getBalance(pool, newPoolTestPeer(0, kicked))
	if checkDiff(pb, uint64(time.Minute*2)) {
		t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute*2), pb)
	}
}

func TestDowngradePriorityClient(t *testing.T) {
	var (
		clock  mclock.Simulated
		db     = rawdb.NewMemoryDatabase()
		kicked = make(chan int, 10)
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	p := newPoolTestPeer(0, kicked)
	addBalance(pool, p.node.ID(), int64(time.Minute))
	testPriorityConnect(t, pool, p, 10, true)
	if p.cap != 10 {
		t.Fatalf("The capacity of priority peer hasn't been updated, got: %d", p.cap)
	}

	clock.Run(time.Minute)             // All positive balance should be used up.
	time.Sleep(300 * time.Millisecond) // Ensure the callback is called
	if p.cap != 1 {
		t.Fatalf("The capacity of peer should be downgraded, got: %d", p.cap)
	}
	pb, _ := getBalance(pool, newPoolTestPeer(0, kicked))
	if pb != 0 {
		t.Fatalf("Positive balance mismatch, want %v, got %v", 0, pb)
	}

	addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute))
	pb, _ = getBalance(pool, newPoolTestPeer(0, kicked))
	if checkDiff(pb, uint64(time.Minute)) {
		t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute), pb)
	}
}

func TestNegativeBalanceCalculation(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetExpirationTCs(0, 3600)
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1})

	for i := 0; i < 10; i++ {
		connect(pool, newPoolTestPeer(i, nil))
	}
	clock.Run(time.Second)

	for i := 0; i < 10; i++ {
		disconnect(pool, newPoolTestPeer(i, nil))
		_, nb := getBalance(pool, newPoolTestPeer(i, nil))
		if nb != 0 {
			t.Fatalf("Short connection shouldn't be recorded")
		}
	}
	for i := 0; i < 10; i++ {
		connect(pool, newPoolTestPeer(i, nil))
	}
	clock.Run(time.Minute)
	for i := 0; i < 10; i++ {
		disconnect(pool, newPoolTestPeer(i, nil))
		_, nb := getBalance(pool, newPoolTestPeer(i, nil))
		exp := uint64(time.Minute) / 1000
		exp -= exp / 120 // correct for negative balance expiration
		if checkDiff(nb, exp) {
			t.Fatalf("Negative balance mismatch, want %v, got %v", exp, nb)
		}
	}
}

func TestInactiveClient(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(2, uint64(2))

	p1 := newPoolTestPeer(1, nil)
	p1.inactiveAllowed = true
	p2 := newPoolTestPeer(2, nil)
	p2.inactiveAllowed = true
	p3 := newPoolTestPeer(3, nil)
	p3.inactiveAllowed = true
	addBalance(pool, p1.node.ID(), 1000*int64(time.Second))
	addBalance(pool, p3.node.ID(), 2000*int64(time.Second))
	// p1: 1000  p2: 0  p3: 2000
	p1.cap = connect(pool, p1)
	if p1.cap != 1 {
		t.Fatalf("Failed to connect peer #1")
	}
	p2.cap = connect(pool, p2)
	if p2.cap != 1 {
		t.Fatalf("Failed to connect peer #2")
	}
	p3.cap = connect(pool, p3)
	if p3.cap != 1 {
		t.Fatalf("Failed to connect peer #3")
	}
	if p2.cap != 0 {
		t.Fatalf("Failed to deactivate peer #2")
	}
	addBalance(pool, p2.node.ID(), 3000*int64(time.Second))
	// p1: 1000  p2: 3000  p3: 2000
	if p2.cap != 1 {
		t.Fatalf("Failed to activate peer #2")
	}
	if p1.cap != 0 {
		t.Fatalf("Failed to deactivate peer #1")
	}
	addBalance(pool, p2.node.ID(), -2500*int64(time.Second))
	// p1: 1000  p2: 500  p3: 2000
	if p1.cap != 1 {
		t.Fatalf("Failed to activate peer #1")
	}
	if p2.cap != 0 {
		t.Fatalf("Failed to deactivate peer #2")
	}
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0})
	p4 := newPoolTestPeer(4, nil)
	addBalance(pool, p4.node.ID(), 1500*int64(time.Second))
	// p1: 1000  p2: 500  p3: 2000  p4: 1500
	p4.cap = connect(pool, p4)
	if p4.cap != 1 {
		t.Fatalf("Failed to activate peer #4")
	}
	if p1.cap != 0 {
		t.Fatalf("Failed to deactivate peer #1")
	}
	clock.Run(time.Second * 600)
	// manually trigger a check to avoid a long real-time wait
	pool.ns.SetState(p1.node, pool.setup.updateFlag, nodestate.Flags{}, 0)
	pool.ns.SetState(p1.node, nodestate.Flags{}, pool.setup.updateFlag, 0)
	// p1: 1000  p2: 500  p3: 2000  p4: 900
	if p1.cap != 1 {
		t.Fatalf("Failed to activate peer #1")
	}
	if p4.cap != 0 {
		t.Fatalf("Failed to deactivate peer #4")
	}
	disconnect(pool, p2)
	disconnect(pool, p4)
	addBalance(pool, p1.node.ID(), -1000*int64(time.Second))
	if p1.cap != 1 {
		t.Fatalf("Should not deactivate peer #1")
	}
	if p2.cap != 0 {
		t.Fatalf("Should not activate peer #2")
	}
}