github.com/cilium/cilium@v1.16.2/pkg/kvstore/allocator/allocator_test.go

// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium

package allocator

import (
	"context"
	"fmt"
	"math"
	"path"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"k8s.io/apimachinery/pkg/util/rand"

	"github.com/cilium/cilium/pkg/allocator"
	"github.com/cilium/cilium/pkg/idpool"
	"github.com/cilium/cilium/pkg/kvstore"
	"github.com/cilium/cilium/pkg/rate"
	"github.com/cilium/cilium/pkg/testutils"
)

const (
	testPrefix = "test-prefix"
)

// Configure a generous timeout to prevent flakes when running in a noisy CI
// environment.
var (
	tick    = 10 * time.Millisecond
	timeout = 5 * time.Second
)

// FIXME: this should be named better, it implements pkg/allocator.AllocatorKey
type TestAllocatorKey string

func (t TestAllocatorKey) GetKey() string { return string(t) }
func (t TestAllocatorKey) GetAsMap() map[string]string {
	return map[string]string{string(t): string(t)}
}
func (t TestAllocatorKey) String() string { return string(t) }
func (t TestAllocatorKey) PutKey(v string) allocator.AllocatorKey {
	return TestAllocatorKey(v)
}
func (t TestAllocatorKey) PutKeyFromMap(m map[string]string) allocator.AllocatorKey {
	for _, v := range m {
		return TestAllocatorKey(v)
	}

	panic("empty map")
}

func (t TestAllocatorKey) PutValue(key any, value any) allocator.AllocatorKey {
	panic("not implemented")
}

func (t TestAllocatorKey) Value(any) any {
	panic("not implemented")
}
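
// Compile-time check that TestAllocatorKey satisfies the
// allocator.AllocatorKey interface it stands in for throughout these tests.
var _ allocator.AllocatorKey = TestAllocatorKey("")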

func randomTestName() string {
	return fmt.Sprintf("%s%s", testPrefix, rand.String(12))
}

func BenchmarkAllocate(b *testing.B) {
	testutils.IntegrationTest(b)
	for _, backendName := range []string{"etcd", "consul"} {
		b.Run(backendName, func(b *testing.B) {
			kvstore.SetupDummyWithConfigOpts(b, backendName, opts(backendName))
			benchmarkAllocate(b)
		})
	}
}

func benchmarkAllocate(b *testing.B) {
	allocatorName := randomTestName()
	maxID := idpool.ID(256 + b.N)
	backend, err := NewKVStoreBackend(allocatorName, "a", TestAllocatorKey(""), kvstore.Client())
	require.NoError(b, err)
	a, err := allocator.NewAllocator(TestAllocatorKey(""), backend, allocator.WithMax(maxID))
	require.NoError(b, err)
	require.NotNil(b, a)
	defer a.DeleteAllKeys()

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _, _, err := a.Allocate(context.Background(), TestAllocatorKey(fmt.Sprintf("key%04d", i)))
		require.NoError(b, err)
	}
	b.StopTimer()
}

func BenchmarkRunLocksGC(b *testing.B) {
	testutils.IntegrationTest(b)
	for _, backendName := range []string{"etcd", "consul"} {
		b.Run(backendName, func(b *testing.B) {
			kvstore.SetupDummyWithConfigOpts(b, backendName, opts(backendName))
			benchmarkRunLocksGC(b, backendName)
		})
	}
}

func benchmarkRunLocksGC(b *testing.B, backendName string) {
	allocatorName := randomTestName()
	maxID := idpool.ID(256 + b.N)
	// FIXME: Did this previously use allocatorName := randomTestName(), i.e.
	// TestAllocatorKey(randomTestName())?
	backend1, err := NewKVStoreBackend(allocatorName, "a", TestAllocatorKey(""), kvstore.Client())
	require.NoError(b, err)
	allocator, err := allocator.NewAllocator(TestAllocatorKey(""), backend1, allocator.WithMax(maxID), allocator.WithoutGC())
	require.NoError(b, err)
	shortKey := TestAllocatorKey("1;")

	staleLocks := map[string]kvstore.Value{}
	staleLocks, err = allocator.RunLocksGC(context.Background(), staleLocks)
	require.NoError(b, err)
	require.Empty(b, staleLocks)

	var (
		lock1, lock2 kvstore.KVLocker
		gotLock1     = make(chan struct{})
		gotLock2     = make(chan struct{})
	)
	go func() {
		var err error
		lock1, err = backend1.Lock(context.Background(), shortKey)
		require.NoError(b, err)
		close(gotLock1)
		var client kvstore.BackendOperations
		switch backendName {
		case "etcd":
			client, _ = kvstore.NewClient(context.Background(),
				backendName,
				map[string]string{
					kvstore.EtcdAddrOption: kvstore.EtcdDummyAddress(),
				},
				nil,
			)
		case "consul":
			client, _ = kvstore.NewClient(context.Background(),
				backendName,
				map[string]string{
					kvstore.ConsulAddrOption:   kvstore.ConsulDummyAddress(),
					kvstore.ConsulOptionConfig: kvstore.ConsulDummyConfigFile(),
				},
				nil,
			)
		}
		lock2, err = client.LockPath(context.Background(), allocatorName+"/locks/"+kvstore.Client().Encode([]byte(shortKey.GetKey())))
		require.NoError(b, err)
		close(gotLock2)
	}()

	// Wait until lock1 is acquired.
	select {
	case <-gotLock1:
	case <-time.After(timeout):
		b.Error("Lock1 not obtained on time")
	}

	// Wait until the second client, in the goroutine above, tries to grab the
	// lock. We can't detect when that actually happens, so we have to assume
	// it will happen within one second.
	time.Sleep(time.Second)

	// Check which locks are stale; it should be lock1 and lock2.
	staleLocks, err = allocator.RunLocksGC(context.Background(), staleLocks)
	require.NoError(b, err)
	switch backendName {
	case "consul":
		// Contrary to etcd, consul does not create a lock in the kvstore
		// if a lock is already being held.
		require.Len(b, staleLocks, 1)
	case "etcd":
		require.Len(b, staleLocks, 2)
	}

	var (
		oldestRev     = uint64(math.MaxUint64)
		oldestLeaseID int64
		oldestKey     string
		sessionID     string
	)
	// staleLocks contains 2 locks, which is expected, but we only want to GC
	// the oldest one so that the remaining clients waiting to hold the lock
	// can be unblocked.
	for k, v := range staleLocks {
		if v.ModRevision < oldestRev {
			oldestKey = k
			oldestRev = v.ModRevision
			oldestLeaseID = v.LeaseID
			sessionID = v.SessionID
		}
	}

	// Store only the oldest key in the map so that it can be GC'd.
	staleLocks = map[string]kvstore.Value{}
	staleLocks[oldestKey] = kvstore.Value{
		ModRevision: oldestRev,
		LeaseID:     oldestLeaseID,
		SessionID:   sessionID,
	}

	// GC lock1 because it's the oldest lock being held.
	staleLocks, err = allocator.RunLocksGC(context.Background(), staleLocks)
	require.NoError(b, err)
	switch backendName {
	case "consul":
		// Contrary to etcd, consul does not create a lock in the kvstore
		// if a lock is already being held, so we have GC'd the only lock
		// available.
		require.Len(b, staleLocks, 0)
	case "etcd":
		// There are 2 clients trying to get the lock and we have GC'd one of
		// them, which is why 1 stale lock remains in the map.
		require.Len(b, staleLocks, 1)
	}

	// Wait until lock2 is acquired, which should happen now that we have
	// GC'd lock1.
	select {
	case <-gotLock2:
	case <-time.After(timeout):
		b.Error("Lock2 not obtained on time")
	}

	// Unlock lock1 and lock2 since we still hold the local locks.
	err = lock1.Unlock(context.Background())
	require.NoError(b, err)
	err = lock2.Unlock(context.Background())
	require.NoError(b, err)
}
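
// runLocksGCTwice isolates the contract exercised above: a first RunLocksGC
// round only reports candidate stale locks, and a second round releases the
// locks that were both reported previously and are still stale. This is a
// minimal sketch, not part of the package under test, assuming the caller
// carries the returned map between rounds.
func runLocksGCTwice(ctx context.Context, a *allocator.Allocator) (map[string]kvstore.Value, error) {
	candidates, err := a.RunLocksGC(ctx, map[string]kvstore.Value{})
	if err != nil {
		return nil, err
	}
	// Locks still present in candidates on the second call are released.
	return a.RunLocksGC(ctx, candidates)
}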

func BenchmarkGC(b *testing.B) {
	testutils.IntegrationTest(b)
	for _, backendName := range []string{"etcd", "consul"} {
		b.Run(backendName, func(b *testing.B) {
			kvstore.SetupDummyWithConfigOpts(b, backendName, opts(backendName))
			benchmarkGC(b)
		})
	}
}

func benchmarkGC(b *testing.B) {
	allocatorName := randomTestName()
	maxID := idpool.ID(256 + b.N)
	// FIXME: Did this previously use allocatorName := randomTestName(), i.e.
	// TestAllocatorKey(randomTestName())?
	backend, err := NewKVStoreBackend(allocatorName, "a", TestAllocatorKey(""), kvstore.Client())
	require.NoError(b, err)
	allocator, err := allocator.NewAllocator(TestAllocatorKey(""), backend, allocator.WithMax(maxID), allocator.WithoutGC())
	require.NoError(b, err)
	require.NotNil(b, allocator)
	defer allocator.DeleteAllKeys()
	defer allocator.Delete()

	allocator.DeleteAllKeys()

	shortKey := TestAllocatorKey("1;")
	shortID, _, _, err := allocator.Allocate(context.Background(), shortKey)
	require.NoError(b, err)
	require.NotEqual(b, idpool.NoID, shortID)

	longKey := TestAllocatorKey("1;2;")
	longID, _, _, err := allocator.Allocate(context.Background(), longKey)
	require.NoError(b, err)
	require.NotEqual(b, idpool.NoID, longID)

	_, err = allocator.Release(context.Background(), shortKey)
	require.NoError(b, err)

	rateLimiter := rate.NewLimiter(10*time.Second, 100)

	keysToDelete := map[string]uint64{}
	keysToDelete, _, err = allocator.RunGC(rateLimiter, keysToDelete)
	require.NoError(b, err)
	require.Len(b, keysToDelete, 1)
	keysToDelete, _, err = allocator.RunGC(rateLimiter, keysToDelete)
	require.NoError(b, err)
	require.Len(b, keysToDelete, 0)

	// Wait for the cache to be updated via delete notification.
	require.EventuallyWithT(b, func(c *assert.CollectT) {
		key, err := allocator.GetByID(context.TODO(), shortID)
		assert.NoError(c, err)
		assert.Nil(c, key)
	}, timeout, tick)

	key, err := allocator.GetByID(context.TODO(), shortID)
	require.NoError(b, err)
	require.Nil(b, key)
}
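
// The two RunGC calls above exercise a mark-then-sweep contract: one round
// reports keys that are released but still present in the kvstore, and a
// later round deletes only the keys it was handed from a previous round. A
// periodic driver could look like the sketch below; it is illustrative only,
// not part of the package under test, and the ticker interval is an
// assumption.
func identityGCLoop(ctx context.Context, a *allocator.Allocator, limiter *rate.Limiter, interval time.Duration) {
	staleKeys := map[string]uint64{}
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
		}
		next, _, err := a.RunGC(limiter, staleKeys)
		if err != nil {
			continue // transient kvstore error; retry with the same candidates
		}
		staleKeys = next
	}
}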

func BenchmarkGCShouldSkipOutOfRangeIdentities(b *testing.B) {
	testutils.IntegrationTest(b)
	for _, backendName := range []string{"etcd", "consul"} {
		b.Run(backendName, func(b *testing.B) {
			kvstore.SetupDummyWithConfigOpts(b, backendName, opts(backendName))
			benchmarkGCShouldSkipOutOfRangeIdentities(b)
		})
	}
}

func benchmarkGCShouldSkipOutOfRangeIdentities(b *testing.B) {
	// Allocator1: allocator under test
	backend, err := NewKVStoreBackend(randomTestName(), "a", TestAllocatorKey(""), kvstore.Client())
	require.NoError(b, err)

	maxID1 := idpool.ID(4 + b.N)
	allocator1, err := allocator.NewAllocator(TestAllocatorKey(""), backend, allocator.WithMax(maxID1), allocator.WithoutGC())
	require.NoError(b, err)
	require.NotNil(b, allocator1)

	defer allocator1.DeleteAllKeys()
	defer allocator1.Delete()

	allocator1.DeleteAllKeys()

	shortKey1 := TestAllocatorKey("1;")
	shortID1, _, _, err := allocator1.Allocate(context.Background(), shortKey1)
	require.NoError(b, err)
	require.NotEqual(b, idpool.NoID, shortID1)

	_, err = allocator1.Release(context.Background(), shortKey1)
	require.NoError(b, err)

	// Allocator2: uses an ID range that does not overlap with allocator1's.
	backend2, err := NewKVStoreBackend(randomTestName(), "a", TestAllocatorKey(""), kvstore.Client())
	require.NoError(b, err)

	minID2 := maxID1 + 1
	maxID2 := minID2 + 4
	allocator2, err := allocator.NewAllocator(TestAllocatorKey(""), backend2, allocator.WithMin(minID2), allocator.WithMax(maxID2), allocator.WithoutGC())
	require.NoError(b, err)
	require.NotNil(b, allocator2)

	shortKey2 := TestAllocatorKey("2;")
	shortID2, _, _, err := allocator2.Allocate(context.Background(), shortKey2)
	require.NoError(b, err)
	require.NotEqual(b, idpool.NoID, shortID2)

	defer allocator2.DeleteAllKeys()
	defer allocator2.Delete()

	allocator2.Release(context.Background(), shortKey2)

	// Perform GC with allocator1: there are two entries in the kvstore currently.
	rateLimiter := rate.NewLimiter(10*time.Second, 100)

	keysToDelete := map[string]uint64{}
	keysToDelete, _, err = allocator1.RunGC(rateLimiter, keysToDelete)
	require.NoError(b, err)
	// But only one of them is within allocator1's range and will be GC'd.
	require.Len(b, keysToDelete, 1)
	keysToDelete, _, err = allocator1.RunGC(rateLimiter, keysToDelete)
	require.NoError(b, err)
	require.Len(b, keysToDelete, 0)

	// Wait for the cache to be updated via delete notification.
	require.EventuallyWithT(b, func(c *assert.CollectT) {
		key, err := allocator1.GetByID(context.TODO(), shortID1)
		assert.NoError(c, err)
		assert.Nil(c, key)
	}, timeout, tick)

	// The key created with allocator1 should be GC'd.
	key, err := allocator1.GetByID(context.TODO(), shortID1)
	require.NoError(b, err)
	require.Nil(b, key)

	// The key created with allocator2 should NOT be GC'd.
	key2, err := allocator2.GetByID(context.TODO(), shortID2)
	require.NoError(b, err)
	require.NotNil(b, key2)
}

func TestAllocateCached(t *testing.T) {
	testutils.IntegrationTest(t)
	for _, backendName := range []string{"etcd", "consul"} {
		t.Run(backendName, func(t *testing.T) {
			kvstore.SetupDummyWithConfigOpts(t, backendName, opts(backendName))
			testAllocatorCached(t, idpool.ID(32), randomTestName()) // enable use of local cache
		})
	}
}

func testAllocatorCached(t *testing.T, maxID idpool.ID, allocatorName string) {
	backend, err := NewKVStoreBackend(allocatorName, "a", TestAllocatorKey(""), kvstore.Client())
	require.NoError(t, err)
	a, err := allocator.NewAllocator(TestAllocatorKey(""), backend,
		allocator.WithMax(maxID), allocator.WithoutGC())
	require.NoError(t, err)
	require.NotNil(t, a)

	// Remove any keys which might be left over.
	a.DeleteAllKeys()

	// Allocate all available IDs.
	for i := idpool.ID(1); i <= maxID; i++ {
		key := TestAllocatorKey(fmt.Sprintf("key%04d", i))
		id, new, newLocally, err := a.Allocate(context.Background(), key)
		require.NoError(t, err)
		require.NotEqual(t, idpool.NoID, id)
		require.True(t, new)
		require.True(t, newLocally)
	}

	// Allocate all IDs again using the same set of keys; the refcount should
	// go to 2.
	for i := idpool.ID(1); i <= maxID; i++ {
		key := TestAllocatorKey(fmt.Sprintf("key%04d", i))
		id, new, newLocally, err := a.Allocate(context.Background(), key)
		require.NoError(t, err)
		require.NotEqual(t, idpool.NoID, id)
		require.False(t, new)
		require.False(t, newLocally)
	}

	// Create a 2nd allocator and refill it.
	backend2, err := NewKVStoreBackend(allocatorName, "r", TestAllocatorKey(""), kvstore.Client())
	require.NoError(t, err)
	a2, err := allocator.NewAllocator(TestAllocatorKey(""), backend2,
		allocator.WithMax(maxID), allocator.WithoutGC())
	require.NoError(t, err)
	require.NotNil(t, a2)

	// Allocate all IDs again using the same set of keys; the refcount goes to
	// 3, then back to 2 after the immediate release.
	for i := idpool.ID(1); i <= maxID; i++ {
		key := TestAllocatorKey(fmt.Sprintf("key%04d", i))
		id, new, newLocally, err := a2.Allocate(context.Background(), key)
		require.NoError(t, err)
		require.NotEqual(t, idpool.NoID, id)
		require.False(t, new)
		require.True(t, newLocally)

		a2.Release(context.Background(), key)
	}

	// Release the 2nd reference of all IDs.
	for i := idpool.ID(1); i <= maxID; i++ {
		_, err := a.Release(context.Background(), TestAllocatorKey(fmt.Sprintf("key%04d", i)))
		require.NoError(t, err)
	}

	staleKeysPreviousRound := map[string]uint64{}
	rateLimiter := rate.NewLimiter(10*time.Second, 100)
	// Running the GC should not evict any entries.
	staleKeysPreviousRound, _, err = a.RunGC(rateLimiter, staleKeysPreviousRound)
	require.NoError(t, err)

	v, err := kvstore.Client().ListPrefix(context.TODO(), path.Join(allocatorName, "id"))
	require.NoError(t, err)
	require.Len(t, v, int(maxID))

	// Release the final reference of all IDs.
	for i := idpool.ID(1); i <= maxID; i++ {
		_, err := a.Release(context.Background(), TestAllocatorKey(fmt.Sprintf("key%04d", i)))
		require.NoError(t, err)
	}

	// Running the GC twice should evict all entries.
	staleKeysPreviousRound, _, err = a.RunGC(rateLimiter, staleKeysPreviousRound)
	require.NoError(t, err)
	_, _, err = a.RunGC(rateLimiter, staleKeysPreviousRound)
	require.NoError(t, err)

	v, err = kvstore.Client().ListPrefix(context.TODO(), path.Join(allocatorName, "id"))
	require.NoError(t, err)
	require.Len(t, v, 0)

	a.DeleteAllKeys()
	a.Delete()
	a2.Delete()
}
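
// Taken together, the assertions in testAllocatorCached pin down the meaning
// of Allocate's (id, new, newLocally) results as observed here: the first
// allocation of a key anywhere returns (id, true, true); re-allocating the
// same key through the same allocator returns (id, false, false); and the
// first allocation of an already-known key through a different allocator
// returns (id, false, true), since only the local reference is new.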

func TestKeyToID(t *testing.T) {
	testutils.IntegrationTest(t)
	for _, backendName := range []string{"etcd", "consul"} {
		t.Run(backendName, func(t *testing.T) {
			kvstore.SetupDummyWithConfigOpts(t, backendName, opts(backendName))
			testKeyToID(t)
		})
	}
}

func testKeyToID(t *testing.T) {
	allocatorName := randomTestName()
	backend, err := NewKVStoreBackend(allocatorName, "a", TestAllocatorKey(""), kvstore.Client())
	require.NoError(t, err)
	a, err := allocator.NewAllocator(TestAllocatorKey(""), backend)
	require.NoError(t, err)
	require.NotNil(t, a)

	// An error is returned because the path is outside the prefix
	// (allocatorName/id).
	id, err := backend.keyToID(path.Join(allocatorName, "invalid"))
	require.Error(t, err)
	require.Equal(t, idpool.NoID, id)

	// An error is returned because the path contains the prefix
	// (allocatorName/id) but cannot be parsed ("invalid").
	id, err = backend.keyToID(path.Join(allocatorName, "id", "invalid"))
	require.Error(t, err)
	require.Equal(t, idpool.NoID, id)

	// A valid lookup that finds an ID.
	id, err = backend.keyToID(path.Join(allocatorName, "id", "10"))
	require.NoError(t, err)
	require.Equal(t, idpool.ID(10), id)
}
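
// For reference, the kvstore layout these tests construct paths against
// groups everything under the allocator name. The scheme below is an
// observation from the paths used in this file, not a spec:
//
//	<name>/id/<numeric-id>    master key mapping an ID to its allocator key;
//	                          the only form keyToID accepts
//	<name>/value/<key>/...    per-node reference from a key back to its ID
//	<name>/locks/<key>        distributed locks taken while allocating a key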

func TestGetNoCache(t *testing.T) {
	testutils.IntegrationTest(t)
	for _, backendName := range []string{"etcd", "consul"} {
		t.Run(backendName, func(t *testing.T) {
			kvstore.SetupDummyWithConfigOpts(t, backendName, opts(backendName))
			testGetNoCache(t, idpool.ID(256))
		})
	}
}

func testGetNoCache(t *testing.T, maxID idpool.ID) {
	allocatorName := randomTestName()
	backend, err := NewKVStoreBackend(allocatorName, "a", TestAllocatorKey(""), kvstore.Client())
	require.NoError(t, err)
	allocator, err := allocator.NewAllocator(TestAllocatorKey(""), backend, allocator.WithMax(maxID), allocator.WithoutGC())
	require.NoError(t, err)
	require.NotNil(t, allocator)

	// Remove any keys which might be left over.
	allocator.DeleteAllKeys()
	defer allocator.DeleteAllKeys()

	labelsLong := "foo;/;bar;"
	key := TestAllocatorKey(fmt.Sprintf("%s%010d", labelsLong, 0))
	longID, new, newLocally, err := allocator.Allocate(context.Background(), key)
	require.NoError(t, err)
	require.NotEqual(t, idpool.NoID, longID)
	require.True(t, new)
	require.True(t, newLocally)

	observedID, err := allocator.GetNoCache(context.Background(), key)
	require.NoError(t, err)
	require.NotEqual(t, idpool.NoID, observedID)

	labelsShort := "foo;/;"
	shortKey := TestAllocatorKey(labelsShort)
	observedID, err = allocator.GetNoCache(context.Background(), shortKey)
	require.NoError(t, err)
	require.Equal(t, idpool.NoID, observedID)

	shortID, new, newLocally, err := allocator.Allocate(context.Background(), shortKey)
	require.NoError(t, err)
	require.NotEqual(t, idpool.NoID, shortID)
	require.True(t, new)
	require.True(t, newLocally)

	observedID, err = allocator.GetNoCache(context.Background(), shortKey)
	require.NoError(t, err)
	require.Equal(t, shortID, observedID)
}

func TestPrefixMatchesKey(t *testing.T) {
	// Example key: cilium/state/identities/v1/value/label;foo;bar;/172.0.124.60

	tests := []struct {
		prefix   string
		key      string
		expected bool
	}{
		{
			prefix:   "foo",
			key:      "foo/bar",
			expected: true,
		},
		{
			prefix:   "foo/;bar;baz;/;a;",
			key:      "foo/;bar;baz;/;a;/alice",
			expected: true,
		},
		{
			prefix:   "foo/;bar;baz;",
			key:      "foo/;bar;baz;/;a;/alice",
			expected: false,
		},
		{
			prefix:   "foo/;bar;baz;/;a;/baz",
			key:      "foo/;bar;baz;/;a;/alice",
			expected: false,
		},
	}

	for _, tt := range tests {
		t.Logf("prefixMatchesKey(%q, %q) expected to be %t", tt.prefix, tt.key, tt.expected)
		result := prefixMatchesKey(tt.prefix, tt.key)
		require.Equal(t, tt.expected, result)
	}
}
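
// The cases above are consistent with prefixMatchesKey matching only when the
// prefix covers everything up to the key's final '/' separator, i.e. the key
// is exactly prefix + "/" + leaf. That is why "foo/;bar;baz;" does not match
// "foo/;bar;baz;/;a;/alice" even though it is a plain string prefix of it. A
// minimal sketch of an equivalent check (illustrative only; see the actual
// implementation in this package):
//
//	func looksLikePrefixMatch(prefix, key string) bool {
//		return strings.LastIndex(key, "/") == len(prefix)
//	}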

func TestRemoteCache(t *testing.T) {
	testutils.IntegrationTest(t)
	for _, backendName := range []string{"etcd", "consul"} {
		t.Run(backendName, func(t *testing.T) {
			kvstore.SetupDummyWithConfigOpts(t, backendName, opts(backendName))
			testRemoteCache(t)
		})
	}
}

func testRemoteCache(t *testing.T) {
	testName := randomTestName()
	backend, err := NewKVStoreBackend(testName, "a", TestAllocatorKey(""), kvstore.Client())
	require.NoError(t, err)
	a, err := allocator.NewAllocator(TestAllocatorKey(""), backend, allocator.WithMax(idpool.ID(256)))
	require.NoError(t, err)
	require.NotNil(t, a)

	// Remove any keys which might be left over.
	a.DeleteAllKeys()

	defer func() {
		a.DeleteAllKeys()
		a.Delete()
	}()

	// Allocate the first 4 IDs.
	for i := idpool.ID(1); i <= idpool.ID(4); i++ {
		key := TestAllocatorKey(fmt.Sprintf("key%04d", i))
		_, _, _, err := a.Allocate(context.Background(), key)
		require.NoError(t, err)
	}

	// Wait for the main cache to be populated.
	require.EventuallyWithT(t, func(c *assert.CollectT) {
		cacheLen := 0
		a.ForeachCache(func(id idpool.ID, val allocator.AllocatorKey) {
			cacheLen++
		})
		assert.EqualValues(c, 4, cacheLen)
	}, timeout, tick)

	// Count identical allocations returned.
	cache := map[idpool.ID]int{}
	a.ForeachCache(func(id idpool.ID, val allocator.AllocatorKey) {
		cache[id]++
	})

	// ForeachCache must have returned 4 allocations, all unique.
	require.Len(t, cache, 4)
	for i := range cache {
		require.Equal(t, 1, cache[i])
	}

	// Watch the prefix in the same kvstore via a 2nd watcher.
	backend2, err := NewKVStoreBackend(testName, "a", TestAllocatorKey(""), kvstore.Client())
	require.NoError(t, err)
	a2, err := allocator.NewAllocator(TestAllocatorKey(""), backend2, allocator.WithMax(idpool.ID(256)),
		allocator.WithoutAutostart(), allocator.WithoutGC())
	require.NoError(t, err)

	rc := a.NewRemoteCache("remote", a2)
	require.NotNil(t, rc)

	var wg sync.WaitGroup
	ctx, cancel := context.WithCancel(context.Background())

	defer func() {
		cancel()
		wg.Wait()
	}()

	wg.Add(1)
	go func() {
		rc.Watch(ctx, func(ctx context.Context) {})
		wg.Done()
	}()

	// Wait for the remote cache to be populated.
	require.EventuallyWithT(t, func(c *assert.CollectT) {
		cacheLen := 0
		a.ForeachCache(func(id idpool.ID, val allocator.AllocatorKey) {
			cacheLen++
		})
		// 4 local + 4 remote
		assert.EqualValues(c, 8, cacheLen)
	}, timeout, tick)

	// Count the allocations in the main cache *AND* the remote cache.
	cache = map[idpool.ID]int{}
	a.ForeachCache(func(id idpool.ID, val allocator.AllocatorKey) {
		cache[id]++
	})

	// ForeachCache must have returned 4 allocations, each seen twice: once in
	// the main cache, once in the remote cache.
	require.Len(t, cache, 4)
	for i := range cache {
		require.Equal(t, 2, cache[i])
	}
}

func opts(backendName string) map[string]string {
	if backendName == "etcd" {
		// Explicitly set a higher QPS than the default to speed up the test.
		return map[string]string{kvstore.EtcdRateLimitOption: "100"}
	}

	return nil
}