// github.com/cilium/cilium@v1.16.2/pkg/identity/cache/allocation_test.go

// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium

package cache

import (
	"context"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/cilium/cilium/pkg/allocator"
	cmtypes "github.com/cilium/cilium/pkg/clustermesh/types"
	"github.com/cilium/cilium/pkg/identity"
	cacheKey "github.com/cilium/cilium/pkg/identity/key"
	"github.com/cilium/cilium/pkg/idpool"
	"github.com/cilium/cilium/pkg/inctimer"
	"github.com/cilium/cilium/pkg/kvstore"
	"github.com/cilium/cilium/pkg/labels"
	"github.com/cilium/cilium/pkg/lock"
	"github.com/cilium/cilium/pkg/option"
	"github.com/cilium/cilium/pkg/testutils"
)

var fakeConfig = &option.DaemonConfig{
	ConfigPatchMutex: new(lock.RWMutex),
	K8sNamespace:     "kube-system",
}

func TestAllocateIdentityReserved(t *testing.T) {
	for _, be := range []string{"etcd", "consul"} {
		t.Run(be, func(t *testing.T) {
			testutils.IntegrationTest(t)
			kvstore.SetupDummy(t, be)
			testAllocateIdentityReserved(t)
		})
	}
}

func testAllocateIdentityReserved(t *testing.T) {
	var (
		lbls  labels.Labels
		i     *identity.Identity
		isNew bool
		err   error
	)

	lbls = labels.Labels{
		labels.IDNameHost: labels.NewLabel(labels.IDNameHost, "", labels.LabelSourceReserved),
	}

	mgr := NewCachingIdentityAllocator(newDummyOwner())
	<-mgr.InitIdentityAllocator(nil)

	require.Equal(t, true, identity.IdentityAllocationIsLocal(lbls))
	i, isNew, err = mgr.AllocateIdentity(context.Background(), lbls, false, identity.InvalidIdentity)
	require.NoError(t, err)
	require.Equal(t, identity.ReservedIdentityHost, i.ID)
	require.False(t, isNew)

	lbls = labels.Labels{
		labels.IDNameWorld: labels.NewLabel(labels.IDNameWorld, "", labels.LabelSourceReserved),
	}
	require.Equal(t, true, identity.IdentityAllocationIsLocal(lbls))
	i, isNew, err = mgr.AllocateIdentity(context.Background(), lbls, false, identity.InvalidIdentity)
	require.NoError(t, err)
	require.Equal(t, identity.ReservedIdentityWorld, i.ID)
	require.False(t, isNew)

	require.Equal(t, true, identity.IdentityAllocationIsLocal(labels.LabelHealth))
	i, isNew, err = mgr.AllocateIdentity(context.Background(), labels.LabelHealth, false, identity.InvalidIdentity)
	require.NoError(t, err)
	require.Equal(t, identity.ReservedIdentityHealth, i.ID)
	require.False(t, isNew)

	lbls = labels.Labels{
		labels.IDNameInit: labels.NewLabel(labels.IDNameInit, "", labels.LabelSourceReserved),
	}
	require.Equal(t, true, identity.IdentityAllocationIsLocal(lbls))
	i, isNew, err = mgr.AllocateIdentity(context.Background(), lbls, false, identity.InvalidIdentity)
	require.NoError(t, err)
	require.Equal(t, identity.ReservedIdentityInit, i.ID)
	require.False(t, isNew)

	lbls = labels.Labels{
		labels.IDNameUnmanaged: labels.NewLabel(labels.IDNameUnmanaged, "", labels.LabelSourceReserved),
	}
	require.Equal(t, true, identity.IdentityAllocationIsLocal(lbls))
	i, isNew, err = mgr.AllocateIdentity(context.Background(), lbls, false, identity.InvalidIdentity)
	require.NoError(t, err)
	require.Equal(t, identity.ReservedIdentityUnmanaged, i.ID)
	require.False(t, isNew)
}

type dummyOwner struct {
	updated chan identity.NumericIdentity
	mutex   lock.Mutex
	cache   identity.IdentityMap
}
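
// newDummyOwner returns a dummyOwner with an empty identity cache and a
// buffered update channel, so delivering identity update events never
// blocks the watcher during tests.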
func newDummyOwner() *dummyOwner {
	return &dummyOwner{
		cache:   identity.IdentityMap{},
		updated: make(chan identity.NumericIdentity, 1024),
	}
}

func (d *dummyOwner) UpdateIdentities(added, deleted identity.IdentityMap) {
	d.mutex.Lock()
	log.Debugf("Dummy UpdateIdentities(added: %v, deleted: %v)", added, deleted)
	for id, lbls := range added {
		d.cache[id] = lbls
		d.updated <- id
	}
	for id := range deleted {
		delete(d.cache, id)
		d.updated <- id
	}
	d.mutex.Unlock()
}

func (d *dummyOwner) GetIdentity(id identity.NumericIdentity) labels.LabelArray {
	d.mutex.Lock()
	defer d.mutex.Unlock()
	return d.cache[id]
}

func (d *dummyOwner) GetNodeSuffix() string {
	return "foo"
}

// WaitUntilID waits until an update event is received for the 'target'
// identity and returns the number of events processed to get there.
// It returns 0 if the 'd.updated' channel is closed or nothing is
// received from that channel within 60 seconds.
func (d *dummyOwner) WaitUntilID(target identity.NumericIdentity) int {
	rounds := 0
	timer, timerDone := inctimer.New()
	defer timerDone()
	for {
		select {
		case nid, ok := <-d.updated:
			if !ok {
				// updates channel closed
				return 0
			}
			rounds++
			if nid == target {
				return rounds
			}
		case <-timer.After(60 * time.Second):
			// Timed out waiting for KV-store events
			return 0
		}
	}
}

func TestEventWatcherBatching(t *testing.T) {
	for _, be := range []string{"etcd", "consul"} {
		t.Run(be, func(t *testing.T) {
			testutils.IntegrationTest(t)
			kvstore.SetupDummy(t, be)
			testEventWatcherBatching(t)
		})
	}
}

func testEventWatcherBatching(t *testing.T) {
	owner := newDummyOwner()
	events := make(allocator.AllocatorEventChan, 1024)
	watcher := identityWatcher{
		owner: owner,
	}

	watcher.watch(events)

	lbls := labels.NewLabelsFromSortedList("id=foo")
	key := &cacheKey.GlobalIdentity{LabelArray: lbls.LabelArray()}

	for i := 1024; i < 1034; i++ {
		events <- allocator.AllocatorEvent{
			Typ: allocator.AllocatorChangeUpsert,
			ID:  idpool.ID(i),
			Key: key,
		}
	}
	require.NotEqual(t, 0, owner.WaitUntilID(1033))
	require.EqualValues(t, lbls.LabelArray(), owner.GetIdentity(identity.NumericIdentity(1033)))
	for i := 1024; i < 1034; i++ {
		events <- allocator.AllocatorEvent{
			Typ: allocator.AllocatorChangeDelete,
			ID:  idpool.ID(i),
		}
	}
	require.NotEqual(t, 0, owner.WaitUntilID(1033))
	for i := 2048; i < 2058; i++ {
		events <- allocator.AllocatorEvent{
			Typ: allocator.AllocatorChangeUpsert,
			ID:  idpool.ID(i),
			Key: key,
		}
	}
	for i := 2048; i < 2053; i++ {
		events <- allocator.AllocatorEvent{
			Typ: allocator.AllocatorChangeDelete,
			ID:  idpool.ID(i),
		}
	}
	require.NotEqual(t, 0, owner.WaitUntilID(2052))
	require.Nil(t, owner.GetIdentity(identity.NumericIdentity(2052))) // Batching removed the add before it reached the owner

	for i := 2053; i < 2058; i++ {
		events <- allocator.AllocatorEvent{
			Typ: allocator.AllocatorChangeDelete,
			ID:  idpool.ID(i),
		}
	}
	require.NotEqual(t, 0, owner.WaitUntilID(2057))
}

func TestGetIdentityCache(t *testing.T) {
	for _, be := range []string{"etcd", "consul"} {
		t.Run(be, func(t *testing.T) {
			testutils.IntegrationTest(t)
			kvstore.SetupDummy(t, be)
			testGetIdentityCache(t)
		})
	}
}
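
// testGetIdentityCache checks that the well-known identities set up via
// InitWellKnownIdentities (here the reserved kvstore identity) are present
// in the allocator's identity cache snapshot.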
func testGetIdentityCache(t *testing.T) {
	identity.InitWellKnownIdentities(fakeConfig, cmtypes.ClusterInfo{Name: "default", ID: 5})
	// The nils are only used by k8s CRD identities. We default to kvstore.
	mgr := NewCachingIdentityAllocator(newDummyOwner())
	<-mgr.InitIdentityAllocator(nil)
	defer mgr.Close()
	defer mgr.IdentityAllocator.DeleteAllKeys()

	cache := mgr.GetIdentityCache()
	_, ok := cache[identity.ReservedCiliumKVStore]
	require.Equal(t, true, ok)
}

func TestAllocator(t *testing.T) {
	for _, be := range []string{"etcd", "consul"} {
		t.Run(be, func(t *testing.T) {
			testutils.IntegrationTest(t)
			kvstore.SetupDummy(t, be)
			testAllocator(t)
		})
	}
}

func testAllocator(t *testing.T) {
	lbls1 := labels.NewLabelsFromSortedList("blah=%%//!!;id=foo;user=anna")
	lbls2 := labels.NewLabelsFromSortedList("id=bar;user=anna")
	lbls3 := labels.NewLabelsFromSortedList("id=bar;user=susan")

	owner := newDummyOwner()
	identity.InitWellKnownIdentities(fakeConfig, cmtypes.ClusterInfo{Name: "default", ID: 5})
	// The nils are only used by k8s CRD identities. We default to kvstore.
	mgr := NewCachingIdentityAllocator(owner)
	<-mgr.InitIdentityAllocator(nil)
	defer mgr.Close()
	defer mgr.IdentityAllocator.DeleteAllKeys()

	id1a, isNew, err := mgr.AllocateIdentity(context.Background(), lbls1, false, identity.InvalidIdentity)
	require.NotNil(t, id1a)
	require.NoError(t, err)
	require.Equal(t, true, isNew)
	// Wait for the update event from the KV-store
	require.NotEqual(t, 0, owner.WaitUntilID(id1a.ID))
	require.EqualValues(t, lbls1.LabelArray(), owner.GetIdentity(id1a.ID))

	// Reuse the same identity.
	id1b, isNew, err := mgr.AllocateIdentity(context.Background(), lbls1, false, identity.InvalidIdentity)
	require.NotNil(t, id1b)
	require.False(t, isNew)
	require.NoError(t, err)
	require.Equal(t, id1b.ID, id1a.ID)

	released, err := mgr.Release(context.Background(), id1a, false)
	require.NoError(t, err)
	require.False(t, released)
	released, err = mgr.Release(context.Background(), id1b, false)
	require.NoError(t, err)
	require.Equal(t, true, released)
	// The KV-store still keeps the ID even when a single node has released it.
	// This also means that we should not have received an event from the
	// KV-store for the deletion of the identity, so it should still be in
	// the owner's cache.
	require.EqualValues(t, lbls1.LabelArray(), owner.GetIdentity(id1a.ID))

	id1b, isNew, err = mgr.AllocateIdentity(context.Background(), lbls1, false, identity.InvalidIdentity)
	require.NotNil(t, id1b)
	require.NoError(t, err)
	// The value key should not have been removed, so the same ID should be
	// assigned again and it should not be marked as new.
	require.False(t, isNew)
	require.Equal(t, id1b.ID, id1a.ID)
	// Should still be cached, no new events should have been received.
	require.EqualValues(t, lbls1.LabelArray(), owner.GetIdentity(id1a.ID))

	ident := mgr.LookupIdentityByID(context.TODO(), id1b.ID)
	require.NotNil(t, ident)
	require.EqualValues(t, ident.Labels, lbls1)

	id2, isNew, err := mgr.AllocateIdentity(context.Background(), lbls2, false, identity.InvalidIdentity)
	require.NotNil(t, id2)
	require.Equal(t, true, isNew)
	require.NoError(t, err)
	require.NotEqual(t, id2.ID, id1a.ID)
	// Wait for the update event from the KV-store
	require.NotEqual(t, 0, owner.WaitUntilID(id2.ID))
	require.EqualValues(t, lbls2.LabelArray(), owner.GetIdentity(id2.ID))

	id3, isNew, err := mgr.AllocateIdentity(context.Background(), lbls3, false, identity.InvalidIdentity)
	require.NotNil(t, id3)
	require.Equal(t, true, isNew)
	require.NoError(t, err)
	require.NotEqual(t, id3.ID, id1a.ID)
	require.NotEqual(t, id3.ID, id2.ID)
	// Wait for the update event from the KV-store
	require.NotEqual(t, 0, owner.WaitUntilID(id3.ID))
	require.EqualValues(t, lbls3.LabelArray(), owner.GetIdentity(id3.ID))

	released, err = mgr.Release(context.Background(), id1b, false)
	require.NoError(t, err)
	require.Equal(t, true, released)
	released, err = mgr.Release(context.Background(), id2, false)
	require.NoError(t, err)
	require.Equal(t, true, released)
	released, err = mgr.Release(context.Background(), id3, false)
	require.NoError(t, err)
	require.Equal(t, true, released)

	mgr.IdentityAllocator.DeleteAllKeys()
	require.NotEqual(t, 0, owner.WaitUntilID(id3.ID))
}
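
// TestLocalAllocation runs testLocalAllocation against each supported
// kvstore backend; CIDR identities are expected to be allocated with a
// locally scoped numeric identity.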
func TestLocalAllocation(t *testing.T) {
	for _, be := range []string{"etcd", "consul"} {
		t.Run(be, func(t *testing.T) {
			testutils.IntegrationTest(t)
			kvstore.SetupDummy(t, be)
			testLocalAllocation(t)
		})
	}
}

func testLocalAllocation(t *testing.T) {
	lbls1 := labels.NewLabelsFromSortedList("cidr:192.0.2.3/32")

	owner := newDummyOwner()
	identity.InitWellKnownIdentities(fakeConfig, cmtypes.ClusterInfo{Name: "default", ID: 5})
	// The nils are only used by k8s CRD identities. We default to kvstore.
	mgr := NewCachingIdentityAllocator(owner)
	<-mgr.InitIdentityAllocator(nil)
	defer mgr.Close()
	defer mgr.IdentityAllocator.DeleteAllKeys()

	id, isNew, err := mgr.AllocateIdentity(context.Background(), lbls1, true, identity.InvalidIdentity)
	require.NotNil(t, id)
	require.NoError(t, err)
	require.Equal(t, true, isNew)
	require.Equal(t, true, id.ID.HasLocalScope())
	// Wait for the update event from the KV-store
	require.NotEqual(t, 0, owner.WaitUntilID(id.ID))
	require.EqualValues(t, lbls1.LabelArray(), owner.GetIdentity(id.ID))

	// Reuse the same identity.
	id, isNew, err = mgr.AllocateIdentity(context.Background(), lbls1, true, identity.InvalidIdentity)
	require.NotNil(t, id)
	require.NoError(t, err)
	require.False(t, isNew)
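
	// The identity cache snapshot also contains locally scoped
	// identities, as the following lookup asserts.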
	cache := mgr.GetIdentityCache()
	require.NotNil(t, cache[id.ID])

	// 1st Release, not released
	released, err := mgr.Release(context.Background(), id, true)
	require.NoError(t, err)
	require.False(t, released)

	// Identity still exists
	require.EqualValues(t, lbls1.LabelArray(), owner.GetIdentity(id.ID))

	// 2nd Release, released
	released, err = mgr.Release(context.Background(), id, true)
	require.NoError(t, err)
	require.Equal(t, true, released)

	// Wait until the identity is released
	require.NotEqual(t, 0, owner.WaitUntilID(id.ID))
	// Identity does not exist any more
	require.Nil(t, owner.GetIdentity(id.ID))

	cache = mgr.GetIdentityCache()
	require.Nil(t, cache[id.ID])

	id, isNew, err = mgr.AllocateIdentity(context.Background(), lbls1, true, identity.InvalidIdentity)
	require.NotNil(t, id)
	require.NoError(t, err)
	require.Equal(t, true, isNew)
	require.Equal(t, true, id.ID.HasLocalScope())

	released, err = mgr.Release(context.Background(), id, true)
	require.NoError(t, err)
	require.Equal(t, true, released)

	mgr.IdentityAllocator.DeleteAllKeys()
	require.NotEqual(t, 0, owner.WaitUntilID(id.ID))
}

func TestAllocatorReset(t *testing.T) {
	for _, be := range []string{"etcd", "consul"} {
		t.Run(be, func(t *testing.T) {
			testutils.IntegrationTest(t)
			kvstore.SetupDummy(t, be)
			testAllocatorReset(t)
		})
	}
}

// testAllocatorReset verifies that we can close and reopen the allocator
// successfully.
func testAllocatorReset(t *testing.T) {
	labels := labels.NewLabelsFromSortedList("id=bar;user=anna")
	owner := newDummyOwner()
	mgr := NewCachingIdentityAllocator(owner)
	testAlloc := func() {
		id1a, _, err := mgr.AllocateIdentity(context.Background(), labels, false, identity.InvalidIdentity)
		require.NotNil(t, id1a)
		require.NoError(t, err)

		queued, ok := <-owner.updated
		require.Equal(t, true, ok)
		require.Equal(t, id1a.ID, queued)
	}

	<-mgr.InitIdentityAllocator(nil)
	testAlloc()
	mgr.Close()
	<-mgr.InitIdentityAllocator(nil)
	testAlloc()
	mgr.Close()
}
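
// TestAllocateLocally verifies that CIDR labels can be satisfied by a local
// identity at the requested numeric ID, while pod (k8s) labels require a
// global identity and fail local allocation with ErrNonLocalIdentity.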
func TestAllocateLocally(t *testing.T) {
	mgr := NewCachingIdentityAllocator(newDummyOwner())

	cidrLbls := labels.NewLabelsFromSortedList("cidr:1.2.3.4/32")
	podLbls := labels.NewLabelsFromSortedList("k8s:foo=bar")

	assert.False(t, needsGlobalIdentity(cidrLbls))
	assert.True(t, needsGlobalIdentity(podLbls))

	id, allocated, err := mgr.AllocateLocalIdentity(cidrLbls, false, identity.IdentityScopeLocal+50)
	assert.Nil(t, err)
	assert.True(t, allocated)
	assert.Equal(t, id.ID.Scope(), identity.IdentityScopeLocal)
	assert.Equal(t, id.ID, identity.IdentityScopeLocal+50)

	id, _, err = mgr.AllocateLocalIdentity(podLbls, false, 0)
	assert.ErrorIs(t, err, ErrNonLocalIdentity)
	assert.Nil(t, id)
}

func TestCheckpointRestore(t *testing.T) {
	owner := newDummyOwner()
	mgr := NewCachingIdentityAllocator(owner)
	defer mgr.Close()
	dir := t.TempDir()
	mgr.checkpointPath = filepath.Join(dir, CheckpointFile)
	mgr.EnableCheckpointing()

	for _, l := range []string{
		"cidr:1.1.1.1/32;reserved:kube-apiserver",
		"cidr:1.1.1.2/32;reserved:kube-apiserver",
		"cidr:1.1.1.1/32",
		"cidr:1.1.1.2/32",
	} {
		lbls := labels.NewLabelsFromSortedList(l)
		assert.NotEqual(t, identity.IdentityScopeGlobal, identity.ScopeForLabels(lbls), "test bug: only restore locally-scoped labels")

		_, _, err := mgr.AllocateIdentity(context.Background(), lbls, false, 0)
		assert.Nil(t, err)
	}

	// Ensure that the checkpoint file has been written.
	// This is asynchronous, so we must retry.
	assert.Eventually(t, func() bool {
		_, err := os.Stat(mgr.checkpointPath)
		return err == nil
	}, time.Second, 50*time.Millisecond)

	modelBefore := mgr.GetIdentities()

	// Explicitly checkpoint, to ensure we get the latest data
	err := mgr.checkpoint(context.TODO())
	require.NoError(t, err)

	newMgr := NewCachingIdentityAllocator(owner)
	defer newMgr.Close()
	newMgr.checkpointPath = mgr.checkpointPath

	restored, err := newMgr.RestoreLocalIdentities()
	assert.Nil(t, err)
	assert.Len(t, restored, 4)

	modelAfter := newMgr.GetIdentities()

	assert.ElementsMatch(t, modelBefore, modelAfter)
}

func TestClusterIDValidator(t *testing.T) {
	const (
		cid   = 5
		minID = cid << 16
		maxID = minID + 65535
	)

	var (
		validator = clusterIDValidator(cid)
		key       = &cacheKey.GlobalIdentity{}
	)

	// Identities matching the cluster ID should pass validation
	for _, id := range []idpool.ID{minID, minID + 1, maxID - 1, maxID} {
		assert.NoError(t, validator(allocator.AllocatorChangeUpsert, id, key), "ID %d should have passed validation", id)
	}

	// Identities not matching the cluster ID should fail validation
	for _, id := range []idpool.ID{1, minID - 1, maxID + 1} {
		assert.Error(t, validator(allocator.AllocatorChangeUpsert, id, key), "ID %d should have failed validation", id)
	}
}
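
// TestClusterNameValidator checks that an upserted identity passes
// validation only when it carries a k8s-sourced io.cilium.k8s.policy.cluster
// label matching the expected cluster name, while deletes pass even without
// labels.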
func TestClusterNameValidator(t *testing.T) {
	const id = 100

	var (
		validator = clusterNameValidator("foo")
		generator = cacheKey.GlobalIdentity{}
	)

	key := generator.PutKey("k8s:foo=bar;k8s:bar=baz;qux=fred;k8s:io.cilium.k8s.policy.cluster=foo")
	assert.NoError(t, validator(allocator.AllocatorChangeUpsert, id, key))

	key = generator.PutKey("k8s:foo=bar;k8s:bar=baz")
	assert.EqualError(t, validator(allocator.AllocatorChangeUpsert, id, key), "could not find expected label io.cilium.k8s.policy.cluster")

	key = generator.PutKey("k8s:foo=bar;k8s:bar=baz;k8s:io.cilium.k8s.policy.cluster=bar")
	assert.EqualError(t, validator(allocator.AllocatorChangeUpsert, id, key), "unexpected cluster name: got bar, expected foo")

	key = generator.PutKey("k8s:foo=bar;k8s:bar=baz;qux:io.cilium.k8s.policy.cluster=bar")
	assert.EqualError(t, validator(allocator.AllocatorChangeUpsert, id, key), "unexpected source for cluster label: got qux, expected k8s")

	key = generator.PutKey("k8s:foo=bar;k8s:bar=baz;qux:io.cilium.k8s.policy.cluster=bar;k8s:io.cilium.k8s.policy.cluster=bar")
	assert.EqualError(t, validator(allocator.AllocatorChangeUpsert, id, key), "unexpected source for cluster label: got qux, expected k8s")

	assert.EqualError(t, validator(allocator.AllocatorChangeUpsert, id, nil), "unsupported key type <nil>")

	key = generator.PutKey("")
	assert.NoError(t, validator(allocator.AllocatorChangeDelete, id, key))
}