// Copyright Istio Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package multicluster

import (
	"fmt"
	"testing"
	"time"

	"go.uber.org/atomic"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	klabels "k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/rest"

	meshconfig "istio.io/api/mesh/v1alpha1"
	"istio.io/istio/pkg/cluster"
	"istio.io/istio/pkg/config/mesh"
	"istio.io/istio/pkg/kube"
	"istio.io/istio/pkg/kube/controllers"
	"istio.io/istio/pkg/kube/kclient"
	"istio.io/istio/pkg/kube/kclient/clienttest"
	"istio.io/istio/pkg/kube/namespace"
	"istio.io/istio/pkg/slices"
	"istio.io/istio/pkg/test"
	"istio.io/istio/pkg/test/util/assert"
	"istio.io/istio/pkg/test/util/retry"
	"istio.io/istio/pkg/util/sets"
)

// secretNamespace is the namespace the controller under test watches for
// multicluster kubeconfig secrets.
const secretNamespace string = "istio-system"

// clusterCredential pairs a remote cluster ID with the raw kubeconfig bytes
// stored for it inside a multicluster secret's Data map.
type clusterCredential struct {
	clusterID  string
	kubeconfig []byte
}

// makeSecret builds a multicluster kubeconfig Secret in the given namespace,
// labeled with MultiClusterSecretLabel so the controller picks it up. Each
// clusterCredential becomes one Data entry keyed by cluster ID.
func makeSecret(namespace string, secret string, clusterConfigs ...clusterCredential) *v1.Secret {
	s := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      secret,
			Namespace: namespace,
			Labels: map[string]string{
				MultiClusterSecretLabel: "true",
			},
		},
		Data: map[string][]byte{},
	}

	for _, config := range clusterConfigs {
		s.Data[config.clusterID] = config.kubeconfig
	}
	return s
}

// TestKubeConfigOverride verifies that rest.Config override functions passed to
// NewController are applied when the controller builds a client for a newly
// discovered remote cluster.
func TestKubeConfigOverride(t *testing.T) {
	var (
		expectedQPS   = float32(100)
		expectedBurst = 200
	)
	fakeRestConfig := &rest.Config{}
	client := kube.NewFakeClient()
	stopCh := test.NewStop(t)
	c := NewController(client, secretNamespace, "", mesh.NewFixedWatcher(nil), func(cfg *rest.Config) {
		cfg.QPS = expectedQPS
		cfg.Burst = expectedBurst
	})
	// Capture the overrides by applying them to a config we control, so the
	// test can inspect the result after the controller runs them.
	c.ClientBuilder = func(kubeConfig []byte, c cluster.ID, configOverrides ...func(*rest.Config)) (kube.Client, error) {
		for _, override := range configOverrides {
			override(fakeRestConfig)
		}
		return kube.NewFakeClient(), nil
	}
	client.RunAndWait(stopCh)
	assert.NoError(t, c.Run(stopCh))
	retry.UntilOrFail(t, c.HasSynced, retry.Timeout(2*time.Second))
	secret0 := makeSecret(secretNamespace, "s0", clusterCredential{"c0", []byte("kubeconfig0-0")})
	secrets := clienttest.NewWriter[*v1.Secret](t, client)
	t.Run("test kube config override", func(t *testing.T) {
		secrets.Create(secret0)
		// Wait until the controller has processed the secret and created
		// a cluster entry for "c0".
		assert.EventuallyEqual(t, func() bool {
			return c.cs.GetByID("c0") != nil
		}, true)
		// The override functions must have been applied to the rest.Config.
		assert.Equal(t, fakeRestConfig, &rest.Config{
			QPS:   expectedQPS,
			Burst: expectedBurst,
		})
	})
}

// TestingBuildClientsFromConfig is a ClientBuilder stub that ignores the
// kubeconfig and always returns a fresh fake client.
func TestingBuildClientsFromConfig(kubeConfig []byte, c cluster.ID, configOverrides ...func(*rest.Config)) (kube.Client, error) {
	return kube.NewFakeClient(), nil
}

// testController bundles a secret Controller, its fake client, a test
// component, and a secret writer for driving secret create/update/delete.
type testController struct {
	controller *Controller
	client     kube.Client
	component  *Component[testHandler]
	t          *testing.T
	secrets    clienttest.TestWriter[*v1.Secret]
}

// buildTestController wires up a Controller with a fake client and a
// multicluster component whose handlers start in the given synced state.
// Each handler created gets a monotonically increasing Iter value so tests
// can detect handler re-creation.
func buildTestController(t *testing.T, synced bool) testController {
	tc := testController{
		client: kube.NewFakeClient(),
		t:      t,
	}
	tc.secrets = clienttest.NewWriter[*v1.Secret](t, tc.client)
	tc.controller = NewController(tc.client, secretNamespace, "config", mesh.NewFixedWatcher(nil))
	tc.controller.ClientBuilder = TestingBuildClientsFromConfig
	iter := atomic.NewInt32(0)
	tc.component = BuildMultiClusterComponent(tc.controller, func(cluster *Cluster) testHandler {
		it := iter.Inc()
		return testHandler{
			ID:     cluster.ID,
			Iter:   int(it),
			Closed: atomic.NewBool(false),
			Synced: atomic.NewBool(synced),
		}
	})
	return tc
}

// kubeconfig is a package-level counter used to generate a unique kubeconfig
// payload per AddSecret call, so every add/update is seen as a content change.
var kubeconfig = 0

// AddSecret creates or updates a multicluster secret holding a fresh, unique
// kubeconfig for the given cluster ID.
func (t *testController) AddSecret(secretName, clusterID string) {
	kubeconfig++
	t.secrets.CreateOrUpdate(makeSecret(secretNamespace, secretName, clusterCredential{clusterID, []byte(fmt.Sprintf("kubeconfig-%d", kubeconfig))}))
}

// DeleteSecret removes the named multicluster secret.
func (t *testController) DeleteSecret(secretName string) {
	t.t.Helper()
	t.secrets.Delete(secretName, secretNamespace)
}

// ConfigClusterHandler returns the handler registered for the config cluster.
func (t *testController) ConfigClusterHandler() testHandler {
	return t.component.clusters["config"]
}

// Run starts the controller and then the underlying fake client informers.
func (t *testController) Run(stop chan struct{}) {
	assert.NoError(t.t, t.controller.Run(stop))
	t.client.RunAndWait(stop)
}

// TestListRemoteClusters checks that ListRemoteClusters reports each cluster's
// secret name and sync status, transitioning syncing -> synced per cluster and
// dropping entries when their secret is deleted.
func TestListRemoteClusters(t *testing.T) {
	stop := make(chan struct{})
	c := buildTestController(t, false)
	c.AddSecret("s0", "c0")
	c.AddSecret("s1", "c1")
	c.Run(stop)

	// before sync
	assert.EventuallyEqual(t, c.controller.ListRemoteClusters, []cluster.DebugInfo{
		{ID: "config", SyncStatus: "syncing"},
		{ID: "c0", SecretName: "istio-system/s0", SyncStatus: "syncing"},
		{ID: "c1", SecretName: "istio-system/s1", SyncStatus: "syncing"},
	})
	assert.EventuallyEqual(t, func() int { return len(c.component.All()) }, 3)

	// Sync all but one
	for _, c := range c.component.All() {
		if c.ID != "c1" {
			c.Synced.Store(true)
		}
	}
	assert.EventuallyEqual(t, c.controller.ListRemoteClusters, []cluster.DebugInfo{
		{ID: "config", SyncStatus: "synced"},
		{ID: "c0", SecretName: "istio-system/s0", SyncStatus: "synced"},
		{ID: "c1", SecretName: "istio-system/s1", SyncStatus: "syncing"},
	})

	// Sync the last one
	c.component.ForCluster("c1").Synced.Store(true)
	assert.EventuallyEqual(t, c.controller.ListRemoteClusters, []cluster.DebugInfo{
		{ID: "config", SyncStatus: "synced"},
		{ID: "c0", SecretName: "istio-system/s0", SyncStatus: "synced"},
		{ID: "c1", SecretName: "istio-system/s1", SyncStatus: "synced"},
	})

	// Remove one
	c.DeleteSecret("s1")
	assert.EventuallyEqual(t, c.controller.ListRemoteClusters, []cluster.DebugInfo{
		{ID: "config", SyncStatus: "synced"},
		{ID: "c0", SecretName: "istio-system/s0", SyncStatus: "synced"},
	})
}

// TestShutdown verifies handler Close() semantics: deleting a secret closes
// that cluster's handler, and stopping the controller closes only the config
// cluster's handler (remote handlers are intentionally left open; see comment
// below).
func TestShutdown(t *testing.T) {
	stop := make(chan struct{})
	c := buildTestController(t, true)
	c.AddSecret("s0", "c0")
	c.AddSecret("s1", "c1")
	c.Run(stop)
	retry.UntilOrFail(t, c.controller.HasSynced, retry.Timeout(2*time.Second))
	components := c.component.All()
	assert.Equal(t, []bool{false, false, false}, slices.Map(components, func(e testHandler) bool {
		return e.Closed.Load()
	}))

	// Remove secret, it should be marked as closed
	c.DeleteSecret("s0")
	fetchClosed := func() map[string]bool {
		res := map[string]bool{}
		for _, c := range components {
			res[string(c.ID)] = c.Closed.Load()
		}
		return res
	}
	assert.EventuallyEqual(t, fetchClosed, map[string]bool{"config": false, "c1": false, "c0": true})

	// close everything
	close(stop)

	// We should *not* shutdown anything else except the config cluster
	// In theory we could, but we only shut down the controller when the entire application is closing so we don't bother
	assert.EventuallyEqual(t, fetchClosed, map[string]bool{"config": true, "c1": false, "c0": true})
}

// TestObjectFilter tests that when a component is created, it should have access to the objectfilter.
// This ensures we do not load everything, then later filter it out.
// TestObjectFilter verifies that each cluster's client already carries the
// discovery-namespace object filter at the moment the component handler is
// built, for both the config cluster and remote clusters.
func TestObjectFilter(t *testing.T) {
	stop := test.NewStop(t)
	// Every client (config and remote) sees one allowed and one disallowed
	// namespace, per the DiscoverySelectors configured below.
	clientWithNamespace := func() kube.Client {
		return kube.NewFakeClient(
			&v1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name:   "allowed",
					Labels: map[string]string{"kubernetes.io/metadata.name": "allowed"},
				},
			},
			&v1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name:   "not-allowed",
					Labels: map[string]string{"kubernetes.io/metadata.name": "not-allowed"},
				},
			})
	}
	tc := testController{
		client: clientWithNamespace(),
		t:      t,
	}
	mesh := mesh.NewFixedWatcher(&meshconfig.MeshConfig{
		DiscoverySelectors: []*metav1.LabelSelector{
			{
				MatchLabels: map[string]string{
					"kubernetes.io/metadata.name": "allowed",
				},
			},
		},
	})

	// For primary cluster, we need to set it up ourselves.
	namespaces := kclient.New[*v1.Namespace](tc.client)
	filter := namespace.NewDiscoveryNamespacesFilter(namespaces, mesh, stop)
	tc.client = kube.SetObjectFilter(tc.client, filter)

	tc.secrets = clienttest.NewWriter[*v1.Secret](t, tc.client)
	tc.controller = NewController(tc.client, secretNamespace, "config", mesh)
	tc.controller.ClientBuilder = func(kubeConfig []byte, c cluster.ID, configOverrides ...func(*rest.Config)) (kube.Client, error) {
		return clientWithNamespace(), nil
	}

	tc.component = BuildMultiClusterComponent(tc.controller, func(cluster *Cluster) testHandler {
		// Filter must immediately work!
		assert.Equal(t, cluster.Client.ObjectFilter() != nil, true, "cluster "+cluster.ID.String())
		assert.Equal(t, cluster.Client.ObjectFilter().Filter("allowed"), true)
		assert.Equal(t, cluster.Client.ObjectFilter().Filter("not-allowed"), false)
		return testHandler{
			ID:     cluster.ID,
			Closed: atomic.NewBool(false),
			Synced: atomic.NewBool(true),
		}
	})
	tc.AddSecret("s0", "c0")
	tc.AddSecret("s1", "c1")
	tc.Run(stop)
	retry.UntilOrFail(t, tc.controller.HasSynced, retry.Timeout(2*time.Second))
}

// informerHandler is a per-cluster component that owns a typed informer client
// and shuts its handlers down on Close.
type informerHandler[T controllers.ComparableObject] struct {
	client kclient.Client[T]
}

// Close shuts down the informer's registered handlers.
func (i *informerHandler[T]) Close() {
	i.client.ShutdownHandlers()
}

// HasSynced reports whether the underlying informer cache has synced.
func (i *informerHandler[T]) HasSynced() bool {
	return i.client.HasSynced()
}

// Test our (lack of) ability to do seamless updates of a cluster.
// Tracking improvements in https://github.com/istio/istio/issues/49349
func TestSeamlessMigration(t *testing.T) {
	stop := make(chan struct{})
	c := buildTestController(t, true)
	tt := assert.NewTracker[string](t)
	// "initial" is the cluster's state before the kubeconfig update;
	// "later" is its state after. "common" exists in both.
	initial := kube.NewFakeClient(
		&v1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{Name: "initial"},
		},
		&v1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{Name: "common"},
		},
	)
	later := kube.NewFakeClient(
		&v1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{Name: "later"},
		},
		&v1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{Name: "common"},
		},
	)
	// First client build returns `initial`, every subsequent build returns `later`.
	nextClient := initial
	c.controller.ClientBuilder = func(kubeConfig []byte, clusterId cluster.ID, configOverrides ...func(*rest.Config)) (kube.Client, error) {
		ret := nextClient
		nextClient = later
		return ret, nil
	}
	component := BuildMultiClusterComponent(c.controller, func(cluster *Cluster) *informerHandler[*v1.ConfigMap] {
		cl := kclient.New[*v1.ConfigMap](cluster.Client)
		cl.AddEventHandler(clienttest.TrackerHandler(tt))
		return &informerHandler[*v1.ConfigMap]{client: cl}
	})
	c.AddSecret("s0", "c0")
	c.Run(stop)
	retry.UntilOrFail(t, c.controller.HasSynced, retry.Timeout(2*time.Second))
	assert.Equal(t,
		clienttest.Names((*component.ForCluster("c0")).client.List(metav1.NamespaceAll, klabels.Everything())),
		sets.New("initial", "common"))

	tt.WaitUnordered("add/common", "add/initial")

	// Update the cluster
	c.AddSecret("s0", "c0")
	var fatal error
	retry.UntilOrFail(t, func() bool {
		have := clienttest.Names((*component.ForCluster("c0")).client.List(metav1.NamespaceAll, klabels.Everything()))
		if have.Equals(sets.New("later", "common")) {
			return true
		}
		if !have.Equals(sets.New("initial", "common")) {
			fatal = fmt.Errorf("unexpected contents: %v", have)
			// TODO: return true here, then assert.NoError(t, fatal) after
			// This would properly check that we do not go from `old -> empty -> new` and instead go from `old -> new` seamlessly
			// However, the code does not currently handler this case.
			return false
		}
		return false
	})
	_ = fatal
	// We get ADD again! Oops. Ideally we would be abstracted from the cluster update and instead get 'delete/initial, add/later, update/common'.
	// See discussion in https://github.com/istio/enhancements/pull/107
	tt.WaitUnordered("add/common", "add/later")
}

// TestSecretController drives the controller through a table of secret
// add/update/delete steps and asserts which cluster handlers exist afterward.
// Each handler's Iter records when it was (re)built, so the expectations also
// verify that unchanged kubeconfigs do NOT recreate a cluster, while changed
// ones do.
func TestSecretController(t *testing.T) {
	client := kube.NewFakeClient()

	var (
		secret0 = makeSecret(secretNamespace, "s0",
			clusterCredential{"c0", []byte("kubeconfig0-0")})
		secret0UpdateKubeconfigChanged = makeSecret(secretNamespace, "s0",
			clusterCredential{"c0", []byte("kubeconfig0-1")})
		secret0UpdateKubeconfigSame = makeSecret(secretNamespace, "s0",
			clusterCredential{"c0", []byte("kubeconfig0-1")})
		secret0AddCluster = makeSecret(secretNamespace, "s0",
			clusterCredential{"c0", []byte("kubeconfig0-1")}, clusterCredential{"c0-1", []byte("kubeconfig0-2")})
		secret0DeleteCluster = secret0UpdateKubeconfigChanged // "c0-1" cluster deleted
		secret0ReAddCluster  = makeSecret(secretNamespace, "s0",
			clusterCredential{"c0", []byte("kubeconfig0-1")}, clusterCredential{"c0-1", []byte("kubeconfig0-2")})
		secret0ReDeleteCluster = secret0UpdateKubeconfigChanged // "c0-1" cluster re-deleted
		secret1                = makeSecret(secretNamespace, "s1",
			clusterCredential{"c1", []byte("kubeconfig1-0")})
		otherNSSecret = makeSecret("some-other-namespace", "s2",
			clusterCredential{"c1", []byte("kubeconfig1-0")})
		secret2Cluster0 = makeSecret(secretNamespace, "s2",
			clusterCredential{"c0", []byte("kubeconfig1-1")})
		configCluster = makeSecret(secretNamespace, "s3",
			clusterCredential{"config", []byte("kubeconfig3-0")})
	)

	// Annotation-only change: the kubeconfig payload is identical, so the
	// controller should treat this update as a no-op for cluster c0.
	secret0UpdateKubeconfigSame.Annotations = map[string]string{"foo": "bar"}

	type result struct {
		ID   cluster.ID
		Iter int
	}

	steps := []struct {
		name string
		// only set one of these per step. The others should be nil.
		add    *v1.Secret
		update *v1.Secret
		delete *v1.Secret

		want []result
	}{
		{
			name: "Create secret s0 and add kubeconfig for cluster c0, which will add remote cluster c0",
			add:  secret0,
			want: []result{{"config", 1}, {"c0", 2}},
		},
		{
			name:   "Update secret s0 and update the kubeconfig of cluster c0, which will update remote cluster c0",
			update: secret0UpdateKubeconfigChanged,
			want:   []result{{"config", 1}, {"c0", 3}},
		},
		{
			name:   "Update secret s0 but keep the kubeconfig of cluster c0 unchanged, which will not update remote cluster c0",
			update: secret0UpdateKubeconfigSame,
			want:   []result{{"config", 1}, {"c0", 3}},
		},
		{
			name: "Update secret s0 and add kubeconfig for cluster c0-1 but keep the kubeconfig of cluster c0 unchanged, " +
				"which will add remote cluster c0-1 but will not update remote cluster c0",
			update: secret0AddCluster,
			want:   []result{{"config", 1}, {"c0", 3}, {"c0-1", 4}},
		},
		{
			name: "Update secret s0 and delete cluster c0-1 but keep the kubeconfig of cluster c0 unchanged, " +
				"which will delete remote cluster c0-1 but will not update remote cluster c0",
			update: secret0DeleteCluster,
			want:   []result{{"config", 1}, {"c0", 3}},
		},
		{
			name: "Update secret s0 and re-add kubeconfig for cluster c0-1 but keep the kubeconfig of cluster c0 unchanged, " +
				"which will add remote cluster c0-1 but will not update remote cluster c0",
			update: secret0ReAddCluster,
			want:   []result{{"config", 1}, {"c0", 3}, {"c0-1", 5}},
		},
		{
			name: "Update secret s0 and re-delete cluster c0-1 but keep the kubeconfig of cluster c0 unchanged, " +
				"which will delete remote cluster c0-1 but will not update remote cluster c0",
			update: secret0ReDeleteCluster,
			want:   []result{{"config", 1}, {"c0", 3}},
		},
		{
			name: "Create secret s1 and add kubeconfig for cluster c1, which will add remote cluster c1",
			add:  secret1,
			want: []result{{"config", 1}, {"c0", 3}, {"c1", 6}},
		},
		{
			name: "Add secret s2, with already existing cluster",
			add:  secret2Cluster0,
			want: []result{{"config", 1}, {"c0", 3}, {"c1", 6}},
		},
		{
			name:   "Delete secret s2, with already existing cluster",
			delete: secret2Cluster0,
			want:   []result{{"config", 1}, {"c0", 3}, {"c1", 6}},
		},
		{
			name:   "Delete secret s0, which will delete remote cluster c0",
			delete: secret0,
			want:   []result{{"config", 1}, {"c1", 6}},
		},
		{
			name:   "Delete secret s1, which will delete remote cluster c1",
			delete: secret1,
			want:   []result{{"config", 1}},
		},
		{
			name: "Add secret from another namespace",
			add:  otherNSSecret,
			want: []result{{"config", 1}},
		},
		{
			name: "Add secret referencing config cluster",
			add:  configCluster,
			want: []result{{"config", 1}},
		},
		{
			name:   "Delete secret referencing config cluster",
			delete: configCluster,
			want:   []result{{"config", 1}},
		},
	}

	// Start the secret controller and sleep to allow secret process to start.
	stopCh := test.NewStop(t)
	c := NewController(client, secretNamespace, "config", mesh.NewFixedWatcher(nil))
	c.ClientBuilder = TestingBuildClientsFromConfig
	client.RunAndWait(stopCh)
	secrets := clienttest.NewWriter[*v1.Secret](t, client)
	iter := 0
	component := BuildMultiClusterComponent(c, func(cluster *Cluster) testHandler {
		iter++
		return testHandler{
			ID:     cluster.ID,
			Iter:   iter,
			Closed: atomic.NewBool(false),
			Synced: atomic.NewBool(false),
		}
	})
	client.RunAndWait(stopCh)
	assert.NoError(t, c.Run(stopCh))
	// Should not be synced...
	assert.Equal(t, c.HasSynced(), false)
	// Now mark the config cluster as synced
	component.All()[0].Synced.Store(true)
	t.Run("sync timeout", func(t *testing.T) {
		retry.UntilOrFail(t, c.HasSynced, retry.Timeout(2*time.Second))
	})
	kube.WaitForCacheSync("test", stopCh, c.HasSynced)

	for _, step := range steps {
		t.Run(step.name, func(t *testing.T) {
			// Apply exactly one mutation for this step.
			switch {
			case step.add != nil:
				secrets.Create(step.add)
			case step.update != nil:
				secrets.Update(step.update)
			case step.delete != nil:
				secrets.Delete(step.delete.Name, step.delete.Namespace)
			}

			// Compare the full set of live handlers (ID + creation Iter)
			// against the expectation for this step.
			assert.EventuallyEqual(t, func() []result {
				return slices.Map(component.All(), func(e testHandler) result {
					return result{e.ID, e.Iter}
				})
			}, step.want)
		})
	}
}

// testHandler is a minimal per-cluster component recording whether it has been
// closed and whether it reports as synced; Iter identifies when it was built.
type testHandler struct {
	ID     cluster.ID
	Iter   int
	Closed *atomic.Bool
	Synced *atomic.Bool
}

// Close marks the handler as closed.
func (h testHandler) Close() {
	h.Closed.Store(true)
}

// HasSynced reports the handler's configured sync state.
func (h testHandler) HasSynced() bool {
	return h.Synced.Load()
}