sigs.k8s.io/cluster-api@v1.7.1/cmd/clusterctl/client/init_test.go

/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package client

import (
	"fmt"
	"testing"

	. "github.com/onsi/gomega"
	"github.com/pkg/errors"
	ctrl "sigs.k8s.io/controller-runtime"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
	"sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster"
	"sigs.k8s.io/cluster-api/cmd/clusterctl/client/config"
	"sigs.k8s.io/cluster-api/cmd/clusterctl/internal/test"
	utilyaml "sigs.k8s.io/cluster-api/util/yaml"
)

var (
	ctx = ctrl.SetupSignalHandler()
)

func Test_clusterctlClient_InitImages(t *testing.T) {
	type field struct {
		client *fakeClient
	}

	type args struct {
		kubeconfigContext      string
		coreProvider           string
		bootstrapProvider      []string
		controlPlaneProvider   []string
		infrastructureProvider []string
	}

	tests := []struct {
		name                 string
		field                field
		args                 args
		additionalProviders  []Provider
		expectedImages       []string
		wantErr              bool
		expectedErrorMessage string
		certManagerImages    []string
		certManagerImagesErr error
	}{
		{
			name: "returns error if cannot find cluster client",
			field: field{
				client: fakeEmptyCluster(),
			},
			args: args{
				kubeconfigContext: "does-not-exist",
			},
			expectedImages: []string{},
			wantErr:        true,
		},
		{
			name: "returns list of images even if component variable values are not found",
			args: args{
				coreProvider:           "",  // with an empty cluster, a core provider should be added automatically
				bootstrapProvider:      nil, // with an empty cluster, a bootstrap provider should be added automatically
				controlPlaneProvider:   nil, // with an empty cluster, a control plane provider should be added automatically
				infrastructureProvider: []string{"infra"},
				kubeconfigContext:      "mgmt-context",
			},
			expectedImages: []string{
				"registry.k8s.io/cluster-api-aws/cluster-api-aws-controller:v0.5.3",
			},
			wantErr: false,
		},
		{
			name: "returns error when core provider name is invalid",
			args: args{
				coreProvider:      "some-core-provider",
				kubeconfigContext: "mgmt-context",
			},
			additionalProviders: []Provider{
				config.NewProvider("some-core-provider", "some-core-url", clusterctlv1.CoreProviderType),
			},
			wantErr:              true,
			expectedErrorMessage: "name cluster-api must be used with the CoreProvider type",
		},
		{
			name: "returns no error when core provider has the correct name",
			args: args{
				coreProvider:           config.ClusterAPIProviderName,
				bootstrapProvider:      nil,
				controlPlaneProvider:   nil,
				infrastructureProvider: nil,
				kubeconfigContext:      "mgmt-context",
			},
			expectedImages: []string{},
			wantErr:        false,
		},
		{
			name: "returns error when a bootstrap provider is not present",
			args: args{
				bootstrapProvider: []string{"not-provided"},
				kubeconfigContext: "mgmt-context",
			},
			wantErr:              true,
			expectedErrorMessage: "failed to get configuration for the BootstrapProvider with name not-provided",
		},
		{
			name: "returns error when a control plane provider is not present",
			args: args{
				controlPlaneProvider: []string{"not-provided"},
				kubeconfigContext:    "mgmt-context",
			},
			wantErr:              true,
			expectedErrorMessage: "failed to get configuration for the ControlPlaneProvider with name not-provided",
		},
		{
			name: "returns error when an infrastructure provider is not present",
			args: args{
				infrastructureProvider: []string{"not-provided"},
				kubeconfigContext:      "mgmt-context",
			},
			wantErr:              true,
			expectedErrorMessage: "failed to get configuration for the InfrastructureProvider with name not-provided",
		},
		{
			name: "returns certificate manager images when required",
			args: args{
				kubeconfigContext: "mgmt-context",
			},
			wantErr: false,
			certManagerImages: []string{
				"some.registry.com/cert-image-1:latest",
				"some.registry.com/cert-image-2:some-tag",
			},
			expectedImages: []string{
				"some.registry.com/cert-image-1:latest",
				"some.registry.com/cert-image-2:some-tag",
			},
		},
		{
			name: "returns error when cert-manager client cannot retrieve the image list",
			args: args{
				kubeconfigContext: "mgmt-context",
			},
			wantErr:              true,
			certManagerImagesErr: errors.New("failed to get cert images"),
		},
	}

	for _, tt := range tests {
		_, fc := setupCluster(tt.additionalProviders, newFakeCertManagerClient(tt.certManagerImages, tt.certManagerImagesErr))
		if tt.field.client == nil {
			tt.field.client = fc
		}

		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			got, err := tt.field.client.InitImages(ctx, InitOptions{
				Kubeconfig:              Kubeconfig{Path: "kubeconfig", Context: tt.args.kubeconfigContext},
				CoreProvider:            tt.args.coreProvider,
				BootstrapProviders:      tt.args.bootstrapProvider,
				ControlPlaneProviders:   tt.args.controlPlaneProvider,
				InfrastructureProviders: tt.args.infrastructureProvider,
			})

			if tt.wantErr {
				g.Expect(err).To(HaveOccurred())
				if tt.expectedErrorMessage == "" {
					return
				}
				g.Expect(err.Error()).To(ContainSubstring(tt.expectedErrorMessage))
				return
			}
			g.Expect(err).ToNot(HaveOccurred())
			g.Expect(got).To(HaveLen(len(tt.expectedImages)))
			g.Expect(got).To(ConsistOf(tt.expectedImages))
		})
	}
}

func Test_clusterctlClient_Init(t *testing.T) {
	// create a config variables client which does not have the value for
	// SOME_VARIABLE as expected in the infra components YAML
	fconfig := newFakeConfig(ctx).
		WithVar("ANOTHER_VARIABLE", "value").
		WithProvider(capiProviderConfig).
		WithProvider(infraProviderConfig)
	frepositories := fakeRepositories(fconfig, nil)
	fcluster := fakeCluster(fconfig, frepositories, newFakeCertManagerClient(nil, nil))
	fclient := fakeClusterCtlClient(fconfig, frepositories, []*fakeClusterClient{fcluster})

	type field struct {
		client *fakeClient
		hasCRD bool
	}

	type args struct {
		coreProvider           string
		bootstrapProvider      []string
		controlPlaneProvider   []string
		infrastructureProvider []string
		targetNameSpace        string
	}
	type want struct {
		provider        Provider
		version         string
		targetNamespace string
	}

	tests := []struct {
		name    string
		field   field
		args    args
		want    []want
		wantErr bool
	}{
		{
			name: "returns error if variables are not available",
			field: field{
				client: fclient,
			},
			args: args{
				coreProvider:           "",  // with an empty cluster, a core provider should be added automatically
				bootstrapProvider:      nil, // with an empty cluster, a bootstrap provider should be added automatically
				controlPlaneProvider:   nil, // with an empty cluster, a control plane provider should be added automatically
				infrastructureProvider: []string{"infra"},
			},
			wantErr: true,
		},
		{
			name: "Init (with an empty cluster) with default provider versions/current contract",
			field: field{
				client: fakeEmptyCluster(), // clusterctl client for an empty management cluster (with repository setup for capi, bootstrap, control plane and infra provider)
				hasCRD: false,
			},
			args: args{
				coreProvider:           "",  // with an empty cluster, a core provider should be added automatically
				bootstrapProvider:      nil, // with an empty cluster, a bootstrap provider should be added automatically
				controlPlaneProvider:   nil, // with an empty cluster, a control plane provider should be added automatically
				infrastructureProvider: []string{"infra"},
				targetNameSpace:        "",
			},
			want: []want{
				{
					provider:        capiProviderConfig,
					version:         "v1.0.0",
					targetNamespace: "ns1",
				},
				{
					provider:        bootstrapProviderConfig,
					version:         "v2.0.0",
					targetNamespace: "ns2",
				},
				{
					provider:        controlPlaneProviderConfig,
					version:         "v2.0.0",
					targetNamespace: "ns3",
				},
				{
					provider:        infraProviderConfig,
					version:         "v3.0.0",
					targetNamespace: "ns4",
				},
			},
			wantErr: false,
		},
		{
			name: "Init (with an empty cluster) opting out from automatic install of providers/current contract",
			field: field{
				client: fakeEmptyCluster(), // clusterctl client for an empty management cluster (with repository setup for capi, bootstrap, control plane and infra provider)
				hasCRD: false,
			},
			args: args{
				coreProvider:           "",            // with an empty cluster, a core provider should be added automatically
				bootstrapProvider:      []string{"-"}, // opt-out from the automatic bootstrap provider installation
				controlPlaneProvider:   []string{"-"}, // opt-out from the automatic control plane provider installation
				infrastructureProvider: []string{"infra"},
				targetNameSpace:        "",
			},
			want: []want{
				{
					provider:        capiProviderConfig,
					version:         "v1.0.0",
					targetNamespace: "ns1",
				},
				{
					provider:        infraProviderConfig,
					version:         "v3.0.0",
					targetNamespace: "ns4",
				},
			},
			wantErr: false,
		},
		{
			name: "Init (with an empty cluster) with custom provider versions/current contract",
			field: field{
				client: fakeEmptyCluster(), // clusterctl client for an empty management cluster (with repository setup for capi, bootstrap, control plane and infra provider)
				hasCRD: false,
			},
			args: args{
				coreProvider:           fmt.Sprintf("%s:v1.1.0", config.ClusterAPIProviderName),
				bootstrapProvider:      []string{fmt.Sprintf("%s:v2.1.0", config.KubeadmBootstrapProviderName)},
				controlPlaneProvider:   []string{fmt.Sprintf("%s:v2.1.0", config.KubeadmControlPlaneProviderName)},
				infrastructureProvider: []string{"infra:v3.1.0"},
				targetNameSpace:        "",
			},
			want: []want{
				{
					provider:        capiProviderConfig,
					version:         "v1.1.0",
					targetNamespace: "ns1",
				},
				{
					provider:        bootstrapProviderConfig,
					version:         "v2.1.0",
					targetNamespace: "ns2",
				},
				{
					provider:        controlPlaneProviderConfig,
					version:         "v2.1.0",
					targetNamespace: "ns3",
				},
				{
					provider:        infraProviderConfig,
					version:         "v3.1.0",
					targetNamespace: "ns4",
				},
			},
			wantErr: false,
		},
		{
			name: "Init (with an empty cluster) with target namespace/current contract",
			field: field{
				client: fakeEmptyCluster(), // clusterctl client for an empty management cluster (with repository setup for capi, bootstrap, control plane and infra provider)
				hasCRD: false,
			},
			args: args{
				coreProvider:           "", // with an empty cluster, a core provider should be added automatically
				bootstrapProvider:      []string{config.KubeadmBootstrapProviderName},
				infrastructureProvider: []string{"infra"},
				targetNameSpace:        "nsx",
			},
			want: []want{
				{
					provider:        capiProviderConfig,
					version:         "v1.0.0",
					targetNamespace: "nsx",
				},
				{
					provider:        bootstrapProviderConfig,
					version:         "v2.0.0",
					targetNamespace: "nsx",
				},
				{
					provider:        controlPlaneProviderConfig,
					version:         "v2.0.0",
					targetNamespace: "nsx",
				},
				{
					provider:        infraProviderConfig,
					version:         "v3.0.0",
					targetNamespace: "nsx",
				},
			},
			wantErr: false,
		},
		{
			name: "Init (with a NOT empty cluster) adds a provider/current contract",
			field: field{
				client: fakeInitializedCluster(), // clusterctl client for a management cluster with capi installed (with repository setup for capi, bootstrap, control plane and infra provider)
				hasCRD: true,
			},
			args: args{
				coreProvider:           "", // with a NOT empty cluster, a core provider should NOT be added automatically
				bootstrapProvider:      []string{config.KubeadmBootstrapProviderName},
				infrastructureProvider: []string{"infra"},
				targetNameSpace:        "",
			},
			want: []want{
				{
					provider:        bootstrapProviderConfig,
					version:         "v2.0.0",
					targetNamespace: "ns2",
				},
				{
					provider:        infraProviderConfig,
					version:         "v3.0.0",
					targetNamespace: "ns4",
				},
			},
			wantErr: false,
		},
		{
			name: "Fails when opting out from coreProvider automatic installation",
			field: field{
				client: fakeEmptyCluster(), // clusterctl client for an empty management cluster (with repository setup for capi, bootstrap, control plane and infra provider)
			},
			args: args{
				coreProvider:           "-", // not allowed
				bootstrapProvider:      nil,
				controlPlaneProvider:   nil,
				infrastructureProvider: nil,
				targetNameSpace:        "",
			},
			want:    nil,
			wantErr: true,
		},
		{
			name: "Fails when coreProvider is a provider with the wrong type",
			field: field{
				client: fakeEmptyCluster(), // clusterctl client for an empty management cluster (with repository setup for capi, bootstrap, control plane and infra provider)
			},
			args: args{
				coreProvider:           "infra", // wrong
				bootstrapProvider:      nil,
				controlPlaneProvider:   nil,
				infrastructureProvider: nil,
				targetNameSpace:        "",
			},
			want:    nil,
			wantErr: true,
		},
		{
			name: "Fails when bootstrapProvider list contains providers of the wrong type",
			field: field{
				client: fakeEmptyCluster(), // clusterctl client for an empty management cluster (with repository setup for capi, bootstrap, control plane and infra provider)
			},
			args: args{
				coreProvider:           "",
				bootstrapProvider:      []string{"infra"}, // wrong
				controlPlaneProvider:   nil,
				infrastructureProvider: nil,
				targetNameSpace:        "",
			},
			want:    nil,
			wantErr: true,
		},
		{
			name: "Fails when controlPlaneProvider list contains providers of the wrong type",
			field: field{
				client: fakeEmptyCluster(), // clusterctl client for an empty management cluster (with repository setup for capi, bootstrap, control plane and infra provider)
			},
			args: args{
				coreProvider:           "",
				bootstrapProvider:      nil,
				controlPlaneProvider:   []string{"infra"}, // wrong
				infrastructureProvider: nil,
				targetNameSpace:        "",
			},
			want:    nil,
			wantErr: true,
		},
		{
			name: "Fails when infrastructureProvider list contains providers of the wrong type",
			field: field{
				client: fakeEmptyCluster(), // clusterctl client for an empty management cluster (with repository setup for capi, bootstrap, control plane and infra provider)
			},
			args: args{
				coreProvider:           "",
				bootstrapProvider:      nil,
				controlPlaneProvider:   nil,
				infrastructureProvider: []string{config.KubeadmBootstrapProviderName}, // wrong
				targetNameSpace:        "",
			},
			want:    nil,
			wantErr: true,
		},
		{
			name: "Init (with an empty cluster) with custom provider versions/previous contract, not supported",
			field: field{
				client: fakeEmptyCluster(), // clusterctl client for an empty management cluster (with repository setup for capi, bootstrap, control plane and infra provider)
				hasCRD: false,
			},
			args: args{
				coreProvider:           fmt.Sprintf("%s:v0.9.0", config.ClusterAPIProviderName),
				bootstrapProvider:      []string{fmt.Sprintf("%s:v0.9.0", config.KubeadmBootstrapProviderName)},
				controlPlaneProvider:   []string{fmt.Sprintf("%s:v0.9.0", config.KubeadmControlPlaneProviderName)},
				infrastructureProvider: []string{"infra:v0.9.0"},
				targetNameSpace:        "",
			},
			wantErr: true,
		},
		{
			name: "Init (with a NOT empty cluster) adds a provider/previous contract, not supported",
			field: field{
				client: fakeInitializedCluster(), // clusterctl client for a management cluster with capi installed (with repository setup for capi, bootstrap, control plane and infra provider)
				hasCRD: true,
			},
			args: args{
				coreProvider:           "", // with a NOT empty cluster, a core provider should NOT be added automatically
				bootstrapProvider:      []string{fmt.Sprintf("%s:v0.9.0", config.KubeadmBootstrapProviderName)},
				infrastructureProvider: []string{"infra:v0.9.0"},
				targetNameSpace:        "",
			},
			wantErr: true,
		},
		{
			name: "Init (with a NOT empty cluster) adds the same core provider version again - should ignore duplicate",
			field: field{
				client: fakeClusterWithCoreProvider(), // clusterctl client for a management cluster with CoreProvider cluster-api already installed.
				hasCRD: true,
			},
			args: args{
				coreProvider:           "cluster-api:v1.0.0", // core provider of the same version is already installed on the cluster, so it should be skipped.
				bootstrapProvider:      []string{},
				infrastructureProvider: []string{"infra"},
				targetNameSpace:        "",
			},
			want: []want{
				// Only the infra provider should be installed. Core provider should be skipped.
				{
					provider:        infraProviderConfig,
					version:         "v3.0.0",
					targetNamespace: "ns4",
				},
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			if tt.field.hasCRD {
				input := cluster.Kubeconfig{Path: "kubeconfig", Context: "mgmt-context"}
				g.Expect(tt.field.client.clusters[input].ProviderInventory().EnsureCustomResourceDefinitions(ctx)).To(Succeed())
			}

			got, err := tt.field.client.Init(ctx, InitOptions{
				Kubeconfig:              Kubeconfig{Path: "kubeconfig", Context: "mgmt-context"},
				CoreProvider:            tt.args.coreProvider,
				BootstrapProviders:      tt.args.bootstrapProvider,
				ControlPlaneProviders:   tt.args.controlPlaneProvider,
				InfrastructureProviders: tt.args.infrastructureProvider,
				TargetNamespace:         tt.args.targetNameSpace,
			})
			if tt.wantErr {
				g.Expect(err).To(HaveOccurred())
				return
			}
			g.Expect(err).ToNot(HaveOccurred())

			g.Expect(got).To(HaveLen(len(tt.want)))
			for i, gItem := range got {
				w := tt.want[i]
				g.Expect(gItem.Name()).To(Equal(w.provider.Name()))
				g.Expect(gItem.Type()).To(Equal(w.provider.Type()))
				g.Expect(gItem.Version()).To(Equal(w.version))
				g.Expect(gItem.TargetNamespace()).To(Equal(w.targetNamespace))
			}
		})
	}
}

var (
	capiProviderConfig         = config.NewProvider(config.ClusterAPIProviderName, "url", clusterctlv1.CoreProviderType)
	bootstrapProviderConfig    = config.NewProvider(config.KubeadmBootstrapProviderName, "url", clusterctlv1.BootstrapProviderType)
	controlPlaneProviderConfig = config.NewProvider(config.KubeadmControlPlaneProviderName, "url", clusterctlv1.ControlPlaneProviderType)
	infraProviderConfig        = config.NewProvider("infra", "url", clusterctlv1.InfrastructureProviderType)
)

// setupCluster sets up a cluster client and the fake configuration for testing.
func setupCluster(providers []Provider, certManagerClient cluster.CertManagerClient) (*fakeConfigClient, *fakeClient) {
	// create a config variables client which does not have the value for
	// SOME_VARIABLE as expected in the infra components YAML
	cfg := newFakeConfig(ctx).
		WithVar("ANOTHER_VARIABLE", "value").
		WithProvider(capiProviderConfig).
		WithProvider(infraProviderConfig)

	for _, provider := range providers {
		cfg.WithProvider(provider)
	}

	frepositories := fakeRepositories(cfg, providers)
	cluster := fakeCluster(cfg, frepositories, certManagerClient)
	fc := fakeClusterCtlClient(cfg, frepositories, []*fakeClusterClient{cluster})
	return cfg, fc
}

// fakeEmptyCluster returns a clusterctl client for an empty management cluster
// (with repository setup for capi, bootstrap, control plane and infra provider).
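// The fake repositories behind it pin the default versions asserted throughout
// these tests: cluster-api v1.0.0 (ns1), kubeadm bootstrap v2.0.0 (ns2),
// kubeadm control plane v2.0.0 (ns3) and the "infra" provider v3.0.0 (ns4);
// see fakeRepositories for the full version/contract matrix.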
func fakeEmptyCluster() *fakeClient {
	// create a config variables client which contains the value for the
	// variable required
	config1 := fakeConfig(
		[]config.Provider{capiProviderConfig, bootstrapProviderConfig, controlPlaneProviderConfig, infraProviderConfig},
		map[string]string{"SOME_VARIABLE": "value"},
	)

	// fake repository for capi, bootstrap, control plane and infra provider (matching provider's config)
	repositories := fakeRepositories(config1, nil)
	// fake empty cluster from fake repository for capi, bootstrap, control plane
	// and infra provider (matching provider's config)
	cluster1 := fakeCluster(config1, repositories, newFakeCertManagerClient(nil, nil))

	client := fakeClusterCtlClient(config1, repositories, []*fakeClusterClient{cluster1})
	return client
}

func fakeConfig(providers []config.Provider, variables map[string]string) *fakeConfigClient {
	config := newFakeConfig(ctx)
	for _, p := range providers {
		config = config.WithProvider(p)
	}
	for k, v := range variables {
		config = config.WithVar(k, v)
	}
	return config
}

func fakeCluster(config *fakeConfigClient, repos []*fakeRepositoryClient, certManagerClient cluster.CertManagerClient) *fakeClusterClient {
	cluster := newFakeCluster(cluster.Kubeconfig{Path: "kubeconfig", Context: "mgmt-context"}, config)
	for _, r := range repos {
		cluster = cluster.WithRepository(r)
	}
	cluster.WithCertManagerClient(certManagerClient)
	return cluster
}

// fakeRepositories returns a base set of repositories for the different types
// of providers.
func fakeRepositories(config *fakeConfigClient, providers []Provider) []*fakeRepositoryClient {
	repository1 := newFakeRepository(ctx, capiProviderConfig, config).
		WithPaths("root", "components.yaml").
		WithDefaultVersion("v1.0.0").
		WithFile("v0.9.0", "components.yaml", componentsYAML("ns1")).
		WithMetadata("v0.9.0", &clusterctlv1.Metadata{
			ReleaseSeries: []clusterctlv1.ReleaseSeries{
				{Major: 0, Minor: 9, Contract: test.PreviousCAPIContractNotSupported},
			},
		}).
		WithFile("v1.0.0", "components.yaml", componentsYAML("ns1")).
		WithMetadata("v1.0.0", &clusterctlv1.Metadata{
			ReleaseSeries: []clusterctlv1.ReleaseSeries{
				{Major: 0, Minor: 9, Contract: test.PreviousCAPIContractNotSupported},
				{Major: 1, Minor: 0, Contract: test.CurrentCAPIContract},
			},
		}).
		WithFile("v1.1.0", "components.yaml", componentsYAML("ns1")).
		WithMetadata("v1.1.0", &clusterctlv1.Metadata{
			ReleaseSeries: []clusterctlv1.ReleaseSeries{
				{Major: 0, Minor: 9, Contract: test.PreviousCAPIContractNotSupported},
				{Major: 1, Minor: 1, Contract: test.CurrentCAPIContract},
			},
		})
	repository2 := newFakeRepository(ctx, bootstrapProviderConfig, config).
		WithPaths("root", "components.yaml").
		WithDefaultVersion("v2.0.0").
		WithFile("v0.9.0", "components.yaml", componentsYAML("ns1")).
		WithMetadata("v0.9.0", &clusterctlv1.Metadata{
			ReleaseSeries: []clusterctlv1.ReleaseSeries{
				{Major: 0, Minor: 9, Contract: test.PreviousCAPIContractNotSupported},
			},
		}).
		WithFile("v2.0.0", "components.yaml", componentsYAML("ns2")).
		WithMetadata("v2.0.0", &clusterctlv1.Metadata{
			ReleaseSeries: []clusterctlv1.ReleaseSeries{
				{Major: 0, Minor: 9, Contract: test.PreviousCAPIContractNotSupported},
				{Major: 2, Minor: 0, Contract: test.CurrentCAPIContract},
			},
		}).
		WithFile("v2.1.0", "components.yaml", componentsYAML("ns2")).
		WithMetadata("v2.1.0", &clusterctlv1.Metadata{
			ReleaseSeries: []clusterctlv1.ReleaseSeries{
				{Major: 0, Minor: 9, Contract: test.PreviousCAPIContractNotSupported},
				{Major: 2, Minor: 1, Contract: test.CurrentCAPIContract},
			},
		})
	repository3 := newFakeRepository(ctx, controlPlaneProviderConfig, config).
		WithPaths("root", "components.yaml").
		WithDefaultVersion("v2.0.0").
		WithFile("v0.9.0", "components.yaml", componentsYAML("ns1")).
		WithMetadata("v0.9.0", &clusterctlv1.Metadata{
			ReleaseSeries: []clusterctlv1.ReleaseSeries{
				{Major: 0, Minor: 9, Contract: test.PreviousCAPIContractNotSupported},
			},
		}).
		WithFile("v2.0.0", "components.yaml", componentsYAML("ns3")).
		WithMetadata("v2.0.0", &clusterctlv1.Metadata{
			ReleaseSeries: []clusterctlv1.ReleaseSeries{
				{Major: 0, Minor: 9, Contract: test.PreviousCAPIContractNotSupported},
				{Major: 2, Minor: 0, Contract: test.CurrentCAPIContract},
			},
		}).
		WithFile("v2.1.0", "components.yaml", componentsYAML("ns3")).
		WithMetadata("v2.1.0", &clusterctlv1.Metadata{
			ReleaseSeries: []clusterctlv1.ReleaseSeries{
				{Major: 0, Minor: 9, Contract: test.PreviousCAPIContractNotSupported},
				{Major: 2, Minor: 1, Contract: test.CurrentCAPIContract},
			},
		})
	repository4 := newFakeRepository(ctx, infraProviderConfig, config).
		WithPaths("root", "components.yaml").
		WithDefaultVersion("v3.0.0").
		WithFile("v0.9.0", "components.yaml", componentsYAML("ns1")).
		WithMetadata("v0.9.0", &clusterctlv1.Metadata{
			ReleaseSeries: []clusterctlv1.ReleaseSeries{
				{Major: 0, Minor: 9, Contract: test.PreviousCAPIContractNotSupported},
			},
		}).
		WithFile("v3.0.0", "components.yaml", infraComponentsYAML("ns4")).
		WithMetadata("v3.0.0", &clusterctlv1.Metadata{
			ReleaseSeries: []clusterctlv1.ReleaseSeries{
				{Major: 0, Minor: 9, Contract: test.PreviousCAPIContractNotSupported},
				{Major: 3, Minor: 0, Contract: test.CurrentCAPIContract},
			},
		}).
		WithFile("v3.1.0", "components.yaml", infraComponentsYAML("ns4")).
		WithMetadata("v3.1.0", &clusterctlv1.Metadata{
			ReleaseSeries: []clusterctlv1.ReleaseSeries{
				{Major: 0, Minor: 9, Contract: test.PreviousCAPIContractNotSupported},
				{Major: 3, Minor: 1, Contract: test.CurrentCAPIContract},
			},
		}).
		WithFile("v3.0.0", "cluster-template.yaml", templateYAML("ns4", "test"))

	var providerRepositories = []*fakeRepositoryClient{repository1, repository2, repository3, repository4}

	for _, provider := range providers {
		providerRepositories = append(providerRepositories,
			newFakeRepository(ctx, provider, config).
				WithPaths("root", "components.yaml").
				WithDefaultVersion("v2.0.0").
				WithFile("v2.0.0", "components.yaml", componentsYAML("ns2")).
				WithMetadata("v2.0.0", &clusterctlv1.Metadata{
					ReleaseSeries: []clusterctlv1.ReleaseSeries{
						{Major: 2, Minor: 0, Contract: test.CurrentCAPIContract},
					},
				}))
	}

	return providerRepositories
}

func fakeClusterCtlClient(config *fakeConfigClient, repos []*fakeRepositoryClient, clusters []*fakeClusterClient) *fakeClient {
	client := newFakeClient(ctx, config)
	for _, r := range repos {
		client = client.WithRepository(r)
	}
	for _, c := range clusters {
		client = client.WithCluster(c)
	}
	return client
}

// fakeInitializedCluster returns a clusterctl client for a management cluster with capi installed
// (with repository setup for capi, bootstrap, control plane and infra provider).
// It references a cluster client that corresponds to the mgmt-context in the
// kubeconfig file.
func fakeInitializedCluster() *fakeClient {
	client := fakeEmptyCluster()

	input := cluster.Kubeconfig{
		Path:    "kubeconfig",
		Context: "mgmt-context",
	}
	p := client.clusters[input].Proxy()
	fp := p.(*test.FakeProxy)

	fp.WithProviderInventory(capiProviderConfig.Name(), capiProviderConfig.Type(), "v1.0.0", "capi-system")

	return client
}

// fakeClusterWithCoreProvider returns a clusterctl client for a management cluster
// with the CoreProvider cluster-api v1.0.0 already installed in ns1.
func fakeClusterWithCoreProvider() *fakeClient {
	client := fakeEmptyCluster()

	input := cluster.Kubeconfig{
		Path:    "kubeconfig",
		Context: "mgmt-context",
	}
	p := client.clusters[input].Proxy()
	fp := p.(*test.FakeProxy)

	fp.WithProviderInventory(capiProviderConfig.Name(), capiProviderConfig.Type(), "v1.0.0", "ns1")

	return client
}

func componentsYAML(ns string) []byte {
	var namespaceYaml = []byte("apiVersion: v1\n" +
		"kind: Namespace\n" +
		"metadata:\n" +
		fmt.Sprintf("  name: %s", ns))

	var podYaml = []byte("apiVersion: v1\n" +
		"kind: Pod\n" +
		"metadata:\n" +
		"  name: manager")

	return utilyaml.JoinYaml(namespaceYaml, podYaml)
}

func templateYAML(ns string, clusterName string) []byte {
	var podYaml = []byte("apiVersion: v1\n" +
		"kind: Cluster\n" +
		"metadata:\n" +
		fmt.Sprintf("  name: %s\n", clusterName) +
		fmt.Sprintf("  namespace: %s", ns))

	return podYaml
}

func mangedTopologyTemplateYAML(ns, clusterName, clusterClassName string) []byte {
	return []byte(fmt.Sprintf("apiVersion: %s\n", clusterv1.GroupVersion.String()) +
		"kind: Cluster\n" +
		"metadata:\n" +
		fmt.Sprintf("  name: %s\n", clusterName) +
		fmt.Sprintf("  namespace: %s\n", ns) +
		"spec:\n" +
		"  topology:\n" +
		fmt.Sprintf("    class: %s", clusterClassName))
}

func clusterClassYAML(ns, clusterClassName string) []byte {
	var podYaml = []byte(fmt.Sprintf("apiVersion: %s\n", clusterv1.GroupVersion.String()) +
		"kind: ClusterClass\n" +
		"metadata:\n" +
		fmt.Sprintf("  name: %s\n", clusterClassName) +
		fmt.Sprintf("  namespace: %s", ns))

	return podYaml
}

// infraComponentsYAML defines a namespace and deployment with container
// images and a variable.
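// The deployment image is the one expected by Test_clusterctlClient_InitImages,
// and the ${SOME_VARIABLE} reference is deliberate: fakeEmptyCluster defines the
// variable, while setupCluster and Test_clusterctlClient_Init leave it unset to
// exercise the missing-variable paths.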
func infraComponentsYAML(namespace string) []byte {
	var infraComponentsYAML = `---
apiVersion: v1
kind: Namespace
metadata:
  name: %[1]s
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: capa-controller-manager
  namespace: %[1]s
spec:
  template:
    spec:
      containers:
      - image: registry.k8s.io/cluster-api-aws/cluster-api-aws-controller:v0.5.3
        name: manager
        volumeMounts:
        - mountPath: /home/.aws
          name: credentials
      volumes:
      - name: credentials
        secret:
          secretName: ${SOME_VARIABLE}
`
	return []byte(fmt.Sprintf(infraComponentsYAML, namespace))
}
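// The fixtures above compose as in the following sketch (illustrative only; the
// "custom" provider name and URL are hypothetical and not used by this file):
//
//	cfg, c := setupCluster(
//		[]Provider{config.NewProvider("custom", "custom-url", clusterctlv1.InfrastructureProviderType)},
//		newFakeCertManagerClient(nil, nil),
//	)
//	_ = cfg
//	images, err := c.InitImages(ctx, InitOptions{
//		Kubeconfig:              Kubeconfig{Path: "kubeconfig", Context: "mgmt-context"},
//		InfrastructureProviders: []string{"custom"},
//	})
//	_, _ = images, err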