// github.com/openshift/installer@v1.4.17/pkg/asset/machines/worker.go

package machines

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/utils/pointer"
	ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1"
	"sigs.k8s.io/yaml"

	configv1 "github.com/openshift/api/config/v1"
	machinev1 "github.com/openshift/api/machine/v1"
	machinev1alpha1 "github.com/openshift/api/machine/v1alpha1"
	machinev1beta1 "github.com/openshift/api/machine/v1beta1"
	mcfgv1 "github.com/openshift/api/machineconfiguration/v1"
	baremetalapi "github.com/openshift/cluster-api-provider-baremetal/pkg/apis"
	baremetalprovider "github.com/openshift/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1"
	libvirtapi "github.com/openshift/cluster-api-provider-libvirt/pkg/apis"
	libvirtprovider "github.com/openshift/cluster-api-provider-libvirt/pkg/apis/libvirtproviderconfig/v1beta1"
	ovirtproviderapi "github.com/openshift/cluster-api-provider-ovirt/pkg/apis"
	ovirtprovider "github.com/openshift/cluster-api-provider-ovirt/pkg/apis/ovirtprovider/v1beta1"
	"github.com/openshift/installer/pkg/asset"
	"github.com/openshift/installer/pkg/asset/ignition/machine"
	"github.com/openshift/installer/pkg/asset/installconfig"
	icaws "github.com/openshift/installer/pkg/asset/installconfig/aws"
	icazure "github.com/openshift/installer/pkg/asset/installconfig/azure"
	icgcp "github.com/openshift/installer/pkg/asset/installconfig/gcp"
	"github.com/openshift/installer/pkg/asset/machines/aws"
	"github.com/openshift/installer/pkg/asset/machines/azure"
	"github.com/openshift/installer/pkg/asset/machines/baremetal"
	"github.com/openshift/installer/pkg/asset/machines/gcp"
	"github.com/openshift/installer/pkg/asset/machines/ibmcloud"
	"github.com/openshift/installer/pkg/asset/machines/machineconfig"
	"github.com/openshift/installer/pkg/asset/machines/nutanix"
	"github.com/openshift/installer/pkg/asset/machines/openstack"
	"github.com/openshift/installer/pkg/asset/machines/ovirt"
	"github.com/openshift/installer/pkg/asset/machines/powervs"
	"github.com/openshift/installer/pkg/asset/machines/vsphere"
	"github.com/openshift/installer/pkg/asset/rhcos"
	rhcosutils "github.com/openshift/installer/pkg/rhcos"
	"github.com/openshift/installer/pkg/types"
	awstypes "github.com/openshift/installer/pkg/types/aws"
	awsdefaults "github.com/openshift/installer/pkg/types/aws/defaults"
	azuretypes "github.com/openshift/installer/pkg/types/azure"
	azuredefaults "github.com/openshift/installer/pkg/types/azure/defaults"
	baremetaltypes "github.com/openshift/installer/pkg/types/baremetal"
	externaltypes "github.com/openshift/installer/pkg/types/external"
	gcptypes "github.com/openshift/installer/pkg/types/gcp"
	ibmcloudtypes "github.com/openshift/installer/pkg/types/ibmcloud"
	nonetypes "github.com/openshift/installer/pkg/types/none"
	nutanixtypes "github.com/openshift/installer/pkg/types/nutanix"
	openstacktypes "github.com/openshift/installer/pkg/types/openstack"
	ovirttypes "github.com/openshift/installer/pkg/types/ovirt"
	powervstypes "github.com/openshift/installer/pkg/types/powervs"
	vspheretypes "github.com/openshift/installer/pkg/types/vsphere"
	ibmcloudapi "github.com/openshift/machine-api-provider-ibmcloud/pkg/apis"
	ibmcloudprovider "github.com/openshift/machine-api-provider-ibmcloud/pkg/apis/ibmcloudprovider/v1"
)

const (
	// workerMachineSetFileName is the format string for constructing the worker MachineSet filenames.
	workerMachineSetFileName = "99_openshift-cluster-api_worker-machineset-%s.yaml"

	// workerMachineFileName is the format string for constructing the worker Machine filenames.
	workerMachineFileName = "99_openshift-cluster-api_worker-machines-%s.yaml"

	// workerUserDataFileName is the filename used for the worker user-data secret.
	workerUserDataFileName = "99_openshift-cluster-api_worker-user-data-secret.yaml"

	// decimalRootVolumeSize is the size in GB we use for some platforms.
	// See below.
	decimalRootVolumeSize = 120

	// powerOfTwoRootVolumeSize is the size in GB we use for other platforms.
	// The reasons for the specific choices between these two may boil down
	// to which section of code the person adding a platform was copy-pasting from.
	// https://github.com/openshift/openshift-docs/blob/main/modules/installation-requirements-user-infra.adoc#minimum-resource-requirements
	powerOfTwoRootVolumeSize = 128
)

var (
	workerMachineSetFileNamePattern = fmt.Sprintf(workerMachineSetFileName, "*")
	workerMachineFileNamePattern    = fmt.Sprintf(workerMachineFileName, "*")
	workerIPClaimFileNamePattern    = fmt.Sprintf(ipClaimFileName, "*worker*")
	workerIPAddressFileNamePattern  = fmt.Sprintf(ipAddressFileName, "*worker*")

	_ asset.WritableAsset = (*Worker)(nil)
)

func defaultAWSMachinePoolPlatform(poolName string) awstypes.MachinePool {
	defaultEBSType := awstypes.VolumeTypeGp3

	// gp3 is not offered in all local-zones locations used by Edge Pools.
	// Once it is available, it can be used as default for all machine pools.
	// https://aws.amazon.com/about-aws/global-infrastructure/localzones/features
	if poolName == types.MachinePoolEdgeRoleName {
		defaultEBSType = awstypes.VolumeTypeGp2
	}
	return awstypes.MachinePool{
		EC2RootVolume: awstypes.EC2RootVolume{
			Type: defaultEBSType,
			Size: decimalRootVolumeSize,
		},
	}
}

func defaultAzureMachinePoolPlatform() azuretypes.MachinePool {
	return azuretypes.MachinePool{
		OSDisk: azuretypes.OSDisk{
			DiskSizeGB: powerOfTwoRootVolumeSize,
			DiskType:   azuretypes.DefaultDiskType,
		},
	}
}

func defaultGCPMachinePoolPlatform(arch types.Architecture) gcptypes.MachinePool {
	return gcptypes.MachinePool{
		InstanceType: icgcp.DefaultInstanceTypeForArch(arch),
		OSDisk: gcptypes.OSDisk{
			DiskSizeGB: powerOfTwoRootVolumeSize,
			DiskType:   "pd-ssd",
		},
	}
}

func defaultIBMCloudMachinePoolPlatform() ibmcloudtypes.MachinePool {
	return ibmcloudtypes.MachinePool{
		InstanceType: "bx2-4x16",
	}
}

func defaultOpenStackMachinePoolPlatform() openstacktypes.MachinePool {
	return openstacktypes.MachinePool{
		Zones: []string{""},
	}
}

func defaultBareMetalMachinePoolPlatform() baremetaltypes.MachinePool {
	return baremetaltypes.MachinePool{}
}

func defaultOvirtMachinePoolPlatform() ovirttypes.MachinePool {
	return ovirttypes.MachinePool{
		CPU: &ovirttypes.CPU{
			Cores:   4,
			Sockets: 1,
			Threads: 1,
		},
		MemoryMB: 16348,
		OSDisk: &ovirttypes.Disk{
			SizeGB: decimalRootVolumeSize,
		},
		VMType:            ovirttypes.VMTypeServer,
		AutoPinningPolicy: ovirttypes.AutoPinningNone,
	}
}

func defaultVSphereMachinePoolPlatform() vspheretypes.MachinePool {
	return vspheretypes.MachinePool{
		NumCPUs:           4,
		NumCoresPerSocket: 4,
		MemoryMiB:         16384,
		OSDisk: vspheretypes.OSDisk{
			DiskSizeGB: decimalRootVolumeSize,
		},
	}
}

func defaultPowerVSMachinePoolPlatform(ic *types.InstallConfig) powervstypes.MachinePool {
	var (
		defaultMp powervstypes.MachinePool
		sysTypes  []string
		err       error
	)

	defaultMp = powervstypes.MachinePool{
		MemoryGiB:  32,
		Processors: intstr.FromString("0.5"),
		ProcType:   machinev1.PowerVSProcessorTypeShared,
		SysType:    "s922",
	}

	sysTypes, err = powervstypes.AvailableSysTypes(ic.PowerVS.Region)
	if err == nil {
		defaultMp.SysType = sysTypes[0]
	} else {
		logrus.Warnf("For given region %v, AvailableSysTypes returns %v", ic.PowerVS.Region, err)
	}

	return defaultMp
}

func defaultNutanixMachinePoolPlatform() nutanixtypes.MachinePool {
	return nutanixtypes.MachinePool{
		NumCPUs:           4,
		NumCoresPerSocket: 1,
		MemoryMiB:         16384,
		OSDisk: nutanixtypes.OSDisk{
			DiskSizeGiB: decimalRootVolumeSize,
		},
	}
}

// awsSetPreferredInstanceByEdgeZone discovers a supported instance type for each zone in
// the edge pool, using the preferred instance list of the regular worker compute pool.
// Each machine set in the edge pool, created per zone, can use a different instance
// type depending on the instance offerings in that location (Local Zones).
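// It returns false when a preferred instance type could not be resolved for one or more zones.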
func awsSetPreferredInstanceByEdgeZone(ctx context.Context, defaultTypes []string, meta *icaws.Metadata, zones icaws.Zones) (ok bool) {
	allZonesFound := true
	for zone := range zones {
		preferredType, err := aws.PreferredInstanceType(ctx, meta, defaultTypes, []string{zone})
		if err != nil {
			logrus.Warnf("unable to select instanceType on the zone[%v] from the preferred list: %v. You must update the MachineSet manifest: %v", zone, defaultTypes, err)
			allZonesFound = false
			continue
		}
		if _, ok := zones[zone]; !ok {
			zones[zone] = &icaws.Zone{Name: zone}
		}
		zones[zone].PreferredInstanceType = preferredType
	}
	return allZonesFound
}

// Worker generates the machinesets for the `worker` machine pool.
type Worker struct {
	UserDataFile       *asset.File
	MachineConfigFiles []*asset.File
	MachineSetFiles    []*asset.File
	MachineFiles       []*asset.File
	IPClaimFiles       []*asset.File
	IPAddrFiles        []*asset.File
}

// Name returns a human-friendly name for the Worker Asset.
func (w *Worker) Name() string {
	return "Worker Machines"
}

// Dependencies returns all of the dependencies directly needed by the
// Worker asset.
func (w *Worker) Dependencies() []asset.Asset {
	return []asset.Asset{
		&installconfig.ClusterID{},
		// PlatformCredsCheck just checks the creds (and asks, if needed).
		// We do not actually use it in this asset directly, hence
		// it is put in the dependencies but not fetched in Generate.
		&installconfig.PlatformCredsCheck{},
		&installconfig.InstallConfig{},
		new(rhcos.Image),
		new(rhcos.Release),
		&machine.Worker{},
	}
}

// Generate generates the Worker asset.
//
//nolint:gocyclo
func (w *Worker) Generate(ctx context.Context, dependencies asset.Parents) error {
	clusterID := &installconfig.ClusterID{}
	installConfig := &installconfig.InstallConfig{}
	rhcosImage := new(rhcos.Image)
	rhcosRelease := new(rhcos.Release)
	wign := &machine.Worker{}
	dependencies.Get(clusterID, installConfig, rhcosImage, rhcosRelease, wign)

	workerUserDataSecretName := "worker-user-data"

	machines := []machinev1beta1.Machine{}
	machineConfigs := []*mcfgv1.MachineConfig{}
	machineSets := []runtime.Object{}
	var ipClaims []ipamv1.IPAddressClaim
	var ipAddrs []ipamv1.IPAddress
	var err error
	ic := installConfig.Config
	for _, pool := range ic.Compute {
		pool := pool // this makes golint happy... G601: Implicit memory aliasing in for loop. (gosec)
		if pool.Hyperthreading == types.HyperthreadingDisabled {
			ignHT, err := machineconfig.ForHyperthreadingDisabled("worker")
			if err != nil {
				return errors.Wrap(err, "failed to create ignition for hyperthreading disabled for worker machines")
			}
			machineConfigs = append(machineConfigs, ignHT)
		}
		if ic.SSHKey != "" {
			ignSSH, err := machineconfig.ForAuthorizedKeys(ic.SSHKey, "worker")
			if err != nil {
				return errors.Wrap(err, "failed to create ignition for authorized SSH keys for worker machines")
			}
			machineConfigs = append(machineConfigs, ignSSH)
		}
		if ic.FIPS {
			ignFIPS, err := machineconfig.ForFIPSEnabled("worker")
			if err != nil {
				return errors.Wrap(err, "failed to create ignition for FIPS enabled for worker machines")
			}
			machineConfigs = append(machineConfigs, ignFIPS)
		}
		if ic.Platform.Name() == powervstypes.Name {
			// always enable multipath for powervs.
			ignMultipath, err := machineconfig.ForMultipathEnabled("worker")
			if err != nil {
				return errors.Wrap(err, "failed to create ignition for multipath enabled for worker machines")
			}
			machineConfigs = append(machineConfigs, ignMultipath)

			// set SMT level if specified for powervs.
			if pool.Platform.PowerVS != nil && pool.Platform.PowerVS.SMTLevel != "" {
				ignPowerSMT, err := machineconfig.ForPowerSMT("worker", pool.Platform.PowerVS.SMTLevel)
				if err != nil {
					return errors.Wrap(err, "failed to create ignition for Power SMT for worker machines")
				}
				machineConfigs = append(machineConfigs, ignPowerSMT)
			}
		}
		// The maximum number of networks supported on ServiceNetwork is two, one IPv4 and one IPv6 network.
		// The cluster-network-operator handles the validation of this field.
		// Reference: https://github.com/openshift/cluster-network-operator/blob/fc3e0e25b4cfa43e14122bdcdd6d7f2585017d75/pkg/network/cluster_config.go#L45-L52
		if ic.Networking != nil && len(ic.Networking.ServiceNetwork) == 2 &&
			(ic.Platform.Name() == openstacktypes.Name || ic.Platform.Name() == vspheretypes.Name) {
			// Only configure kernel args for dual-stack clusters.
			ignIPv6, err := machineconfig.ForDualStackAddresses("worker")
			if err != nil {
				return errors.Wrap(err, "failed to create ignition to configure IPv6 for worker machines")
			}
			machineConfigs = append(machineConfigs, ignIPv6)
		}

		switch ic.Platform.Name() {
		case awstypes.Name:
			subnets := icaws.Subnets{}
			zones := icaws.Zones{}
			if len(ic.Platform.AWS.Subnets) > 0 {
				var subnetsMeta icaws.Subnets
				switch pool.Name {
				case types.MachinePoolEdgeRoleName:
					subnetsMeta, err = installConfig.AWS.EdgeSubnets(ctx)
					if err != nil {
						return err
					}
				default:
					subnetsMeta, err = installConfig.AWS.PrivateSubnets(ctx)
					if err != nil {
						return err
					}
				}
				for _, subnet := range subnetsMeta {
					subnets[subnet.Zone.Name] = subnet
				}
			}
			mpool := defaultAWSMachinePoolPlatform(pool.Name)

			osImage := strings.SplitN(rhcosImage.Compute, ",", 2)
			osImageID := osImage[0]
			if len(osImage) == 2 {
				osImageID = "" // the AMI will be generated later on
			}
			mpool.AMIID = osImageID

			mpool.Set(ic.Platform.AWS.DefaultMachinePlatform)
			mpool.Set(pool.Platform.AWS)
			zoneDefaults := false
			if len(mpool.Zones) == 0 {
				if len(subnets) > 0 {
					for _, subnet := range subnets {
						if subnet.Zone == nil {
							// err is nil at this point, so build a new error rather than wrapping it.
							return errors.Errorf("failed to find zone attributes for subnet %s", subnet.ID)
						}
						mpool.Zones = append(mpool.Zones, subnet.Zone.Name)
						zones[subnet.Zone.Name] = subnets[subnet.Zone.Name].Zone
					}
				} else {
					mpool.Zones, err = installConfig.AWS.AvailabilityZones(ctx)
					if err != nil {
						return err
					}
					zoneDefaults = true
				}
			}

			// Requirements when using edge compute pools to populate machine sets.
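			// The edge pool needs the zone attributes resolved for each selected zone; an unset
			// or zero replica count defaults to one replica per zone (len(mpool.Zones)).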
			if pool.Name == types.MachinePoolEdgeRoleName {
				err = installConfig.AWS.SetZoneAttributes(ctx, mpool.Zones, zones)
				if err != nil {
					return errors.Wrap(err, "failed to retrieve zone attributes for edge compute pool")
				}

				if pool.Replicas == nil || *pool.Replicas == 0 {
					pool.Replicas = pointer.Int64(int64(len(mpool.Zones)))
				}
			}

			if mpool.InstanceType == "" {
				arch := installConfig.Config.ControlPlane.Architecture
				if len(installConfig.Config.Compute) > 0 {
					arch = installConfig.Config.Compute[0].Architecture
				}
				instanceTypes := awsdefaults.InstanceTypes(installConfig.Config.Platform.AWS.Region, arch, configv1.HighlyAvailableTopologyMode)
				switch pool.Name {
				case types.MachinePoolEdgeRoleName:
					ok := awsSetPreferredInstanceByEdgeZone(ctx, instanceTypes, installConfig.AWS, zones)
					if !ok {
						logrus.Warnf("failed to find preferred instance type for one or more zones in the %s pool, using default: %s", pool.Name, instanceTypes[0])
						mpool.InstanceType = instanceTypes[0]
					}
				default:
					mpool.InstanceType, err = aws.PreferredInstanceType(ctx, installConfig.AWS, instanceTypes, mpool.Zones)
					if err != nil {
						logrus.Warn(errors.Wrapf(err, "failed to find default instance type for %s pool", pool.Name))
						mpool.InstanceType = instanceTypes[0]
					}
				}
			}
			// If the zone list came from the defaults, filter it in case the instance type is not available in some of those zones.
			if zoneDefaults {
				mpool.Zones, err = aws.FilterZonesBasedOnInstanceType(ctx, installConfig.AWS, mpool.InstanceType, mpool.Zones)
				if err != nil {
					logrus.Warn(errors.Wrap(err, "failed to filter zone list"))
				}
			}

			pool.Platform.AWS = &mpool
			sets, err := aws.MachineSets(&aws.MachineSetInput{
				ClusterID:                clusterID.InfraID,
				InstallConfigPlatformAWS: installConfig.Config.Platform.AWS,
				Subnets:                  subnets,
				Zones:                    zones,
				Pool:                     &pool,
				Role:                     pool.Name,
				UserDataSecret:           workerUserDataSecretName,
			})
			if err != nil {
				return errors.Wrap(err, "failed to create worker machine objects")
			}
			for _, set := range sets {
				machineSets = append(machineSets, set)
			}
		case azuretypes.Name:
			mpool := defaultAzureMachinePoolPlatform()
			mpool.InstanceType = azuredefaults.ComputeInstanceType(
				installConfig.Config.Platform.Azure.CloudName,
				installConfig.Config.Platform.Azure.Region,
				pool.Architecture,
			)
			mpool.Set(ic.Platform.Azure.DefaultMachinePlatform)
			mpool.Set(pool.Platform.Azure)

			session, err := installConfig.Azure.Session()
			if err != nil {
				return errors.Wrap(err, "failed to fetch session")
			}

			client := icazure.NewClient(session)
			if len(mpool.Zones) == 0 {
				azs, err := client.GetAvailabilityZones(ctx, ic.Platform.Azure.Region, mpool.InstanceType)
				if err != nil {
					return errors.Wrap(err, "failed to fetch availability zones")
				}
				mpool.Zones = azs
				if len(azs) == 0 {
					// If no AZs are returned, set Zones to []string{""} for convenience in later operations;
					// it marks the pool as non-zonal for the Machine API.
					mpool.Zones = []string{""}
				}
			}

			if mpool.OSImage.Publisher != "" {
				img, ierr := client.GetMarketplaceImage(ctx, ic.Platform.Azure.Region, mpool.OSImage.Publisher, mpool.OSImage.Offer, mpool.OSImage.SKU, mpool.OSImage.Version)
				if ierr != nil {
					return fmt.Errorf("failed to fetch marketplace image: %w", ierr)
				}
				// Publisher is case-sensitive and matched against exactly. Also,
				// the Plan's publisher might not be exactly the same as the
				// Image's publisher.
				if img.Plan != nil && img.Plan.Publisher != nil {
					mpool.OSImage.Publisher = *img.Plan.Publisher
				}
			}
			pool.Platform.Azure = &mpool

			capabilities, err := client.GetVMCapabilities(ctx, mpool.InstanceType, installConfig.Config.Platform.Azure.Region)
			if err != nil {
				return err
			}

			useImageGallery := ic.Platform.Azure.CloudName != azuretypes.StackCloud
			sets, err := azure.MachineSets(clusterID.InfraID, ic, &pool, rhcosImage.Compute, "worker", workerUserDataSecretName, capabilities, useImageGallery)
			if err != nil {
				return errors.Wrap(err, "failed to create worker machine objects")
			}
			for _, set := range sets {
				machineSets = append(machineSets, set)
			}
		case baremetaltypes.Name:
			mpool := defaultBareMetalMachinePoolPlatform()
			mpool.Set(ic.Platform.BareMetal.DefaultMachinePlatform)
			mpool.Set(pool.Platform.BareMetal)
			pool.Platform.BareMetal = &mpool

			enabledCaps := installConfig.Config.GetEnabledCapabilities()
			if enabledCaps.Has(configv1.ClusterVersionCapabilityMachineAPI) {
				// Use the managed user-data secret, since images used by the MachineSet
				// are always up to date.
				workerUserDataSecretName = "worker-user-data-managed"
				sets, err := baremetal.MachineSets(clusterID.InfraID, ic, &pool, "", "worker", workerUserDataSecretName)
				if err != nil {
					return errors.Wrap(err, "failed to create worker machine objects")
				}
				for _, set := range sets {
					machineSets = append(machineSets, set)
				}
			}
		case gcptypes.Name:
			mpool := defaultGCPMachinePoolPlatform(pool.Architecture)
			mpool.Set(ic.Platform.GCP.DefaultMachinePlatform)
			mpool.Set(pool.Platform.GCP)
			if len(mpool.Zones) == 0 {
				azs, err := gcp.ZonesForInstanceType(ic.Platform.GCP.ProjectID, ic.Platform.GCP.Region, mpool.InstanceType)
				if err != nil {
					return errors.Wrap(err, "failed to fetch availability zones")
				}
				mpool.Zones = azs
			}
			pool.Platform.GCP = &mpool
			sets, err := gcp.MachineSets(clusterID.InfraID, ic, &pool, rhcosImage.Compute, "worker", workerUserDataSecretName)
			if err != nil {
				return errors.Wrap(err, "failed to create worker machine objects")
			}
			for _, set := range sets {
				machineSets = append(machineSets, set)
			}
		case ibmcloudtypes.Name:
			subnets := map[string]string{}
			if len(ic.Platform.IBMCloud.ComputeSubnets) > 0 {
				subnetMetas, err := installConfig.IBMCloud.ComputeSubnets(ctx)
				if err != nil {
					return err
				}
				for _, subnet := range subnetMetas {
					subnets[subnet.Zone] = subnet.Name
				}
			}
			mpool := defaultIBMCloudMachinePoolPlatform()
			mpool.Set(ic.Platform.IBMCloud.DefaultMachinePlatform)
			mpool.Set(pool.Platform.IBMCloud)
			if len(mpool.Zones) == 0 {
				azs, err := ibmcloud.AvailabilityZones(ic.Platform.IBMCloud.Region, ic.Platform.IBMCloud.ServiceEndpoints)
				if err != nil {
					return errors.Wrap(err, "failed to fetch availability zones")
				}
				mpool.Zones = azs
			}
			pool.Platform.IBMCloud = &mpool
			sets, err := ibmcloud.MachineSets(clusterID.InfraID, ic, subnets, &pool, "worker", workerUserDataSecretName)
			if err != nil {
				return errors.Wrap(err, "failed to create worker machine objects")
			}
			for _, set := range sets {
				machineSets = append(machineSets, set)
			}
		case openstacktypes.Name:
			mpool := defaultOpenStackMachinePoolPlatform()
			mpool.Set(ic.Platform.OpenStack.DefaultMachinePlatform)
			mpool.Set(pool.Platform.OpenStack)
			pool.Platform.OpenStack = &mpool

			imageName, _ := rhcosutils.GenerateOpenStackImageName(rhcosImage.Compute, clusterID.InfraID)

			sets, err := openstack.MachineSets(ctx, clusterID.InfraID, ic, &pool, imageName, "worker", workerUserDataSecretName)
			if err != nil {
				return fmt.Errorf("failed to create worker machine objects: %w", err)
			}
			for _, set := range sets {
				machineSets = append(machineSets, set)
			}
		case vspheretypes.Name:
			mpool := defaultVSphereMachinePoolPlatform()
			mpool.Set(ic.Platform.VSphere.DefaultMachinePlatform)
			mpool.Set(pool.Platform.VSphere)
			pool.Platform.VSphere = &mpool
			templateName := clusterID.InfraID + "-rhcos"

			sets, err := vsphere.MachineSets(clusterID.InfraID, ic, &pool, templateName, "worker", workerUserDataSecretName)
			if err != nil {
				return errors.Wrap(err, "failed to create worker machine objects")
			}
			for _, set := range sets {
				machineSets = append(machineSets, set)
			}

			// If static IPs are configured, we must generate worker machines and scale the machinesets to 0.
			if ic.Platform.VSphere.Hosts != nil {
				logrus.Debug("Generating worker machines with static IPs.")
				templateName := clusterID.InfraID + "-rhcos"

				data, err := vsphere.Machines(clusterID.InfraID, ic, &pool, templateName, "worker", workerUserDataSecretName)
				if err != nil {
					return errors.Wrap(err, "failed to create worker machine objects")
				}

				machines = data.Machines
				ipClaims = data.IPClaims
				ipAddrs = data.IPAddresses

				logrus.Debugf("Generated %v worker machines.", len(machines))

				for _, ms := range sets {
					ms.Spec.Replicas = pointer.Int32(0)
				}
			}
		case ovirttypes.Name:
			mpool := defaultOvirtMachinePoolPlatform()
			mpool.Set(ic.Platform.Ovirt.DefaultMachinePlatform)
			mpool.Set(pool.Platform.Ovirt)
			pool.Platform.Ovirt = &mpool

			imageName, _ := rhcosutils.GenerateOpenStackImageName(rhcosImage.Compute, clusterID.InfraID)

			sets, err := ovirt.MachineSets(clusterID.InfraID, ic, &pool, imageName, "worker", workerUserDataSecretName)
			if err != nil {
				return errors.Wrap(err, "failed to create worker machine objects for ovirt provider")
			}
			for _, set := range sets {
				machineSets = append(machineSets, set)
			}
		case powervstypes.Name:
			mpool := defaultPowerVSMachinePoolPlatform(ic)
			mpool.Set(ic.Platform.PowerVS.DefaultMachinePlatform)
			mpool.Set(pool.Platform.PowerVS)
			pool.Platform.PowerVS = &mpool
			sets, err := powervs.MachineSets(clusterID.InfraID, ic, &pool, "worker", "worker-user-data")
			if err != nil {
				return errors.Wrap(err, "failed to create worker machine objects for powervs provider")
			}
			for _, set := range sets {
				machineSets = append(machineSets, set)
			}
		case externaltypes.Name, nonetypes.Name:
		case nutanixtypes.Name:
			mpool := defaultNutanixMachinePoolPlatform()
			mpool.Set(ic.Platform.Nutanix.DefaultMachinePlatform)
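			// Per-pool settings are applied after the platform-wide DefaultMachinePlatform,
			// so pool-level values are expected to take precedence.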
			mpool.Set(pool.Platform.Nutanix)
			if err = mpool.ValidateConfig(ic.Platform.Nutanix, "worker"); err != nil {
				return errors.Wrap(err, "failed to create worker machine objects")
			}
			pool.Platform.Nutanix = &mpool
			imageName := nutanixtypes.RHCOSImageName(clusterID.InfraID)

			sets, err := nutanix.MachineSets(clusterID.InfraID, ic, &pool, imageName, "worker", workerUserDataSecretName)
			if err != nil {
				return errors.Wrap(err, "failed to create worker machine objects")
			}
			for _, set := range sets {
				machineSets = append(machineSets, set)
			}
		default:
			return fmt.Errorf("invalid Platform")
		}
	}

	data, err := userDataSecret(workerUserDataSecretName, wign.File.Data)
	if err != nil {
		return errors.Wrap(err, "failed to create user-data secret for worker machines")
	}
	w.UserDataFile = &asset.File{
		Filename: filepath.Join(directory, workerUserDataFileName),
		Data:     data,
	}

	w.MachineConfigFiles, err = machineconfig.Manifests(machineConfigs, "worker", directory)
	if err != nil {
		return errors.Wrap(err, "failed to create MachineConfig manifests for worker machines")
	}

	w.MachineSetFiles = make([]*asset.File, len(machineSets))
	padFormat := fmt.Sprintf("%%0%dd", len(fmt.Sprintf("%d", len(machineSets))))
	for i, machineSet := range machineSets {
		data, err := yaml.Marshal(machineSet)
		if err != nil {
			return errors.Wrapf(err, "marshal worker %d", i)
		}

		padded := fmt.Sprintf(padFormat, i)
		w.MachineSetFiles[i] = &asset.File{
			Filename: filepath.Join(directory, fmt.Sprintf(workerMachineSetFileName, padded)),
			Data:     data,
		}
	}

	w.IPClaimFiles = make([]*asset.File, len(ipClaims))
	for i, claim := range ipClaims {
		data, err := yaml.Marshal(claim)
		if err != nil {
			return errors.Wrapf(err, "marshal ip claim %v", claim.Name)
		}

		w.IPClaimFiles[i] = &asset.File{
			Filename: filepath.Join(directory, fmt.Sprintf(ipClaimFileName, claim.Name)),
			Data:     data,
		}
	}

	w.IPAddrFiles = make([]*asset.File, len(ipAddrs))
	for i, address := range ipAddrs {
		data, err := yaml.Marshal(address)
		if err != nil {
			return errors.Wrapf(err, "marshal ip address %v", address.Name)
		}

		w.IPAddrFiles[i] = &asset.File{
			Filename: filepath.Join(directory, fmt.Sprintf(ipAddressFileName, address.Name)),
			Data:     data,
		}
	}
	w.MachineFiles = make([]*asset.File, len(machines))
	for i, machineDef := range machines {
		data, err := yaml.Marshal(machineDef)
		if err != nil {
			return errors.Wrapf(err, "marshal worker machine %d", i)
		}

		padded := fmt.Sprintf(padFormat, i)
		w.MachineFiles[i] = &asset.File{
			Filename: filepath.Join(directory, fmt.Sprintf(workerMachineFileName, padded)),
			Data:     data,
		}
	}
	return nil
}

// Files returns the files generated by the asset.
func (w *Worker) Files() []*asset.File {
	files := make([]*asset.File, 0, 1+len(w.MachineConfigFiles)+len(w.MachineSetFiles))
	if w.UserDataFile != nil {
		files = append(files, w.UserDataFile)
	}
	files = append(files, w.MachineConfigFiles...)
	files = append(files, w.MachineSetFiles...)
	files = append(files, w.MachineFiles...)
	files = append(files, w.IPClaimFiles...)
	files = append(files, w.IPAddrFiles...)
	return files
}

// Load reads the asset files from disk.
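// It reports found=false (with no error) when the worker user-data manifest is absent;
// once that file is found, any failure loading the remaining manifests is returned with found=true.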
func (w *Worker) Load(f asset.FileFetcher) (found bool, err error) {
	file, err := f.FetchByName(filepath.Join(directory, workerUserDataFileName))
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}
	w.UserDataFile = file

	w.MachineConfigFiles, err = machineconfig.Load(f, "worker", directory)
	if err != nil {
		return true, err
	}

	fileList, err := f.FetchByPattern(filepath.Join(directory, workerMachineSetFileNamePattern))
	if err != nil {
		return true, err
	}

	w.MachineSetFiles = fileList

	fileList, err = f.FetchByPattern(filepath.Join(directory, workerMachineFileNamePattern))
	if err != nil {
		return true, err
	}
	w.MachineFiles = fileList

	fileList, err = f.FetchByPattern(filepath.Join(directory, workerIPClaimFileNamePattern))
	if err != nil {
		return true, err
	}
	w.IPClaimFiles = fileList

	fileList, err = f.FetchByPattern(filepath.Join(directory, workerIPAddressFileNamePattern))
	if err != nil {
		return true, err
	}
	w.IPAddrFiles = fileList

	return true, nil
}

// MachineSets returns MachineSet manifest structures.
func (w *Worker) MachineSets() ([]machinev1beta1.MachineSet, error) {
	scheme := runtime.NewScheme()
	baremetalapi.AddToScheme(scheme)
	ibmcloudapi.AddToScheme(scheme)
	libvirtapi.AddToScheme(scheme)
	ovirtproviderapi.AddToScheme(scheme)
	scheme.AddKnownTypes(machinev1alpha1.GroupVersion,
		&machinev1alpha1.OpenstackProviderSpec{},
	)
	scheme.AddKnownTypes(machinev1beta1.SchemeGroupVersion,
		&machinev1beta1.AWSMachineProviderConfig{},
		&machinev1beta1.VSphereMachineProviderSpec{},
		&machinev1beta1.AzureMachineProviderSpec{},
		&machinev1beta1.GCPMachineProviderSpec{},
	)
	machinev1.Install(scheme)
	scheme.AddKnownTypes(machinev1.GroupVersion,
		&machinev1.NutanixMachineProviderConfig{},
		&machinev1.PowerVSMachineProviderConfig{},
	)
	machinev1beta1.AddToScheme(scheme)
	decoder := serializer.NewCodecFactory(scheme).UniversalDecoder(
		baremetalprovider.SchemeGroupVersion,
		ibmcloudprovider.SchemeGroupVersion,
		libvirtprovider.SchemeGroupVersion,
		machinev1.GroupVersion,
		machinev1alpha1.GroupVersion,
		ovirtprovider.SchemeGroupVersion,
		machinev1beta1.SchemeGroupVersion,
	)

	machineSets := []machinev1beta1.MachineSet{}
	for i, file := range w.MachineSetFiles {
		machineSet := &machinev1beta1.MachineSet{}
		err := yaml.Unmarshal(file.Data, &machineSet)
		if err != nil {
			return machineSets, errors.Wrapf(err, "unmarshal worker %d", i)
		}

		obj, _, err := decoder.Decode(machineSet.Spec.Template.Spec.ProviderSpec.Value.Raw, nil, nil)
		if err != nil {
			return machineSets, errors.Wrapf(err, "unmarshal worker %d", i)
		}

		machineSet.Spec.Template.Spec.ProviderSpec.Value = &runtime.RawExtension{Object: obj}
		machineSets = append(machineSets, *machineSet)
	}

	return machineSets, nil
}