github.com/openshift/installer@v1.4.17/pkg/asset/machines/openstack/machines.go

// Package openstack generates Machine objects for openstack.
package openstack

import (
	"context"
	"fmt"
	"net/http"

	"github.com/gophercloud/gophercloud/v2"
	netext "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions"
	"github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets"
	"github.com/gophercloud/utils/v2/openstack/clientconfig"
	"github.com/pkg/errors"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"

	v1 "github.com/openshift/api/config/v1"
	machinev1 "github.com/openshift/api/machine/v1"
	machinev1alpha1 "github.com/openshift/api/machine/v1alpha1"
	machineapi "github.com/openshift/api/machine/v1beta1"
	"github.com/openshift/installer/pkg/types"
	"github.com/openshift/installer/pkg/types/openstack"
	openstackdefaults "github.com/openshift/installer/pkg/types/openstack/defaults"
)

const (
	// TODO(flaper87): We're choosing to hardcode these values to make
	// the environment more predictable. We expect there to be a secret
	// named `openstack-cloud-credentials` and a cloud named `openstack`
	// in the clouds file stored in this secret.
	cloudsSecret          = "openstack-cloud-credentials"
	cloudsSecretNamespace = "openshift-machine-api"

	// CloudName is a constant containing the name of the cloud used in the internal cloudsSecret
	CloudName = "openstack"
)

// Machines returns a list of machines for a machinepool.
func Machines(ctx context.Context, clusterID string, config *types.InstallConfig, pool *types.MachinePool, osImage, role, userDataSecret string) ([]machineapi.Machine, *machinev1.ControlPlaneMachineSet, error) {
	if configPlatform := config.Platform.Name(); configPlatform != openstack.Name {
		return nil, nil, fmt.Errorf("non-OpenStack configuration: %q", configPlatform)
	}
	if poolPlatform := pool.Platform.Name(); poolPlatform != openstack.Name {
		return nil, nil, fmt.Errorf("non-OpenStack machine-pool: %q", poolPlatform)
	}

	mpool := pool.Platform.OpenStack

	total := int64(1)
	if pool.Replicas != nil {
		total = *pool.Replicas
	}
	machines := make([]machineapi.Machine, 0, total)
	failureDomains := failureDomainsFromSpec(*mpool)
	for idx := int64(0); idx < total; idx++ {
		failureDomain := failureDomains[uint(idx)%uint(len(failureDomains))]
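		// Illustrative note: the modulo above spreads machines round-robin
		// across the available failure domains. With three replicas and two
		// failure domains (hypothetical zone names), the assignment would be:
		//
		//	<clusterID>-master-0 -> az-0
		//	<clusterID>-master-1 -> az-1
		//	<clusterID>-master-2 -> az-0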

		providerSpec, err := generateProviderSpec(
			ctx,
			clusterID,
			config.Platform.OpenStack,
			mpool,
			osImage,
			role,
			userDataSecret,
			failureDomain,
		)
		if err != nil {
			return nil, nil, err
		}

		machine := machineapi.Machine{
			TypeMeta: metav1.TypeMeta{
				APIVersion: "machine.openshift.io/v1beta1",
				Kind:       "Machine",
			},
			ObjectMeta: metav1.ObjectMeta{
				Namespace: "openshift-machine-api",
				Name:      fmt.Sprintf("%s-%s-%d", clusterID, pool.Name, idx),
				Labels: map[string]string{
					"machine.openshift.io/cluster-api-cluster":      clusterID,
					"machine.openshift.io/cluster-api-machine-role": role,
					"machine.openshift.io/cluster-api-machine-type": role,
				},
			},
			Spec: machineapi.MachineSpec{
				ProviderSpec: machineapi.ProviderSpec{
					Value: &runtime.RawExtension{Object: providerSpec},
				},
				// we don't need to set Versions, because we control those via operators.
			},
		}
		machines = append(machines, machine)
	}

	machineSetProviderSpec, err := generateProviderSpec(
		ctx,
		clusterID,
		config.Platform.OpenStack,
		mpool,
		osImage,
		role,
		userDataSecret,
		machinev1.OpenStackFailureDomain{RootVolume: &machinev1.RootVolume{}},
	)
	if err != nil {
		return nil, nil, err
	}

	replicas := int32(total)

	controlPlaneMachineSet := &machinev1.ControlPlaneMachineSet{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "machine.openshift.io/v1",
			Kind:       "ControlPlaneMachineSet",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "openshift-machine-api",
			Name:      "cluster",
			Labels: map[string]string{
				"machine.openshift.io/cluster-api-cluster": clusterID,
			},
		},
		Spec: machinev1.ControlPlaneMachineSetSpec{
			State:    machinev1.ControlPlaneMachineSetStateActive,
			Replicas: &replicas,
			Selector: metav1.LabelSelector{
				MatchLabels: map[string]string{
					"machine.openshift.io/cluster-api-cluster":      clusterID,
					"machine.openshift.io/cluster-api-machine-role": role,
					"machine.openshift.io/cluster-api-machine-type": role,
				},
			},
			Template: machinev1.ControlPlaneMachineSetTemplate{
				MachineType: machinev1.OpenShiftMachineV1Beta1MachineType,
				OpenShiftMachineV1Beta1Machine: &machinev1.OpenShiftMachineV1Beta1MachineTemplate{
					ObjectMeta: machinev1.ControlPlaneMachineSetTemplateObjectMeta{
						Labels: map[string]string{
							"machine.openshift.io/cluster-api-cluster":      clusterID,
							"machine.openshift.io/cluster-api-machine-role": role,
							"machine.openshift.io/cluster-api-machine-type": role,
						},
					},
					Spec: machineapi.MachineSpec{
						ProviderSpec: machineapi.ProviderSpec{
							Value: &runtime.RawExtension{Object: machineSetProviderSpec},
						},
					},
				},
			},
		},
	}

	if CPMSFailureDomains := pruneFailureDomains(failureDomains); CPMSFailureDomains != nil {
		controlPlaneMachineSet.Spec.Template.OpenShiftMachineV1Beta1Machine.FailureDomains = &machinev1.FailureDomains{
			Platform:  v1.OpenStackPlatformType,
			OpenStack: CPMSFailureDomains,
		}
	}
	return machines, controlPlaneMachineSet, nil
}

func generateProviderSpec(ctx context.Context, clusterID string, platform *openstack.Platform, mpool *openstack.MachinePool, osImage string, role, userDataSecret string, failureDomain machinev1.OpenStackFailureDomain) (*machinev1alpha1.OpenstackProviderSpec, error) {
	var controlPlaneNetwork machinev1alpha1.NetworkParam
	additionalNetworks := make([]machinev1alpha1.NetworkParam, 0, len(mpool.AdditionalNetworkIDs))
	primarySubnet := ""

	if platform.ControlPlanePort != nil {
		var subnets []machinev1alpha1.SubnetParam
		controlPlanePort := platform.ControlPlanePort
		networkID := controlPlanePort.Network.ID

		for _, fixedIP := range controlPlanePort.FixedIPs {
			subnets = append(subnets, machinev1alpha1.SubnetParam{
				Filter: machinev1alpha1.SubnetFilter{ID: fixedIP.Subnet.ID, Name: fixedIP.Subnet.Name},
			})
		}

		// In a dual-stack cluster, when the network ID or Name is not specified, the network ID needs to
		// be discovered and added to the ProviderSpec for MAPO to create one unique Port with two addresses.
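		// For illustration (hypothetical names; the exact install-config
		// layout lives in pkg/types/openstack), a dual-stack control-plane
		// port might be declared like:
		//
		//	controlPlanePort:
		//	  fixedIPs:
		//	  - subnet:
		//	      name: subnet-v4
		//	  - subnet:
		//	      name: subnet-v6
		//
		// Neither the network name nor its ID is set, so the network is
		// looked up from the first subnet via getNetworkFromSubnet below.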
		var err error
		if networkID == "" && controlPlanePort.Network.Name == "" && len(controlPlanePort.FixedIPs) == 2 {
			networkID, err = getNetworkFromSubnet(ctx, controlPlanePort.FixedIPs[0], platform.Cloud)
			if err != nil {
				return nil, err
			}
		}

		controlPlaneNetwork = machinev1alpha1.NetworkParam{
			Subnets: subnets,
			Filter: machinev1alpha1.Filter{
				Name: controlPlanePort.Network.Name,
				ID:   networkID,
			},
		}
		primarySubnet = controlPlanePort.FixedIPs[0].Subnet.ID
	} else {
		controlPlaneNetwork = machinev1alpha1.NetworkParam{
			Subnets: []machinev1alpha1.SubnetParam{
				{
					Filter: machinev1alpha1.SubnetFilter{
						Tags: fmt.Sprintf("openshiftClusterID=%s", clusterID),
					},
				},
			},
		}
	}

	for _, networkID := range mpool.AdditionalNetworkIDs {
		additionalNetworks = append(additionalNetworks, machinev1alpha1.NetworkParam{
			UUID:                  networkID,
			NoAllowedAddressPairs: true,
		})
	}

	securityGroups := []machinev1alpha1.SecurityGroupParam{
		{
			Name: fmt.Sprintf("%s-%s", clusterID, role),
		},
	}
	for _, sg := range mpool.AdditionalSecurityGroupIDs {
		securityGroups = append(securityGroups, machinev1alpha1.SecurityGroupParam{
			UUID: sg,
		})
	}

	serverGroupName := clusterID + "-" + role
	// We initially used the AZ name as part of the server group name for the masters,
	// but we realized that it was not useful. Whether or not the AZ is specified, the
	// masters will be spread across multiple hosts by default by the Nova scheduler
	// (the policy can be changed via `serverGroupPolicy` in install-config.yaml).
	// For the workers, we still use the AZ name as part of the server group name
	// so the user can control the scheduling policy per AZ and change the MachineSets
	// if needed as a day-2 operation.
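	// For example (hypothetical names): with clusterID "demo-abc12" and
	// availability zone "az-1", masters land in server group "demo-abc12-master"
	// while workers land in "demo-abc12-worker-az-1".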
	if role == "worker" && failureDomain.AvailabilityZone != "" {
		serverGroupName += "-" + failureDomain.AvailabilityZone
	}

	spec := machinev1alpha1.OpenstackProviderSpec{
		TypeMeta: metav1.TypeMeta{
			APIVersion: machinev1alpha1.GroupVersion.String(),
			Kind:       "OpenstackProviderSpec",
		},
		Flavor:           mpool.FlavorName,
		CloudName:        CloudName,
		CloudsSecret:     &corev1.SecretReference{Name: cloudsSecret, Namespace: cloudsSecretNamespace},
		UserDataSecret:   &corev1.SecretReference{Name: userDataSecret},
		Networks:         append([]machinev1alpha1.NetworkParam{controlPlaneNetwork}, additionalNetworks...),
		PrimarySubnet:    primarySubnet,
		AvailabilityZone: failureDomain.AvailabilityZone,
		SecurityGroups:   securityGroups,
		ServerGroupName:  serverGroupName,
		Trunk:            false,
		Tags: []string{
			fmt.Sprintf("openshiftClusterID=%s", clusterID),
		},
		ServerMetadata: map[string]string{
			"Name":               fmt.Sprintf("%s-%s", clusterID, role),
			"openshiftClusterID": clusterID,
		},
	}
	if mpool.RootVolume != nil {
		spec.RootVolume = &machinev1alpha1.RootVolume{
			Size:       mpool.RootVolume.Size,
			SourceUUID: osImage,
			VolumeType: failureDomain.RootVolume.VolumeType,
			Zone:       failureDomain.RootVolume.AvailabilityZone,
		}
	} else {
		spec.Image = osImage
	}
	return &spec, nil
}

// failureDomainIsEmpty returns true if the failure domain only contains nil or
// zero values.
func failureDomainIsEmpty(failureDomain machinev1.OpenStackFailureDomain) bool {
	if failureDomain.AvailabilityZone == "" {
		if failureDomain.RootVolume == nil {
			return true
		}
		if failureDomain.RootVolume.AvailabilityZone == "" && failureDomain.RootVolume.VolumeType == "" {
			return true
		}
	}
	return false
}

// pruneFailureDomains returns nil if the only failure domain in the given
// slice is empty. One empty failure domain is not syntactically valid in CPMS.
func pruneFailureDomains(failureDomains []machinev1.OpenStackFailureDomain) []machinev1.OpenStackFailureDomain {
	if len(failureDomains) == 1 && failureDomainIsEmpty(failureDomains[0]) {
		return nil
	}
	return failureDomains
}

// failureDomainsFromSpec returns as many failure domains as there are zones in
// the given machine-pool. The returned failure domains have nil RootVolume if
// and only if the given machine-pool has nil RootVolume. The returned failure
// domain slice is guaranteed to have at least one element.
func failureDomainsFromSpec(mpool openstack.MachinePool) []machinev1.OpenStackFailureDomain {
	var numberOfFailureDomains int
	if mpool.RootVolume != nil {
		// At this point, after validation, compute availability zones,
		// storage availability zones and root volume types must all be
		// equal in number. However, we want to accept the case where any
		// of them has zero or one value (which means: apply the same
		// value to all failure domains).
		var (
			highestCardinality      int
			highestCardinalityField string
		)
		for field, cardinality := range map[string]int{
			"compute availability zones": len(mpool.Zones),
			"storage availability zones": len(mpool.RootVolume.Zones),
			"root volume types":          len(mpool.RootVolume.Types),
		} {
			if cardinality > 1 {
				if highestCardinality > 1 && cardinality != highestCardinality {
					panic(highestCardinalityField + " and " + field + " should have equal length")
				}
				highestCardinality = cardinality
				highestCardinalityField = field
			}
		}
		numberOfFailureDomains = highestCardinality
	} else {
		numberOfFailureDomains = len(mpool.Zones)
	}

	// Having no failure domain is equivalent to having one failure domain with the default values.
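
// For illustration (hypothetical values): a machine-pool with Zones
// ["az-a", "az-b", "az-c"] and a RootVolume carrying a single type
// ["performance"] and no storage zones yields three failure domains from the
// function above, one per compute zone, each using the default root volume
// availability zone and the volume type "performance".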
	if numberOfFailureDomains < 1 {
		numberOfFailureDomains = 1
	}

	failureDomains := make([]machinev1.OpenStackFailureDomain, numberOfFailureDomains)

	for i := range failureDomains {
		switch len(mpool.Zones) {
		case 0:
			failureDomains[i].AvailabilityZone = openstackdefaults.DefaultComputeAZ()
		case 1:
			failureDomains[i].AvailabilityZone = mpool.Zones[0]
		default:
			failureDomains[i].AvailabilityZone = mpool.Zones[i]
		}

		if mpool.RootVolume != nil {
			switch len(mpool.RootVolume.Zones) {
			case 0:
				failureDomains[i].RootVolume = &machinev1.RootVolume{
					AvailabilityZone: openstackdefaults.DefaultRootVolumeAZ(),
				}
			case 1:
				failureDomains[i].RootVolume = &machinev1.RootVolume{
					AvailabilityZone: mpool.RootVolume.Zones[0],
				}
			default:
				failureDomains[i].RootVolume = &machinev1.RootVolume{
					AvailabilityZone: mpool.RootVolume.Zones[i],
				}
			}

			switch len(mpool.RootVolume.Types) {
			case 0:
				panic("Root volume types should have been validated to have at least one element")
			case 1:
				failureDomains[i].RootVolume.VolumeType = mpool.RootVolume.Types[0]
			default:
				failureDomains[i].RootVolume.VolumeType = mpool.RootVolume.Types[i]
			}
		}
	}
	return failureDomains
}

// CheckNetworkExtensionAvailability interrogates the OpenStack API to validate
// the availability of a given Neutron extension.
// The `opts` parameter is provided for external consumers needing to configure
// the client, e.g. with custom certs. If unspecified (nil), a default client is
// built based on the specified `cloud`.
func CheckNetworkExtensionAvailability(ctx context.Context, cloud, alias string, opts *clientconfig.ClientOpts) (bool, error) {
	if opts == nil {
		opts = openstackdefaults.DefaultClientOpts(cloud)
	}
	conn, err := openstackdefaults.NewServiceClient(ctx, "network", opts)
	if err != nil {
		return false, err
	}

	res := netext.Get(ctx, conn, alias)
	if res.Err != nil {
		if gophercloud.ResponseCodeIs(res.Err, http.StatusNotFound) {
			return false, nil
		}
		return false, res.Err
	}

	return true, nil
}

func getNetworkFromSubnet(ctx context.Context, fixedIP openstack.FixedIP, cloud string) (string, error) {
	opts := openstackdefaults.DefaultClientOpts(cloud)
	conn, err := openstackdefaults.NewServiceClient(ctx, "network", opts)
	if err != nil {
		return "", err
	}
	page, err := subnets.List(conn, subnets.ListOpts{Name: fixedIP.Subnet.Name, ID: fixedIP.Subnet.ID}).AllPages(ctx)
	if err != nil {
		return "", errors.Wrap(err, "failed to get subnet list")
	}
	subnetList, err := subnets.ExtractSubnets(page)
	if err != nil {
		return "", errors.Wrap(err, "failed to extract subnets list")
	}
	if len(subnetList) == 0 {
		return "", errors.New("subnet not found")
	}
	return subnetList[0].NetworkID, nil
}

// ConfigMasters sets the PublicIP flag and assigns a set of load balancers to the given machines.
// It is currently a no-op for OpenStack.
func ConfigMasters(machines []machineapi.Machine, clusterID string) {
	/*for _, machine := range machines {
		providerSpec := machine.Spec.ProviderSpec.Value.Object.(*openstackprovider.OpenstackProviderSpec)
	}*/
}
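
// Example usage (illustrative sketch; the calling asset, install config and
// secret names shown here are hypothetical):
//
//	machines, cpms, err := Machines(ctx, clusterID, installConfig, pool,
//		osImage, "master", "master-user-data")
//	if err != nil {
//		// handle error
//	}
//	_ = cpms // ControlPlaneMachineSet manifest generated alongside the machines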