github.com/openshift/installer@v1.4.17/pkg/asset/machines/nutanix/machines.go

// Package nutanix generates Machine objects for the Nutanix platform.
package nutanix

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/utils/ptr"

	configv1 "github.com/openshift/api/config/v1"
	machinev1 "github.com/openshift/api/machine/v1"
	machineapi "github.com/openshift/api/machine/v1beta1"
	"github.com/openshift/installer/pkg/types"
	"github.com/openshift/installer/pkg/types/nutanix"
)

// Machines returns the list of machines and the ControlPlaneMachineSet for a machine pool.
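//
// A minimal sketch of a call site (illustrative only; infraID, installConfig,
// masterPool, and rhcosImageName stand in for values the installer resolves
// elsewhere, and "master-user-data" is the conventional master user-data
// secret name):
//
//	machines, cpms, err := nutanix.Machines(
//		infraID,
//		installConfig,
//		masterPool,
//		rhcosImageName,
//		"master",
//		"master-user-data",
//	)
//	if err != nil {
//		return err
//	}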
func Machines(clusterID string, config *types.InstallConfig, pool *types.MachinePool, osImage, role, userDataSecret string) ([]machineapi.Machine, *machinev1.ControlPlaneMachineSet, error) {
	if configPlatform := config.Platform.Name(); configPlatform != nutanix.Name {
		return nil, nil, fmt.Errorf("non-nutanix configuration: %q", configPlatform)
	}
	if poolPlatform := pool.Platform.Name(); poolPlatform != nutanix.Name {
		return nil, nil, fmt.Errorf("non-nutanix machine-pool: %q", poolPlatform)
	}
	platform := config.Platform.Nutanix
	mpool := pool.Platform.Nutanix

	failureDomains := make([]*nutanix.FailureDomain, 0, len(mpool.FailureDomains))
	for _, fdName := range mpool.FailureDomains {
		fd, err := platform.GetFailureDomainByName(fdName)
		if err != nil {
			return nil, nil, err
		}
		failureDomains = append(failureDomains, fd)
	}

	total := int64(1)
	if pool.Replicas != nil {
		total = *pool.Replicas
	}
	var machines []machineapi.Machine
	var machineSetProvider *machinev1.NutanixMachineProviderConfig
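	// When the pool spans failure domains, assign machines to domains
	// round-robin by replica index.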
	for idx := int64(0); idx < total; idx++ {
		var failureDomain *nutanix.FailureDomain
		if len(failureDomains) > 0 {
			failureDomain = failureDomains[idx%int64(len(failureDomains))]
		}
		provider, err := provider(clusterID, platform, mpool, osImage, userDataSecret, failureDomain)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to create provider: %w", err)
		}

		machine := machineapi.Machine{
			TypeMeta: metav1.TypeMeta{
				APIVersion: "machine.openshift.io/v1beta1",
				Kind:       "Machine",
			},
			ObjectMeta: metav1.ObjectMeta{
				Namespace: "openshift-machine-api",
				Name:      fmt.Sprintf("%s-%s-%d", clusterID, pool.Name, idx),
				Labels: map[string]string{
					"machine.openshift.io/cluster-api-cluster":      clusterID,
					"machine.openshift.io/cluster-api-machine-role": role,
					"machine.openshift.io/cluster-api-machine-type": role,
				},
			},
			Spec: machineapi.MachineSpec{
				ProviderSpec: machineapi.ProviderSpec{
					Value: &runtime.RawExtension{Object: provider},
				},
				// we don't need to set Versions, because we control those via operators.
			},
		}
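		// Keep the most recent provider config; it becomes the base of the
		// ControlPlaneMachineSet template below (failure-domain-specific
		// fields are cleared there when failure domains are in use).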
		machineSetProvider = provider.DeepCopy()
		machines = append(machines, machine)
	}

	replicas := int32(total)
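	// The ControlPlaneMachineSet lets the control-plane-machine-set operator
	// manage the master machines declaratively after installation.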
	controlPlaneMachineSet := &machinev1.ControlPlaneMachineSet{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "machine.openshift.io/v1",
			Kind:       "ControlPlaneMachineSet",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "openshift-machine-api",
			Name:      "cluster",
			Labels: map[string]string{
				"machine.openshift.io/cluster-api-cluster": clusterID,
			},
		},
		Spec: machinev1.ControlPlaneMachineSetSpec{
			Replicas: &replicas,
			State:    machinev1.ControlPlaneMachineSetStateActive,
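			// The selector must match the labels stamped on both the machines
			// above and the template below.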
			Selector: metav1.LabelSelector{
				MatchLabels: map[string]string{
					"machine.openshift.io/cluster-api-machine-role": role,
					"machine.openshift.io/cluster-api-machine-type": role,
					"machine.openshift.io/cluster-api-cluster":      clusterID,
				},
			},
			Template: machinev1.ControlPlaneMachineSetTemplate{
				MachineType: machinev1.OpenShiftMachineV1Beta1MachineType,
				OpenShiftMachineV1Beta1Machine: &machinev1.OpenShiftMachineV1Beta1MachineTemplate{
					ObjectMeta: machinev1.ControlPlaneMachineSetTemplateObjectMeta{
						Labels: map[string]string{
							"machine.openshift.io/cluster-api-cluster":      clusterID,
							"machine.openshift.io/cluster-api-machine-role": role,
							"machine.openshift.io/cluster-api-machine-type": role,
						},
					},
				},
			},
		},
	}

	if len(failureDomains) > 0 {
		fdRefs := make([]machinev1.NutanixFailureDomainReference, 0, len(failureDomains))
		for _, fd := range failureDomains {
			fdRefs = append(fdRefs, machinev1.NutanixFailureDomainReference{Name: fd.Name})
		}
		controlPlaneMachineSet.Spec.Template.OpenShiftMachineV1Beta1Machine.FailureDomains = &machinev1.FailureDomains{
			Platform: configv1.NutanixPlatformType,
			Nutanix:  fdRefs,
		}

		// Reset the providerSpec fields tied to a single failure domain; the
		// control-plane-machine-set operator fills them in per domain from the
		// references above.
		machineSetProvider.Cluster = machinev1.NutanixResourceIdentifier{}
		machineSetProvider.Subnets = []machinev1.NutanixResourceIdentifier{}
		machineSetProvider.FailureDomain = nil
	}

	controlPlaneMachineSet.Spec.Template.OpenShiftMachineV1Beta1Machine.Spec = machineapi.MachineSpec{
		ProviderSpec: machineapi.ProviderSpec{
			Value: &runtime.RawExtension{Object: machineSetProvider},
		},
	}

	return machines, controlPlaneMachineSet, nil
}

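// provider builds the NutanixMachineProviderConfig for a single machine,
// applying failure-domain overrides (Prism Element, subnets, storage
// containers, data-source images) when a failure domain is supplied.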
func provider(clusterID string, platform *nutanix.Platform, mpool *nutanix.MachinePool, osImage string, userDataSecret string, failureDomain *nutanix.FailureDomain) (*machinev1.NutanixMachineProviderConfig, error) {
	// cluster: default to the first configured Prism Element; a failure
	// domain, when given, selects its own.
	peUUID := platform.PrismElements[0].UUID
	if failureDomain != nil {
		peUUID = failureDomain.PrismElement.UUID
	}

	// subnets: likewise overridden by the failure domain.
	subnets := []machinev1.NutanixResourceIdentifier{}
	subnetUUIDs := platform.SubnetUUIDs
	if failureDomain != nil {
		subnetUUIDs = failureDomain.SubnetUUIDs
	}
	for _, subnetUUID := range subnetUUIDs {
		subnet := machinev1.NutanixResourceIdentifier{
			Type: machinev1.NutanixIdentifierUUID,
			// ptr.To copies the value, avoiding aliasing of the loop variable
			// on Go versions before 1.22.
			UUID: ptr.To(subnetUUID),
		}
		subnets = append(subnets, subnet)
	}

	providerCfg := &machinev1.NutanixMachineProviderConfig{
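		// The OS image is referenced by name (the installer uploads the RHCOS
		// image under this name), while cluster and subnets use UUIDs.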
		TypeMeta: metav1.TypeMeta{
			APIVersion: machinev1.GroupVersion.String(),
			Kind:       "NutanixMachineProviderConfig",
		},
		UserDataSecret:    &corev1.LocalObjectReference{Name: userDataSecret},
		CredentialsSecret: &corev1.LocalObjectReference{Name: nutanix.CredentialsSecretName},
		Image: machinev1.NutanixResourceIdentifier{
			Type: machinev1.NutanixIdentifierName,
			Name: &osImage,
		},
		Subnets:        subnets,
		VCPUsPerSocket: int32(mpool.NumCoresPerSocket),
		VCPUSockets:    int32(mpool.NumCPUs),
		MemorySize:     resource.MustParse(fmt.Sprintf("%dMi", mpool.MemoryMiB)),
		Cluster: machinev1.NutanixResourceIdentifier{
			Type: machinev1.NutanixIdentifierUUID,
			UUID: &peUUID,
		},
		SystemDiskSize: resource.MustParse(fmt.Sprintf("%dGi", mpool.OSDisk.DiskSizeGiB)),
		GPUs:           mpool.GPUs,
	}

	// FailureDomain
	if failureDomain != nil {
		providerCfg.FailureDomain = &machinev1.NutanixFailureDomainReference{Name: failureDomain.Name}
	}

	if len(mpool.BootType) != 0 {
		providerCfg.BootType = mpool.BootType
	}

	if mpool.Project != nil && mpool.Project.Type == machinev1.NutanixIdentifierUUID {
		providerCfg.Project = machinev1.NutanixResourceIdentifier{
			Type: machinev1.NutanixIdentifierUUID,
			UUID: mpool.Project.UUID,
		}
	}

	if len(mpool.Categories) > 0 {
		providerCfg.Categories = mpool.Categories
	}

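	// Translate install-config data disks into provider-spec disks, resolving
	// failure-domain-scoped storage containers and data-source images to UUIDs.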
	for _, disk := range mpool.DataDisks {
		providerDisk := machinev1.NutanixVMDisk{
			DiskSize:         disk.DiskSize,
			DeviceProperties: disk.DeviceProperties,
		}

		if disk.StorageConfig != nil {
			providerDisk.StorageConfig = &machinev1.NutanixVMStorageConfig{
				DiskMode: disk.StorageConfig.DiskMode,
			}

			if disk.StorageConfig.StorageContainer != nil {
				scRef := disk.StorageConfig.StorageContainer
				if scRef.ReferenceName != "" && failureDomain != nil {
					// Resolve the reference name against the failure domain,
					// assigning to the outer scRef so the resolved storage
					// container is actually used below.
					var err error
					scRef, err = platform.GetStorageContainerFromFailureDomain(failureDomain.Name, scRef.ReferenceName)
					if err != nil {
						return nil, fmt.Errorf("storage container with reference name %q not found in failureDomain %q: %w", disk.StorageConfig.StorageContainer.ReferenceName, failureDomain.Name, err)
					}
				}

				providerDisk.StorageConfig.StorageContainer = &machinev1.NutanixStorageResourceIdentifier{
					Type: machinev1.NutanixIdentifierUUID,
					UUID: ptr.To(scRef.UUID),
				}
			}
		}

		if disk.DataSourceImage != nil {
			imgRef := disk.DataSourceImage
			if imgRef.ReferenceName != "" && failureDomain != nil {
				// Likewise, assign to the outer imgRef so the resolved image
				// UUID is used below.
				var err error
				imgRef, err = platform.GetDataSourceImageFromFailureDomain(failureDomain.Name, imgRef.ReferenceName)
				if err != nil {
					return nil, fmt.Errorf("dataSource image with reference name %q not found in failureDomain %q: %w", disk.DataSourceImage.ReferenceName, failureDomain.Name, err)
				}
			}

			providerDisk.DataSource = &machinev1.NutanixResourceIdentifier{
				Type: machinev1.NutanixIdentifierUUID,
				UUID: ptr.To(imgRef.UUID),
			}
		}

		providerCfg.DataDisks = append(providerCfg.DataDisks, providerDisk)
	}

	return providerCfg, nil
}

// ConfigMasters is a no-op for Nutanix: the generated master machines need no
// additional per-platform configuration (other platforms use this hook to set
// public IPs and load balancer attachments).
func ConfigMasters(machines []machineapi.Machine, clusterID string) {
}