github.com/openshift/installer@v1.4.17/pkg/asset/machines/vsphere/machines.go (about)

     1  // Package vsphere generates Machine objects for vsphere.
     2  package vsphere
     3  
     4  import (
     5  	"fmt"
     6  	"path"
     7  	"strconv"
     8  	"strings"
     9  
    10  	"github.com/pkg/errors"
    11  	"github.com/sirupsen/logrus"
    12  	corev1 "k8s.io/api/core/v1"
    13  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    14  	"k8s.io/apimachinery/pkg/runtime"
    15  	ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1"
    16  
    17  	v1 "github.com/openshift/api/config/v1"
    18  	machinev1 "github.com/openshift/api/machine/v1"
    19  	machineapi "github.com/openshift/api/machine/v1beta1"
    20  	"github.com/openshift/installer/pkg/types"
    21  	"github.com/openshift/installer/pkg/types/vsphere"
    22  )
    23  
// MachineData contains all result output from the Machines() function.
type MachineData struct {
	// Machines holds one Machine object per requested replica.
	Machines               []machineapi.Machine
	// ControlPlaneMachineSet is the CPMS built from the failure domains
	// observed while generating the machines above.
	ControlPlaneMachineSet *machinev1.ControlPlaneMachineSet
	// IPClaims and IPAddresses are the CAPI IPAM objects generated for
	// hosts configured with static IP addresses; empty otherwise.
	IPClaims               []ipamv1.IPAddressClaim
	IPAddresses            []ipamv1.IPAddress
}
    31  
// Machines returns a list of machines for a machinepool.
//
// For each replica it resolves a failure domain — round-robin over the
// pool's zone list, overridden by a matching static-IP host entry — builds
// a VSphereMachineProviderSpec for that zone, and collects any generated
// CAPI IPAddressClaim/IPAddress objects. It also builds the
// ControlPlaneMachineSet spanning all failure domains that were used.
//
// role selects which platform host entries apply ("master" selects
// control-plane hosts, "worker" selects compute hosts). An error is
// returned when either platform is not vSphere, a requested zone is not
// defined in the topology, or the provider spec cannot be built.
func Machines(clusterID string, config *types.InstallConfig, pool *types.MachinePool, osImage, role, userDataSecret string) (*MachineData, error) {
	data := &MachineData{}
	if configPlatform := config.Platform.Name(); configPlatform != vsphere.Name {
		return data, fmt.Errorf("non vsphere configuration: %q", configPlatform)
	}
	if poolPlatform := pool.Platform.Name(); poolPlatform != vsphere.Name {
		return data, fmt.Errorf("non-VSphere machine-pool: %q", poolPlatform)
	}

	var failureDomain vsphere.FailureDomain
	platform := config.Platform.VSphere
	mpool := pool.Platform.VSphere
	// Default to a single replica when the pool does not specify one.
	replicas := int32(1)

	// NOTE(review): assumes mpool.Zones is non-empty (presumably defaulted
	// earlier in the install flow); an empty zone list would panic on the
	// modulo below — confirm upstream defaulting/validation.
	numOfZones := len(mpool.Zones)

	zones, err := getDefinedZonesFromTopology(platform)
	if err != nil {
		return data, err
	}

	if pool.Replicas != nil {
		replicas = int32(*pool.Replicas)
	}

	// Create hosts to populate from.  Copying so we can remove without changing original
	// and only put in the ones that match the role.
	var hosts []*vsphere.Host
	if config.Platform.VSphere.Hosts != nil {
		for _, host := range config.Platform.VSphere.Hosts {
			if (host.IsCompute() && role == "worker") || (host.IsControlPlane() && role == "master") {
				// NOTE(review): assumes host.NetworkDevice is non-nil with at
				// least one IP address; an entry without them would panic in
				// this debug log — confirm install-config validation.
				logrus.Debugf("Adding host for static ip assignment: %v - %v", host.FailureDomain, host.NetworkDevice.IPAddrs[0])
				hosts = append(hosts, host)
			}
		}
	}

	failureDomains := []machinev1.VSphereFailureDomain{}

	vsphereMachineProvider := &machineapi.VSphereMachineProviderSpec{}

	for idx := int32(0); idx < replicas; idx++ {
		logrus.Debugf("Creating %v machine %v", role, idx)
		var host *vsphere.Host
		// Round-robin across the pool's zones; a host entry carrying an
		// explicit failure domain overrides the round-robin pick.
		desiredZone := mpool.Zones[int(idx)%numOfZones]
		if hosts != nil && int(idx) < len(hosts) {
			host = hosts[idx]
			if host.FailureDomain != "" {
				desiredZone = host.FailureDomain
			}
		}
		logrus.Debugf("Desired zone: %v", desiredZone)

		if _, exists := zones[desiredZone]; !exists {
			return data, errors.Errorf("zone [%s] specified by machinepool is not defined", desiredZone)
		}

		failureDomain = zones[desiredZone]

		machineLabels := map[string]string{
			"machine.openshift.io/cluster-api-cluster":      clusterID,
			"machine.openshift.io/cluster-api-machine-role": role,
			"machine.openshift.io/cluster-api-machine-type": role,
		}

		// Record each distinct failure domain once for the CPMS below.
		if !hasFailureDomain(failureDomains, failureDomain.Name) {
			failureDomains = append(failureDomains, machinev1.VSphereFailureDomain{
				Name: failureDomain.Name,
			})
		}

		// A topology template wins; otherwise derive the image name from
		// the base os image plus the zone's region/zone identifiers.
		osImageForZone := failureDomain.Topology.Template
		if failureDomain.Topology.Template == "" {
			osImageForZone = fmt.Sprintf("%s-%s-%s", osImage, failureDomain.Region, failureDomain.Zone)
		}

		vcenter, err := getVCenterFromServerName(failureDomain.Server, platform)
		if err != nil {
			return data, errors.Wrap(err, "unable to find vCenter in failure domains")
		}
		provider, err := provider(clusterID, vcenter, failureDomain, mpool, osImageForZone, userDataSecret)
		if err != nil {
			return data, errors.Wrap(err, "failed to create provider")
		}

		machine := machineapi.Machine{
			TypeMeta: metav1.TypeMeta{
				APIVersion: "machine.openshift.io/v1beta1",
				Kind:       "Machine",
			},
			ObjectMeta: metav1.ObjectMeta{
				Namespace: "openshift-machine-api",
				Name:      fmt.Sprintf("%s-%s-%d", clusterID, pool.Name, idx),
				Labels:    machineLabels,
			},
			Spec: machineapi.MachineSpec{
				ProviderSpec: machineapi.ProviderSpec{
					Value: &runtime.RawExtension{Object: provider},
				},
				// we don't need to set Versions, because we control those via operators.
			},
		}

		// Apply static IP if configured
		claim, address, err := applyNetworkConfig(host, provider, machine)
		if err != nil {
			return data, err
		} else if claim != nil && address != nil {
			data.IPClaims = append(data.IPClaims, claim...)
			data.IPAddresses = append(data.IPAddresses, address...)
		}
		data.Machines = append(data.Machines, machine)

		// Keep a copy of the last replica's provider spec; it seeds the
		// ControlPlaneMachineSet template below.
		vsphereMachineProvider = provider.DeepCopy()
	}

	// when multiple zones are defined, network and workspace are derived from the topology
	origProv := vsphereMachineProvider.DeepCopy()
	if len(failureDomains) >= 1 {
		// Blank out per-zone fields so the CPMS template stays zone-neutral;
		// the CPMS failure-domain injection supplies them per machine.
		vsphereMachineProvider.Network = machineapi.NetworkSpec{}
		vsphereMachineProvider.Workspace = &machineapi.Workspace{}
		vsphereMachineProvider.Template = ""
	}

	// Only set AddressesFromPools and Nameservers if AddressesFromPools is > 0, else revert to
	// the older static IP manifest way.
	if len(hosts) > 0 {
		// NOTE(review): assumes the provider spec built above has at least
		// one network device (i.e. the topology defines a network) — confirm;
		// an empty Devices slice would panic here.
		if len(origProv.Network.Devices[0].AddressesFromPools) > 0 {
			vsphereMachineProvider.Network.Devices = []machineapi.NetworkDeviceSpec{
				{
					AddressesFromPools: origProv.Network.Devices[0].AddressesFromPools,
					Nameservers:        origProv.Network.Devices[0].Nameservers,
				},
			}
		} else {
			// Older static IP config, lets remove network since it'll come from FD
			vsphereMachineProvider.Network = machineapi.NetworkSpec{}
		}
	}

	data.ControlPlaneMachineSet = &machinev1.ControlPlaneMachineSet{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "machine.openshift.io/v1",
			Kind:       "ControlPlaneMachineSet",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "openshift-machine-api",
			Name:      "cluster",
			Labels: map[string]string{
				"machine.openshift.io/cluster-api-cluster": clusterID,
			},
		},
		Spec: machinev1.ControlPlaneMachineSetSpec{
			Replicas: &replicas,
			State:    machinev1.ControlPlaneMachineSetStateActive,
			Selector: metav1.LabelSelector{
				MatchLabels: map[string]string{
					"machine.openshift.io/cluster-api-machine-role": role,
					"machine.openshift.io/cluster-api-machine-type": role,
					"machine.openshift.io/cluster-api-cluster":      clusterID,
				},
			},
			Template: machinev1.ControlPlaneMachineSetTemplate{
				MachineType: machinev1.OpenShiftMachineV1Beta1MachineType,
				OpenShiftMachineV1Beta1Machine: &machinev1.OpenShiftMachineV1Beta1MachineTemplate{
					FailureDomains: &machinev1.FailureDomains{
						Platform: v1.VSpherePlatformType,
						VSphere:  failureDomains,
					},
					ObjectMeta: machinev1.ControlPlaneMachineSetTemplateObjectMeta{
						Labels: map[string]string{
							"machine.openshift.io/cluster-api-cluster":      clusterID,
							"machine.openshift.io/cluster-api-machine-role": role,
							"machine.openshift.io/cluster-api-machine-type": role,
						},
					},
					Spec: machineapi.MachineSpec{
						ProviderSpec: machineapi.ProviderSpec{
							Value: &runtime.RawExtension{Object: vsphereMachineProvider},
						},
					},
				},
			},
		},
	}

	return data, nil
}
   221  
   222  // applyNetworkConfig this function will apply the static ip configuration to the networkDevice
   223  // field in the provider spec.  The function will use the desired zone to determine which config
   224  // to apply and then remove that host config from the hosts array.
   225  func applyNetworkConfig(host *vsphere.Host, provider *machineapi.VSphereMachineProviderSpec, machine machineapi.Machine) ([]ipamv1.IPAddressClaim, []ipamv1.IPAddress, error) {
   226  	var ipClaims []ipamv1.IPAddressClaim
   227  	var ipAddrs []ipamv1.IPAddress
   228  	if host != nil {
   229  		networkDevice := host.NetworkDevice
   230  		if networkDevice != nil {
   231  			for idx, address := range networkDevice.IPAddrs {
   232  				provider.Network.Devices[0].Nameservers = networkDevice.Nameservers
   233  				provider.Network.Devices[0].AddressesFromPools = append(provider.Network.Devices[0].AddressesFromPools, machineapi.AddressesFromPool{
   234  					Group:    "installer.openshift.io",
   235  					Name:     fmt.Sprintf("default-%d", idx),
   236  					Resource: "IPPool",
   237  				},
   238  				)
   239  
   240  				// Generate the capi networking objects
   241  				slashIndex := strings.Index(address, "/")
   242  				ipAddress := address[0:slashIndex]
   243  				prefix, err := strconv.Atoi(address[slashIndex+1:])
   244  				if err != nil {
   245  					return nil, nil, errors.Wrap(err, "unable to determine address prefix")
   246  				}
   247  				ipClaim, ipAddr := generateCapiNetwork(machine.Name, ipAddress, networkDevice.Gateway, prefix, 0, idx)
   248  				ipClaims = append(ipClaims, *ipClaim)
   249  				ipAddrs = append(ipAddrs, *ipAddr)
   250  			}
   251  		}
   252  	}
   253  
   254  	return ipClaims, ipAddrs, nil
   255  }
   256  
   257  // generateCapiNetwork this function will create IPAddressClaim and IPAddress for the specified information.
   258  func generateCapiNetwork(machineName, ipAddress, gateway string, prefix, deviceIndex, ipIndex int) (*ipamv1.IPAddressClaim, *ipamv1.IPAddress) {
   259  	// Generate PoolRef
   260  	apigroup := "installer.openshift.io"
   261  	poolRef := corev1.TypedLocalObjectReference{
   262  		APIGroup: &apigroup,
   263  		Kind:     "IPPool",
   264  		Name:     fmt.Sprintf("default-%d", ipIndex),
   265  	}
   266  
   267  	// Generate IPAddressClaim
   268  	ipclaim := &ipamv1.IPAddressClaim{
   269  		TypeMeta: metav1.TypeMeta{
   270  			APIVersion: "ipam.cluster.x-k8s.io/v1beta1",
   271  			Kind:       "IPAddressClaim",
   272  		},
   273  		ObjectMeta: metav1.ObjectMeta{
   274  			Finalizers: []string{
   275  				machineapi.IPClaimProtectionFinalizer,
   276  			},
   277  			Name:      fmt.Sprintf("%s-claim-%d-%d", machineName, deviceIndex, ipIndex),
   278  			Namespace: "openshift-machine-api",
   279  		},
   280  		Spec: ipamv1.IPAddressClaimSpec{
   281  			PoolRef: poolRef,
   282  		},
   283  	}
   284  
   285  	// Populate IPAddress info
   286  	ipaddr := &ipamv1.IPAddress{
   287  		TypeMeta: metav1.TypeMeta{
   288  			APIVersion: "ipam.cluster.x-k8s.io/v1beta1",
   289  			Kind:       "IPAddress",
   290  		},
   291  		ObjectMeta: metav1.ObjectMeta{
   292  			Name:      fmt.Sprintf("%s-claim-%d-%d", machineName, deviceIndex, ipIndex),
   293  			Namespace: "openshift-machine-api",
   294  		},
   295  		Spec: ipamv1.IPAddressSpec{
   296  			Address: ipAddress,
   297  			ClaimRef: corev1.LocalObjectReference{
   298  				Name: ipclaim.Name,
   299  			},
   300  			Gateway: gateway,
   301  			PoolRef: poolRef,
   302  			Prefix:  prefix,
   303  		},
   304  	}
   305  
   306  	ipclaim.Status = ipamv1.IPAddressClaimStatus{
   307  		AddressRef: corev1.LocalObjectReference{
   308  			Name: ipaddr.Name,
   309  		},
   310  	}
   311  
   312  	return ipclaim, ipaddr
   313  }
   314  
   315  func provider(clusterID string, vcenter *vsphere.VCenter, failureDomain vsphere.FailureDomain, mpool *vsphere.MachinePool, osImage string, userDataSecret string) (*machineapi.VSphereMachineProviderSpec, error) {
   316  	networkDeviceSpec := make([]machineapi.NetworkDeviceSpec, len(failureDomain.Topology.Networks))
   317  
   318  	// If failureDomain.Topology.Folder is empty this will be used
   319  	folder := path.Clean(fmt.Sprintf("/%s/vm/%s", failureDomain.Topology.Datacenter, clusterID))
   320  
   321  	// If failureDomain.Topology.ResourcePool is empty this will be used
   322  	// computeCluster is required to be a path
   323  	resourcePool := path.Clean(fmt.Sprintf("%s/Resources", failureDomain.Topology.ComputeCluster))
   324  
   325  	if failureDomain.Topology.Folder != "" {
   326  		folder = failureDomain.Topology.Folder
   327  	}
   328  	if failureDomain.Topology.ResourcePool != "" {
   329  		resourcePool = failureDomain.Topology.ResourcePool
   330  	}
   331  
   332  	resourcePool = path.Clean(resourcePool)
   333  
   334  	for i, network := range failureDomain.Topology.Networks {
   335  		networkDeviceSpec[i] = machineapi.NetworkDeviceSpec{NetworkName: network}
   336  	}
   337  
   338  	return &machineapi.VSphereMachineProviderSpec{
   339  		TypeMeta: metav1.TypeMeta{
   340  			APIVersion: machineapi.SchemeGroupVersion.String(),
   341  			Kind:       "VSphereMachineProviderSpec",
   342  		},
   343  		UserDataSecret:    &corev1.LocalObjectReference{Name: userDataSecret},
   344  		CredentialsSecret: &corev1.LocalObjectReference{Name: "vsphere-cloud-credentials"},
   345  		Template:          osImage,
   346  		Network: machineapi.NetworkSpec{
   347  			Devices: networkDeviceSpec,
   348  		},
   349  		Workspace: &machineapi.Workspace{
   350  			Server:       vcenter.Server,
   351  			Datacenter:   failureDomain.Topology.Datacenter,
   352  			Datastore:    failureDomain.Topology.Datastore,
   353  			Folder:       folder,
   354  			ResourcePool: resourcePool,
   355  		},
   356  		TagIDs:            failureDomain.Topology.TagIDs,
   357  		NumCPUs:           mpool.NumCPUs,
   358  		NumCoresPerSocket: mpool.NumCoresPerSocket,
   359  		MemoryMiB:         mpool.MemoryMiB,
   360  		DiskGiB:           mpool.OSDisk.DiskSizeGB,
   361  	}, nil
   362  }
   363  
// ConfigMasters sets the PublicIP flag and assigns a set of load balancers to the given machines
// It is intentionally a no-op on vSphere — there are no public IPs or
// platform load balancers to configure here; presumably kept to match the
// per-platform API shape used by other platforms (confirm with callers).
func ConfigMasters(machines []machineapi.Machine, clusterID string) {
}
   367  
   368  func hasFailureDomain(failureDomains []machinev1.VSphereFailureDomain, failureDomain string) bool {
   369  	for _, fd := range failureDomains {
   370  		if fd.Name == failureDomain {
   371  			return true
   372  		}
   373  	}
   374  	return false
   375  }