github.com/vmware/govmomi@v0.51.0/cli/vm/create.go (about)

     1  // © Broadcom. All Rights Reserved.
     2  // The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
     3  // SPDX-License-Identifier: Apache-2.0
     4  
     5  package vm
     6  
     7  import (
     8  	"context"
     9  	"flag"
    10  	"fmt"
    11  	"io"
    12  	"strings"
    13  	"text/tabwriter"
    14  
    15  	"github.com/vmware/govmomi/cli"
    16  	"github.com/vmware/govmomi/cli/flags"
    17  	"github.com/vmware/govmomi/find"
    18  	"github.com/vmware/govmomi/object"
    19  	"github.com/vmware/govmomi/property"
    20  	"github.com/vmware/govmomi/units"
    21  	"github.com/vmware/govmomi/vim25"
    22  	"github.com/vmware/govmomi/vim25/mo"
    23  	"github.com/vmware/govmomi/vim25/types"
    24  )
    25  
var (
	// FirmwareTypes lists the valid values for the -firmware flag,
	// derived from the vim25 GuestOsDescriptorFirmwareType enum.
	FirmwareTypes = types.GuestOsDescriptorFirmwareType("").Strings()

	// FirmwareUsage is the usage string shown for the -firmware flag.
	FirmwareUsage = fmt.Sprintf("Firmware type [%s]", strings.Join(FirmwareTypes, "|"))
)
    31  
// create implements the "govc vm.create" command. It aggregates the
// standard govc flag sets plus the vm.create-specific options, and caches
// the resolved inventory objects for use across Run/createVM.
type create struct {
	*flags.ClientFlag
	*flags.ClusterFlag
	*flags.DatacenterFlag
	*flags.DatastoreFlag
	*flags.StoragePodFlag
	*flags.ResourcePoolFlag
	*flags.HostSystemFlag
	*flags.NetworkFlag
	*flags.FolderFlag
	*flags.StorageProfileFlag

	name       string // VM name (positional argument)
	memory     int    // -m: memory size in MB
	cpus       int    // -c: number of vCPUs
	guestID    string // -g: guest OS identifier
	link       bool   // -link: use specified disk as a linked-clone child
	on         bool   // -on: power on after create
	force      bool   // -force: create even if the .vmx already exists
	controller string // -disk.controller: "ide", "nvme", "sata" or a SCSI type
	eager      bool   // -disk.eager: eagerly scrub new disk
	thick      bool   // -disk.thick: thick provision new disk
	annotation string // -annotation: VM description
	firmware   string // -firmware: one of FirmwareTypes
	version    string // -version: ESXi or hardware version
	place      bool   // -place (unreleased): only show placement, don't create

	iso              string
	isoDatastoreFlag *flags.DatastoreFlag

	disk              string
	diskDatastoreFlag *flags.DatastoreFlag
	diskDatastore     *object.Datastore

	// Only set if the disk argument is a byte size, which means the disk
	// doesn't exist yet and should be created
	diskByteSize int64

	// Resolved inventory objects, populated in Run.
	Client       *vim25.Client
	Cluster      *object.ClusterComputeResource
	Datacenter   *object.Datacenter
	Datastore    *object.Datastore
	StoragePod   *object.StoragePod
	ResourcePool *object.ResourcePool
	HostSystem   *object.HostSystem
	Folder       *object.Folder
}
    79  
// init registers this command as "vm.create" with the govc CLI dispatcher.
func init() {
	cli.Register("vm.create", &create{})
}
    83  
// Register wires up all embedded flag sets and the vm.create-specific
// flags on f. The ctx is threaded through each flags.NewXxxFlag call so
// the flag sets share context values.
func (cmd *create) Register(ctx context.Context, f *flag.FlagSet) {
	cmd.ClientFlag, ctx = flags.NewClientFlag(ctx)
	cmd.ClientFlag.Register(ctx, f)

	cmd.ClusterFlag, ctx = flags.NewClusterFlag(ctx)
	cmd.ClusterFlag.RegisterPlacement(ctx, f)

	cmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)
	cmd.DatacenterFlag.Register(ctx, f)

	cmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)
	cmd.DatastoreFlag.Register(ctx, f)

	cmd.StoragePodFlag, ctx = flags.NewStoragePodFlag(ctx)
	cmd.StoragePodFlag.Register(ctx, f)

	cmd.ResourcePoolFlag, ctx = flags.NewResourcePoolFlag(ctx)
	cmd.ResourcePoolFlag.Register(ctx, f)

	cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)
	cmd.HostSystemFlag.Register(ctx, f)

	cmd.NetworkFlag, ctx = flags.NewNetworkFlag(ctx)
	cmd.NetworkFlag.Register(ctx, f)

	cmd.FolderFlag, ctx = flags.NewFolderFlag(ctx)
	cmd.FolderFlag.Register(ctx, f)

	cmd.StorageProfileFlag, ctx = flags.NewStorageProfileFlag(ctx)
	cmd.StorageProfileFlag.Register(ctx, f)

	f.IntVar(&cmd.memory, "m", 1024, "Size in MB of memory")
	f.IntVar(&cmd.cpus, "c", 1, "Number of CPUs")
	f.StringVar(&cmd.guestID, "g", "otherGuest", "Guest OS ID")
	f.BoolVar(&cmd.link, "link", true, "Link specified disk")
	f.BoolVar(&cmd.on, "on", true, "Power on VM")
	f.BoolVar(&cmd.force, "force", false, "Create VM if vmx already exists")
	f.StringVar(&cmd.controller, "disk.controller", "scsi", "Disk controller type")
	f.BoolVar(&cmd.eager, "disk.eager", false, "Eagerly scrub new disk")
	f.BoolVar(&cmd.thick, "disk.thick", false, "Thick provision new disk")
	f.StringVar(&cmd.annotation, "annotation", "", "VM description")
	f.StringVar(&cmd.firmware, "firmware", FirmwareTypes[0], FirmwareUsage)
	// -place is only exposed in unreleased builds.
	if cli.ShowUnreleased() {
		f.BoolVar(&cmd.place, "place", false, "Place VM without creating")
	}

	esxiVersions := types.GetESXiVersions()
	esxiVersionStrings := make([]string, len(esxiVersions))
	for i := range esxiVersions {
		esxiVersionStrings[i] = esxiVersions[i].String()
	}
	f.StringVar(&cmd.version, "version", "",
		fmt.Sprintf("ESXi hardware version [%s]", strings.Join(esxiVersionStrings, "|")))

	// The iso/disk datastore flags are dedicated DatastoreFlag instances;
	// if left empty they default to the VM's datastore in Process.
	f.StringVar(&cmd.iso, "iso", "", "ISO path")
	cmd.isoDatastoreFlag, ctx = flags.NewCustomDatastoreFlag(ctx)
	f.StringVar(&cmd.isoDatastoreFlag.Name, "iso-datastore", "", "Datastore for ISO file")

	f.StringVar(&cmd.disk, "disk", "", "Disk path (to use existing) OR size (to create new, e.g. 20GB)")
	cmd.diskDatastoreFlag, _ = flags.NewCustomDatastoreFlag(ctx)
	f.StringVar(&cmd.diskDatastoreFlag.Name, "disk-datastore", "", "Datastore for disk file")
}
   146  
   147  func (cmd *create) Process(ctx context.Context) error {
   148  	if err := cmd.ClientFlag.Process(ctx); err != nil {
   149  		return err
   150  	}
   151  	if err := cmd.ClusterFlag.Process(ctx); err != nil {
   152  		return err
   153  	}
   154  	if err := cmd.DatacenterFlag.Process(ctx); err != nil {
   155  		return err
   156  	}
   157  	if err := cmd.DatastoreFlag.Process(ctx); err != nil {
   158  		return err
   159  	}
   160  	if err := cmd.StoragePodFlag.Process(ctx); err != nil {
   161  		return err
   162  	}
   163  	if err := cmd.ResourcePoolFlag.Process(ctx); err != nil {
   164  		return err
   165  	}
   166  	if err := cmd.HostSystemFlag.Process(ctx); err != nil {
   167  		return err
   168  	}
   169  	if err := cmd.NetworkFlag.Process(ctx); err != nil {
   170  		return err
   171  	}
   172  	if err := cmd.FolderFlag.Process(ctx); err != nil {
   173  		return err
   174  	}
   175  	if err := cmd.StorageProfileFlag.Process(ctx); err != nil {
   176  		return err
   177  	}
   178  
   179  	// Default iso/disk datastores to the VM's datastore
   180  	if cmd.isoDatastoreFlag.Name == "" {
   181  		cmd.isoDatastoreFlag = cmd.DatastoreFlag
   182  	}
   183  	if cmd.diskDatastoreFlag.Name == "" {
   184  		cmd.diskDatastoreFlag = cmd.DatastoreFlag
   185  	}
   186  
   187  	return nil
   188  }
   189  
// Usage returns the positional-argument synopsis for this command.
func (cmd *create) Usage() string {
	return "NAME"
}
   193  
// Description returns the long help text, including usage examples.
func (cmd *create) Description() string {
	return `Create VM.

For a list of possible '-g' IDs, use 'govc vm.option.info' or see:
https://developer.broadcom.com/xapis/vsphere-web-services-api/latest/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html

Examples:
  govc vm.create -on=false vm-name
  govc vm.create -iso library:/boot/linux/ubuntu.iso vm-name # Content Library ISO
  govc vm.create -cluster cluster1 vm-name # use compute cluster placement
  govc vm.create -datastore-cluster dscluster vm-name # use datastore cluster placement
  govc vm.create -m 2048 -c 2 -g freebsd64Guest -net.adapter vmxnet3 -disk.controller pvscsi vm-name`
}
   207  
// Run resolves the inventory objects selected by the flags, verifies any
// referenced ISO/disk files exist, creates the VM, and optionally powers
// it on. Exactly one positional argument (the VM name) is required.
func (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error {
	var err error

	if len(f.Args()) != 1 {
		return flag.ErrHelp
	}

	cmd.name = f.Arg(0)
	if cmd.name == "" {
		return flag.ErrHelp
	}

	cmd.Client, err = cmd.ClientFlag.Client()
	if err != nil {
		return err
	}

	cmd.Cluster, err = cmd.ClusterFlag.ClusterIfSpecified()
	if err != nil {
		return err
	}

	cmd.Datacenter, err = cmd.DatacenterFlag.Datacenter()
	if err != nil {
		return err
	}

	// Datastore resolution: a datastore cluster (storage pod) wins;
	// otherwise resolve an explicit datastore unless a compute cluster
	// will pick one via PlaceVm in createVM.
	if cmd.StoragePodFlag.Isset() {
		cmd.StoragePod, err = cmd.StoragePodFlag.StoragePod()
		if err != nil {
			return err
		}
	} else if cmd.Cluster == nil {
		cmd.Datastore, err = cmd.DatastoreFlag.Datastore()
		if err != nil {
			return err
		}
	}

	cmd.HostSystem, err = cmd.HostSystemFlag.HostSystemIfSpecified()
	if err != nil {
		return err
	}

	// Resource pool: prefer the host's pool, then an explicit pool,
	// falling back to the cluster's root pool.
	if cmd.HostSystem != nil {
		if cmd.ResourcePool, err = cmd.HostSystem.ResourcePool(ctx); err != nil {
			return err
		}
	} else {
		if cmd.Cluster == nil {
			// -host is optional
			if cmd.ResourcePool, err = cmd.ResourcePoolFlag.ResourcePool(); err != nil {
				return err
			}
		} else {
			if cmd.ResourcePool, err = cmd.Cluster.ResourcePool(ctx); err != nil {
				return err
			}
		}
	}

	if cmd.Folder, err = cmd.FolderFlag.Folder(); err != nil {
		return err
	}

	// Verify ISO exists
	if cmd.iso != "" {
		iso, err := cmd.isoDatastoreFlag.FileBacking(ctx, cmd.iso, true)
		if err != nil {
			return err
		}
		cmd.iso = iso
	}

	// Verify disk exists
	if cmd.disk != "" {
		var b units.ByteSize

		// If disk can be parsed as byte units, don't stat
		err = b.Set(cmd.disk)
		if err == nil {
			cmd.diskByteSize = int64(b)
		} else {
			_, err = cmd.diskDatastoreFlag.Stat(ctx, cmd.disk)
			if err != nil {
				return err
			}

			cmd.diskDatastore, err = cmd.diskDatastoreFlag.Datastore()
			if err != nil {
				return err
			}
		}
	}

	task, err := cmd.createVM(ctx)
	if err != nil {
		return err
	}
	// -place and -spec only print output; createVM returned a nil task.
	if cmd.place || cmd.Spec {
		return nil
	}
	info, err := task.WaitForResult(ctx, nil)
	if err != nil {
		return err
	}

	vm := object.NewVirtualMachine(cmd.Client, info.Result.(types.ManagedObjectReference))

	if cmd.on {
		task, err := vm.PowerOn(ctx)
		if err != nil {
			return err
		}

		_, err = task.WaitForResult(ctx, nil)
		if err != nil {
			return err
		}
	}

	return nil
}
   331  
// place holds the result of a -place dry run: the PlacementSpec that was
// submitted and the cluster's recommendations. It implements the output
// writer interface used by WriteResult.
type place struct {
	Spec            types.PlacementSpec           `json:"spec"`
	Recommendations []types.ClusterRecommendation `json:"recommendations"`

	// ctx/cmd are needed by Write to resolve inventory paths.
	ctx context.Context
	cmd *create
}
   339  
// Dump returns the raw recommendations for -dump style output.
func (p *place) Dump() any {
	return p.Recommendations
}
   343  
   344  func (p *place) action(w io.Writer, r types.ClusterRecommendation, a *types.PlacementAction) error {
   345  	spec := a.RelocateSpec
   346  	if spec == nil {
   347  		return nil
   348  	}
   349  
   350  	fields := []struct {
   351  		name string
   352  		moid *types.ManagedObjectReference
   353  	}{
   354  		{"Target", r.Target},
   355  		{"  Folder", spec.Folder},
   356  		{"  Datastore", spec.Datastore},
   357  		{"  Pool", spec.Pool},
   358  		{"  Host", spec.Host},
   359  	}
   360  
   361  	for _, f := range fields {
   362  		if f.moid == nil {
   363  			continue
   364  		}
   365  		path, err := find.InventoryPath(p.ctx, p.cmd.Client, *f.moid)
   366  		if err != nil {
   367  			return err
   368  		}
   369  		fmt.Fprintf(w, "%s:\t%s\n", f.name, path)
   370  	}
   371  
   372  	return nil
   373  }
   374  
   375  func (p *place) Write(w io.Writer) error {
   376  	tw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0)
   377  
   378  	for _, r := range p.Recommendations {
   379  		for _, a := range r.Action {
   380  			p.action(tw, r, a.(*types.PlacementAction))
   381  		}
   382  	}
   383  
   384  	return tw.Flush()
   385  }
   386  
   387  func (cmd *create) createVM(ctx context.Context) (*object.Task, error) {
   388  	var devices object.VirtualDeviceList
   389  	var err error
   390  
   391  	if cmd.version != "" {
   392  		if v, _ := types.ParseESXiVersion(cmd.version); v.IsValid() {
   393  			cmd.version = v.HardwareVersion().String()
   394  		} else if v, _ := types.ParseHardwareVersion(cmd.version); v.IsValid() {
   395  			cmd.version = v.String()
   396  		} else {
   397  			return nil, fmt.Errorf("invalid version: %s", cmd.version)
   398  		}
   399  	}
   400  
   401  	spec := &types.VirtualMachineConfigSpec{
   402  		Name:       cmd.name,
   403  		GuestId:    cmd.guestID,
   404  		NumCPUs:    int32(cmd.cpus),
   405  		MemoryMB:   int64(cmd.memory),
   406  		Annotation: cmd.annotation,
   407  		Firmware:   cmd.firmware,
   408  		Version:    cmd.version,
   409  	}
   410  
   411  	spec.VmProfile, err = cmd.StorageProfileSpec(ctx)
   412  	if err != nil {
   413  		return nil, err
   414  	}
   415  
   416  	devices, err = cmd.addStorage(nil)
   417  	if err != nil {
   418  		return nil, err
   419  	}
   420  
   421  	devices, err = cmd.addNetwork(devices)
   422  	if err != nil {
   423  		return nil, err
   424  	}
   425  
   426  	deviceChange, err := devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd)
   427  	if err != nil {
   428  		return nil, err
   429  	}
   430  
   431  	spec.DeviceChange = deviceChange
   432  
   433  	var datastore *object.Datastore
   434  
   435  	// If storage pod is specified, collect placement recommendations
   436  	if cmd.StoragePod != nil {
   437  		datastore, err = cmd.recommendDatastore(ctx, spec)
   438  		if err != nil {
   439  			return nil, err
   440  		}
   441  	} else if cmd.Datastore != nil {
   442  		datastore = cmd.Datastore
   443  	} else if cmd.Cluster != nil {
   444  		pspec := types.PlacementSpec{
   445  			PlacementType: string(types.PlacementSpecPlacementTypeCreate),
   446  			ConfigSpec:    spec,
   447  		}
   448  		result, err := cmd.Cluster.PlaceVm(ctx, pspec)
   449  		if err != nil {
   450  			return nil, err
   451  		}
   452  
   453  		recs := result.Recommendations
   454  		if cmd.place {
   455  			return nil, cmd.WriteResult(&place{pspec, recs, ctx, cmd})
   456  		}
   457  		if len(recs) == 0 {
   458  			return nil, fmt.Errorf("no cluster recommendations")
   459  		}
   460  
   461  		rspec := *recs[0].Action[0].(*types.PlacementAction).RelocateSpec
   462  		if rspec.Datastore != nil {
   463  			datastore = object.NewDatastore(cmd.Client, *rspec.Datastore)
   464  			datastore.InventoryPath, _ = datastore.ObjectName(ctx)
   465  			cmd.Datastore = datastore
   466  		}
   467  		if rspec.Host != nil {
   468  			cmd.HostSystem = object.NewHostSystem(cmd.Client, *rspec.Host)
   469  		}
   470  		if rspec.Pool != nil {
   471  			cmd.ResourcePool = object.NewResourcePool(cmd.Client, *rspec.Pool)
   472  		}
   473  	} else {
   474  		return nil, fmt.Errorf("please provide either a cluster, datastore or datastore-cluster")
   475  	}
   476  
   477  	if !cmd.force && !cmd.Spec {
   478  		vmxPath := fmt.Sprintf("%s/%s.vmx", cmd.name, cmd.name)
   479  
   480  		_, err := datastore.Stat(ctx, vmxPath)
   481  		if err == nil {
   482  			dsPath := cmd.Datastore.Path(vmxPath)
   483  			return nil, fmt.Errorf("file %s already exists", dsPath)
   484  		}
   485  	}
   486  
   487  	folder := cmd.Folder
   488  
   489  	spec.Files = &types.VirtualMachineFileInfo{
   490  		VmPathName: fmt.Sprintf("[%s]", datastore.Name()),
   491  	}
   492  
   493  	if cmd.Spec {
   494  		return nil, cmd.WriteAny(spec)
   495  	}
   496  
   497  	return folder.CreateVM(ctx, *spec, cmd.ResourcePool, cmd.HostSystem)
   498  }
   499  
   500  func (cmd *create) addStorage(devices object.VirtualDeviceList) (object.VirtualDeviceList, error) {
   501  	if cmd.controller != "ide" {
   502  		if cmd.controller == "nvme" {
   503  			nvme, err := devices.CreateNVMEController()
   504  			if err != nil {
   505  				return nil, err
   506  			}
   507  
   508  			devices = append(devices, nvme)
   509  			cmd.controller = devices.Name(nvme)
   510  		} else if cmd.controller == "sata" {
   511  			sata, err := devices.CreateSATAController()
   512  			if err != nil {
   513  				return nil, err
   514  			}
   515  
   516  			devices = append(devices, sata)
   517  			cmd.controller = devices.Name(sata)
   518  		} else {
   519  			scsi, err := devices.CreateSCSIController(cmd.controller)
   520  			if err != nil {
   521  				return nil, err
   522  			}
   523  
   524  			devices = append(devices, scsi)
   525  			cmd.controller = devices.Name(scsi)
   526  		}
   527  	}
   528  
   529  	// If controller is specified to be IDE or if an ISO is specified, add IDE controller.
   530  	if cmd.controller == "ide" || cmd.iso != "" {
   531  		ide, err := devices.CreateIDEController()
   532  		if err != nil {
   533  			return nil, err
   534  		}
   535  
   536  		devices = append(devices, ide)
   537  	}
   538  
   539  	if cmd.diskByteSize != 0 {
   540  		controller, err := devices.FindDiskController(cmd.controller)
   541  		if err != nil {
   542  			return nil, err
   543  		}
   544  
   545  		backing := &types.VirtualDiskFlatVer2BackingInfo{
   546  			DiskMode:        string(types.VirtualDiskModePersistent),
   547  			ThinProvisioned: types.NewBool(!cmd.thick),
   548  		}
   549  		if cmd.thick {
   550  			backing.EagerlyScrub = &cmd.eager
   551  		}
   552  		disk := &types.VirtualDisk{
   553  			VirtualDevice: types.VirtualDevice{
   554  				Key:     devices.NewKey(),
   555  				Backing: backing,
   556  			},
   557  			CapacityInKB: cmd.diskByteSize / 1024,
   558  		}
   559  
   560  		devices.AssignController(disk, controller)
   561  		devices = append(devices, disk)
   562  	} else if cmd.disk != "" {
   563  		controller, err := devices.FindDiskController(cmd.controller)
   564  		if err != nil {
   565  			return nil, err
   566  		}
   567  
   568  		ds := cmd.diskDatastore.Reference()
   569  		path := cmd.diskDatastore.Path(cmd.disk)
   570  		disk := devices.CreateDisk(controller, ds, path)
   571  
   572  		if cmd.link {
   573  			disk = devices.ChildDisk(disk)
   574  		}
   575  
   576  		devices = append(devices, disk)
   577  	}
   578  
   579  	if cmd.iso != "" {
   580  		ide, err := devices.FindIDEController("")
   581  		if err != nil {
   582  			return nil, err
   583  		}
   584  
   585  		cdrom, err := devices.CreateCdrom(ide)
   586  		if err != nil {
   587  			return nil, err
   588  		}
   589  
   590  		cdrom = devices.InsertIso(cdrom, cmd.iso)
   591  		devices = append(devices, cdrom)
   592  	}
   593  
   594  	return devices, nil
   595  }
   596  
   597  func (cmd *create) addNetwork(devices object.VirtualDeviceList) (object.VirtualDeviceList, error) {
   598  	netdev, err := cmd.NetworkFlag.Device()
   599  	if err != nil {
   600  		return nil, err
   601  	}
   602  
   603  	devices = append(devices, netdev)
   604  	return devices, nil
   605  }
   606  
// recommendDatastore asks Storage DRS to place the VM (and its new
// disks) within the selected datastore cluster, pins each new disk's
// backing to the recommended datastore, and returns that datastore.
func (cmd *create) recommendDatastore(ctx context.Context, spec *types.VirtualMachineConfigSpec) (*object.Datastore, error) {
	sp := cmd.StoragePod.Reference()

	// Build pod selection spec from config spec
	podSelectionSpec := types.StorageDrsPodSelectionSpec{
		StoragePod: &sp,
	}

	// Keep list of disks that need to be placed
	var disks []*types.VirtualDisk

	// Collect disks eligible for placement: only newly-created disk
	// devices (add operation + create file operation) are considered.
	for _, deviceConfigSpec := range spec.DeviceChange {
		s := deviceConfigSpec.GetVirtualDeviceConfigSpec()
		if s.Operation != types.VirtualDeviceConfigSpecOperationAdd {
			continue
		}

		if s.FileOperation != types.VirtualDeviceConfigSpecFileOperationCreate {
			continue
		}

		d, ok := s.Device.(*types.VirtualDisk)
		if !ok {
			continue
		}

		podConfigForPlacement := types.VmPodConfigForPlacement{
			StoragePod: sp,
			Disk: []types.PodDiskLocator{
				{
					DiskId:          d.Key,
					DiskBackingInfo: d.Backing,
				},
			},
		}

		podSelectionSpec.InitialVmConfig = append(podSelectionSpec.InitialVmConfig, podConfigForPlacement)
		disks = append(disks, d)
	}

	sps := types.StoragePlacementSpec{
		Type:             string(types.StoragePlacementSpecPlacementTypeCreate),
		ResourcePool:     types.NewReference(cmd.ResourcePool.Reference()),
		PodSelectionSpec: podSelectionSpec,
		ConfigSpec:       spec,
	}

	srm := object.NewStorageResourceManager(cmd.Client)
	result, err := srm.RecommendDatastores(ctx, sps)
	if err != nil {
		return nil, err
	}

	// Use result to pin disks to recommended datastores
	recs := result.Recommendations
	if len(recs) == 0 {
		return nil, fmt.Errorf("no datastore-cluster recommendations")
	}

	// Take the first (highest-ranked) recommendation's destination.
	ds := recs[0].Action[0].(*types.StoragePlacementAction).Destination

	// Fetch the datastore name so InventoryPath can be set for display.
	var mds mo.Datastore
	err = property.DefaultCollector(cmd.Client).RetrieveOne(ctx, ds, []string{"name"}, &mds)
	if err != nil {
		return nil, err
	}

	datastore := object.NewDatastore(cmd.Client, ds)
	datastore.InventoryPath = mds.Name

	// Apply recommendation to eligible disks
	for _, disk := range disks {
		// All collected disks were created by addStorage with a
		// FlatVer2 backing, so this assertion holds.
		backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
		backing.Datastore = &ds
	}

	return datastore, nil
}