github.com/vmware/govmomi@v0.37.1/govc/vm/create.go (about) 1 /* 2 Copyright (c) 2014-2016 VMware, Inc. All Rights Reserved. 3 4 Licensed under the Apache License, Version 2.0 (the "License"); 5 you may not use this file except in compliance with the License. 6 You may obtain a copy of the License at 7 8 http://www.apache.org/licenses/LICENSE-2.0 9 10 Unless required by applicable law or agreed to in writing, software 11 distributed under the License is distributed on an "AS IS" BASIS, 12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 See the License for the specific language governing permissions and 14 limitations under the License. 15 */ 16 17 package vm 18 19 import ( 20 "context" 21 "flag" 22 "fmt" 23 "strings" 24 25 "github.com/vmware/govmomi/govc/cli" 26 "github.com/vmware/govmomi/govc/flags" 27 "github.com/vmware/govmomi/object" 28 "github.com/vmware/govmomi/property" 29 "github.com/vmware/govmomi/units" 30 "github.com/vmware/govmomi/vim25" 31 "github.com/vmware/govmomi/vim25/mo" 32 "github.com/vmware/govmomi/vim25/types" 33 ) 34 35 var ( 36 FirmwareTypes = []string{ 37 string(types.GuestOsDescriptorFirmwareTypeBios), 38 string(types.GuestOsDescriptorFirmwareTypeEfi), 39 } 40 FirmwareUsage = fmt.Sprintf("Firmware type [%s]", strings.Join(FirmwareTypes, "|")) 41 ) 42 43 type create struct { 44 *flags.ClientFlag 45 *flags.ClusterFlag 46 *flags.DatacenterFlag 47 *flags.DatastoreFlag 48 *flags.StoragePodFlag 49 *flags.ResourcePoolFlag 50 *flags.HostSystemFlag 51 *flags.NetworkFlag 52 *flags.FolderFlag 53 54 name string 55 memory int 56 cpus int 57 guestID string 58 link bool 59 on bool 60 force bool 61 controller string 62 annotation string 63 firmware string 64 version string 65 66 iso string 67 isoDatastoreFlag *flags.DatastoreFlag 68 isoDatastore *object.Datastore 69 70 disk string 71 diskDatastoreFlag *flags.DatastoreFlag 72 diskDatastore *object.Datastore 73 74 // Only set if the disk argument is a byte size, which means the 
disk 75 // doesn't exist yet and should be created 76 diskByteSize int64 77 78 Client *vim25.Client 79 Cluster *object.ClusterComputeResource 80 Datacenter *object.Datacenter 81 Datastore *object.Datastore 82 StoragePod *object.StoragePod 83 ResourcePool *object.ResourcePool 84 HostSystem *object.HostSystem 85 Folder *object.Folder 86 } 87 88 func init() { 89 cli.Register("vm.create", &create{}) 90 } 91 92 func (cmd *create) Register(ctx context.Context, f *flag.FlagSet) { 93 cmd.ClientFlag, ctx = flags.NewClientFlag(ctx) 94 cmd.ClientFlag.Register(ctx, f) 95 96 cmd.ClusterFlag, ctx = flags.NewClusterFlag(ctx) 97 cmd.ClusterFlag.RegisterPlacement(ctx, f) 98 99 cmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx) 100 cmd.DatacenterFlag.Register(ctx, f) 101 102 cmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx) 103 cmd.DatastoreFlag.Register(ctx, f) 104 105 cmd.StoragePodFlag, ctx = flags.NewStoragePodFlag(ctx) 106 cmd.StoragePodFlag.Register(ctx, f) 107 108 cmd.ResourcePoolFlag, ctx = flags.NewResourcePoolFlag(ctx) 109 cmd.ResourcePoolFlag.Register(ctx, f) 110 111 cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx) 112 cmd.HostSystemFlag.Register(ctx, f) 113 114 cmd.NetworkFlag, ctx = flags.NewNetworkFlag(ctx) 115 cmd.NetworkFlag.Register(ctx, f) 116 117 cmd.FolderFlag, ctx = flags.NewFolderFlag(ctx) 118 cmd.FolderFlag.Register(ctx, f) 119 120 f.IntVar(&cmd.memory, "m", 1024, "Size in MB of memory") 121 f.IntVar(&cmd.cpus, "c", 1, "Number of CPUs") 122 f.StringVar(&cmd.guestID, "g", "otherGuest", "Guest OS ID") 123 f.BoolVar(&cmd.link, "link", true, "Link specified disk") 124 f.BoolVar(&cmd.on, "on", true, "Power on VM") 125 f.BoolVar(&cmd.force, "force", false, "Create VM if vmx already exists") 126 f.StringVar(&cmd.controller, "disk.controller", "scsi", "Disk controller type") 127 f.StringVar(&cmd.annotation, "annotation", "", "VM description") 128 129 f.StringVar(&cmd.firmware, "firmware", FirmwareTypes[0], FirmwareUsage) 130 131 esxiVersions := 
types.GetESXiVersions() 132 esxiVersionStrings := make([]string, len(esxiVersions)) 133 for i := range esxiVersions { 134 esxiVersionStrings[i] = esxiVersions[i].String() 135 } 136 f.StringVar(&cmd.version, "version", "", 137 fmt.Sprintf("ESXi hardware version [%s]", strings.Join(esxiVersionStrings, "|"))) 138 139 f.StringVar(&cmd.iso, "iso", "", "ISO path") 140 cmd.isoDatastoreFlag, ctx = flags.NewCustomDatastoreFlag(ctx) 141 f.StringVar(&cmd.isoDatastoreFlag.Name, "iso-datastore", "", "Datastore for ISO file") 142 143 f.StringVar(&cmd.disk, "disk", "", "Disk path (to use existing) OR size (to create new, e.g. 20GB)") 144 cmd.diskDatastoreFlag, _ = flags.NewCustomDatastoreFlag(ctx) 145 f.StringVar(&cmd.diskDatastoreFlag.Name, "disk-datastore", "", "Datastore for disk file") 146 } 147 148 func (cmd *create) Process(ctx context.Context) error { 149 if err := cmd.ClientFlag.Process(ctx); err != nil { 150 return err 151 } 152 if err := cmd.ClusterFlag.Process(ctx); err != nil { 153 return err 154 } 155 if err := cmd.DatacenterFlag.Process(ctx); err != nil { 156 return err 157 } 158 if err := cmd.DatastoreFlag.Process(ctx); err != nil { 159 return err 160 } 161 if err := cmd.StoragePodFlag.Process(ctx); err != nil { 162 return err 163 } 164 if err := cmd.ResourcePoolFlag.Process(ctx); err != nil { 165 return err 166 } 167 if err := cmd.HostSystemFlag.Process(ctx); err != nil { 168 return err 169 } 170 if err := cmd.NetworkFlag.Process(ctx); err != nil { 171 return err 172 } 173 if err := cmd.FolderFlag.Process(ctx); err != nil { 174 return err 175 } 176 177 // Default iso/disk datastores to the VM's datastore 178 if cmd.isoDatastoreFlag.Name == "" { 179 cmd.isoDatastoreFlag = cmd.DatastoreFlag 180 } 181 if cmd.diskDatastoreFlag.Name == "" { 182 cmd.diskDatastoreFlag = cmd.DatastoreFlag 183 } 184 185 return nil 186 } 187 188 func (cmd *create) Usage() string { 189 return "NAME" 190 } 191 192 func (cmd *create) Description() string { 193 return `Create VM. 
194 195 For a list of possible '-g' IDs, use 'govc vm.option.info' or see: 196 https://code.vmware.com/apis/358/vsphere/doc/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html 197 198 Examples: 199 govc vm.create -on=false vm-name 200 govc vm.create -cluster cluster1 vm-name # use compute cluster placement 201 govc vm.create -datastore-cluster dscluster vm-name # use datastore cluster placement 202 govc vm.create -m 2048 -c 2 -g freebsd64Guest -net.adapter vmxnet3 -disk.controller pvscsi vm-name` 203 } 204 205 func (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error { 206 var err error 207 208 if len(f.Args()) != 1 { 209 return flag.ErrHelp 210 } 211 212 cmd.name = f.Arg(0) 213 if cmd.name == "" { 214 return flag.ErrHelp 215 } 216 217 cmd.Client, err = cmd.ClientFlag.Client() 218 if err != nil { 219 return err 220 } 221 222 cmd.Cluster, err = cmd.ClusterFlag.ClusterIfSpecified() 223 if err != nil { 224 return err 225 } 226 227 cmd.Datacenter, err = cmd.DatacenterFlag.Datacenter() 228 if err != nil { 229 return err 230 } 231 232 if cmd.StoragePodFlag.Isset() { 233 cmd.StoragePod, err = cmd.StoragePodFlag.StoragePod() 234 if err != nil { 235 return err 236 } 237 } else if cmd.Cluster == nil { 238 cmd.Datastore, err = cmd.DatastoreFlag.Datastore() 239 if err != nil { 240 return err 241 } 242 } 243 244 cmd.HostSystem, err = cmd.HostSystemFlag.HostSystemIfSpecified() 245 if err != nil { 246 return err 247 } 248 249 if cmd.HostSystem != nil { 250 if cmd.ResourcePool, err = cmd.HostSystem.ResourcePool(ctx); err != nil { 251 return err 252 } 253 } else { 254 if cmd.Cluster == nil { 255 // -host is optional 256 if cmd.ResourcePool, err = cmd.ResourcePoolFlag.ResourcePool(); err != nil { 257 return err 258 } 259 } else { 260 if cmd.ResourcePool, err = cmd.Cluster.ResourcePool(ctx); err != nil { 261 return err 262 } 263 } 264 } 265 266 if cmd.Folder, err = cmd.FolderFlag.Folder(); err != nil { 267 return err 268 } 269 270 // Verify ISO exists 271 if cmd.iso != "" { 272 
_, err = cmd.isoDatastoreFlag.Stat(ctx, cmd.iso) 273 if err != nil { 274 return err 275 } 276 277 cmd.isoDatastore, err = cmd.isoDatastoreFlag.Datastore() 278 if err != nil { 279 return err 280 } 281 } 282 283 // Verify disk exists 284 if cmd.disk != "" { 285 var b units.ByteSize 286 287 // If disk can be parsed as byte units, don't stat 288 err = b.Set(cmd.disk) 289 if err == nil { 290 cmd.diskByteSize = int64(b) 291 } else { 292 _, err = cmd.diskDatastoreFlag.Stat(ctx, cmd.disk) 293 if err != nil { 294 return err 295 } 296 297 cmd.diskDatastore, err = cmd.diskDatastoreFlag.Datastore() 298 if err != nil { 299 return err 300 } 301 } 302 } 303 304 task, err := cmd.createVM(ctx) 305 if err != nil { 306 return err 307 } 308 309 info, err := task.WaitForResult(ctx, nil) 310 if err != nil { 311 return err 312 } 313 314 vm := object.NewVirtualMachine(cmd.Client, info.Result.(types.ManagedObjectReference)) 315 316 if cmd.on { 317 task, err := vm.PowerOn(ctx) 318 if err != nil { 319 return err 320 } 321 322 _, err = task.WaitForResult(ctx, nil) 323 if err != nil { 324 return err 325 } 326 } 327 328 return nil 329 } 330 331 func (cmd *create) createVM(ctx context.Context) (*object.Task, error) { 332 var devices object.VirtualDeviceList 333 var err error 334 335 if cmd.version != "" { 336 if v, _ := types.ParseESXiVersion(cmd.version); v.IsValid() { 337 cmd.version = v.HardwareVersion().String() 338 } else if v, _ := types.ParseHardwareVersion(cmd.version); v.IsValid() { 339 cmd.version = v.String() 340 } else { 341 return nil, fmt.Errorf("invalid version: %s", cmd.version) 342 } 343 } 344 345 spec := &types.VirtualMachineConfigSpec{ 346 Name: cmd.name, 347 GuestId: cmd.guestID, 348 NumCPUs: int32(cmd.cpus), 349 MemoryMB: int64(cmd.memory), 350 Annotation: cmd.annotation, 351 Firmware: cmd.firmware, 352 Version: cmd.version, 353 } 354 355 devices, err = cmd.addStorage(nil) 356 if err != nil { 357 return nil, err 358 } 359 360 devices, err = cmd.addNetwork(devices) 361 if err 
!= nil { 362 return nil, err 363 } 364 365 deviceChange, err := devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd) 366 if err != nil { 367 return nil, err 368 } 369 370 spec.DeviceChange = deviceChange 371 372 var datastore *object.Datastore 373 374 // If storage pod is specified, collect placement recommendations 375 if cmd.StoragePod != nil { 376 datastore, err = cmd.recommendDatastore(ctx, spec) 377 if err != nil { 378 return nil, err 379 } 380 } else if cmd.Datastore != nil { 381 datastore = cmd.Datastore 382 } else if cmd.Cluster != nil { 383 pspec := types.PlacementSpec{ 384 PlacementType: string(types.PlacementSpecPlacementTypeCreate), 385 ConfigSpec: spec, 386 } 387 result, err := cmd.Cluster.PlaceVm(ctx, pspec) 388 if err != nil { 389 return nil, err 390 } 391 392 recs := result.Recommendations 393 if len(recs) == 0 { 394 return nil, fmt.Errorf("no cluster recommendations") 395 } 396 397 rspec := *recs[0].Action[0].(*types.PlacementAction).RelocateSpec 398 if rspec.Datastore != nil { 399 datastore = object.NewDatastore(cmd.Client, *rspec.Datastore) 400 datastore.InventoryPath, _ = datastore.ObjectName(ctx) 401 cmd.Datastore = datastore 402 } 403 if rspec.Host != nil { 404 cmd.HostSystem = object.NewHostSystem(cmd.Client, *rspec.Host) 405 } 406 if rspec.Pool != nil { 407 cmd.ResourcePool = object.NewResourcePool(cmd.Client, *rspec.Pool) 408 } 409 } else { 410 return nil, fmt.Errorf("please provide either a cluster, datastore or datastore-cluster") 411 } 412 413 if !cmd.force { 414 vmxPath := fmt.Sprintf("%s/%s.vmx", cmd.name, cmd.name) 415 416 _, err := datastore.Stat(ctx, vmxPath) 417 if err == nil { 418 dsPath := cmd.Datastore.Path(vmxPath) 419 return nil, fmt.Errorf("file %s already exists", dsPath) 420 } 421 } 422 423 folder := cmd.Folder 424 425 spec.Files = &types.VirtualMachineFileInfo{ 426 VmPathName: fmt.Sprintf("[%s]", datastore.Name()), 427 } 428 429 return folder.CreateVM(ctx, *spec, cmd.ResourcePool, cmd.HostSystem) 430 } 431 432 
// addStorage builds the virtual device list for the new VM: a disk
// controller (SCSI variant, NVMe, or IDE), an optional virtual disk
// (newly created when -disk was a size, linked/attached when it was a
// path), and an optional CD-ROM backed by the -iso file.
//
// Note: cmd.controller is overwritten with the created controller
// device's name so the disk lookup below finds the right controller.
func (cmd *create) addStorage(devices object.VirtualDeviceList) (object.VirtualDeviceList, error) {
	if cmd.controller != "ide" {
		if cmd.controller == "nvme" {
			nvme, err := devices.CreateNVMEController()
			if err != nil {
				return nil, err
			}

			devices = append(devices, nvme)
			cmd.controller = devices.Name(nvme)
		} else {
			// Any non-ide, non-nvme value (e.g. "scsi", "pvscsi") is
			// treated as a SCSI controller type.
			scsi, err := devices.CreateSCSIController(cmd.controller)
			if err != nil {
				return nil, err
			}

			devices = append(devices, scsi)
			cmd.controller = devices.Name(scsi)
		}
	}

	// If controller is specified to be IDE or if an ISO is specified, add IDE controller.
	if cmd.controller == "ide" || cmd.iso != "" {
		ide, err := devices.CreateIDEController()
		if err != nil {
			return nil, err
		}

		devices = append(devices, ide)
	}

	if cmd.diskByteSize != 0 {
		// -disk was a size: create a new thin-provisioned flat disk on
		// the controller added above.
		controller, err := devices.FindDiskController(cmd.controller)
		if err != nil {
			return nil, err
		}

		disk := &types.VirtualDisk{
			VirtualDevice: types.VirtualDevice{
				Key: devices.NewKey(),
				Backing: &types.VirtualDiskFlatVer2BackingInfo{
					DiskMode:        string(types.VirtualDiskModePersistent),
					ThinProvisioned: types.NewBool(true),
				},
			},
			CapacityInKB: cmd.diskByteSize / 1024,
		}

		devices.AssignController(disk, controller)
		devices = append(devices, disk)
	} else if cmd.disk != "" {
		// -disk was a path: attach the existing disk, optionally as a
		// linked child disk (-link).
		controller, err := devices.FindDiskController(cmd.controller)
		if err != nil {
			return nil, err
		}

		ds := cmd.diskDatastore.Reference()
		path := cmd.diskDatastore.Path(cmd.disk)
		disk := devices.CreateDisk(controller, ds, path)

		if cmd.link {
			disk = devices.ChildDisk(disk)
		}

		devices = append(devices, disk)
	}

	if cmd.iso != "" {
		// An IDE controller was guaranteed above when -iso is set.
		ide, err := devices.FindIDEController("")
		if err != nil {
			return nil, err
		}

		cdrom, err := devices.CreateCdrom(ide)
		if err != nil {
			return nil, err
		}

		cdrom = devices.InsertIso(cdrom, cmd.isoDatastore.Path(cmd.iso))
		devices = append(devices, cdrom)
	}

	return devices, nil
}

// addNetwork appends the network adapter described by the -net flags
// to the device list.
func (cmd *create) addNetwork(devices object.VirtualDeviceList) (object.VirtualDeviceList, error) {
	netdev, err := cmd.NetworkFlag.Device()
	if err != nil {
		return nil, err
	}

	devices = append(devices, netdev)
	return devices, nil
}

// recommendDatastore asks Storage DRS for a datastore recommendation
// within cmd.StoragePod for the given config spec, pins each new disk's
// backing to the recommended datastore, and returns that datastore.
func (cmd *create) recommendDatastore(ctx context.Context, spec *types.VirtualMachineConfigSpec) (*object.Datastore, error) {
	sp := cmd.StoragePod.Reference()

	// Build pod selection spec from config spec
	podSelectionSpec := types.StorageDrsPodSelectionSpec{
		StoragePod: &sp,
	}

	// Keep list of disks that need to be placed
	var disks []*types.VirtualDisk

	// Collect disks eligible for placement
	for _, deviceConfigSpec := range spec.DeviceChange {
		s := deviceConfigSpec.GetVirtualDeviceConfigSpec()
		if s.Operation != types.VirtualDeviceConfigSpecOperationAdd {
			continue
		}

		// Only disks whose files will be created need placement.
		if s.FileOperation != types.VirtualDeviceConfigSpecFileOperationCreate {
			continue
		}

		d, ok := s.Device.(*types.VirtualDisk)
		if !ok {
			continue
		}

		podConfigForPlacement := types.VmPodConfigForPlacement{
			StoragePod: sp,
			Disk: []types.PodDiskLocator{
				{
					DiskId:          d.Key,
					DiskBackingInfo: d.Backing,
				},
			},
		}

		podSelectionSpec.InitialVmConfig = append(podSelectionSpec.InitialVmConfig, podConfigForPlacement)
		disks = append(disks, d)
	}

	sps := types.StoragePlacementSpec{
		Type:             string(types.StoragePlacementSpecPlacementTypeCreate),
		ResourcePool:     types.NewReference(cmd.ResourcePool.Reference()),
		PodSelectionSpec: podSelectionSpec,
		ConfigSpec:       spec,
	}

	srm := object.NewStorageResourceManager(cmd.Client)
	result, err := srm.RecommendDatastores(ctx, sps)
	if err != nil {
		return nil, err
	}

	// Use result to pin disks to recommended datastores
	recs := result.Recommendations
	if len(recs) == 0 {
		return nil, fmt.Errorf("no datastore-cluster recommendations")
	}

	// Take the first (top-ranked) recommendation's destination datastore.
	ds := recs[0].Action[0].(*types.StoragePlacementAction).Destination

	// Fetch the datastore name to use as its inventory path.
	var mds mo.Datastore
	err = property.DefaultCollector(cmd.Client).RetrieveOne(ctx, ds, []string{"name"}, &mds)
	if err != nil {
		return nil, err
	}

	datastore := object.NewDatastore(cmd.Client, ds)
	datastore.InventoryPath = mds.Name

	// Apply recommendation to eligible disks
	for _, disk := range disks {
		// NOTE(review): assumes every collected disk uses a
		// FlatVer2 backing — true for disks built by addStorage.
		backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
		backing.Datastore = &ds
	}

	return datastore, nil
}