github.com/subuk/terraform@v0.6.14-0.20160317140351-de1567c2e732/builtin/providers/vsphere/resource_vsphere_virtual_machine.go

package vsphere

import (
    "fmt"
    "log"
    "net"
    "strings"
    "time"

    "github.com/hashicorp/terraform/helper/resource"
    "github.com/hashicorp/terraform/helper/schema"
    "github.com/vmware/govmomi"
    "github.com/vmware/govmomi/find"
    "github.com/vmware/govmomi/object"
    "github.com/vmware/govmomi/property"
    "github.com/vmware/govmomi/vim25/mo"
    "github.com/vmware/govmomi/vim25/types"
    "golang.org/x/net/context"
)

var DefaultDNSSuffixes = []string{
    "vsphere.local",
}

var DefaultDNSServers = []string{
    "8.8.8.8",
    "8.8.4.4",
}

type networkInterface struct {
    deviceName       string
    label            string
    ipv4Address      string
    ipv4PrefixLength int
    ipv6Address      string
    ipv6PrefixLength int
    adapterType      string // TODO: Make "adapter_type" argument
}

type hardDisk struct {
    size     int64
    iops     int64
    initType string
}

type virtualMachine struct {
    name                 string
    folder               string
    datacenter           string
    cluster              string
    resourcePool         string
    datastore            string
    vcpu                 int
    memoryMb             int64
    template             string
    networkInterfaces    []networkInterface
    hardDisks            []hardDisk
    gateway              string
    domain               string
    timeZone             string
    dnsSuffixes          []string
    dnsServers           []string
    customConfigurations map[string](types.AnyType)
}

func (v virtualMachine) Path() string {
    return vmPath(v.folder, v.name)
}

func vmPath(folder string, name string) string {
    var path string
    if len(folder) > 0 {
        path += folder + "/"
    }
    return path + name
}

func resourceVSphereVirtualMachine() *schema.Resource {
    return &schema.Resource{
        Create: resourceVSphereVirtualMachineCreate,
        Read:   resourceVSphereVirtualMachineRead,
        Delete: resourceVSphereVirtualMachineDelete,

        Schema: map[string]*schema.Schema{
            "name": &schema.Schema{
                Type:     schema.TypeString,
                Required: true,
                ForceNew: true,
            },

            "folder": &schema.Schema{
                Type:     schema.TypeString,
                Optional: true,
                ForceNew: true,
            },

            "vcpu": &schema.Schema{
                Type:     schema.TypeInt,
                Required: true,
                ForceNew: true,
            },

            "memory": &schema.Schema{
                Type:     schema.TypeInt,
                Required: true,
                ForceNew: true,
            },

            "datacenter": &schema.Schema{
                Type:     schema.TypeString,
                Optional: true,
                ForceNew: true,
            },

            "cluster": &schema.Schema{
                Type:     schema.TypeString,
                Optional: true,
                ForceNew: true,
            },

            "resource_pool": &schema.Schema{
                Type:     schema.TypeString,
                Optional: true,
                ForceNew: true,
            },

            "gateway": &schema.Schema{
                Type:     schema.TypeString,
                Optional: true,
                ForceNew: true,
            },

            "domain": &schema.Schema{
                Type:     schema.TypeString,
                Optional: true,
                ForceNew: true,
                Default:  "vsphere.local",
            },

            "time_zone": &schema.Schema{
                Type:     schema.TypeString,
                Optional: true,
                ForceNew: true,
                Default:  "Etc/UTC",
            },

            "dns_suffixes": &schema.Schema{
                Type:     schema.TypeList,
                Optional: true,
                Elem:     &schema.Schema{Type: schema.TypeString},
                ForceNew: true,
            },

            "dns_servers": &schema.Schema{
                Type:     schema.TypeList,
                Optional: true,
                Elem:     &schema.Schema{Type: schema.TypeString},
                ForceNew: true,
            },

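            // Arbitrary key/value options that are applied to the VM's extraConfig
            // at create/clone time (see the ExtraConfig handling below).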
            "custom_configuration_parameters": &schema.Schema{
                Type:     schema.TypeMap,
                Optional: true,
                ForceNew: true,
            },

            "network_interface": &schema.Schema{
                Type:     schema.TypeList,
                Required: true,
                ForceNew: true,
                Elem: &schema.Resource{
                    Schema: map[string]*schema.Schema{
                        "label": &schema.Schema{
                            Type:     schema.TypeString,
                            Required: true,
                            ForceNew: true,
                        },

                        "ip_address": &schema.Schema{
                            Type:       schema.TypeString,
                            Optional:   true,
                            Computed:   true,
                            Deprecated: "Please use ipv4_address",
                        },

                        "subnet_mask": &schema.Schema{
                            Type:       schema.TypeString,
                            Optional:   true,
                            Computed:   true,
                            Deprecated: "Please use ipv4_prefix_length",
                        },

                        "ipv4_address": &schema.Schema{
                            Type:     schema.TypeString,
                            Optional: true,
                            Computed: true,
                        },

                        "ipv4_prefix_length": &schema.Schema{
                            Type:     schema.TypeInt,
                            Optional: true,
                            Computed: true,
                        },

                        // TODO: Implement ipv6 parameters to be optional
                        "ipv6_address": &schema.Schema{
                            Type:     schema.TypeString,
                            Computed: true,
                            ForceNew: true,
                        },

                        "ipv6_prefix_length": &schema.Schema{
                            Type:     schema.TypeInt,
                            Computed: true,
                            ForceNew: true,
                        },

                        "adapter_type": &schema.Schema{
                            Type:     schema.TypeString,
                            Optional: true,
                            ForceNew: true,
                        },
                    },
                },
            },

            "disk": &schema.Schema{
                Type:     schema.TypeList,
                Required: true,
                ForceNew: true,
                Elem: &schema.Resource{
                    Schema: map[string]*schema.Schema{
                        "template": &schema.Schema{
                            Type:     schema.TypeString,
                            Optional: true,
                            ForceNew: true,
                        },

                        "type": &schema.Schema{
                            Type:     schema.TypeString,
                            Optional: true,
                            ForceNew: true,
                            Default:  "eager_zeroed",
                            ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
                                value := v.(string)
                                if value != "thin" && value != "eager_zeroed" {
                                    errors = append(errors, fmt.Errorf(
                                        "only 'thin' and 'eager_zeroed' are supported values for 'type'"))
                                }
                                return
                            },
                        },

                        "datastore": &schema.Schema{
                            Type:     schema.TypeString,
                            Optional: true,
                            ForceNew: true,
                        },

                        "size": &schema.Schema{
                            Type:     schema.TypeInt,
                            Optional: true,
                            ForceNew: true,
                        },

                        "iops": &schema.Schema{
                            Type:     schema.TypeInt,
                            Optional: true,
                            ForceNew: true,
                        },
                    },
                },
            },

            "boot_delay": &schema.Schema{
                Type:     schema.TypeInt,
                Optional: true,
                ForceNew: true,
            },
        },
    }
}

func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error {
    client := meta.(*govmomi.Client)

    vm := virtualMachine{
        name:     d.Get("name").(string),
        vcpu:     d.Get("vcpu").(int),
        memoryMb: int64(d.Get("memory").(int)),
    }

    if v, ok := d.GetOk("folder"); ok {
        vm.folder = v.(string)
    }

    if v, ok := d.GetOk("datacenter"); ok {
        vm.datacenter = v.(string)
    }

    if v, ok := d.GetOk("cluster"); ok {
        vm.cluster = v.(string)
    }

    if v, ok := d.GetOk("resource_pool"); ok {
        vm.resourcePool = v.(string)
    }

    if v, ok := d.GetOk("gateway"); ok {
        vm.gateway = v.(string)
    }

    if v, ok := d.GetOk("domain"); ok {
        vm.domain = v.(string)
    }

    if v, ok := d.GetOk("time_zone"); ok {
        vm.timeZone = v.(string)
    }

    if raw, ok := d.GetOk("dns_suffixes"); ok {
        for _, v := range raw.([]interface{}) {
            vm.dnsSuffixes = append(vm.dnsSuffixes, v.(string))
        }
    } else {
        vm.dnsSuffixes = DefaultDNSSuffixes
    }

    if raw, ok := d.GetOk("dns_servers"); ok {
        for _, v := range raw.([]interface{}) {
            vm.dnsServers = append(vm.dnsServers, v.(string))
        }
    } else {
        vm.dnsServers = DefaultDNSServers
    }

    if vL, ok := d.GetOk("custom_configuration_parameters"); ok {
        if custom_configs, ok := vL.(map[string]interface{}); ok {
            custom := make(map[string]types.AnyType)
            for k, v := range custom_configs {
                custom[k] = v
            }
            vm.customConfigurations = custom
            log.Printf("[DEBUG] custom_configuration_parameters init: %v", vm.customConfigurations)
        }
    }

    if vL, ok := d.GetOk("network_interface"); ok {
        networks := make([]networkInterface, len(vL.([]interface{})))
        for i, v := range vL.([]interface{}) {
            network := v.(map[string]interface{})
            networks[i].label = network["label"].(string)
            if v, ok := network["ip_address"].(string); ok && v != "" {
                networks[i].ipv4Address = v
            }
            if v, ok := network["subnet_mask"].(string); ok && v != "" {
                // Convert the deprecated dotted-quad subnet_mask into a prefix length.
                ip := net.ParseIP(v).To4()
                if ip != nil {
                    mask := net.IPv4Mask(ip[0], ip[1], ip[2], ip[3])
                    pl, _ := mask.Size()
                    networks[i].ipv4PrefixLength = pl
                } else {
                    return fmt.Errorf("subnet_mask parameter is invalid.")
                }
            }
            if v, ok := network["ipv4_address"].(string); ok && v != "" {
                networks[i].ipv4Address = v
            }
            if v, ok := network["ipv4_prefix_length"].(int); ok && v != 0 {
                networks[i].ipv4PrefixLength = v
            }
        }
        vm.networkInterfaces = networks
        log.Printf("[DEBUG] network_interface init: %v", networks)
    }

    if vL, ok := d.GetOk("disk"); ok {
        disks := make([]hardDisk, len(vL.([]interface{})))
        for i, v := range vL.([]interface{}) {
            disk := v.(map[string]interface{})
            if i == 0 {
                if v, ok := disk["template"].(string); ok && v != "" {
                    vm.template = v
                } else {
                    if v, ok := disk["size"].(int); ok && v != 0 {
                        disks[i].size = int64(v)
                    } else {
                        return fmt.Errorf("If template argument is not specified, size argument is required.")
                    }
                }
                if v, ok := disk["datastore"].(string); ok && v != "" {
                    vm.datastore = v
                }
            } else {
                if v, ok := disk["size"].(int); ok && v != 0 {
                    disks[i].size = int64(v)
                } else {
                    return fmt.Errorf("Size argument is required.")
                }

            }
            if v, ok := disk["iops"].(int); ok && v != 0 {
                disks[i].iops = int64(v)
            }
            if v, ok := disk["type"].(string); ok && v != "" {
                disks[i].initType = v
            }
        }
        vm.hardDisks = disks
        log.Printf("[DEBUG] disk init: %v", disks)
    }

    if vm.template != "" {
        err := vm.deployVirtualMachine(client)
        if err != nil {
            return err
        }
    } else {
        err := vm.createVirtualMachine(client)
        if err != nil {
            return err
        }
    }

    if _, ok := d.GetOk("network_interface.0.ipv4_address"); !ok {
        if v, ok := d.GetOk("boot_delay"); ok {
            stateConf := &resource.StateChangeConf{
                Pending:    []string{"pending"},
                Target:     []string{"active"},
                Refresh:    waitForNetworkingActive(client, vm.datacenter, vm.Path()),
                Timeout:    600 * time.Second,
                Delay:      time.Duration(v.(int)) * time.Second,
                MinTimeout: 2 * time.Second,
            }

            _, err := stateConf.WaitForState()
            if err != nil {
                return err
            }
        }
    }
    d.SetId(vm.Path())
    log.Printf("[INFO] Created virtual machine: %s", d.Id())

    return resourceVSphereVirtualMachineRead(d, meta)
}

func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{}) error {
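    // Look up the VM by its inventory path (the resource ID) and copy guest
    // networking, summary, and datastore information back into state.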
    log.Printf("[DEBUG] reading virtual machine: %#v", d)
    client := meta.(*govmomi.Client)
    dc, err := getDatacenter(client, d.Get("datacenter").(string))
    if err != nil {
        return err
    }
    finder := find.NewFinder(client.Client, true)
    finder = finder.SetDatacenter(dc)

    vm, err := finder.VirtualMachine(context.TODO(), d.Id())
    if err != nil {
        d.SetId("")
        return nil
    }

    var mvm mo.VirtualMachine

    collector := property.DefaultCollector(client.Client)
    if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"guest", "summary", "datastore"}, &mvm); err != nil {
        return err
    }

    log.Printf("[DEBUG] %#v", dc)
    log.Printf("[DEBUG] %#v", mvm.Summary.Config)
    log.Printf("[DEBUG] %#v", mvm.Guest.Net)

    networkInterfaces := make([]map[string]interface{}, 0)
    for _, v := range mvm.Guest.Net {
        if v.DeviceConfigId >= 0 {
            log.Printf("[DEBUG] %#v", v.Network)
            networkInterface := make(map[string]interface{})
            networkInterface["label"] = v.Network
            for _, ip := range v.IpConfig.IpAddress {
                p := net.ParseIP(ip.IpAddress)
                if p.To4() != nil {
                    log.Printf("[DEBUG] %#v", p.String())
                    log.Printf("[DEBUG] %#v", ip.PrefixLength)
                    networkInterface["ipv4_address"] = p.String()
                    networkInterface["ipv4_prefix_length"] = ip.PrefixLength
                } else if p.To16() != nil {
                    log.Printf("[DEBUG] %#v", p.String())
                    log.Printf("[DEBUG] %#v", ip.PrefixLength)
                    networkInterface["ipv6_address"] = p.String()
                    networkInterface["ipv6_prefix_length"] = ip.PrefixLength
                }
                log.Printf("[DEBUG] networkInterface: %#v", networkInterface)
            }
            log.Printf("[DEBUG] networkInterface: %#v", networkInterface)
            networkInterfaces = append(networkInterfaces, networkInterface)
        }
    }
    log.Printf("[DEBUG] networkInterfaces: %#v", networkInterfaces)
    err = d.Set("network_interface", networkInterfaces)
    if err != nil {
        return fmt.Errorf("Invalid network interfaces to set: %#v", networkInterfaces)
    }

    var rootDatastore string
    for _, v := range mvm.Datastore {
        var md mo.Datastore
        if err := collector.RetrieveOne(context.TODO(), v, []string{"name", "parent"}, &md); err != nil {
            return err
        }
        if md.Parent.Type == "StoragePod" {
            var msp mo.StoragePod
            if err := collector.RetrieveOne(context.TODO(), *md.Parent, []string{"name"}, &msp); err != nil {
                return err
            }
            rootDatastore = msp.Name
            log.Printf("[DEBUG] %#v", msp.Name)
        } else {
            rootDatastore = md.Name
            log.Printf("[DEBUG] %#v", md.Name)
        }
        break
    }

    d.Set("datacenter", dc)
    d.Set("memory", mvm.Summary.Config.MemorySizeMB)
    d.Set("cpu", mvm.Summary.Config.NumCpu)
    d.Set("datastore", rootDatastore)

    return nil
}

func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error {
    client := meta.(*govmomi.Client)
    dc, err := getDatacenter(client, d.Get("datacenter").(string))
    if err != nil {
        return err
    }
    finder := find.NewFinder(client.Client, true)
    finder = finder.SetDatacenter(dc)

    vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string)))
    if err != nil {
        return err
    }

    log.Printf("[INFO] Deleting virtual machine: %s", d.Id())

    task, err := vm.PowerOff(context.TODO())
    if err != nil {
        return err
    }

    err = task.Wait(context.TODO())
    if err != nil {
        return err
    }

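    // The VM is powered off at this point; destroy it and wait for the task to complete.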
    task, err = vm.Destroy(context.TODO())
    if err != nil {
        return err
    }

    err = task.Wait(context.TODO())
    if err != nil {
        return err
    }

    d.SetId("")
    return nil
}

func waitForNetworkingActive(client *govmomi.Client, datacenter, name string) resource.StateRefreshFunc {
    return func() (interface{}, string, error) {
        dc, err := getDatacenter(client, datacenter)
        if err != nil {
            log.Printf("[ERROR] %#v", err)
            return nil, "", err
        }
        finder := find.NewFinder(client.Client, true)
        finder = finder.SetDatacenter(dc)

        vm, err := finder.VirtualMachine(context.TODO(), name)
        if err != nil {
            log.Printf("[ERROR] %#v", err)
            return nil, "", err
        }

        var mvm mo.VirtualMachine
        collector := property.DefaultCollector(client.Client)
        if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"summary"}, &mvm); err != nil {
            log.Printf("[ERROR] %#v", err)
            return nil, "", err
        }

        if mvm.Summary.Guest.IpAddress != "" {
            log.Printf("[DEBUG] IP address with DHCP: %v", mvm.Summary.Guest.IpAddress)
            return mvm.Summary, "active", err
        } else {
            log.Printf("[DEBUG] Waiting for IP address")
            return nil, "pending", err
        }
    }
}

// addHardDisk adds a new Hard Disk to the VirtualMachine.
func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string) error {
    devices, err := vm.Device(context.TODO())
    if err != nil {
        return err
    }
    log.Printf("[DEBUG] vm devices: %#v\n", devices)

    controller, err := devices.FindDiskController("scsi")
    if err != nil {
        return err
    }
    log.Printf("[DEBUG] disk controller: %#v\n", controller)

    disk := devices.CreateDisk(controller, "")
    existing := devices.SelectByBackingInfo(disk.Backing)
    log.Printf("[DEBUG] disk: %#v\n", disk)

    if len(existing) == 0 {
        disk.CapacityInKB = int64(size * 1024 * 1024)
        if iops != 0 {
            disk.StorageIOAllocation = &types.StorageIOAllocationInfo{
                Limit: iops,
            }
        }
        backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)

        if diskType == "eager_zeroed" {
            // eager zeroed thick virtual disk
            backing.ThinProvisioned = types.NewBool(false)
            backing.EagerlyScrub = types.NewBool(true)
        } else if diskType == "thin" {
            // thin provisioned virtual disk
            backing.ThinProvisioned = types.NewBool(true)
        }

        log.Printf("[DEBUG] addHardDisk: %#v\n", disk)
        log.Printf("[DEBUG] addHardDisk: %#v\n", disk.CapacityInKB)

        return vm.AddDevice(context.TODO(), disk)
    } else {
        log.Printf("[DEBUG] addHardDisk: Disk already present.\n")

        return nil
    }
}

// buildNetworkDevice builds VirtualDeviceConfigSpec for Network Device.
func buildNetworkDevice(f *find.Finder, label, adapterType string) (*types.VirtualDeviceConfigSpec, error) {
    network, err := f.Network(context.TODO(), "*"+label)
    if err != nil {
        return nil, err
    }

    backing, err := network.EthernetCardBackingInfo(context.TODO())
    if err != nil {
        return nil, err
    }

    if adapterType == "vmxnet3" {
        return &types.VirtualDeviceConfigSpec{
            Operation: types.VirtualDeviceConfigSpecOperationAdd,
            Device: &types.VirtualVmxnet3{
                VirtualVmxnet: types.VirtualVmxnet{
                    VirtualEthernetCard: types.VirtualEthernetCard{
                        VirtualDevice: types.VirtualDevice{
                            Key:     -1,
                            Backing: backing,
                        },
                        AddressType: string(types.VirtualEthernetCardMacTypeGenerated),
                    },
                },
            },
        }, nil
    } else if adapterType == "e1000" {
        return &types.VirtualDeviceConfigSpec{
            Operation: types.VirtualDeviceConfigSpecOperationAdd,
            Device: &types.VirtualE1000{
                VirtualEthernetCard: types.VirtualEthernetCard{
                    VirtualDevice: types.VirtualDevice{
                        Key:     -1,
                        Backing: backing,
                    },
                    AddressType: string(types.VirtualEthernetCardMacTypeGenerated),
                },
            },
        }, nil
    } else {
        return nil, fmt.Errorf("Invalid network adapter type.")
    }
}

// buildVMRelocateSpec builds VirtualMachineRelocateSpec to set a place for a new VirtualMachine.
func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine, initType string) (types.VirtualMachineRelocateSpec, error) {
    var key int

    devices, err := vm.Device(context.TODO())
    if err != nil {
        return types.VirtualMachineRelocateSpec{}, err
    }
    for _, d := range devices {
        if devices.Type(d) == "disk" {
            key = d.GetVirtualDevice().Key
        }
    }

    isThin := initType == "thin"
    rpr := rp.Reference()
    dsr := ds.Reference()
    return types.VirtualMachineRelocateSpec{
        Datastore: &dsr,
        Pool:      &rpr,
        Disk: []types.VirtualMachineRelocateSpecDiskLocator{
            types.VirtualMachineRelocateSpecDiskLocator{
                Datastore: dsr,
                DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{
                    DiskMode:        "persistent",
                    ThinProvisioned: types.NewBool(isThin),
                    EagerlyScrub:    types.NewBool(!isThin),
                },
                DiskId: key,
            },
        },
    }, nil
}

// getDatastoreObject gets datastore object.
func getDatastoreObject(client *govmomi.Client, f *object.DatacenterFolders, name string) (types.ManagedObjectReference, error) {
    s := object.NewSearchIndex(client.Client)
    ref, err := s.FindChild(context.TODO(), f.DatastoreFolder, name)
    if err != nil {
        return types.ManagedObjectReference{}, err
    }
    if ref == nil {
        return types.ManagedObjectReference{}, fmt.Errorf("Datastore '%s' not found.", name)
    }
    log.Printf("[DEBUG] getDatastoreObject: reference: %#v", ref)
    return ref.Reference(), nil
}

// buildStoragePlacementSpecCreate builds StoragePlacementSpec for create action.
func buildStoragePlacementSpecCreate(f *object.DatacenterFolders, rp *object.ResourcePool, storagePod object.StoragePod, configSpec types.VirtualMachineConfigSpec) types.StoragePlacementSpec {
    vmfr := f.VmFolder.Reference()
    rpr := rp.Reference()
    spr := storagePod.Reference()

    sps := types.StoragePlacementSpec{
        Type:       "create",
        ConfigSpec: &configSpec,
        PodSelectionSpec: types.StorageDrsPodSelectionSpec{
            StoragePod: &spr,
        },
        Folder:       &vmfr,
        ResourcePool: &rpr,
    }
    log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)
    return sps
}

// buildStoragePlacementSpecClone builds StoragePlacementSpec for clone action.
func buildStoragePlacementSpecClone(c *govmomi.Client, f *object.DatacenterFolders, vm *object.VirtualMachine, rp *object.ResourcePool, storagePod object.StoragePod) types.StoragePlacementSpec {
    vmr := vm.Reference()
    vmfr := f.VmFolder.Reference()
    rpr := rp.Reference()
    spr := storagePod.Reference()

    var o mo.VirtualMachine
    err := vm.Properties(context.TODO(), vmr, []string{"datastore"}, &o)
    if err != nil {
        return types.StoragePlacementSpec{}
    }
    ds := object.NewDatastore(c.Client, o.Datastore[0])
    log.Printf("[DEBUG] findDatastore: datastore: %#v\n", ds)

    devices, err := vm.Device(context.TODO())
    if err != nil {
        return types.StoragePlacementSpec{}
    }

    var key int
    for _, d := range devices.SelectByType((*types.VirtualDisk)(nil)) {
        key = d.GetVirtualDevice().Key
        log.Printf("[DEBUG] findDatastore: virtual devices: %#v\n", d.GetVirtualDevice())
    }

    sps := types.StoragePlacementSpec{
        Type: "clone",
        Vm:   &vmr,
        PodSelectionSpec: types.StorageDrsPodSelectionSpec{
            StoragePod: &spr,
        },
        CloneSpec: &types.VirtualMachineCloneSpec{
            Location: types.VirtualMachineRelocateSpec{
                Disk: []types.VirtualMachineRelocateSpecDiskLocator{
                    types.VirtualMachineRelocateSpecDiskLocator{
                        Datastore:       ds.Reference(),
                        DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{},
                        DiskId:          key,
                    },
                },
                Pool: &rpr,
            },
            PowerOn:  false,
            Template: false,
        },
        CloneName: "dummy",
        Folder:    &vmfr,
    }
    return sps
}

// findDatastore finds Datastore object.
func findDatastore(c *govmomi.Client, sps types.StoragePlacementSpec) (*object.Datastore, error) {
    var datastore *object.Datastore
    log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)

    srm := object.NewStorageResourceManager(c.Client)
    rds, err := srm.RecommendDatastores(context.TODO(), sps)
    if err != nil {
        return nil, err
    }
    log.Printf("[DEBUG] findDatastore: recommendDatastores: %#v\n", rds)

    spa := rds.Recommendations[0].Action[0].(*types.StoragePlacementAction)
    datastore = object.NewDatastore(c.Client, spa.Destination)
    log.Printf("[DEBUG] findDatastore: datastore: %#v", datastore)

    return datastore, nil
}

// createVirtualMachine creates a new VirtualMachine.
func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
    dc, err := getDatacenter(c, vm.datacenter)

    if err != nil {
        return err
    }
    finder := find.NewFinder(c.Client, true)
    finder = finder.SetDatacenter(dc)

    var resourcePool *object.ResourcePool
    if vm.resourcePool == "" {
        if vm.cluster == "" {
            resourcePool, err = finder.DefaultResourcePool(context.TODO())
            if err != nil {
                return err
            }
        } else {
            resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
            if err != nil {
                return err
            }
        }
    } else {
        resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
        if err != nil {
            return err
        }
    }
    log.Printf("[DEBUG] resource pool: %#v", resourcePool)

    dcFolders, err := dc.Folders(context.TODO())
    if err != nil {
        return err
    }

    log.Printf("[DEBUG] folder: %#v", vm.folder)
    folder := dcFolders.VmFolder
    if len(vm.folder) > 0 {
        si := object.NewSearchIndex(c.Client)
        folderRef, err := si.FindByInventoryPath(
            context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder))
        if err != nil {
            return fmt.Errorf("Error reading folder %s: %s", vm.folder, err)
        } else if folderRef == nil {
            return fmt.Errorf("Cannot find folder %s", vm.folder)
        } else {
            folder = folderRef.(*object.Folder)
        }
    }

    // network
    networkDevices := []types.BaseVirtualDeviceConfigSpec{}
    for _, network := range vm.networkInterfaces {
        // network device
        nd, err := buildNetworkDevice(finder, network.label, "e1000")
        if err != nil {
            return err
        }
        networkDevices = append(networkDevices, nd)
    }

    // make config spec
    configSpec := types.VirtualMachineConfigSpec{
        GuestId:           "otherLinux64Guest",
        Name:              vm.name,
        NumCPUs:           vm.vcpu,
        NumCoresPerSocket: 1,
        MemoryMB:          vm.memoryMb,
        DeviceChange:      networkDevices,
    }
    log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

    // make ExtraConfig
    log.Printf("[DEBUG] virtual machine Extra Config spec start")
    if len(vm.customConfigurations) > 0 {
        var ov []types.BaseOptionValue
        for k, v := range vm.customConfigurations {
            key := k
            value := v
            o := types.OptionValue{
                Key:   key,
                Value: &value,
            }
            log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k, v)
            ov = append(ov, &o)
        }
        configSpec.ExtraConfig = ov
        log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
    }

    var datastore *object.Datastore
    if vm.datastore == "" {
        datastore, err = finder.DefaultDatastore(context.TODO())
        if err != nil {
            return err
        }
    } else {
        datastore, err = finder.Datastore(context.TODO(), vm.datastore)
        if err != nil {
            // TODO: datastore cluster support in govmomi finder function
            d, err := getDatastoreObject(c, dcFolders, vm.datastore)
            if err != nil {
                return err
            }

            if d.Type == "StoragePod" {
                sp := object.StoragePod{
                    Folder: object.NewFolder(c.Client, d),
                }
                sps := buildStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec)
                datastore, err = findDatastore(c, sps)
                if err != nil {
                    return err
                }
            } else {
                datastore = object.NewDatastore(c.Client, d)
            }
        }
    }

    log.Printf("[DEBUG] datastore: %#v", datastore)

    var mds mo.Datastore
    if err = datastore.Properties(context.TODO(), datastore.Reference(),
        []string{"name"}, &mds); err != nil {
        return err
    }
    log.Printf("[DEBUG] datastore: %#v", mds.Name)
    scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi")
    if err != nil {
        log.Printf("[ERROR] %s", err)
    }

    configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{
        Operation: types.VirtualDeviceConfigSpecOperationAdd,
        Device:    scsi,
    })
    configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)}

    task, err := folder.CreateVM(context.TODO(), configSpec, resourcePool, nil)
    if err != nil {
        log.Printf("[ERROR] %s", err)
    }

    err = task.Wait(context.TODO())
    if err != nil {
        log.Printf("[ERROR] %s", err)
    }

    newVM, err := finder.VirtualMachine(context.TODO(), vm.Path())
    if err != nil {
        return err
    }
    log.Printf("[DEBUG] new vm: %v", newVM)

    log.Printf("[DEBUG] add hard disk: %v", vm.hardDisks)
    for _, hd := range vm.hardDisks {
        log.Printf("[DEBUG] add hard disk: %v", hd.size)
        log.Printf("[DEBUG] add hard disk: %v", hd.iops)
        err = addHardDisk(newVM, hd.size, hd.iops, "thin")
        if err != nil {
            return err
        }
    }
    return nil
}

// deployVirtualMachine deploys a new VirtualMachine.
func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
    dc, err := getDatacenter(c, vm.datacenter)
    if err != nil {
        return err
    }
    finder := find.NewFinder(c.Client, true)
    finder = finder.SetDatacenter(dc)

    template, err := finder.VirtualMachine(context.TODO(), vm.template)
    if err != nil {
        return err
    }
    log.Printf("[DEBUG] template: %#v", template)

    var resourcePool *object.ResourcePool
    if vm.resourcePool == "" {
        if vm.cluster == "" {
            resourcePool, err = finder.DefaultResourcePool(context.TODO())
            if err != nil {
                return err
            }
        } else {
            resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
            if err != nil {
                return err
            }
        }
    } else {
        resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
        if err != nil {
            return err
        }
    }
    log.Printf("[DEBUG] resource pool: %#v", resourcePool)

    dcFolders, err := dc.Folders(context.TODO())
    if err != nil {
        return err
    }

    log.Printf("[DEBUG] folder: %#v", vm.folder)
    folder := dcFolders.VmFolder
    if len(vm.folder) > 0 {
        si := object.NewSearchIndex(c.Client)
        folderRef, err := si.FindByInventoryPath(
            context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder))
        if err != nil {
            return fmt.Errorf("Error reading folder %s: %s", vm.folder, err)
        } else if folderRef == nil {
            return fmt.Errorf("Cannot find folder %s", vm.folder)
        } else {
            folder = folderRef.(*object.Folder)
        }
    }

    var datastore *object.Datastore
    if vm.datastore == "" {
        datastore, err = finder.DefaultDatastore(context.TODO())
        if err != nil {
            return err
        }
    } else {
        datastore, err = finder.Datastore(context.TODO(), vm.datastore)
        if err != nil {
            // TODO: datastore cluster support in govmomi finder function
            d, err := getDatastoreObject(c, dcFolders, vm.datastore)
            if err != nil {
                return err
            }

            if d.Type == "StoragePod" {
                sp := object.StoragePod{
                    Folder: object.NewFolder(c.Client, d),
                }
                sps := buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp)

                datastore, err = findDatastore(c, sps)
                if err != nil {
                    return err
                }
            } else {
                datastore = object.NewDatastore(c.Client, d)
            }
        }
    }
    log.Printf("[DEBUG] datastore: %#v", datastore)

    relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template, vm.hardDisks[0].initType)
    if err != nil {
        return err
    }

    log.Printf("[DEBUG] relocate spec: %v", relocateSpec)

    // network
    networkDevices := []types.BaseVirtualDeviceConfigSpec{}
    networkConfigs := []types.CustomizationAdapterMapping{}
    for _, network := range vm.networkInterfaces {
        // network device
        nd, err := buildNetworkDevice(finder, network.label, "vmxnet3")
        if err != nil {
            return err
        }
        networkDevices = append(networkDevices, nd)

        // TODO: IPv6 support
        var ipSetting types.CustomizationIPSettings
        if network.ipv4Address == "" {
            ipSetting = types.CustomizationIPSettings{
                Ip: &types.CustomizationDhcpIpGenerator{},
            }
        } else {
            if network.ipv4PrefixLength == 0 {
                return fmt.Errorf("Error: ipv4_prefix_length argument is empty.")
            }
            m := net.CIDRMask(network.ipv4PrefixLength, 32)
            sm := net.IPv4(m[0], m[1], m[2], m[3])
            subnetMask := sm.String()
            log.Printf("[DEBUG] gateway: %v", vm.gateway)
            log.Printf("[DEBUG] ipv4 address: %v", network.ipv4Address)
            log.Printf("[DEBUG] ipv4 prefix length: %v", network.ipv4PrefixLength)
            log.Printf("[DEBUG] ipv4 subnet mask: %v", subnetMask)
            ipSetting = types.CustomizationIPSettings{
                Gateway: []string{
                    vm.gateway,
                },
                Ip: &types.CustomizationFixedIp{
                    IpAddress: network.ipv4Address,
                },
                SubnetMask: subnetMask,
            }
        }

        // network config
        config := types.CustomizationAdapterMapping{
            Adapter: ipSetting,
        }
        networkConfigs = append(networkConfigs, config)
    }
    log.Printf("[DEBUG] network configs: %v", networkConfigs[0].Adapter)

    // make config spec
    configSpec := types.VirtualMachineConfigSpec{
        NumCPUs:           vm.vcpu,
        NumCoresPerSocket: 1,
        MemoryMB:          vm.memoryMb,
    }
    log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

    log.Printf("[DEBUG] starting extra custom config spec: %v", vm.customConfigurations)

    // make ExtraConfig
    if len(vm.customConfigurations) > 0 {
        var ov []types.BaseOptionValue
        for k, v := range vm.customConfigurations {
            key := k
            value := v
            o := types.OptionValue{
                Key:   key,
                Value: &value,
            }
            ov = append(ov, &o)
        }
        configSpec.ExtraConfig = ov
        log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
    }

    // create CustomizationSpec
    customSpec := types.CustomizationSpec{
        Identity: &types.CustomizationLinuxPrep{
            HostName: &types.CustomizationFixedName{
                Name: strings.Split(vm.name, ".")[0],
            },
            Domain:     vm.domain,
            TimeZone:   vm.timeZone,
            HwClockUTC: types.NewBool(true),
        },
        GlobalIPSettings: types.CustomizationGlobalIPSettings{
            DnsSuffixList: vm.dnsSuffixes,
            DnsServerList: vm.dnsServers,
        },
        NicSettingMap: networkConfigs,
    }
    log.Printf("[DEBUG] custom spec: %v", customSpec)

    // make vm clone spec
    cloneSpec := types.VirtualMachineCloneSpec{
        Location: relocateSpec,
        Template: false,
        Config:   &configSpec,
        PowerOn:  false,
    }
    log.Printf("[DEBUG] clone spec: %v", cloneSpec)

    task, err := template.Clone(context.TODO(), folder, vm.name, cloneSpec)
    if err != nil {
        return err
    }

    _, err = task.WaitForResult(context.TODO(), nil)
    if err != nil {
        return err
    }

    newVM, err := finder.VirtualMachine(context.TODO(), vm.Path())
    if err != nil {
        return err
    }
    log.Printf("[DEBUG] new vm: %v", newVM)

    devices, err := newVM.Device(context.TODO())
    if err != nil {
        log.Printf("[DEBUG] Template devices can't be found")
        return err
    }

    for _, dvc := range devices {
        // Issue 3559/3560: Delete all ethernet devices to add the correct ones later
        if devices.Type(dvc) == "ethernet" {
            err := newVM.RemoveDevice(context.TODO(), dvc)
            if err != nil {
                return err
            }
        }
    }
    // Add Network devices
    for _, dvc := range networkDevices {
        err := newVM.AddDevice(
            context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device)
        if err != nil {
            return err
        }
    }

    taskb, err := newVM.Customize(context.TODO(), customSpec)
    if err != nil {
        return err
    }

    _, err = taskb.WaitForResult(context.TODO(), nil)
    if err != nil {
        return err
    }
    log.Printf("[DEBUG] VM customization finished")

    for i := 1; i < len(vm.hardDisks); i++ {
        err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType)
        if err != nil {
            return err
        }
    }
    log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

    newVM.PowerOn(context.TODO())

    ip, err := newVM.WaitForIP(context.TODO())
    if err != nil {
        return err
    }
    log.Printf("[DEBUG] ip address: %v", ip)

    return nil
}