github.com/bradfeehan/terraform@v0.7.0-rc3.0.20170529055808-34b45c5ad841/builtin/providers/vsphere/resource_vsphere_virtual_machine.go

package vsphere

import (
	"fmt"
	"log"
	"net"
	"strconv"
	"strings"

	"github.com/hashicorp/terraform/helper/schema"
	"github.com/vmware/govmomi"
	"github.com/vmware/govmomi/find"
	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/property"
	"github.com/vmware/govmomi/vim25/mo"
	"github.com/vmware/govmomi/vim25/types"
	"golang.org/x/net/context"
)

var DefaultDNSSuffixes = []string{
	"vsphere.local",
}

var DefaultDNSServers = []string{
	"8.8.8.8",
	"8.8.4.4",
}

var DiskControllerTypes = []string{
	"scsi",
	"scsi-lsi-parallel",
	"scsi-buslogic",
	"scsi-paravirtual",
	"scsi-lsi-sas",
	"ide",
}

type networkInterface struct {
	deviceName       string
	label            string
	ipv4Address      string
	ipv4PrefixLength int
	ipv4Gateway      string
	ipv6Address      string
	ipv6PrefixLength int
	ipv6Gateway      string
	adapterType      string // TODO: Make "adapter_type" argument
	macAddress       string
}

type hardDisk struct {
	name       string
	size       int64
	iops       int64
	initType   string
	vmdkPath   string
	controller string
	bootable   bool
}

// windowsOptConfig holds the additional options vSphere can apply to clones of Windows machines.
type windowsOptConfig struct {
	productKey         string
	adminPassword      string
	domainUser         string
	domain             string
	domainUserPassword string
}

type cdrom struct {
	datastore string
	path      string
}

type memoryAllocation struct {
	reservation int64
}

type virtualMachine struct {
	name                  string
	folder                string
	datacenter            string
	cluster               string
	resourcePool          string
	datastore             string
	vcpu                  int32
	memoryMb              int64
	memoryAllocation      memoryAllocation
	template              string
	networkInterfaces     []networkInterface
	hardDisks             []hardDisk
	cdroms                []cdrom
	domain                string
	timeZone              string
	dnsSuffixes           []string
	dnsServers            []string
	hasBootableVmdk       bool
	linkedClone           bool
	skipCustomization     bool
	enableDiskUUID        bool
	moid                  string
	windowsOptionalConfig windowsOptConfig
	customConfigurations  map[string](types.AnyType)
}

func (v virtualMachine) Path() string {
	return vmPath(v.folder, v.name)
}

func vmPath(folder string, name string) string {
	var path string
	if len(folder) > 0 {
		path += folder + "/"
	}
	return path + name
}

func resourceVSphereVirtualMachine() *schema.Resource {
	return &schema.Resource{
		Create: resourceVSphereVirtualMachineCreate,
		Read:   resourceVSphereVirtualMachineRead,
		Update: resourceVSphereVirtualMachineUpdate,
		Delete: resourceVSphereVirtualMachineDelete,

		SchemaVersion: 1,
		MigrateState:  resourceVSphereVirtualMachineMigrateState,

		Schema: map[string]*schema.Schema{
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"folder": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"vcpu": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
			},

			"memory": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
			},

			"memory_reservation": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				Default:  0,
				ForceNew: true,
			},

			"datacenter": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

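			// cluster and resource_pool control placement; changing either forces a new VM.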
"cluster": &schema.Schema{ 165 Type: schema.TypeString, 166 Optional: true, 167 ForceNew: true, 168 }, 169 170 "resource_pool": &schema.Schema{ 171 Type: schema.TypeString, 172 Optional: true, 173 ForceNew: true, 174 }, 175 176 "linked_clone": &schema.Schema{ 177 Type: schema.TypeBool, 178 Optional: true, 179 Default: false, 180 ForceNew: true, 181 }, 182 "gateway": &schema.Schema{ 183 Type: schema.TypeString, 184 Optional: true, 185 ForceNew: true, 186 Deprecated: "Please use network_interface.ipv4_gateway", 187 }, 188 189 "domain": &schema.Schema{ 190 Type: schema.TypeString, 191 Optional: true, 192 ForceNew: true, 193 Default: "vsphere.local", 194 }, 195 196 "time_zone": &schema.Schema{ 197 Type: schema.TypeString, 198 Optional: true, 199 ForceNew: true, 200 Default: "Etc/UTC", 201 }, 202 203 "dns_suffixes": &schema.Schema{ 204 Type: schema.TypeList, 205 Optional: true, 206 Elem: &schema.Schema{Type: schema.TypeString}, 207 ForceNew: true, 208 }, 209 210 "dns_servers": &schema.Schema{ 211 Type: schema.TypeList, 212 Optional: true, 213 Elem: &schema.Schema{Type: schema.TypeString}, 214 ForceNew: true, 215 }, 216 217 "skip_customization": &schema.Schema{ 218 Type: schema.TypeBool, 219 Optional: true, 220 ForceNew: true, 221 Default: false, 222 }, 223 224 "enable_disk_uuid": &schema.Schema{ 225 Type: schema.TypeBool, 226 Optional: true, 227 ForceNew: true, 228 Default: false, 229 }, 230 231 "uuid": &schema.Schema{ 232 Type: schema.TypeString, 233 Computed: true, 234 }, 235 236 "moid": &schema.Schema{ 237 Type: schema.TypeString, 238 Computed: true, 239 }, 240 241 "custom_configuration_parameters": &schema.Schema{ 242 Type: schema.TypeMap, 243 Optional: true, 244 ForceNew: true, 245 }, 246 247 "windows_opt_config": &schema.Schema{ 248 Type: schema.TypeList, 249 Optional: true, 250 ForceNew: true, 251 Elem: &schema.Resource{ 252 Schema: map[string]*schema.Schema{ 253 "product_key": &schema.Schema{ 254 Type: schema.TypeString, 255 Optional: true, 256 ForceNew: true, 257 }, 258 259 "admin_password": &schema.Schema{ 260 Type: schema.TypeString, 261 Optional: true, 262 ForceNew: true, 263 }, 264 265 "domain_user": &schema.Schema{ 266 Type: schema.TypeString, 267 Optional: true, 268 ForceNew: true, 269 }, 270 271 "domain": &schema.Schema{ 272 Type: schema.TypeString, 273 Optional: true, 274 ForceNew: true, 275 }, 276 277 "domain_user_password": &schema.Schema{ 278 Type: schema.TypeString, 279 Optional: true, 280 ForceNew: true, 281 }, 282 }, 283 }, 284 }, 285 286 "network_interface": &schema.Schema{ 287 Type: schema.TypeList, 288 Required: true, 289 ForceNew: true, 290 Elem: &schema.Resource{ 291 Schema: map[string]*schema.Schema{ 292 "label": &schema.Schema{ 293 Type: schema.TypeString, 294 Required: true, 295 ForceNew: true, 296 }, 297 298 "ip_address": &schema.Schema{ 299 Type: schema.TypeString, 300 Optional: true, 301 Computed: true, 302 Deprecated: "Please use ipv4_address", 303 }, 304 305 "subnet_mask": &schema.Schema{ 306 Type: schema.TypeString, 307 Optional: true, 308 Computed: true, 309 Deprecated: "Please use ipv4_prefix_length", 310 }, 311 312 "ipv4_address": &schema.Schema{ 313 Type: schema.TypeString, 314 Optional: true, 315 Computed: true, 316 }, 317 318 "ipv4_prefix_length": &schema.Schema{ 319 Type: schema.TypeInt, 320 Optional: true, 321 Computed: true, 322 }, 323 324 "ipv4_gateway": &schema.Schema{ 325 Type: schema.TypeString, 326 Optional: true, 327 Computed: true, 328 }, 329 330 "ipv6_address": &schema.Schema{ 331 Type: schema.TypeString, 332 Optional: true, 333 Computed: 
true, 334 }, 335 336 "ipv6_prefix_length": &schema.Schema{ 337 Type: schema.TypeInt, 338 Optional: true, 339 Computed: true, 340 }, 341 342 "ipv6_gateway": &schema.Schema{ 343 Type: schema.TypeString, 344 Optional: true, 345 Computed: true, 346 }, 347 348 "adapter_type": &schema.Schema{ 349 Type: schema.TypeString, 350 Optional: true, 351 ForceNew: true, 352 }, 353 354 "mac_address": &schema.Schema{ 355 Type: schema.TypeString, 356 Optional: true, 357 Computed: true, 358 }, 359 }, 360 }, 361 }, 362 363 "disk": &schema.Schema{ 364 Type: schema.TypeSet, 365 Required: true, 366 Elem: &schema.Resource{ 367 Schema: map[string]*schema.Schema{ 368 "uuid": &schema.Schema{ 369 Type: schema.TypeString, 370 Computed: true, 371 }, 372 373 "key": &schema.Schema{ 374 Type: schema.TypeInt, 375 Computed: true, 376 }, 377 378 "template": &schema.Schema{ 379 Type: schema.TypeString, 380 Optional: true, 381 }, 382 383 "type": &schema.Schema{ 384 Type: schema.TypeString, 385 Optional: true, 386 Default: "eager_zeroed", 387 ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { 388 value := v.(string) 389 if value != "thin" && value != "eager_zeroed" && value != "lazy" { 390 errors = append(errors, fmt.Errorf( 391 "only 'thin', 'eager_zeroed', and 'lazy' are supported values for 'type'")) 392 } 393 return 394 }, 395 }, 396 397 "datastore": &schema.Schema{ 398 Type: schema.TypeString, 399 Optional: true, 400 }, 401 402 "size": &schema.Schema{ 403 Type: schema.TypeInt, 404 Optional: true, 405 }, 406 407 "name": &schema.Schema{ 408 Type: schema.TypeString, 409 Optional: true, 410 }, 411 412 "iops": &schema.Schema{ 413 Type: schema.TypeInt, 414 Optional: true, 415 }, 416 417 "vmdk": &schema.Schema{ 418 // TODO: Add ValidateFunc to confirm path exists 419 Type: schema.TypeString, 420 Optional: true, 421 }, 422 423 "bootable": &schema.Schema{ 424 Type: schema.TypeBool, 425 Optional: true, 426 }, 427 428 "keep_on_remove": &schema.Schema{ 429 Type: schema.TypeBool, 430 Optional: true, 431 }, 432 433 "controller_type": &schema.Schema{ 434 Type: schema.TypeString, 435 Optional: true, 436 Default: "scsi", 437 ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { 438 value := v.(string) 439 found := false 440 for _, t := range DiskControllerTypes { 441 if t == value { 442 found = true 443 } 444 } 445 if !found { 446 errors = append(errors, fmt.Errorf( 447 "Supported values for 'controller_type' are %v", strings.Join(DiskControllerTypes, ", "))) 448 } 449 return 450 }, 451 }, 452 }, 453 }, 454 }, 455 456 "detach_unknown_disks_on_delete": &schema.Schema{ 457 Type: schema.TypeBool, 458 Optional: true, 459 Default: false, 460 }, 461 462 "cdrom": &schema.Schema{ 463 Type: schema.TypeList, 464 Optional: true, 465 ForceNew: true, 466 Elem: &schema.Resource{ 467 Schema: map[string]*schema.Schema{ 468 "datastore": &schema.Schema{ 469 Type: schema.TypeString, 470 Required: true, 471 ForceNew: true, 472 }, 473 474 "path": &schema.Schema{ 475 Type: schema.TypeString, 476 Required: true, 477 ForceNew: true, 478 }, 479 }, 480 }, 481 }, 482 }, 483 } 484 } 485 486 func resourceVSphereVirtualMachineUpdate(d *schema.ResourceData, meta interface{}) error { 487 // flag if changes have to be applied 488 hasChanges := false 489 // flag if changes have to be done when powered off 490 rebootRequired := false 491 492 // make config spec 493 configSpec := types.VirtualMachineConfigSpec{} 494 495 if d.HasChange("vcpu") { 496 configSpec.NumCPUs = int32(d.Get("vcpu").(int)) 497 hasChanges = true 498 
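		// CPU and memory changes are applied with the VM powered off; flag a power-cycle.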
		rebootRequired = true
	}

	if d.HasChange("memory") {
		configSpec.MemoryMB = int64(d.Get("memory").(int))
		hasChanges = true
		rebootRequired = true
	}

	client := meta.(*govmomi.Client)
	dc, err := getDatacenter(client, d.Get("datacenter").(string))
	if err != nil {
		return err
	}
	finder := find.NewFinder(client.Client, true)
	finder = finder.SetDatacenter(dc)

	vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string)))
	if err != nil {
		return err
	}

	if d.HasChange("disk") {
		hasChanges = true
		oldDisks, newDisks := d.GetChange("disk")
		oldDiskSet := oldDisks.(*schema.Set)
		newDiskSet := newDisks.(*schema.Set)

		addedDisks := newDiskSet.Difference(oldDiskSet)
		removedDisks := oldDiskSet.Difference(newDiskSet)

		// Removed disks
		for _, diskRaw := range removedDisks.List() {
			if disk, ok := diskRaw.(map[string]interface{}); ok {
				devices, err := vm.Device(context.TODO())
				if err != nil {
					return fmt.Errorf("[ERROR] Update Remove Disk - Could not get virtual device list: %v", err)
				}
				virtualDisk := devices.FindByKey(int32(disk["key"].(int)))

				keep := false
				if v, ok := disk["keep_on_remove"].(bool); ok {
					keep = v
				}

				err = vm.RemoveDevice(context.TODO(), keep, virtualDisk)
				if err != nil {
					return fmt.Errorf("[ERROR] Update Remove Disk - Error removing disk: %v", err)
				}
			}
		}
		// Added disks
		for _, diskRaw := range addedDisks.List() {
			if disk, ok := diskRaw.(map[string]interface{}); ok {

				var datastore *object.Datastore
				if disk["datastore"] == "" {
					datastore, err = finder.DefaultDatastore(context.TODO())
					if err != nil {
						return fmt.Errorf("[ERROR] Update Add Disk - Error finding datastore: %v", err)
					}
				} else {
					datastore, err = finder.Datastore(context.TODO(), disk["datastore"].(string))
					if err != nil {
						log.Printf("[ERROR] Couldn't find datastore %v. %s", disk["datastore"].(string), err)
						return err
					}
				}

				var size int64
				if disk["size"] == 0 {
					size = 0
				} else {
					size = int64(disk["size"].(int))
				}
				iops := int64(disk["iops"].(int))
				controller_type := disk["controller_type"].(string)

				var mo mo.VirtualMachine
				vm.Properties(context.TODO(), vm.Reference(), []string{"summary", "config"}, &mo)

				var diskPath string
				switch {
				case disk["vmdk"] != "":
					diskPath = disk["vmdk"].(string)
				case disk["name"] != "":
					snapshotFullDir := mo.Config.Files.SnapshotDirectory
					split := strings.Split(snapshotFullDir, " ")
					if len(split) != 2 {
						return fmt.Errorf("[ERROR] createVirtualMachine - failed to split snapshot directory: %v", snapshotFullDir)
					}
					vmWorkingPath := split[1]
					diskPath = vmWorkingPath + disk["name"].(string)
				default:
					return fmt.Errorf("[ERROR] resourceVSphereVirtualMachineUpdate - Neither vmdk path nor vmdk name was given")
				}

				var initType string
				if disk["type"] != "" {
					initType = disk["type"].(string)
				} else {
					initType = "thin"
				}

				log.Printf("[INFO] Attaching disk: %v", diskPath)
				err = addHardDisk(vm, size, iops, initType, datastore, diskPath, controller_type)
				if err != nil {
					log.Printf("[ERROR] Add Hard Disk Failed: %v", err)
					return err
				}
			}
			if err != nil {
				return err
			}
		}
	}

	// do nothing if there are no changes
	if !hasChanges {
		return nil
	}

	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

	if rebootRequired {
		log.Printf("[INFO] Shutting down virtual machine: %s", d.Id())

		task, err := vm.PowerOff(context.TODO())
		if err != nil {
			return err
		}

		err = task.Wait(context.TODO())
		if err != nil {
			return err
		}
	}

	log.Printf("[INFO] Reconfiguring virtual machine: %s", d.Id())

	task, err := vm.Reconfigure(context.TODO(), configSpec)
	if err != nil {
		log.Printf("[ERROR] %s", err)
	}

	err = task.Wait(context.TODO())
	if err != nil {
		log.Printf("[ERROR] %s", err)
	}

	if rebootRequired {
		task, err = vm.PowerOn(context.TODO())
		if err != nil {
			return err
		}

		err = task.Wait(context.TODO())
		if err != nil {
			log.Printf("[ERROR] %s", err)
		}
	}

	return resourceVSphereVirtualMachineRead(d, meta)
}

func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*govmomi.Client)

	vm := virtualMachine{
		name:     d.Get("name").(string),
		vcpu:     int32(d.Get("vcpu").(int)),
		memoryMb: int64(d.Get("memory").(int)),
		memoryAllocation: memoryAllocation{
			reservation: int64(d.Get("memory_reservation").(int)),
		},
	}

	if v, ok := d.GetOk("folder"); ok {
		vm.folder = v.(string)
	}

	if v, ok := d.GetOk("datacenter"); ok {
		vm.datacenter = v.(string)
	}

	if v, ok := d.GetOk("cluster"); ok {
		vm.cluster = v.(string)
	}

	if v, ok := d.GetOk("resource_pool"); ok {
		vm.resourcePool = v.(string)
	}

	if v, ok := d.GetOk("domain"); ok {
		vm.domain = v.(string)
	}

	if v, ok := d.GetOk("time_zone"); ok {
		vm.timeZone = v.(string)
	}

	if v, ok := d.GetOk("linked_clone"); ok {
		vm.linkedClone = v.(bool)
	}

	if v, ok := d.GetOk("skip_customization"); ok {
		vm.skipCustomization = v.(bool)
	}

	if v, ok := d.GetOk("enable_disk_uuid"); ok {
		vm.enableDiskUUID = v.(bool)
	}

	if raw, ok := d.GetOk("dns_suffixes"); ok {
		for _, v := range raw.([]interface{}) {
			vm.dnsSuffixes = append(vm.dnsSuffixes, v.(string))
		}
	} else {
		vm.dnsSuffixes = DefaultDNSSuffixes
	}

	if raw, ok := d.GetOk("dns_servers"); ok {
		for _, v := range raw.([]interface{}) {
			vm.dnsServers = append(vm.dnsServers, v.(string))
		}
	} else {
		vm.dnsServers = DefaultDNSServers
	}

	if vL, ok := d.GetOk("custom_configuration_parameters"); ok {
		if custom_configs, ok := vL.(map[string]interface{}); ok {
			custom := make(map[string]types.AnyType)
			for k, v := range custom_configs {
				custom[k] = v
			}
			vm.customConfigurations = custom
			log.Printf("[DEBUG] custom_configuration_parameters init: %v", vm.customConfigurations)
		}
	}

	if vL, ok := d.GetOk("network_interface"); ok {
		networks := make([]networkInterface, len(vL.([]interface{})))
		for i, v := range vL.([]interface{}) {
			network := v.(map[string]interface{})
			networks[i].label = network["label"].(string)
			if v, ok := network["ip_address"].(string); ok && v != "" {
				networks[i].ipv4Address = v
			}
			if v, ok := d.GetOk("gateway"); ok {
				networks[i].ipv4Gateway = v.(string)
			}
			if v, ok := network["subnet_mask"].(string); ok && v != "" {
				ip := net.ParseIP(v).To4()
				if ip != nil {
					mask := net.IPv4Mask(ip[0], ip[1], ip[2], ip[3])
					pl, _ := mask.Size()
					networks[i].ipv4PrefixLength = pl
				} else {
					return fmt.Errorf("subnet_mask parameter is invalid.")
				}
			}
			if v, ok := network["ipv4_address"].(string); ok && v != "" {
				networks[i].ipv4Address = v
			}
			if v, ok := network["ipv4_prefix_length"].(int); ok && v != 0 {
				networks[i].ipv4PrefixLength = v
			}
			if v, ok := network["ipv4_gateway"].(string); ok && v != "" {
				networks[i].ipv4Gateway = v
			}
			if v, ok := network["ipv6_address"].(string); ok && v != "" {
				networks[i].ipv6Address = v
			}
			if v, ok := network["ipv6_prefix_length"].(int); ok && v != 0 {
				networks[i].ipv6PrefixLength = v
			}
			if v, ok := network["ipv6_gateway"].(string); ok && v != "" {
				networks[i].ipv6Gateway = v
			}
			if v, ok := network["mac_address"].(string); ok && v != "" {
				networks[i].macAddress = v
			}
		}
		vm.networkInterfaces = networks
		log.Printf("[DEBUG] network_interface init: %v", networks)
	}

	if vL, ok := d.GetOk("windows_opt_config"); ok {
		var winOpt windowsOptConfig
		custom_configs := (vL.([]interface{}))[0].(map[string]interface{})
		if v, ok := custom_configs["admin_password"].(string); ok && v != "" {
			winOpt.adminPassword = v
		}
		if v, ok := custom_configs["domain"].(string); ok && v != "" {
			winOpt.domain = v
		}
		if v, ok := custom_configs["domain_user"].(string); ok && v != "" {
			winOpt.domainUser = v
		}
		if v, ok := custom_configs["product_key"].(string); ok && v != "" {
			winOpt.productKey = v
		}
		if v, ok := custom_configs["domain_user_password"].(string); ok && v != "" {
			winOpt.domainUserPassword = v
		}
		vm.windowsOptionalConfig = winOpt
		log.Printf("[DEBUG] windows config init: %v", winOpt)
	}

	if vL, ok := d.GetOk("disk"); ok {
		if diskSet, ok := vL.(*schema.Set); ok {

			disks := []hardDisk{}
			for _, value := range diskSet.List() {
				disk := value.(map[string]interface{})
				newDisk := hardDisk{}

				if v, ok := disk["template"].(string); ok && v != "" {
					if v, ok := disk["name"].(string); ok && v != "" {
						return fmt.Errorf("Cannot specify name of a template")
					}
					vm.template = v
					if vm.hasBootableVmdk {
						return fmt.Errorf("[ERROR] Only one bootable disk or template may be given")
					}
					vm.hasBootableVmdk = true
				}

				if v, ok := disk["type"].(string); ok && v != "" {
					newDisk.initType = v
				}

				if v, ok := disk["datastore"].(string); ok && v != "" {
					vm.datastore = v
				}

				if v, ok := disk["size"].(int); ok && v != 0 {
					if v, ok := disk["template"].(string); ok && v != "" {
						return fmt.Errorf("Cannot specify size of a template")
					}

					if v, ok := disk["name"].(string); ok && v != "" {
						newDisk.name = v
					} else {
						return fmt.Errorf("[ERROR] Disk name must be provided when creating a new disk")
					}

					newDisk.size = int64(v)
				}

				if v, ok := disk["iops"].(int); ok && v != 0 {
					newDisk.iops = int64(v)
				}

				if v, ok := disk["controller_type"].(string); ok && v != "" {
					newDisk.controller = v
				}

				if vVmdk, ok := disk["vmdk"].(string); ok && vVmdk != "" {
					if v, ok := disk["template"].(string); ok && v != "" {
						return fmt.Errorf("Cannot specify a vmdk for a template")
					}
					// size is stored as an int in the schema, so assert the int type here
					if v, ok := disk["size"].(int); ok && v != 0 {
						return fmt.Errorf("Cannot specify size of a vmdk")
					}
					if v, ok := disk["name"].(string); ok && v != "" {
						return fmt.Errorf("Cannot specify name of a vmdk")
					}
					if vBootable, ok := disk["bootable"].(bool); ok {
						if vBootable && vm.hasBootableVmdk {
							return fmt.Errorf("[ERROR] Only one bootable disk or template may be given")
						}
						newDisk.bootable = vBootable
						vm.hasBootableVmdk = vm.hasBootableVmdk || vBootable
					}
					newDisk.vmdkPath = vVmdk
				}
				// Preserves order so bootable disk is first
				if newDisk.bootable == true || disk["template"] != "" {
					disks = append([]hardDisk{newDisk}, disks...)
				} else {
					disks = append(disks, newDisk)
				}
			}
			vm.hardDisks = disks
			log.Printf("[DEBUG] disk init: %v", disks)
		}
	}

	if vL, ok := d.GetOk("cdrom"); ok {
		cdroms := make([]cdrom, len(vL.([]interface{})))
		for i, v := range vL.([]interface{}) {
			c := v.(map[string]interface{})
			if v, ok := c["datastore"].(string); ok && v != "" {
				cdroms[i].datastore = v
			} else {
				return fmt.Errorf("Datastore argument must be specified when attaching a cdrom image.")
			}
			if v, ok := c["path"].(string); ok && v != "" {
				cdroms[i].path = v
			} else {
				return fmt.Errorf("Path argument must be specified when attaching a cdrom image.")
			}
		}
		vm.cdroms = cdroms
		log.Printf("[DEBUG] cdrom init: %v", cdroms)
	}

	err := vm.setupVirtualMachine(client)
	if err != nil {
		return err
	}

	d.SetId(vm.Path())
	log.Printf("[INFO] Created virtual machine: %s", d.Id())

	return resourceVSphereVirtualMachineRead(d, meta)
}

func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{}) error {
	log.Printf("[DEBUG] virtual machine resource data: %#v", d)
	client := meta.(*govmomi.Client)
	dc, err := getDatacenter(client, d.Get("datacenter").(string))
	if err != nil {
		return err
	}
	finder := find.NewFinder(client.Client, true)
	finder = finder.SetDatacenter(dc)

	vm, err := finder.VirtualMachine(context.TODO(), d.Id())
	if err != nil {
		d.SetId("")
		return nil
	}

	err = d.Set("moid", vm.Reference().Value)
	if err != nil {
		return fmt.Errorf("Invalid moid to set: %#v", vm.Reference().Value)
	} else {
		log.Printf("[DEBUG] Set the moid: %#v", vm.Reference().Value)
	}

	state, err := vm.PowerState(context.TODO())
	if err != nil {
		return err
	}

	if state == types.VirtualMachinePowerStatePoweredOn {
		// wait for interfaces to appear
		log.Printf("[DEBUG] Waiting for interfaces to appear")

		_, err = vm.WaitForNetIP(context.TODO(), false)
		if err != nil {
			return err
		}

		log.Printf("[DEBUG] Successfully waited for interfaces to appear")
	}

	var mvm mo.VirtualMachine
	collector := property.DefaultCollector(client.Client)
	if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"guest", "summary", "datastore", "config"}, &mvm); err != nil {
		return err
	}

	log.Printf("[DEBUG] Datacenter - %#v", dc)
	log.Printf("[DEBUG] mvm.Summary.Config - %#v", mvm.Summary.Config)
	log.Printf("[DEBUG] mvm.Config - %#v", mvm.Config)
	log.Printf("[DEBUG] mvm.Guest.Net - %#v", mvm.Guest.Net)

	err = d.Set("moid", mvm.Reference().Value)
	if err != nil {
		return fmt.Errorf("Invalid moid to set: %#v", mvm.Reference().Value)
	} else {
		log.Printf("[DEBUG] Set the moid: %#v", mvm.Reference().Value)
	}

	disks := make([]map[string]interface{}, 0)
	templateDisk := make(map[string]interface{}, 1)
	for _, device := range mvm.Config.Hardware.Device {
		if vd, ok := device.(*types.VirtualDisk); ok {

			virtualDevice := vd.GetVirtualDevice()

			backingInfo := virtualDevice.Backing
			var diskFullPath string
			var diskUuid string
			if v, ok := backingInfo.(*types.VirtualDiskFlatVer2BackingInfo); ok {
				diskFullPath = v.FileName
				diskUuid = v.Uuid
			} else if v, ok := backingInfo.(*types.VirtualDiskSparseVer2BackingInfo); ok {
				diskFullPath = v.FileName
				diskUuid = v.Uuid
			}
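			// Derive the datastore-relative path and bare disk name so this device can be matched against state below.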
log.Printf("[DEBUG] resourceVSphereVirtualMachineRead - Analyzing disk: %v", diskFullPath) 993 994 // Separate datastore and path 995 diskFullPathSplit := strings.Split(diskFullPath, " ") 996 if len(diskFullPathSplit) != 2 { 997 return fmt.Errorf("[ERROR] Failed trying to parse disk path: %v", diskFullPath) 998 } 999 diskPath := diskFullPathSplit[1] 1000 // Isolate filename 1001 diskNameSplit := strings.Split(diskPath, "/") 1002 diskName := diskNameSplit[len(diskNameSplit)-1] 1003 // Remove possible extension 1004 diskName = strings.Split(diskName, ".")[0] 1005 1006 if prevDisks, ok := d.GetOk("disk"); ok { 1007 if prevDiskSet, ok := prevDisks.(*schema.Set); ok { 1008 for _, v := range prevDiskSet.List() { 1009 prevDisk := v.(map[string]interface{}) 1010 1011 // We're guaranteed only one template disk. Passing value directly through since templates should be immutable 1012 if prevDisk["template"] != "" { 1013 if len(templateDisk) == 0 { 1014 templateDisk = prevDisk 1015 disks = append(disks, templateDisk) 1016 break 1017 } 1018 } 1019 1020 // It is enforced that prevDisk["name"] should only be set in the case 1021 // of creating a new disk for the user. 1022 // size case: name was set by user, compare parsed filename from mo.filename (without path or .vmdk extension) with name 1023 // vmdk case: compare prevDisk["vmdk"] and mo.Filename 1024 if diskName == prevDisk["name"] || diskPath == prevDisk["vmdk"] { 1025 1026 prevDisk["key"] = virtualDevice.Key 1027 prevDisk["uuid"] = diskUuid 1028 1029 disks = append(disks, prevDisk) 1030 break 1031 } 1032 } 1033 } 1034 } 1035 log.Printf("[DEBUG] disks: %#v", disks) 1036 } 1037 } 1038 err = d.Set("disk", disks) 1039 if err != nil { 1040 return fmt.Errorf("Invalid disks to set: %#v", disks) 1041 } 1042 1043 networkInterfaces := make([]map[string]interface{}, 0) 1044 for _, v := range mvm.Guest.Net { 1045 if v.DeviceConfigId >= 0 { 1046 log.Printf("[DEBUG] v.Network - %#v", v.Network) 1047 networkInterface := make(map[string]interface{}) 1048 networkInterface["label"] = v.Network 1049 networkInterface["mac_address"] = v.MacAddress 1050 for _, ip := range v.IpConfig.IpAddress { 1051 p := net.ParseIP(ip.IpAddress) 1052 if p.To4() != nil { 1053 log.Printf("[DEBUG] p.String - %#v", p.String()) 1054 log.Printf("[DEBUG] ip.PrefixLength - %#v", ip.PrefixLength) 1055 networkInterface["ipv4_address"] = p.String() 1056 networkInterface["ipv4_prefix_length"] = ip.PrefixLength 1057 } else if p.To16() != nil { 1058 log.Printf("[DEBUG] p.String - %#v", p.String()) 1059 log.Printf("[DEBUG] ip.PrefixLength - %#v", ip.PrefixLength) 1060 networkInterface["ipv6_address"] = p.String() 1061 networkInterface["ipv6_prefix_length"] = ip.PrefixLength 1062 } 1063 log.Printf("[DEBUG] networkInterface: %#v", networkInterface) 1064 } 1065 log.Printf("[DEBUG] networkInterface: %#v", networkInterface) 1066 networkInterfaces = append(networkInterfaces, networkInterface) 1067 } 1068 } 1069 if mvm.Guest.IpStack != nil { 1070 for _, v := range mvm.Guest.IpStack { 1071 if v.IpRouteConfig != nil && v.IpRouteConfig.IpRoute != nil { 1072 for _, route := range v.IpRouteConfig.IpRoute { 1073 if route.Gateway.Device != "" { 1074 gatewaySetting := "" 1075 if route.Network == "::" { 1076 gatewaySetting = "ipv6_gateway" 1077 } else if route.Network == "0.0.0.0" { 1078 gatewaySetting = "ipv4_gateway" 1079 } 1080 if gatewaySetting != "" { 1081 deviceID, err := strconv.Atoi(route.Gateway.Device) 1082 if len(networkInterfaces) == 1 { 1083 deviceID = 0 1084 } 1085 if err != nil { 1086 
log.Printf("[WARN] error at processing %s of device id %#v: %#v", gatewaySetting, route.Gateway.Device, err) 1087 } else { 1088 log.Printf("[DEBUG] %s of device id %d: %s", gatewaySetting, deviceID, route.Gateway.IpAddress) 1089 networkInterfaces[deviceID][gatewaySetting] = route.Gateway.IpAddress 1090 } 1091 } 1092 } 1093 } 1094 } 1095 } 1096 } 1097 log.Printf("[DEBUG] networkInterfaces: %#v", networkInterfaces) 1098 err = d.Set("network_interface", networkInterfaces) 1099 if err != nil { 1100 return fmt.Errorf("Invalid network interfaces to set: %#v", networkInterfaces) 1101 } 1102 1103 if len(networkInterfaces) > 0 { 1104 if _, ok := networkInterfaces[0]["ipv4_address"]; ok { 1105 log.Printf("[DEBUG] ip address: %v", networkInterfaces[0]["ipv4_address"].(string)) 1106 d.SetConnInfo(map[string]string{ 1107 "type": "ssh", 1108 "host": networkInterfaces[0]["ipv4_address"].(string), 1109 }) 1110 } 1111 } 1112 1113 var rootDatastore string 1114 for _, v := range mvm.Datastore { 1115 var md mo.Datastore 1116 if err := collector.RetrieveOne(context.TODO(), v, []string{"name", "parent"}, &md); err != nil { 1117 return err 1118 } 1119 if md.Parent.Type == "StoragePod" { 1120 var msp mo.StoragePod 1121 if err := collector.RetrieveOne(context.TODO(), *md.Parent, []string{"name"}, &msp); err != nil { 1122 return err 1123 } 1124 rootDatastore = msp.Name 1125 log.Printf("[DEBUG] %#v", msp.Name) 1126 } else { 1127 rootDatastore = md.Name 1128 log.Printf("[DEBUG] %#v", md.Name) 1129 } 1130 break 1131 } 1132 1133 d.Set("datacenter", dc) 1134 d.Set("memory", mvm.Summary.Config.MemorySizeMB) 1135 d.Set("memory_reservation", mvm.Summary.Config.MemoryReservation) 1136 d.Set("cpu", mvm.Summary.Config.NumCpu) 1137 d.Set("datastore", rootDatastore) 1138 d.Set("uuid", mvm.Summary.Config.Uuid) 1139 1140 return nil 1141 } 1142 1143 func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error { 1144 client := meta.(*govmomi.Client) 1145 dc, err := getDatacenter(client, d.Get("datacenter").(string)) 1146 if err != nil { 1147 return err 1148 } 1149 finder := find.NewFinder(client.Client, true) 1150 finder = finder.SetDatacenter(dc) 1151 1152 vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string))) 1153 if err != nil { 1154 return err 1155 } 1156 devices, err := vm.Device(context.TODO()) 1157 if err != nil { 1158 log.Printf("[DEBUG] resourceVSphereVirtualMachineDelete - Failed to get device list: %v", err) 1159 return err 1160 } 1161 1162 log.Printf("[INFO] Deleting virtual machine: %s", d.Id()) 1163 state, err := vm.PowerState(context.TODO()) 1164 if err != nil { 1165 return err 1166 } 1167 1168 if state == types.VirtualMachinePowerStatePoweredOn { 1169 task, err := vm.PowerOff(context.TODO()) 1170 if err != nil { 1171 return err 1172 } 1173 1174 err = task.Wait(context.TODO()) 1175 if err != nil { 1176 return err 1177 } 1178 } 1179 1180 // Safely eject any disks the user marked as keep_on_remove 1181 var diskSetList []interface{} 1182 if vL, ok := d.GetOk("disk"); ok { 1183 if diskSet, ok := vL.(*schema.Set); ok { 1184 diskSetList = diskSet.List() 1185 for _, value := range diskSetList { 1186 disk := value.(map[string]interface{}) 1187 1188 if v, ok := disk["keep_on_remove"].(bool); ok && v == true { 1189 log.Printf("[DEBUG] not destroying %v", disk["name"]) 1190 virtualDisk := devices.FindByKey(int32(disk["key"].(int))) 1191 err = vm.RemoveDevice(context.TODO(), true, virtualDisk) 1192 if err != nil { 1193 log.Printf("[ERROR] Update 
Remove Disk - Error removing disk: %v", err) 1194 return err 1195 } 1196 } 1197 } 1198 } 1199 } 1200 1201 // Safely eject any disks that are not managed by this resource 1202 if v, ok := d.GetOk("detach_unknown_disks_on_delete"); ok && v.(bool) { 1203 var disksToRemove object.VirtualDeviceList 1204 for _, device := range devices { 1205 if devices.TypeName(device) != "VirtualDisk" { 1206 continue 1207 } 1208 vd := device.GetVirtualDevice() 1209 var skip bool 1210 for _, value := range diskSetList { 1211 disk := value.(map[string]interface{}) 1212 if int32(disk["key"].(int)) == vd.Key { 1213 skip = true 1214 break 1215 } 1216 } 1217 if skip { 1218 continue 1219 } 1220 disksToRemove = append(disksToRemove, device) 1221 } 1222 if len(disksToRemove) != 0 { 1223 err = vm.RemoveDevice(context.TODO(), true, disksToRemove...) 1224 if err != nil { 1225 log.Printf("[ERROR] Update Remove Disk - Error removing disk: %v", err) 1226 return err 1227 } 1228 } 1229 } 1230 1231 task, err := vm.Destroy(context.TODO()) 1232 if err != nil { 1233 return err 1234 } 1235 1236 err = task.Wait(context.TODO()) 1237 if err != nil { 1238 return err 1239 } 1240 1241 d.SetId("") 1242 return nil 1243 } 1244 1245 // addHardDisk adds a new Hard Disk to the VirtualMachine. 1246 func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string, datastore *object.Datastore, diskPath string, controller_type string) error { 1247 devices, err := vm.Device(context.TODO()) 1248 if err != nil { 1249 return err 1250 } 1251 log.Printf("[DEBUG] vm devices: %#v\n", devices) 1252 1253 var controller types.BaseVirtualController 1254 switch controller_type { 1255 case "scsi": 1256 controller, err = devices.FindDiskController(controller_type) 1257 case "scsi-lsi-parallel": 1258 controller = devices.PickController(&types.VirtualLsiLogicController{}) 1259 case "scsi-buslogic": 1260 controller = devices.PickController(&types.VirtualBusLogicController{}) 1261 case "scsi-paravirtual": 1262 controller = devices.PickController(&types.ParaVirtualSCSIController{}) 1263 case "scsi-lsi-sas": 1264 controller = devices.PickController(&types.VirtualLsiLogicSASController{}) 1265 case "ide": 1266 controller, err = devices.FindDiskController(controller_type) 1267 default: 1268 return fmt.Errorf("[ERROR] Unsupported disk controller provided: %v", controller_type) 1269 } 1270 1271 if err != nil || controller == nil { 1272 // Check if max number of scsi controller are already used 1273 diskControllers := getSCSIControllers(devices) 1274 if len(diskControllers) >= 4 { 1275 return fmt.Errorf("[ERROR] Maximum number of SCSI controllers created") 1276 } 1277 1278 log.Printf("[DEBUG] Couldn't find a %v controller. 
Creating one..", controller_type) 1279 1280 var c types.BaseVirtualDevice 1281 switch controller_type { 1282 case "scsi": 1283 // Create scsi controller 1284 c, err = devices.CreateSCSIController("scsi") 1285 if err != nil { 1286 return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err) 1287 } 1288 case "scsi-lsi-parallel": 1289 // Create scsi controller 1290 c, err = devices.CreateSCSIController("lsilogic") 1291 if err != nil { 1292 return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err) 1293 } 1294 case "scsi-buslogic": 1295 // Create scsi controller 1296 c, err = devices.CreateSCSIController("buslogic") 1297 if err != nil { 1298 return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err) 1299 } 1300 case "scsi-paravirtual": 1301 // Create scsi controller 1302 c, err = devices.CreateSCSIController("pvscsi") 1303 if err != nil { 1304 return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err) 1305 } 1306 case "scsi-lsi-sas": 1307 // Create scsi controller 1308 c, err = devices.CreateSCSIController("lsilogic-sas") 1309 if err != nil { 1310 return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err) 1311 } 1312 case "ide": 1313 // Create ide controller 1314 c, err = devices.CreateIDEController() 1315 if err != nil { 1316 return fmt.Errorf("[ERROR] Failed creating IDE controller: %v", err) 1317 } 1318 default: 1319 return fmt.Errorf("[ERROR] Unsupported disk controller provided: %v", controller_type) 1320 } 1321 1322 vm.AddDevice(context.TODO(), c) 1323 // Update our devices list 1324 devices, err := vm.Device(context.TODO()) 1325 if err != nil { 1326 return err 1327 } 1328 controller = devices.PickController(c.(types.BaseVirtualController)) 1329 if controller == nil { 1330 log.Printf("[ERROR] Could not find the new %v controller", controller_type) 1331 return fmt.Errorf("Could not find the new %v controller", controller_type) 1332 } 1333 } 1334 1335 log.Printf("[DEBUG] disk controller: %#v\n", controller) 1336 1337 // TODO Check if diskPath & datastore exist 1338 // If diskPath is not specified, pass empty string to CreateDisk() 1339 if diskPath == "" { 1340 return fmt.Errorf("[ERROR] addHardDisk - No path proided") 1341 } else { 1342 diskPath = datastore.Path(diskPath) 1343 } 1344 log.Printf("[DEBUG] addHardDisk - diskPath: %v", diskPath) 1345 disk := devices.CreateDisk(controller, datastore.Reference(), diskPath) 1346 1347 if strings.Contains(controller_type, "scsi") { 1348 unitNumber, err := getNextUnitNumber(devices, controller) 1349 if err != nil { 1350 return err 1351 } 1352 *disk.UnitNumber = unitNumber 1353 } 1354 1355 existing := devices.SelectByBackingInfo(disk.Backing) 1356 log.Printf("[DEBUG] disk: %#v\n", disk) 1357 1358 if len(existing) == 0 { 1359 disk.CapacityInKB = int64(size * 1024 * 1024) 1360 if iops != 0 { 1361 disk.StorageIOAllocation = &types.StorageIOAllocationInfo{ 1362 Limit: iops, 1363 } 1364 } 1365 backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo) 1366 1367 if diskType == "eager_zeroed" { 1368 // eager zeroed thick virtual disk 1369 backing.ThinProvisioned = types.NewBool(false) 1370 backing.EagerlyScrub = types.NewBool(true) 1371 } else if diskType == "lazy" { 1372 // lazy zeroed thick virtual disk 1373 backing.ThinProvisioned = types.NewBool(false) 1374 backing.EagerlyScrub = types.NewBool(false) 1375 } else if diskType == "thin" { 1376 // thin provisioned virtual disk 1377 backing.ThinProvisioned = types.NewBool(true) 1378 } 1379 1380 log.Printf("[DEBUG] addHardDisk: %#v\n", disk) 1381 
log.Printf("[DEBUG] addHardDisk capacity: %#v\n", disk.CapacityInKB) 1382 1383 return vm.AddDevice(context.TODO(), disk) 1384 } else { 1385 log.Printf("[DEBUG] addHardDisk: Disk already present.\n") 1386 1387 return nil 1388 } 1389 } 1390 1391 func getSCSIControllers(vmDevices object.VirtualDeviceList) []*types.VirtualController { 1392 // get virtual scsi controllers of all supported types 1393 var scsiControllers []*types.VirtualController 1394 for _, device := range vmDevices { 1395 devType := vmDevices.Type(device) 1396 switch devType { 1397 case "scsi", "lsilogic", "buslogic", "pvscsi", "lsilogic-sas": 1398 if c, ok := device.(types.BaseVirtualController); ok { 1399 scsiControllers = append(scsiControllers, c.GetVirtualController()) 1400 } 1401 } 1402 } 1403 return scsiControllers 1404 } 1405 1406 func getNextUnitNumber(devices object.VirtualDeviceList, c types.BaseVirtualController) (int32, error) { 1407 key := c.GetVirtualController().Key 1408 1409 var unitNumbers [16]bool 1410 unitNumbers[7] = true 1411 1412 for _, device := range devices { 1413 d := device.GetVirtualDevice() 1414 1415 if d.ControllerKey == key { 1416 if d.UnitNumber != nil { 1417 unitNumbers[*d.UnitNumber] = true 1418 } 1419 } 1420 } 1421 for i, taken := range unitNumbers { 1422 if !taken { 1423 return int32(i), nil 1424 } 1425 } 1426 return -1, fmt.Errorf("[ERROR] getNextUnitNumber - controller is full") 1427 } 1428 1429 // addCdrom adds a new virtual cdrom drive to the VirtualMachine and attaches an image (ISO) to it from a datastore path. 1430 func addCdrom(client *govmomi.Client, vm *object.VirtualMachine, datacenter *object.Datacenter, datastore, path string) error { 1431 devices, err := vm.Device(context.TODO()) 1432 if err != nil { 1433 return err 1434 } 1435 log.Printf("[DEBUG] vm devices: %#v", devices) 1436 1437 var controller *types.VirtualIDEController 1438 controller, err = devices.FindIDEController("") 1439 if err != nil { 1440 log.Printf("[DEBUG] Couldn't find a ide controller. Creating one..") 1441 1442 var c types.BaseVirtualDevice 1443 c, err := devices.CreateIDEController() 1444 if err != nil { 1445 return fmt.Errorf("[ERROR] Failed creating IDE controller: %v", err) 1446 } 1447 1448 if v, ok := c.(*types.VirtualIDEController); ok { 1449 controller = v 1450 } else { 1451 return fmt.Errorf("[ERROR] Controller type could not be asserted") 1452 } 1453 vm.AddDevice(context.TODO(), c) 1454 // Update our devices list 1455 devices, err := vm.Device(context.TODO()) 1456 if err != nil { 1457 return err 1458 } 1459 controller, err = devices.FindIDEController("") 1460 if err != nil { 1461 log.Printf("[ERROR] Could not find the new disk IDE controller: %v", err) 1462 return err 1463 } 1464 } 1465 log.Printf("[DEBUG] ide controller: %#v", controller) 1466 1467 c, err := devices.CreateCdrom(controller) 1468 if err != nil { 1469 return err 1470 } 1471 1472 finder := find.NewFinder(client.Client, true) 1473 finder = finder.SetDatacenter(datacenter) 1474 ds, err := getDatastore(finder, datastore) 1475 if err != nil { 1476 return err 1477 } 1478 1479 c = devices.InsertIso(c, ds.Path(path)) 1480 log.Printf("[DEBUG] addCdrom: %#v", c) 1481 1482 return vm.AddDevice(context.TODO(), c) 1483 } 1484 1485 // buildNetworkDevice builds VirtualDeviceConfigSpec for Network Device. 
func buildNetworkDevice(f *find.Finder, label, adapterType string, macAddress string) (*types.VirtualDeviceConfigSpec, error) {
	network, err := f.Network(context.TODO(), "*"+label)
	if err != nil {
		return nil, err
	}

	backing, err := network.EthernetCardBackingInfo(context.TODO())
	if err != nil {
		return nil, err
	}

	var address_type string
	if macAddress == "" {
		address_type = string(types.VirtualEthernetCardMacTypeGenerated)
	} else {
		address_type = string(types.VirtualEthernetCardMacTypeManual)
	}

	if adapterType == "vmxnet3" {
		return &types.VirtualDeviceConfigSpec{
			Operation: types.VirtualDeviceConfigSpecOperationAdd,
			Device: &types.VirtualVmxnet3{
				VirtualVmxnet: types.VirtualVmxnet{
					VirtualEthernetCard: types.VirtualEthernetCard{
						VirtualDevice: types.VirtualDevice{
							Key:     -1,
							Backing: backing,
						},
						AddressType: address_type,
						MacAddress:  macAddress,
					},
				},
			},
		}, nil
	} else if adapterType == "e1000" {
		return &types.VirtualDeviceConfigSpec{
			Operation: types.VirtualDeviceConfigSpecOperationAdd,
			Device: &types.VirtualE1000{
				VirtualEthernetCard: types.VirtualEthernetCard{
					VirtualDevice: types.VirtualDevice{
						Key:     -1,
						Backing: backing,
					},
					AddressType: address_type,
					MacAddress:  macAddress,
				},
			},
		}, nil
	} else {
		return nil, fmt.Errorf("Invalid network adapter type.")
	}
}

// buildVMRelocateSpec builds VirtualMachineRelocateSpec to set a place for a new VirtualMachine.
func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine, linkedClone bool, initType string) (types.VirtualMachineRelocateSpec, error) {
	var key int32
	var moveType string
	if linkedClone {
		moveType = "createNewChildDiskBacking"
	} else {
		moveType = "moveAllDiskBackingsAndDisallowSharing"
	}
	log.Printf("[DEBUG] relocate type: [%s]", moveType)

	devices, err := vm.Device(context.TODO())
	if err != nil {
		return types.VirtualMachineRelocateSpec{}, err
	}
	for _, d := range devices {
		if devices.Type(d) == "disk" {
			key = int32(d.GetVirtualDevice().Key)
		}
	}

	isThin := initType == "thin"
	eagerScrub := initType == "eager_zeroed"
	rpr := rp.Reference()
	dsr := ds.Reference()
	return types.VirtualMachineRelocateSpec{
		Datastore:    &dsr,
		Pool:         &rpr,
		DiskMoveType: moveType,
		Disk: []types.VirtualMachineRelocateSpecDiskLocator{
			{
				Datastore: dsr,
				DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{
					DiskMode:        "persistent",
					ThinProvisioned: types.NewBool(isThin),
					EagerlyScrub:    types.NewBool(eagerScrub),
				},
				DiskId: key,
			},
		},
	}, nil
}

// getDatastoreObject gets datastore object.
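// It resolves the named datastore under the datacenter's datastore folder via the SearchIndex.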
func getDatastoreObject(client *govmomi.Client, f *object.DatacenterFolders, name string) (types.ManagedObjectReference, error) {
	s := object.NewSearchIndex(client.Client)
	ref, err := s.FindChild(context.TODO(), f.DatastoreFolder, name)
	if err != nil {
		return types.ManagedObjectReference{}, err
	}
	if ref == nil {
		return types.ManagedObjectReference{}, fmt.Errorf("Datastore '%s' not found.", name)
	}
	log.Printf("[DEBUG] getDatastoreObject: reference: %#v", ref)
	return ref.Reference(), nil
}

// buildStoragePlacementSpecCreate builds StoragePlacementSpec for create action.
func buildStoragePlacementSpecCreate(f *object.DatacenterFolders, rp *object.ResourcePool, storagePod object.StoragePod, configSpec types.VirtualMachineConfigSpec) types.StoragePlacementSpec {
	vmfr := f.VmFolder.Reference()
	rpr := rp.Reference()
	spr := storagePod.Reference()

	sps := types.StoragePlacementSpec{
		Type:       "create",
		ConfigSpec: &configSpec,
		PodSelectionSpec: types.StorageDrsPodSelectionSpec{
			StoragePod: &spr,
		},
		Folder:       &vmfr,
		ResourcePool: &rpr,
	}
	log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)
	return sps
}

// buildStoragePlacementSpecClone builds StoragePlacementSpec for clone action.
func buildStoragePlacementSpecClone(c *govmomi.Client, f *object.DatacenterFolders, vm *object.VirtualMachine, rp *object.ResourcePool, storagePod object.StoragePod) types.StoragePlacementSpec {
	vmr := vm.Reference()
	vmfr := f.VmFolder.Reference()
	rpr := rp.Reference()
	spr := storagePod.Reference()

	var o mo.VirtualMachine
	err := vm.Properties(context.TODO(), vmr, []string{"datastore"}, &o)
	if err != nil {
		return types.StoragePlacementSpec{}
	}
	ds := object.NewDatastore(c.Client, o.Datastore[0])
	log.Printf("[DEBUG] findDatastore: datastore: %#v\n", ds)

	devices, err := vm.Device(context.TODO())
	if err != nil {
		return types.StoragePlacementSpec{}
	}

	var key int32
	for _, d := range devices.SelectByType((*types.VirtualDisk)(nil)) {
		key = int32(d.GetVirtualDevice().Key)
		log.Printf("[DEBUG] findDatastore: virtual devices: %#v\n", d.GetVirtualDevice())
	}

	sps := types.StoragePlacementSpec{
		Type: "clone",
		Vm:   &vmr,
		PodSelectionSpec: types.StorageDrsPodSelectionSpec{
			StoragePod: &spr,
		},
		CloneSpec: &types.VirtualMachineCloneSpec{
			Location: types.VirtualMachineRelocateSpec{
				Disk: []types.VirtualMachineRelocateSpecDiskLocator{
					{
						Datastore:       ds.Reference(),
						DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{},
						DiskId:          key,
					},
				},
				Pool: &rpr,
			},
			PowerOn:  false,
			Template: false,
		},
		CloneName: "dummy",
		Folder:    &vmfr,
	}
	return sps
}

// findDatastore finds Datastore object.
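// It submits the StoragePlacementSpec to Storage DRS and returns the first recommended datastore.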
func findDatastore(c *govmomi.Client, sps types.StoragePlacementSpec) (*object.Datastore, error) {
	var datastore *object.Datastore
	log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)

	srm := object.NewStorageResourceManager(c.Client)
	rds, err := srm.RecommendDatastores(context.TODO(), sps)
	if err != nil {
		return nil, err
	}
	log.Printf("[DEBUG] findDatastore: recommendDatastores: %#v\n", rds)

	spa := rds.Recommendations[0].Action[0].(*types.StoragePlacementAction)
	datastore = object.NewDatastore(c.Client, spa.Destination)
	log.Printf("[DEBUG] findDatastore: datastore: %#v", datastore)

	return datastore, nil
}

// createCdroms is a helper function to attach virtual cdrom devices (and their attached disk images) to a virtual IDE controller.
func createCdroms(client *govmomi.Client, vm *object.VirtualMachine, datacenter *object.Datacenter, cdroms []cdrom) error {
	log.Printf("[DEBUG] add cdroms: %v", cdroms)
	for _, cd := range cdroms {
		log.Printf("[DEBUG] add cdrom (datastore): %v", cd.datastore)
		log.Printf("[DEBUG] add cdrom (cd path): %v", cd.path)
		err := addCdrom(client, vm, datacenter, cd.datastore, cd.path)
		if err != nil {
			return err
		}
	}

	return nil
}

func (vm *virtualMachine) setupVirtualMachine(c *govmomi.Client) error {
	dc, err := getDatacenter(c, vm.datacenter)

	if err != nil {
		return err
	}
	finder := find.NewFinder(c.Client, true)
	finder = finder.SetDatacenter(dc)

	var template *object.VirtualMachine
	var template_mo mo.VirtualMachine
	var vm_mo mo.VirtualMachine
	if vm.template != "" {
		template, err = finder.VirtualMachine(context.TODO(), vm.template)
		if err != nil {
			return err
		}
		log.Printf("[DEBUG] template: %#v", template)

		err = template.Properties(context.TODO(), template.Reference(), []string{"parent", "config.template", "config.guestId", "resourcePool", "snapshot", "guest.toolsVersionStatus2", "config.guestFullName"}, &template_mo)
		if err != nil {
			return err
		}
	}

	var resourcePool *object.ResourcePool
	if vm.resourcePool == "" {
		if vm.cluster == "" {
			resourcePool, err = finder.DefaultResourcePool(context.TODO())
			if err != nil {
				return err
			}
		} else {
			resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
			if err != nil {
				return err
			}
		}
	} else {
		resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
		if err != nil {
			return err
		}
	}
	log.Printf("[DEBUG] resource pool: %#v", resourcePool)

	dcFolders, err := dc.Folders(context.TODO())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] folder: %#v", vm.folder)

	folder := dcFolders.VmFolder
	if len(vm.folder) > 0 {
		si := object.NewSearchIndex(c.Client)
		folderRef, err := si.FindByInventoryPath(
			context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder))
		if err != nil {
			return fmt.Errorf("Error reading folder %s: %s", vm.folder, err)
		} else if folderRef == nil {
			return fmt.Errorf("Cannot find folder %s", vm.folder)
		} else {
			folder = folderRef.(*object.Folder)
		}
	}

	// make config spec
	configSpec := types.VirtualMachineConfigSpec{
		Name:              vm.name,
		NumCPUs:           vm.vcpu,
		NumCoresPerSocket: 1,
		MemoryMB:          vm.memoryMb,
		MemoryAllocation: &types.ResourceAllocationInfo{
			Reservation: vm.memoryAllocation.reservation,
		},
		Flags: &types.VirtualMachineFlagInfo{
			DiskUuidEnabled: &vm.enableDiskUUID,
		},
	}
	if vm.template == "" {
		configSpec.GuestId = "otherLinux64Guest"
	}
	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

	// make ExtraConfig
	log.Printf("[DEBUG] virtual machine Extra Config spec start")
	if len(vm.customConfigurations) > 0 {
		var ov []types.BaseOptionValue
		for k, v := range vm.customConfigurations {
			key := k
			value := v
			o := types.OptionValue{
				Key:   key,
				Value: &value,
			}
			log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k, v)
			ov = append(ov, &o)
		}
		configSpec.ExtraConfig = ov
		log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
	}

	var datastore *object.Datastore
	if vm.datastore == "" {
		datastore, err = finder.DefaultDatastore(context.TODO())
		if err != nil {
			return err
		}
	} else {
		datastore, err = finder.Datastore(context.TODO(), vm.datastore)
		if err != nil {
			// TODO: datastore cluster support in govmomi finder function
			d, err := getDatastoreObject(c, dcFolders, vm.datastore)
			if err != nil {
				return err
			}

			if d.Type == "StoragePod" {
				sp := object.StoragePod{
					Folder: object.NewFolder(c.Client, d),
				}

				var sps types.StoragePlacementSpec
				if vm.template != "" {
					sps = buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp)
				} else {
					sps = buildStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec)
				}

				datastore, err = findDatastore(c, sps)
				if err != nil {
					return err
				}
			} else {
				datastore = object.NewDatastore(c.Client, d)
			}
		}
	}

	log.Printf("[DEBUG] datastore: %#v", datastore)

	// network
	networkDevices := []types.BaseVirtualDeviceConfigSpec{}
	networkConfigs := []types.CustomizationAdapterMapping{}
	for _, network := range vm.networkInterfaces {
		// network device
		var networkDeviceType string
		if vm.template == "" {
			networkDeviceType = "e1000"
		} else {
			networkDeviceType = "vmxnet3"
		}
		nd, err := buildNetworkDevice(finder, network.label, networkDeviceType, network.macAddress)
		if err != nil {
			return err
		}
		log.Printf("[DEBUG] network device: %+v", nd.Device)
		networkDevices = append(networkDevices, nd)

		if vm.template != "" {
			var ipSetting types.CustomizationIPSettings
			if network.ipv4Address == "" {
				ipSetting.Ip = &types.CustomizationDhcpIpGenerator{}
			} else {
				if network.ipv4PrefixLength == 0 {
					return fmt.Errorf("Error: ipv4_prefix_length argument is empty.")
				}
				m := net.CIDRMask(network.ipv4PrefixLength, 32)
				sm := net.IPv4(m[0], m[1], m[2], m[3])
				subnetMask := sm.String()
				log.Printf("[DEBUG] ipv4 gateway: %v\n", network.ipv4Gateway)
				log.Printf("[DEBUG] ipv4 address: %v\n", network.ipv4Address)
				log.Printf("[DEBUG] ipv4 prefix length: %v\n", network.ipv4PrefixLength)
				log.Printf("[DEBUG] ipv4 subnet mask: %v\n", subnetMask)
				ipSetting.Gateway = []string{
					network.ipv4Gateway,
				}
				ipSetting.Ip = &types.CustomizationFixedIp{
					IpAddress: network.ipv4Address,
				}
				ipSetting.SubnetMask = subnetMask
			}

			ipv6Spec := &types.CustomizationIPSettingsIpV6AddressSpec{}
			if network.ipv6Address == "" {
				ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
					&types.CustomizationDhcpIpV6Generator{},
				}
			} else {
				log.Printf("[DEBUG] ipv6 gateway: %v\n", network.ipv6Gateway)
				log.Printf("[DEBUG] ipv6 address: %v\n", network.ipv6Address)
				log.Printf("[DEBUG] ipv6 prefix length: %v\n", network.ipv6PrefixLength)

				ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
					&types.CustomizationFixedIpV6{
						IpAddress:  network.ipv6Address,
						SubnetMask: int32(network.ipv6PrefixLength),
					},
				}
				ipv6Spec.Gateway = []string{network.ipv6Gateway}
			}
			ipSetting.IpV6Spec = ipv6Spec

			// network config
			config := types.CustomizationAdapterMapping{
				Adapter: ipSetting,
			}
			networkConfigs = append(networkConfigs, config)
		}
	}
	log.Printf("[DEBUG] network devices: %#v", networkDevices)
	log.Printf("[DEBUG] network configs: %#v", networkConfigs)

	var task *object.Task
	if vm.template == "" {
		var mds mo.Datastore
		if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil {
			return err
		}
		log.Printf("[DEBUG] datastore: %#v", mds.Name)
		scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi")
		if err != nil {
			log.Printf("[ERROR] %s", err)
		}

		configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{
			Operation: types.VirtualDeviceConfigSpecOperationAdd,
			Device:    scsi,
		})

		configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)}

		task, err = folder.CreateVM(context.TODO(), configSpec, resourcePool, nil)
		if err != nil {
			log.Printf("[ERROR] %s", err)
		}

		err = task.Wait(context.TODO())
		if err != nil {
			log.Printf("[ERROR] %s", err)
		}

	} else {

		relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template, vm.linkedClone, vm.hardDisks[0].initType)
		if err != nil {
			return err
		}

		log.Printf("[DEBUG] relocate spec: %v", relocateSpec)

		// make vm clone spec
		cloneSpec := types.VirtualMachineCloneSpec{
			Location: relocateSpec,
			Template: false,
			Config:   &configSpec,
			PowerOn:  false,
		}
		if vm.linkedClone {
			if template_mo.Snapshot == nil {
				return fmt.Errorf("`linkedClone=true`, but image VM has no snapshots")
			}
			cloneSpec.Snapshot = template_mo.Snapshot.CurrentSnapshot
		}
		log.Printf("[DEBUG] clone spec: %v", cloneSpec)

		task, err = template.Clone(context.TODO(), folder, vm.name, cloneSpec)
		if err != nil {
			return err
		}
	}

	err = task.Wait(context.TODO())
	if err != nil {
		log.Printf("[ERROR] %s", err)
	}

	newVM, err := finder.VirtualMachine(context.TODO(), vm.Path())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] new vm: %v", newVM)

	devices, err := newVM.Device(context.TODO())
	if err != nil {
		log.Printf("[DEBUG] Template devices can't be found")
		return err
	}

	for _, dvc := range devices {
		// Issue 3559/3560: Delete all ethernet devices to add the correct ones later
		if devices.Type(dvc) == "ethernet" {
			err := newVM.RemoveDevice(context.TODO(), false, dvc)
			if err != nil {
				return err
			}
		}
	}
	// Add Network devices
	for _, dvc := range networkDevices {
		err := newVM.AddDevice(
			context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device)
		if err != nil {
			return err
		}
	}

	// Create the cdroms if needed.
	if err := createCdroms(c, newVM, dc, vm.cdroms); err != nil {
		return err
	}

	newVM.Properties(context.TODO(), newVM.Reference(), []string{"summary", "config"}, &vm_mo)
	firstDisk := 0
	if vm.template != "" {
		firstDisk++
	}
	for i := firstDisk; i < len(vm.hardDisks); i++ {
		log.Printf("[DEBUG] disk index: %v", i)

		var diskPath string
		switch {
		case vm.hardDisks[i].vmdkPath != "":
			diskPath = vm.hardDisks[i].vmdkPath
		case vm.hardDisks[i].name != "":
			snapshotFullDir := vm_mo.Config.Files.SnapshotDirectory
			split := strings.Split(snapshotFullDir, " ")
			if len(split) != 2 {
				return fmt.Errorf("[ERROR] setupVirtualMachine - failed to split snapshot directory: %v", snapshotFullDir)
			}
			vmWorkingPath := split[1]
			diskPath = vmWorkingPath + vm.hardDisks[i].name
		default:
			return fmt.Errorf("[ERROR] setupVirtualMachine - Neither vmdk path nor vmdk name was given: %#v", vm.hardDisks[i])
		}
		err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, diskPath, vm.hardDisks[i].controller)
		if err != nil {
			err2 := addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, diskPath, vm.hardDisks[i].controller)
			if err2 != nil {
				return err2
			}
			return err
		}
	}

	if vm.skipCustomization || vm.template == "" {
		log.Printf("[DEBUG] VM customization skipped")
	} else {
		var identity_options types.BaseCustomizationIdentitySettings
		if strings.HasPrefix(template_mo.Config.GuestId, "win") {
			var timeZone int
			if vm.timeZone == "Etc/UTC" {
				vm.timeZone = "085"
			}
			timeZone, err := strconv.Atoi(vm.timeZone)
			if err != nil {
				return fmt.Errorf("Error converting TimeZone: %s", err)
			}

			guiUnattended := types.CustomizationGuiUnattended{
				AutoLogon:      false,
				AutoLogonCount: 1,
				TimeZone:       int32(timeZone),
			}

			customIdentification := types.CustomizationIdentification{}

			userData := types.CustomizationUserData{
				ComputerName: &types.CustomizationFixedName{
					Name: strings.Split(vm.name, ".")[0],
				},
				ProductId: vm.windowsOptionalConfig.productKey,
				FullName:  "terraform",
				OrgName:   "terraform",
			}

			if vm.windowsOptionalConfig.domainUserPassword != "" && vm.windowsOptionalConfig.domainUser != "" && vm.windowsOptionalConfig.domain != "" {
				customIdentification.DomainAdminPassword = &types.CustomizationPassword{
					PlainText: true,
					Value:     vm.windowsOptionalConfig.domainUserPassword,
				}
				customIdentification.DomainAdmin = vm.windowsOptionalConfig.domainUser
				customIdentification.JoinDomain = vm.windowsOptionalConfig.domain
			}

			if vm.windowsOptionalConfig.adminPassword != "" {
				guiUnattended.Password = &types.CustomizationPassword{
					PlainText: true,
					Value:     vm.windowsOptionalConfig.adminPassword,
				}
			}

			identity_options = &types.CustomizationSysprep{
				GuiUnattended:  guiUnattended,
				Identification: customIdentification,
				UserData:       userData,
			}
		} else {
			identity_options = &types.CustomizationLinuxPrep{
				HostName: &types.CustomizationFixedName{
					Name: strings.Split(vm.name, ".")[0],
				},
				Domain:     vm.domain,
				TimeZone:   vm.timeZone,
				HwClockUTC: types.NewBool(true),
			}
		}

		// create CustomizationSpec
		customSpec := types.CustomizationSpec{
			Identity: identity_options,
			GlobalIPSettings: types.CustomizationGlobalIPSettings{
				DnsSuffixList: vm.dnsSuffixes,
				DnsServerList: vm.dnsServers,
			},
			NicSettingMap: networkConfigs,
		}
		log.Printf("[DEBUG] custom spec: %v", customSpec)

		log.Printf("[DEBUG] VM customization starting")
		taskb, err := newVM.Customize(context.TODO(), customSpec)
		if err != nil {
			return err
		}
		_, err = taskb.WaitForResult(context.TODO(), nil)
		if err != nil {
			return err
		}
		log.Printf("[DEBUG] VM customization finished")
	}

	if vm.hasBootableVmdk || vm.template != "" {
		t, err := newVM.PowerOn(context.TODO())
		if err != nil {
			return err
		}
		_, err = t.WaitForResult(context.TODO(), nil)
		if err != nil {
			return err
		}
		err = newVM.WaitForPowerState(context.TODO(), types.VirtualMachinePowerStatePoweredOn)
		if err != nil {
			return err
		}
	}
	return nil
}