github.com/recobe182/terraform@v0.8.5-0.20170117231232-49ab22a935b7/builtin/providers/vsphere/resource_vsphere_virtual_machine.go (about) 1 package vsphere 2 3 import ( 4 "fmt" 5 "log" 6 "net" 7 "strconv" 8 "strings" 9 10 "github.com/hashicorp/terraform/helper/schema" 11 "github.com/vmware/govmomi" 12 "github.com/vmware/govmomi/find" 13 "github.com/vmware/govmomi/object" 14 "github.com/vmware/govmomi/property" 15 "github.com/vmware/govmomi/vim25/mo" 16 "github.com/vmware/govmomi/vim25/types" 17 "golang.org/x/net/context" 18 ) 19 20 var DefaultDNSSuffixes = []string{ 21 "vsphere.local", 22 } 23 24 var DefaultDNSServers = []string{ 25 "8.8.8.8", 26 "8.8.4.4", 27 } 28 29 var DiskControllerTypes = []string{ 30 "scsi", 31 "scsi-lsi-parallel", 32 "scsi-buslogic", 33 "scsi-paravirtual", 34 "scsi-lsi-sas", 35 "ide", 36 } 37 38 type networkInterface struct { 39 deviceName string 40 label string 41 ipv4Address string 42 ipv4PrefixLength int 43 ipv4Gateway string 44 ipv6Address string 45 ipv6PrefixLength int 46 ipv6Gateway string 47 adapterType string // TODO: Make "adapter_type" argument 48 macAddress string 49 } 50 51 type hardDisk struct { 52 name string 53 size int64 54 iops int64 55 initType string 56 vmdkPath string 57 controller string 58 bootable bool 59 } 60 61 //Additional options Vsphere can use clones of windows machines 62 type windowsOptConfig struct { 63 productKey string 64 adminPassword string 65 domainUser string 66 domain string 67 domainUserPassword string 68 } 69 70 type cdrom struct { 71 datastore string 72 path string 73 } 74 75 type memoryAllocation struct { 76 reservation int64 77 } 78 79 type virtualMachine struct { 80 name string 81 folder string 82 datacenter string 83 cluster string 84 resourcePool string 85 datastore string 86 vcpu int32 87 memoryMb int64 88 memoryAllocation memoryAllocation 89 template string 90 networkInterfaces []networkInterface 91 hardDisks []hardDisk 92 cdroms []cdrom 93 domain string 94 timeZone string 95 dnsSuffixes []string 96 dnsServers []string 97 hasBootableVmdk bool 98 linkedClone bool 99 skipCustomization bool 100 enableDiskUUID bool 101 windowsOptionalConfig windowsOptConfig 102 customConfigurations map[string](types.AnyType) 103 } 104 105 func (v virtualMachine) Path() string { 106 return vmPath(v.folder, v.name) 107 } 108 109 func vmPath(folder string, name string) string { 110 var path string 111 if len(folder) > 0 { 112 path += folder + "/" 113 } 114 return path + name 115 } 116 117 func resourceVSphereVirtualMachine() *schema.Resource { 118 return &schema.Resource{ 119 Create: resourceVSphereVirtualMachineCreate, 120 Read: resourceVSphereVirtualMachineRead, 121 Update: resourceVSphereVirtualMachineUpdate, 122 Delete: resourceVSphereVirtualMachineDelete, 123 124 SchemaVersion: 1, 125 MigrateState: resourceVSphereVirtualMachineMigrateState, 126 127 Schema: map[string]*schema.Schema{ 128 "name": &schema.Schema{ 129 Type: schema.TypeString, 130 Required: true, 131 ForceNew: true, 132 }, 133 134 "folder": &schema.Schema{ 135 Type: schema.TypeString, 136 Optional: true, 137 ForceNew: true, 138 }, 139 140 "vcpu": &schema.Schema{ 141 Type: schema.TypeInt, 142 Required: true, 143 }, 144 145 "memory": &schema.Schema{ 146 Type: schema.TypeInt, 147 Required: true, 148 }, 149 150 "memory_reservation": &schema.Schema{ 151 Type: schema.TypeInt, 152 Optional: true, 153 Default: 0, 154 ForceNew: true, 155 }, 156 157 "datacenter": &schema.Schema{ 158 Type: schema.TypeString, 159 Optional: true, 160 ForceNew: true, 161 }, 162 163 "cluster": &schema.Schema{ 164 Type: schema.TypeString, 165 Optional: true, 166 ForceNew: true, 167 }, 168 169 "resource_pool": &schema.Schema{ 170 Type: schema.TypeString, 171 Optional: true, 172 ForceNew: true, 173 }, 174 175 "linked_clone": &schema.Schema{ 176 Type: schema.TypeBool, 177 Optional: true, 178 Default: false, 179 ForceNew: true, 180 }, 181 "gateway": &schema.Schema{ 182 Type: schema.TypeString, 183 Optional: true, 184 ForceNew: true, 185 Deprecated: "Please use network_interface.ipv4_gateway", 186 }, 187 188 "domain": &schema.Schema{ 189 Type: schema.TypeString, 190 Optional: true, 191 ForceNew: true, 192 Default: "vsphere.local", 193 }, 194 195 "time_zone": &schema.Schema{ 196 Type: schema.TypeString, 197 Optional: true, 198 ForceNew: true, 199 Default: "Etc/UTC", 200 }, 201 202 "dns_suffixes": &schema.Schema{ 203 Type: schema.TypeList, 204 Optional: true, 205 Elem: &schema.Schema{Type: schema.TypeString}, 206 ForceNew: true, 207 }, 208 209 "dns_servers": &schema.Schema{ 210 Type: schema.TypeList, 211 Optional: true, 212 Elem: &schema.Schema{Type: schema.TypeString}, 213 ForceNew: true, 214 }, 215 216 "skip_customization": &schema.Schema{ 217 Type: schema.TypeBool, 218 Optional: true, 219 ForceNew: true, 220 Default: false, 221 }, 222 223 "enable_disk_uuid": &schema.Schema{ 224 Type: schema.TypeBool, 225 Optional: true, 226 ForceNew: true, 227 Default: false, 228 }, 229 230 "uuid": &schema.Schema{ 231 Type: schema.TypeString, 232 Computed: true, 233 }, 234 235 "custom_configuration_parameters": &schema.Schema{ 236 Type: schema.TypeMap, 237 Optional: true, 238 ForceNew: true, 239 }, 240 241 "windows_opt_config": &schema.Schema{ 242 Type: schema.TypeList, 243 Optional: true, 244 ForceNew: true, 245 Elem: &schema.Resource{ 246 Schema: map[string]*schema.Schema{ 247 "product_key": &schema.Schema{ 248 Type: schema.TypeString, 249 Optional: true, 250 ForceNew: true, 251 }, 252 253 "admin_password": &schema.Schema{ 254 Type: schema.TypeString, 255 Optional: true, 256 ForceNew: true, 257 }, 258 259 "domain_user": &schema.Schema{ 260 Type: schema.TypeString, 261 Optional: true, 262 ForceNew: true, 263 }, 264 265 "domain": &schema.Schema{ 266 Type: schema.TypeString, 267 Optional: true, 268 ForceNew: true, 269 }, 270 271 "domain_user_password": &schema.Schema{ 272 Type: schema.TypeString, 273 Optional: true, 274 ForceNew: true, 275 }, 276 }, 277 }, 278 }, 279 280 "network_interface": &schema.Schema{ 281 Type: schema.TypeList, 282 Required: true, 283 ForceNew: true, 284 Elem: &schema.Resource{ 285 Schema: map[string]*schema.Schema{ 286 "label": &schema.Schema{ 287 Type: schema.TypeString, 288 Required: true, 289 ForceNew: true, 290 }, 291 292 "ip_address": &schema.Schema{ 293 Type: schema.TypeString, 294 Optional: true, 295 Computed: true, 296 Deprecated: "Please use ipv4_address", 297 }, 298 299 "subnet_mask": &schema.Schema{ 300 Type: schema.TypeString, 301 Optional: true, 302 Computed: true, 303 Deprecated: "Please use ipv4_prefix_length", 304 }, 305 306 "ipv4_address": &schema.Schema{ 307 Type: schema.TypeString, 308 Optional: true, 309 Computed: true, 310 }, 311 312 "ipv4_prefix_length": &schema.Schema{ 313 Type: schema.TypeInt, 314 Optional: true, 315 Computed: true, 316 }, 317 318 "ipv4_gateway": &schema.Schema{ 319 Type: schema.TypeString, 320 Optional: true, 321 Computed: true, 322 }, 323 324 "ipv6_address": &schema.Schema{ 325 Type: schema.TypeString, 326 Optional: true, 327 Computed: true, 328 }, 329 330 "ipv6_prefix_length": &schema.Schema{ 331 Type: schema.TypeInt, 332 Optional: true, 333 Computed: true, 334 }, 335 336 "ipv6_gateway": &schema.Schema{ 337 Type: schema.TypeString, 338 Optional: true, 339 Computed: true, 340 }, 341 342 "adapter_type": &schema.Schema{ 343 Type: schema.TypeString, 344 Optional: true, 345 ForceNew: true, 346 }, 347 348 "mac_address": &schema.Schema{ 349 Type: schema.TypeString, 350 Optional: true, 351 Computed: true, 352 }, 353 }, 354 }, 355 }, 356 357 "disk": &schema.Schema{ 358 Type: schema.TypeSet, 359 Required: true, 360 Elem: &schema.Resource{ 361 Schema: map[string]*schema.Schema{ 362 "uuid": &schema.Schema{ 363 Type: schema.TypeString, 364 Computed: true, 365 }, 366 367 "key": &schema.Schema{ 368 Type: schema.TypeInt, 369 Computed: true, 370 }, 371 372 "template": &schema.Schema{ 373 Type: schema.TypeString, 374 Optional: true, 375 }, 376 377 "type": &schema.Schema{ 378 Type: schema.TypeString, 379 Optional: true, 380 Default: "eager_zeroed", 381 ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { 382 value := v.(string) 383 if value != "thin" && value != "eager_zeroed" && value != "lazy" { 384 errors = append(errors, fmt.Errorf( 385 "only 'thin', 'eager_zeroed', and 'lazy' are supported values for 'type'")) 386 } 387 return 388 }, 389 }, 390 391 "datastore": &schema.Schema{ 392 Type: schema.TypeString, 393 Optional: true, 394 }, 395 396 "size": &schema.Schema{ 397 Type: schema.TypeInt, 398 Optional: true, 399 }, 400 401 "name": &schema.Schema{ 402 Type: schema.TypeString, 403 Optional: true, 404 }, 405 406 "iops": &schema.Schema{ 407 Type: schema.TypeInt, 408 Optional: true, 409 }, 410 411 "vmdk": &schema.Schema{ 412 // TODO: Add ValidateFunc to confirm path exists 413 Type: schema.TypeString, 414 Optional: true, 415 }, 416 417 "bootable": &schema.Schema{ 418 Type: schema.TypeBool, 419 Optional: true, 420 }, 421 422 "keep_on_remove": &schema.Schema{ 423 Type: schema.TypeBool, 424 Optional: true, 425 }, 426 427 "controller_type": &schema.Schema{ 428 Type: schema.TypeString, 429 Optional: true, 430 Default: "scsi", 431 ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { 432 value := v.(string) 433 found := false 434 for _, t := range DiskControllerTypes { 435 if t == value { 436 found = true 437 } 438 } 439 if !found { 440 errors = append(errors, fmt.Errorf( 441 "Supported values for 'controller_type' are %v", strings.Join(DiskControllerTypes, ", "))) 442 } 443 return 444 }, 445 }, 446 }, 447 }, 448 }, 449 450 "detach_unknown_disks_on_delete": &schema.Schema{ 451 Type: schema.TypeBool, 452 Optional: true, 453 Default: false, 454 }, 455 456 "cdrom": &schema.Schema{ 457 Type: schema.TypeList, 458 Optional: true, 459 ForceNew: true, 460 Elem: &schema.Resource{ 461 Schema: map[string]*schema.Schema{ 462 "datastore": &schema.Schema{ 463 Type: schema.TypeString, 464 Required: true, 465 ForceNew: true, 466 }, 467 468 "path": &schema.Schema{ 469 Type: schema.TypeString, 470 Required: true, 471 ForceNew: true, 472 }, 473 }, 474 }, 475 }, 476 }, 477 } 478 } 479 480 func resourceVSphereVirtualMachineUpdate(d *schema.ResourceData, meta interface{}) error { 481 // flag if changes have to be applied 482 hasChanges := false 483 // flag if changes have to be done when powered off 484 rebootRequired := false 485 486 // make config spec 487 configSpec := types.VirtualMachineConfigSpec{} 488 489 if d.HasChange("vcpu") { 490 configSpec.NumCPUs = int32(d.Get("vcpu").(int)) 491 hasChanges = true 492 rebootRequired = true 493 } 494 495 if d.HasChange("memory") { 496 configSpec.MemoryMB = int64(d.Get("memory").(int)) 497 hasChanges = true 498 rebootRequired = true 499 } 500 501 client := meta.(*govmomi.Client) 502 dc, err := getDatacenter(client, d.Get("datacenter").(string)) 503 if err != nil { 504 return err 505 } 506 finder := find.NewFinder(client.Client, true) 507 finder = finder.SetDatacenter(dc) 508 509 vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string))) 510 if err != nil { 511 return err 512 } 513 514 if d.HasChange("disk") { 515 hasChanges = true 516 oldDisks, newDisks := d.GetChange("disk") 517 oldDiskSet := oldDisks.(*schema.Set) 518 newDiskSet := newDisks.(*schema.Set) 519 520 addedDisks := newDiskSet.Difference(oldDiskSet) 521 removedDisks := oldDiskSet.Difference(newDiskSet) 522 523 // Removed disks 524 for _, diskRaw := range removedDisks.List() { 525 if disk, ok := diskRaw.(map[string]interface{}); ok { 526 devices, err := vm.Device(context.TODO()) 527 if err != nil { 528 return fmt.Errorf("[ERROR] Update Remove Disk - Could not get virtual device list: %v", err) 529 } 530 virtualDisk := devices.FindByKey(int32(disk["key"].(int))) 531 532 keep := false 533 if v, ok := disk["keep_on_remove"].(bool); ok { 534 keep = v 535 } 536 537 err = vm.RemoveDevice(context.TODO(), keep, virtualDisk) 538 if err != nil { 539 return fmt.Errorf("[ERROR] Update Remove Disk - Error removing disk: %v", err) 540 } 541 } 542 } 543 // Added disks 544 for _, diskRaw := range addedDisks.List() { 545 if disk, ok := diskRaw.(map[string]interface{}); ok { 546 547 var datastore *object.Datastore 548 if disk["datastore"] == "" { 549 datastore, err = finder.DefaultDatastore(context.TODO()) 550 if err != nil { 551 return fmt.Errorf("[ERROR] Update Remove Disk - Error finding datastore: %v", err) 552 } 553 } else { 554 datastore, err = finder.Datastore(context.TODO(), disk["datastore"].(string)) 555 if err != nil { 556 log.Printf("[ERROR] Couldn't find datastore %v. %s", disk["datastore"].(string), err) 557 return err 558 } 559 } 560 561 var size int64 562 if disk["size"] == 0 { 563 size = 0 564 } else { 565 size = int64(disk["size"].(int)) 566 } 567 iops := int64(disk["iops"].(int)) 568 controller_type := disk["controller_type"].(string) 569 570 var mo mo.VirtualMachine 571 vm.Properties(context.TODO(), vm.Reference(), []string{"summary", "config"}, &mo) 572 573 var diskPath string 574 switch { 575 case disk["vmdk"] != "": 576 diskPath = disk["vmdk"].(string) 577 case disk["name"] != "": 578 snapshotFullDir := mo.Config.Files.SnapshotDirectory 579 split := strings.Split(snapshotFullDir, " ") 580 if len(split) != 2 { 581 return fmt.Errorf("[ERROR] createVirtualMachine - failed to split snapshot directory: %v", snapshotFullDir) 582 } 583 vmWorkingPath := split[1] 584 diskPath = vmWorkingPath + disk["name"].(string) 585 default: 586 return fmt.Errorf("[ERROR] resourceVSphereVirtualMachineUpdate - Neither vmdk path nor vmdk name was given") 587 } 588 589 var initType string 590 if disk["type"] != "" { 591 initType = disk["type"].(string) 592 } else { 593 initType = "thin" 594 } 595 596 log.Printf("[INFO] Attaching disk: %v", diskPath) 597 err = addHardDisk(vm, size, iops, initType, datastore, diskPath, controller_type) 598 if err != nil { 599 log.Printf("[ERROR] Add Hard Disk Failed: %v", err) 600 return err 601 } 602 } 603 if err != nil { 604 return err 605 } 606 } 607 } 608 609 // do nothing if there are no changes 610 if !hasChanges { 611 return nil 612 } 613 614 log.Printf("[DEBUG] virtual machine config spec: %v", configSpec) 615 616 if rebootRequired { 617 log.Printf("[INFO] Shutting down virtual machine: %s", d.Id()) 618 619 task, err := vm.PowerOff(context.TODO()) 620 if err != nil { 621 return err 622 } 623 624 err = task.Wait(context.TODO()) 625 if err != nil { 626 return err 627 } 628 } 629 630 log.Printf("[INFO] Reconfiguring virtual machine: %s", d.Id()) 631 632 task, err := vm.Reconfigure(context.TODO(), configSpec) 633 if err != nil { 634 log.Printf("[ERROR] %s", err) 635 } 636 637 err = task.Wait(context.TODO()) 638 if err != nil { 639 log.Printf("[ERROR] %s", err) 640 } 641 642 if rebootRequired { 643 task, err = vm.PowerOn(context.TODO()) 644 if err != nil { 645 return err 646 } 647 648 err = task.Wait(context.TODO()) 649 if err != nil { 650 log.Printf("[ERROR] %s", err) 651 } 652 } 653 654 return resourceVSphereVirtualMachineRead(d, meta) 655 } 656 657 func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error { 658 client := meta.(*govmomi.Client) 659 660 vm := virtualMachine{ 661 name: d.Get("name").(string), 662 vcpu: int32(d.Get("vcpu").(int)), 663 memoryMb: int64(d.Get("memory").(int)), 664 memoryAllocation: memoryAllocation{ 665 reservation: int64(d.Get("memory_reservation").(int)), 666 }, 667 } 668 669 if v, ok := d.GetOk("folder"); ok { 670 vm.folder = v.(string) 671 } 672 673 if v, ok := d.GetOk("datacenter"); ok { 674 vm.datacenter = v.(string) 675 } 676 677 if v, ok := d.GetOk("cluster"); ok { 678 vm.cluster = v.(string) 679 } 680 681 if v, ok := d.GetOk("resource_pool"); ok { 682 vm.resourcePool = v.(string) 683 } 684 685 if v, ok := d.GetOk("domain"); ok { 686 vm.domain = v.(string) 687 } 688 689 if v, ok := d.GetOk("time_zone"); ok { 690 vm.timeZone = v.(string) 691 } 692 693 if v, ok := d.GetOk("linked_clone"); ok { 694 vm.linkedClone = v.(bool) 695 } 696 697 if v, ok := d.GetOk("skip_customization"); ok { 698 vm.skipCustomization = v.(bool) 699 } 700 701 if v, ok := d.GetOk("enable_disk_uuid"); ok { 702 vm.enableDiskUUID = v.(bool) 703 } 704 705 if raw, ok := d.GetOk("dns_suffixes"); ok { 706 for _, v := range raw.([]interface{}) { 707 vm.dnsSuffixes = append(vm.dnsSuffixes, v.(string)) 708 } 709 } else { 710 vm.dnsSuffixes = DefaultDNSSuffixes 711 } 712 713 if raw, ok := d.GetOk("dns_servers"); ok { 714 for _, v := range raw.([]interface{}) { 715 vm.dnsServers = append(vm.dnsServers, v.(string)) 716 } 717 } else { 718 vm.dnsServers = DefaultDNSServers 719 } 720 721 if vL, ok := d.GetOk("custom_configuration_parameters"); ok { 722 if custom_configs, ok := vL.(map[string]interface{}); ok { 723 custom := make(map[string]types.AnyType) 724 for k, v := range custom_configs { 725 custom[k] = v 726 } 727 vm.customConfigurations = custom 728 log.Printf("[DEBUG] custom_configuration_parameters init: %v", vm.customConfigurations) 729 } 730 } 731 732 if vL, ok := d.GetOk("network_interface"); ok { 733 networks := make([]networkInterface, len(vL.([]interface{}))) 734 for i, v := range vL.([]interface{}) { 735 network := v.(map[string]interface{}) 736 networks[i].label = network["label"].(string) 737 if v, ok := network["ip_address"].(string); ok && v != "" { 738 networks[i].ipv4Address = v 739 } 740 if v, ok := d.GetOk("gateway"); ok { 741 networks[i].ipv4Gateway = v.(string) 742 } 743 if v, ok := network["subnet_mask"].(string); ok && v != "" { 744 ip := net.ParseIP(v).To4() 745 if ip != nil { 746 mask := net.IPv4Mask(ip[0], ip[1], ip[2], ip[3]) 747 pl, _ := mask.Size() 748 networks[i].ipv4PrefixLength = pl 749 } else { 750 return fmt.Errorf("subnet_mask parameter is invalid.") 751 } 752 } 753 if v, ok := network["ipv4_address"].(string); ok && v != "" { 754 networks[i].ipv4Address = v 755 } 756 if v, ok := network["ipv4_prefix_length"].(int); ok && v != 0 { 757 networks[i].ipv4PrefixLength = v 758 } 759 if v, ok := network["ipv4_gateway"].(string); ok && v != "" { 760 networks[i].ipv4Gateway = v 761 } 762 if v, ok := network["ipv6_address"].(string); ok && v != "" { 763 networks[i].ipv6Address = v 764 } 765 if v, ok := network["ipv6_prefix_length"].(int); ok && v != 0 { 766 networks[i].ipv6PrefixLength = v 767 } 768 if v, ok := network["ipv6_gateway"].(string); ok && v != "" { 769 networks[i].ipv6Gateway = v 770 } 771 if v, ok := network["mac_address"].(string); ok && v != "" { 772 networks[i].macAddress = v 773 } 774 } 775 vm.networkInterfaces = networks 776 log.Printf("[DEBUG] network_interface init: %v", networks) 777 } 778 779 if vL, ok := d.GetOk("windows_opt_config"); ok { 780 var winOpt windowsOptConfig 781 custom_configs := (vL.([]interface{}))[0].(map[string]interface{}) 782 if v, ok := custom_configs["admin_password"].(string); ok && v != "" { 783 winOpt.adminPassword = v 784 } 785 if v, ok := custom_configs["domain"].(string); ok && v != "" { 786 winOpt.domain = v 787 } 788 if v, ok := custom_configs["domain_user"].(string); ok && v != "" { 789 winOpt.domainUser = v 790 } 791 if v, ok := custom_configs["product_key"].(string); ok && v != "" { 792 winOpt.productKey = v 793 } 794 if v, ok := custom_configs["domain_user_password"].(string); ok && v != "" { 795 winOpt.domainUserPassword = v 796 } 797 vm.windowsOptionalConfig = winOpt 798 log.Printf("[DEBUG] windows config init: %v", winOpt) 799 } 800 801 if vL, ok := d.GetOk("disk"); ok { 802 if diskSet, ok := vL.(*schema.Set); ok { 803 804 disks := []hardDisk{} 805 for _, value := range diskSet.List() { 806 disk := value.(map[string]interface{}) 807 newDisk := hardDisk{} 808 809 if v, ok := disk["template"].(string); ok && v != "" { 810 if v, ok := disk["name"].(string); ok && v != "" { 811 return fmt.Errorf("Cannot specify name of a template") 812 } 813 vm.template = v 814 if vm.hasBootableVmdk { 815 return fmt.Errorf("[ERROR] Only one bootable disk or template may be given") 816 } 817 vm.hasBootableVmdk = true 818 } 819 820 if v, ok := disk["type"].(string); ok && v != "" { 821 newDisk.initType = v 822 } 823 824 if v, ok := disk["datastore"].(string); ok && v != "" { 825 vm.datastore = v 826 } 827 828 if v, ok := disk["size"].(int); ok && v != 0 { 829 if v, ok := disk["template"].(string); ok && v != "" { 830 return fmt.Errorf("Cannot specify size of a template") 831 } 832 833 if v, ok := disk["name"].(string); ok && v != "" { 834 newDisk.name = v 835 } else { 836 return fmt.Errorf("[ERROR] Disk name must be provided when creating a new disk") 837 } 838 839 newDisk.size = int64(v) 840 } 841 842 if v, ok := disk["iops"].(int); ok && v != 0 { 843 newDisk.iops = int64(v) 844 } 845 846 if v, ok := disk["controller_type"].(string); ok && v != "" { 847 newDisk.controller = v 848 } 849 850 if vVmdk, ok := disk["vmdk"].(string); ok && vVmdk != "" { 851 if v, ok := disk["template"].(string); ok && v != "" { 852 return fmt.Errorf("Cannot specify a vmdk for a template") 853 } 854 if v, ok := disk["size"].(string); ok && v != "" { 855 return fmt.Errorf("Cannot specify size of a vmdk") 856 } 857 if v, ok := disk["name"].(string); ok && v != "" { 858 return fmt.Errorf("Cannot specify name of a vmdk") 859 } 860 if vBootable, ok := disk["bootable"].(bool); ok { 861 if vBootable && vm.hasBootableVmdk { 862 return fmt.Errorf("[ERROR] Only one bootable disk or template may be given") 863 } 864 newDisk.bootable = vBootable 865 vm.hasBootableVmdk = vm.hasBootableVmdk || vBootable 866 } 867 newDisk.vmdkPath = vVmdk 868 } 869 // Preserves order so bootable disk is first 870 if newDisk.bootable == true || disk["template"] != "" { 871 disks = append([]hardDisk{newDisk}, disks...) 872 } else { 873 disks = append(disks, newDisk) 874 } 875 } 876 vm.hardDisks = disks 877 log.Printf("[DEBUG] disk init: %v", disks) 878 } 879 } 880 881 if vL, ok := d.GetOk("cdrom"); ok { 882 cdroms := make([]cdrom, len(vL.([]interface{}))) 883 for i, v := range vL.([]interface{}) { 884 c := v.(map[string]interface{}) 885 if v, ok := c["datastore"].(string); ok && v != "" { 886 cdroms[i].datastore = v 887 } else { 888 return fmt.Errorf("Datastore argument must be specified when attaching a cdrom image.") 889 } 890 if v, ok := c["path"].(string); ok && v != "" { 891 cdroms[i].path = v 892 } else { 893 return fmt.Errorf("Path argument must be specified when attaching a cdrom image.") 894 } 895 } 896 vm.cdroms = cdroms 897 log.Printf("[DEBUG] cdrom init: %v", cdroms) 898 } 899 900 err := vm.setupVirtualMachine(client) 901 if err != nil { 902 return err 903 } 904 905 d.SetId(vm.Path()) 906 log.Printf("[INFO] Created virtual machine: %s", d.Id()) 907 908 return resourceVSphereVirtualMachineRead(d, meta) 909 } 910 911 func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{}) error { 912 log.Printf("[DEBUG] virtual machine resource data: %#v", d) 913 client := meta.(*govmomi.Client) 914 dc, err := getDatacenter(client, d.Get("datacenter").(string)) 915 if err != nil { 916 return err 917 } 918 finder := find.NewFinder(client.Client, true) 919 finder = finder.SetDatacenter(dc) 920 921 vm, err := finder.VirtualMachine(context.TODO(), d.Id()) 922 if err != nil { 923 d.SetId("") 924 return nil 925 } 926 927 state, err := vm.PowerState(context.TODO()) 928 if err != nil { 929 return err 930 } 931 932 if state == types.VirtualMachinePowerStatePoweredOn { 933 // wait for interfaces to appear 934 log.Printf("[DEBUG] Waiting for interfaces to appear") 935 936 _, err = vm.WaitForNetIP(context.TODO(), false) 937 if err != nil { 938 return err 939 } 940 941 log.Printf("[DEBUG] Successfully waited for interfaces to appear") 942 } 943 944 var mvm mo.VirtualMachine 945 collector := property.DefaultCollector(client.Client) 946 if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"guest", "summary", "datastore", "config"}, &mvm); err != nil { 947 return err 948 } 949 950 log.Printf("[DEBUG] Datacenter - %#v", dc) 951 log.Printf("[DEBUG] mvm.Summary.Config - %#v", mvm.Summary.Config) 952 log.Printf("[DEBUG] mvm.Summary.Config - %#v", mvm.Config) 953 log.Printf("[DEBUG] mvm.Guest.Net - %#v", mvm.Guest.Net) 954 955 disks := make([]map[string]interface{}, 0) 956 templateDisk := make(map[string]interface{}, 1) 957 for _, device := range mvm.Config.Hardware.Device { 958 if vd, ok := device.(*types.VirtualDisk); ok { 959 960 virtualDevice := vd.GetVirtualDevice() 961 962 backingInfo := virtualDevice.Backing 963 var diskFullPath string 964 var diskUuid string 965 if v, ok := backingInfo.(*types.VirtualDiskFlatVer2BackingInfo); ok { 966 diskFullPath = v.FileName 967 diskUuid = v.Uuid 968 } else if v, ok := backingInfo.(*types.VirtualDiskSparseVer2BackingInfo); ok { 969 diskFullPath = v.FileName 970 diskUuid = v.Uuid 971 } 972 log.Printf("[DEBUG] resourceVSphereVirtualMachineRead - Analyzing disk: %v", diskFullPath) 973 974 // Separate datastore and path 975 diskFullPathSplit := strings.Split(diskFullPath, " ") 976 if len(diskFullPathSplit) != 2 { 977 return fmt.Errorf("[ERROR] Failed trying to parse disk path: %v", diskFullPath) 978 } 979 diskPath := diskFullPathSplit[1] 980 // Isolate filename 981 diskNameSplit := strings.Split(diskPath, "/") 982 diskName := diskNameSplit[len(diskNameSplit)-1] 983 // Remove possible extension 984 diskName = strings.Split(diskName, ".")[0] 985 986 if prevDisks, ok := d.GetOk("disk"); ok { 987 if prevDiskSet, ok := prevDisks.(*schema.Set); ok { 988 for _, v := range prevDiskSet.List() { 989 prevDisk := v.(map[string]interface{}) 990 991 // We're guaranteed only one template disk. Passing value directly through since templates should be immutable 992 if prevDisk["template"] != "" { 993 if len(templateDisk) == 0 { 994 templateDisk = prevDisk 995 disks = append(disks, templateDisk) 996 break 997 } 998 } 999 1000 // It is enforced that prevDisk["name"] should only be set in the case 1001 // of creating a new disk for the user. 1002 // size case: name was set by user, compare parsed filename from mo.filename (without path or .vmdk extension) with name 1003 // vmdk case: compare prevDisk["vmdk"] and mo.Filename 1004 if diskName == prevDisk["name"] || diskPath == prevDisk["vmdk"] { 1005 1006 prevDisk["key"] = virtualDevice.Key 1007 prevDisk["uuid"] = diskUuid 1008 1009 disks = append(disks, prevDisk) 1010 break 1011 } 1012 } 1013 } 1014 } 1015 log.Printf("[DEBUG] disks: %#v", disks) 1016 } 1017 } 1018 err = d.Set("disk", disks) 1019 if err != nil { 1020 return fmt.Errorf("Invalid disks to set: %#v", disks) 1021 } 1022 1023 networkInterfaces := make([]map[string]interface{}, 0) 1024 for _, v := range mvm.Guest.Net { 1025 if v.DeviceConfigId >= 0 { 1026 log.Printf("[DEBUG] v.Network - %#v", v.Network) 1027 networkInterface := make(map[string]interface{}) 1028 networkInterface["label"] = v.Network 1029 networkInterface["mac_address"] = v.MacAddress 1030 for _, ip := range v.IpConfig.IpAddress { 1031 p := net.ParseIP(ip.IpAddress) 1032 if p.To4() != nil { 1033 log.Printf("[DEBUG] p.String - %#v", p.String()) 1034 log.Printf("[DEBUG] ip.PrefixLength - %#v", ip.PrefixLength) 1035 networkInterface["ipv4_address"] = p.String() 1036 networkInterface["ipv4_prefix_length"] = ip.PrefixLength 1037 } else if p.To16() != nil { 1038 log.Printf("[DEBUG] p.String - %#v", p.String()) 1039 log.Printf("[DEBUG] ip.PrefixLength - %#v", ip.PrefixLength) 1040 networkInterface["ipv6_address"] = p.String() 1041 networkInterface["ipv6_prefix_length"] = ip.PrefixLength 1042 } 1043 log.Printf("[DEBUG] networkInterface: %#v", networkInterface) 1044 } 1045 log.Printf("[DEBUG] networkInterface: %#v", networkInterface) 1046 networkInterfaces = append(networkInterfaces, networkInterface) 1047 } 1048 } 1049 if mvm.Guest.IpStack != nil { 1050 for _, v := range mvm.Guest.IpStack { 1051 if v.IpRouteConfig != nil && v.IpRouteConfig.IpRoute != nil { 1052 for _, route := range v.IpRouteConfig.IpRoute { 1053 if route.Gateway.Device != "" { 1054 gatewaySetting := "" 1055 if route.Network == "::" { 1056 gatewaySetting = "ipv6_gateway" 1057 } else if route.Network == "0.0.0.0" { 1058 gatewaySetting = "ipv4_gateway" 1059 } 1060 if gatewaySetting != "" { 1061 deviceID, err := strconv.Atoi(route.Gateway.Device) 1062 if err != nil { 1063 log.Printf("[WARN] error at processing %s of device id %#v: %#v", gatewaySetting, route.Gateway.Device, err) 1064 } else { 1065 log.Printf("[DEBUG] %s of device id %d: %s", gatewaySetting, deviceID, route.Gateway.IpAddress) 1066 networkInterfaces[deviceID][gatewaySetting] = route.Gateway.IpAddress 1067 } 1068 } 1069 } 1070 } 1071 } 1072 } 1073 } 1074 log.Printf("[DEBUG] networkInterfaces: %#v", networkInterfaces) 1075 err = d.Set("network_interface", networkInterfaces) 1076 if err != nil { 1077 return fmt.Errorf("Invalid network interfaces to set: %#v", networkInterfaces) 1078 } 1079 1080 if len(networkInterfaces) > 0 { 1081 if _, ok := networkInterfaces[0]["ipv4_address"]; ok { 1082 log.Printf("[DEBUG] ip address: %v", networkInterfaces[0]["ipv4_address"].(string)) 1083 d.SetConnInfo(map[string]string{ 1084 "type": "ssh", 1085 "host": networkInterfaces[0]["ipv4_address"].(string), 1086 }) 1087 } 1088 } 1089 1090 var rootDatastore string 1091 for _, v := range mvm.Datastore { 1092 var md mo.Datastore 1093 if err := collector.RetrieveOne(context.TODO(), v, []string{"name", "parent"}, &md); err != nil { 1094 return err 1095 } 1096 if md.Parent.Type == "StoragePod" { 1097 var msp mo.StoragePod 1098 if err := collector.RetrieveOne(context.TODO(), *md.Parent, []string{"name"}, &msp); err != nil { 1099 return err 1100 } 1101 rootDatastore = msp.Name 1102 log.Printf("[DEBUG] %#v", msp.Name) 1103 } else { 1104 rootDatastore = md.Name 1105 log.Printf("[DEBUG] %#v", md.Name) 1106 } 1107 break 1108 } 1109 1110 d.Set("datacenter", dc) 1111 d.Set("memory", mvm.Summary.Config.MemorySizeMB) 1112 d.Set("memory_reservation", mvm.Summary.Config.MemoryReservation) 1113 d.Set("cpu", mvm.Summary.Config.NumCpu) 1114 d.Set("datastore", rootDatastore) 1115 d.Set("uuid", mvm.Summary.Config.Uuid) 1116 1117 return nil 1118 } 1119 1120 func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error { 1121 client := meta.(*govmomi.Client) 1122 dc, err := getDatacenter(client, d.Get("datacenter").(string)) 1123 if err != nil { 1124 return err 1125 } 1126 finder := find.NewFinder(client.Client, true) 1127 finder = finder.SetDatacenter(dc) 1128 1129 vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string))) 1130 if err != nil { 1131 return err 1132 } 1133 devices, err := vm.Device(context.TODO()) 1134 if err != nil { 1135 log.Printf("[DEBUG] resourceVSphereVirtualMachineDelete - Failed to get device list: %v", err) 1136 return err 1137 } 1138 1139 log.Printf("[INFO] Deleting virtual machine: %s", d.Id()) 1140 state, err := vm.PowerState(context.TODO()) 1141 if err != nil { 1142 return err 1143 } 1144 1145 if state == types.VirtualMachinePowerStatePoweredOn { 1146 task, err := vm.PowerOff(context.TODO()) 1147 if err != nil { 1148 return err 1149 } 1150 1151 err = task.Wait(context.TODO()) 1152 if err != nil { 1153 return err 1154 } 1155 } 1156 1157 // Safely eject any disks the user marked as keep_on_remove 1158 var diskSetList []interface{} 1159 if vL, ok := d.GetOk("disk"); ok { 1160 if diskSet, ok := vL.(*schema.Set); ok { 1161 diskSetList = diskSet.List() 1162 for _, value := range diskSetList { 1163 disk := value.(map[string]interface{}) 1164 1165 if v, ok := disk["keep_on_remove"].(bool); ok && v == true { 1166 log.Printf("[DEBUG] not destroying %v", disk["name"]) 1167 virtualDisk := devices.FindByKey(int32(disk["key"].(int))) 1168 err = vm.RemoveDevice(context.TODO(), true, virtualDisk) 1169 if err != nil { 1170 log.Printf("[ERROR] Update Remove Disk - Error removing disk: %v", err) 1171 return err 1172 } 1173 } 1174 } 1175 } 1176 } 1177 1178 // Safely eject any disks that are not managed by this resource 1179 if v, ok := d.GetOk("detach_unknown_disks_on_delete"); ok && v.(bool) { 1180 var disksToRemove object.VirtualDeviceList 1181 for _, device := range devices { 1182 if devices.TypeName(device) != "VirtualDisk" { 1183 continue 1184 } 1185 vd := device.GetVirtualDevice() 1186 var skip bool 1187 for _, value := range diskSetList { 1188 disk := value.(map[string]interface{}) 1189 if int32(disk["key"].(int)) == vd.Key { 1190 skip = true 1191 break 1192 } 1193 } 1194 if skip { 1195 continue 1196 } 1197 disksToRemove = append(disksToRemove, device) 1198 } 1199 if len(disksToRemove) != 0 { 1200 err = vm.RemoveDevice(context.TODO(), true, disksToRemove...) 1201 if err != nil { 1202 log.Printf("[ERROR] Update Remove Disk - Error removing disk: %v", err) 1203 return err 1204 } 1205 } 1206 } 1207 1208 task, err := vm.Destroy(context.TODO()) 1209 if err != nil { 1210 return err 1211 } 1212 1213 err = task.Wait(context.TODO()) 1214 if err != nil { 1215 return err 1216 } 1217 1218 d.SetId("") 1219 return nil 1220 } 1221 1222 // addHardDisk adds a new Hard Disk to the VirtualMachine. 1223 func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string, datastore *object.Datastore, diskPath string, controller_type string) error { 1224 devices, err := vm.Device(context.TODO()) 1225 if err != nil { 1226 return err 1227 } 1228 log.Printf("[DEBUG] vm devices: %#v\n", devices) 1229 1230 var controller types.BaseVirtualController 1231 switch controller_type { 1232 case "scsi": 1233 controller, err = devices.FindDiskController(controller_type) 1234 case "scsi-lsi-parallel": 1235 controller = devices.PickController(&types.VirtualLsiLogicController{}) 1236 case "scsi-buslogic": 1237 controller = devices.PickController(&types.VirtualBusLogicController{}) 1238 case "scsi-paravirtual": 1239 controller = devices.PickController(&types.ParaVirtualSCSIController{}) 1240 case "scsi-lsi-sas": 1241 controller = devices.PickController(&types.VirtualLsiLogicSASController{}) 1242 case "ide": 1243 controller, err = devices.FindDiskController(controller_type) 1244 default: 1245 return fmt.Errorf("[ERROR] Unsupported disk controller provided: %v", controller_type) 1246 } 1247 1248 if err != nil || controller == nil { 1249 // Check if max number of scsi controller are already used 1250 diskControllers := getSCSIControllers(devices) 1251 if len(diskControllers) >= 4 { 1252 return fmt.Errorf("[ERROR] Maximum number of SCSI controllers created") 1253 } 1254 1255 log.Printf("[DEBUG] Couldn't find a %v controller. Creating one..", controller_type) 1256 1257 var c types.BaseVirtualDevice 1258 switch controller_type { 1259 case "scsi": 1260 // Create scsi controller 1261 c, err = devices.CreateSCSIController("scsi") 1262 if err != nil { 1263 return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err) 1264 } 1265 case "scsi-lsi-parallel": 1266 // Create scsi controller 1267 c, err = devices.CreateSCSIController("lsilogic") 1268 if err != nil { 1269 return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err) 1270 } 1271 case "scsi-buslogic": 1272 // Create scsi controller 1273 c, err = devices.CreateSCSIController("buslogic") 1274 if err != nil { 1275 return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err) 1276 } 1277 case "scsi-paravirtual": 1278 // Create scsi controller 1279 c, err = devices.CreateSCSIController("pvscsi") 1280 if err != nil { 1281 return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err) 1282 } 1283 case "scsi-lsi-sas": 1284 // Create scsi controller 1285 c, err = devices.CreateSCSIController("lsilogic-sas") 1286 if err != nil { 1287 return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err) 1288 } 1289 case "ide": 1290 // Create ide controller 1291 c, err = devices.CreateIDEController() 1292 if err != nil { 1293 return fmt.Errorf("[ERROR] Failed creating IDE controller: %v", err) 1294 } 1295 default: 1296 return fmt.Errorf("[ERROR] Unsupported disk controller provided: %v", controller_type) 1297 } 1298 1299 vm.AddDevice(context.TODO(), c) 1300 // Update our devices list 1301 devices, err := vm.Device(context.TODO()) 1302 if err != nil { 1303 return err 1304 } 1305 controller = devices.PickController(c.(types.BaseVirtualController)) 1306 if controller == nil { 1307 log.Printf("[ERROR] Could not find the new %v controller", controller_type) 1308 return fmt.Errorf("Could not find the new %v controller", controller_type) 1309 } 1310 } 1311 1312 log.Printf("[DEBUG] disk controller: %#v\n", controller) 1313 1314 // TODO Check if diskPath & datastore exist 1315 // If diskPath is not specified, pass empty string to CreateDisk() 1316 if diskPath == "" { 1317 return fmt.Errorf("[ERROR] addHardDisk - No path proided") 1318 } else { 1319 diskPath = datastore.Path(diskPath) 1320 } 1321 log.Printf("[DEBUG] addHardDisk - diskPath: %v", diskPath) 1322 disk := devices.CreateDisk(controller, datastore.Reference(), diskPath) 1323 1324 if strings.Contains(controller_type, "scsi") { 1325 unitNumber, err := getNextUnitNumber(devices, controller) 1326 if err != nil { 1327 return err 1328 } 1329 *disk.UnitNumber = unitNumber 1330 } 1331 1332 existing := devices.SelectByBackingInfo(disk.Backing) 1333 log.Printf("[DEBUG] disk: %#v\n", disk) 1334 1335 if len(existing) == 0 { 1336 disk.CapacityInKB = int64(size * 1024 * 1024) 1337 if iops != 0 { 1338 disk.StorageIOAllocation = &types.StorageIOAllocationInfo{ 1339 Limit: iops, 1340 } 1341 } 1342 backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo) 1343 1344 if diskType == "eager_zeroed" { 1345 // eager zeroed thick virtual disk 1346 backing.ThinProvisioned = types.NewBool(false) 1347 backing.EagerlyScrub = types.NewBool(true) 1348 } else if diskType == "lazy" { 1349 // lazy zeroed thick virtual disk 1350 backing.ThinProvisioned = types.NewBool(false) 1351 backing.EagerlyScrub = types.NewBool(false) 1352 } else if diskType == "thin" { 1353 // thin provisioned virtual disk 1354 backing.ThinProvisioned = types.NewBool(true) 1355 } 1356 1357 log.Printf("[DEBUG] addHardDisk: %#v\n", disk) 1358 log.Printf("[DEBUG] addHardDisk capacity: %#v\n", disk.CapacityInKB) 1359 1360 return vm.AddDevice(context.TODO(), disk) 1361 } else { 1362 log.Printf("[DEBUG] addHardDisk: Disk already present.\n") 1363 1364 return nil 1365 } 1366 } 1367 1368 func getSCSIControllers(vmDevices object.VirtualDeviceList) []*types.VirtualController { 1369 // get virtual scsi controllers of all supported types 1370 var scsiControllers []*types.VirtualController 1371 for _, device := range vmDevices { 1372 devType := vmDevices.Type(device) 1373 switch devType { 1374 case "scsi", "lsilogic", "buslogic", "pvscsi", "lsilogic-sas": 1375 if c, ok := device.(types.BaseVirtualController); ok { 1376 scsiControllers = append(scsiControllers, c.GetVirtualController()) 1377 } 1378 } 1379 } 1380 return scsiControllers 1381 } 1382 1383 func getNextUnitNumber(devices object.VirtualDeviceList, c types.BaseVirtualController) (int32, error) { 1384 key := c.GetVirtualController().Key 1385 1386 var unitNumbers [16]bool 1387 unitNumbers[7] = true 1388 1389 for _, device := range devices { 1390 d := device.GetVirtualDevice() 1391 1392 if d.ControllerKey == key { 1393 if d.UnitNumber != nil { 1394 unitNumbers[*d.UnitNumber] = true 1395 } 1396 } 1397 } 1398 for i, taken := range unitNumbers { 1399 if !taken { 1400 return int32(i), nil 1401 } 1402 } 1403 return -1, fmt.Errorf("[ERROR] getNextUnitNumber - controller is full") 1404 } 1405 1406 // addCdrom adds a new virtual cdrom drive to the VirtualMachine and attaches an image (ISO) to it from a datastore path. 1407 func addCdrom(client *govmomi.Client, vm *object.VirtualMachine, datacenter *object.Datacenter, datastore, path string) error { 1408 devices, err := vm.Device(context.TODO()) 1409 if err != nil { 1410 return err 1411 } 1412 log.Printf("[DEBUG] vm devices: %#v", devices) 1413 1414 var controller *types.VirtualIDEController 1415 controller, err = devices.FindIDEController("") 1416 if err != nil { 1417 log.Printf("[DEBUG] Couldn't find a ide controller. Creating one..") 1418 1419 var c types.BaseVirtualDevice 1420 c, err := devices.CreateIDEController() 1421 if err != nil { 1422 return fmt.Errorf("[ERROR] Failed creating IDE controller: %v", err) 1423 } 1424 1425 if v, ok := c.(*types.VirtualIDEController); ok { 1426 controller = v 1427 } else { 1428 return fmt.Errorf("[ERROR] Controller type could not be asserted") 1429 } 1430 vm.AddDevice(context.TODO(), c) 1431 // Update our devices list 1432 devices, err := vm.Device(context.TODO()) 1433 if err != nil { 1434 return err 1435 } 1436 controller, err = devices.FindIDEController("") 1437 if err != nil { 1438 log.Printf("[ERROR] Could not find the new disk IDE controller: %v", err) 1439 return err 1440 } 1441 } 1442 log.Printf("[DEBUG] ide controller: %#v", controller) 1443 1444 c, err := devices.CreateCdrom(controller) 1445 if err != nil { 1446 return err 1447 } 1448 1449 finder := find.NewFinder(client.Client, true) 1450 finder = finder.SetDatacenter(datacenter) 1451 ds, err := getDatastore(finder, datastore) 1452 if err != nil { 1453 return err 1454 } 1455 1456 c = devices.InsertIso(c, ds.Path(path)) 1457 log.Printf("[DEBUG] addCdrom: %#v", c) 1458 1459 return vm.AddDevice(context.TODO(), c) 1460 } 1461 1462 // buildNetworkDevice builds VirtualDeviceConfigSpec for Network Device. 1463 func buildNetworkDevice(f *find.Finder, label, adapterType string, macAddress string) (*types.VirtualDeviceConfigSpec, error) { 1464 network, err := f.Network(context.TODO(), "*"+label) 1465 if err != nil { 1466 return nil, err 1467 } 1468 1469 backing, err := network.EthernetCardBackingInfo(context.TODO()) 1470 if err != nil { 1471 return nil, err 1472 } 1473 1474 var address_type string 1475 if macAddress == "" { 1476 address_type = string(types.VirtualEthernetCardMacTypeGenerated) 1477 } else { 1478 address_type = string(types.VirtualEthernetCardMacTypeManual) 1479 } 1480 1481 if adapterType == "vmxnet3" { 1482 return &types.VirtualDeviceConfigSpec{ 1483 Operation: types.VirtualDeviceConfigSpecOperationAdd, 1484 Device: &types.VirtualVmxnet3{ 1485 VirtualVmxnet: types.VirtualVmxnet{ 1486 VirtualEthernetCard: types.VirtualEthernetCard{ 1487 VirtualDevice: types.VirtualDevice{ 1488 Key: -1, 1489 Backing: backing, 1490 }, 1491 AddressType: address_type, 1492 MacAddress: macAddress, 1493 }, 1494 }, 1495 }, 1496 }, nil 1497 } else if adapterType == "e1000" { 1498 return &types.VirtualDeviceConfigSpec{ 1499 Operation: types.VirtualDeviceConfigSpecOperationAdd, 1500 Device: &types.VirtualE1000{ 1501 VirtualEthernetCard: types.VirtualEthernetCard{ 1502 VirtualDevice: types.VirtualDevice{ 1503 Key: -1, 1504 Backing: backing, 1505 }, 1506 AddressType: address_type, 1507 MacAddress: macAddress, 1508 }, 1509 }, 1510 }, nil 1511 } else { 1512 return nil, fmt.Errorf("Invalid network adapter type.") 1513 } 1514 } 1515 1516 // buildVMRelocateSpec builds VirtualMachineRelocateSpec to set a place for a new VirtualMachine. 1517 func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine, linkedClone bool, initType string) (types.VirtualMachineRelocateSpec, error) { 1518 var key int32 1519 var moveType string 1520 if linkedClone { 1521 moveType = "createNewChildDiskBacking" 1522 } else { 1523 moveType = "moveAllDiskBackingsAndDisallowSharing" 1524 } 1525 log.Printf("[DEBUG] relocate type: [%s]", moveType) 1526 1527 devices, err := vm.Device(context.TODO()) 1528 if err != nil { 1529 return types.VirtualMachineRelocateSpec{}, err 1530 } 1531 for _, d := range devices { 1532 if devices.Type(d) == "disk" { 1533 key = int32(d.GetVirtualDevice().Key) 1534 } 1535 } 1536 1537 isThin := initType == "thin" 1538 eagerScrub := initType == "eager_zeroed" 1539 rpr := rp.Reference() 1540 dsr := ds.Reference() 1541 return types.VirtualMachineRelocateSpec{ 1542 Datastore: &dsr, 1543 Pool: &rpr, 1544 DiskMoveType: moveType, 1545 Disk: []types.VirtualMachineRelocateSpecDiskLocator{ 1546 { 1547 Datastore: dsr, 1548 DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{ 1549 DiskMode: "persistent", 1550 ThinProvisioned: types.NewBool(isThin), 1551 EagerlyScrub: types.NewBool(eagerScrub), 1552 }, 1553 DiskId: key, 1554 }, 1555 }, 1556 }, nil 1557 } 1558 1559 // getDatastoreObject gets datastore object. 1560 func getDatastoreObject(client *govmomi.Client, f *object.DatacenterFolders, name string) (types.ManagedObjectReference, error) { 1561 s := object.NewSearchIndex(client.Client) 1562 ref, err := s.FindChild(context.TODO(), f.DatastoreFolder, name) 1563 if err != nil { 1564 return types.ManagedObjectReference{}, err 1565 } 1566 if ref == nil { 1567 return types.ManagedObjectReference{}, fmt.Errorf("Datastore '%s' not found.", name) 1568 } 1569 log.Printf("[DEBUG] getDatastoreObject: reference: %#v", ref) 1570 return ref.Reference(), nil 1571 } 1572 1573 // buildStoragePlacementSpecCreate builds StoragePlacementSpec for create action. 1574 func buildStoragePlacementSpecCreate(f *object.DatacenterFolders, rp *object.ResourcePool, storagePod object.StoragePod, configSpec types.VirtualMachineConfigSpec) types.StoragePlacementSpec { 1575 vmfr := f.VmFolder.Reference() 1576 rpr := rp.Reference() 1577 spr := storagePod.Reference() 1578 1579 sps := types.StoragePlacementSpec{ 1580 Type: "create", 1581 ConfigSpec: &configSpec, 1582 PodSelectionSpec: types.StorageDrsPodSelectionSpec{ 1583 StoragePod: &spr, 1584 }, 1585 Folder: &vmfr, 1586 ResourcePool: &rpr, 1587 } 1588 log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps) 1589 return sps 1590 } 1591 1592 // buildStoragePlacementSpecClone builds StoragePlacementSpec for clone action. 1593 func buildStoragePlacementSpecClone(c *govmomi.Client, f *object.DatacenterFolders, vm *object.VirtualMachine, rp *object.ResourcePool, storagePod object.StoragePod) types.StoragePlacementSpec { 1594 vmr := vm.Reference() 1595 vmfr := f.VmFolder.Reference() 1596 rpr := rp.Reference() 1597 spr := storagePod.Reference() 1598 1599 var o mo.VirtualMachine 1600 err := vm.Properties(context.TODO(), vmr, []string{"datastore"}, &o) 1601 if err != nil { 1602 return types.StoragePlacementSpec{} 1603 } 1604 ds := object.NewDatastore(c.Client, o.Datastore[0]) 1605 log.Printf("[DEBUG] findDatastore: datastore: %#v\n", ds) 1606 1607 devices, err := vm.Device(context.TODO()) 1608 if err != nil { 1609 return types.StoragePlacementSpec{} 1610 } 1611 1612 var key int32 1613 for _, d := range devices.SelectByType((*types.VirtualDisk)(nil)) { 1614 key = int32(d.GetVirtualDevice().Key) 1615 log.Printf("[DEBUG] findDatastore: virtual devices: %#v\n", d.GetVirtualDevice()) 1616 } 1617 1618 sps := types.StoragePlacementSpec{ 1619 Type: "clone", 1620 Vm: &vmr, 1621 PodSelectionSpec: types.StorageDrsPodSelectionSpec{ 1622 StoragePod: &spr, 1623 }, 1624 CloneSpec: &types.VirtualMachineCloneSpec{ 1625 Location: types.VirtualMachineRelocateSpec{ 1626 Disk: []types.VirtualMachineRelocateSpecDiskLocator{ 1627 { 1628 Datastore: ds.Reference(), 1629 DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{}, 1630 DiskId: key, 1631 }, 1632 }, 1633 Pool: &rpr, 1634 }, 1635 PowerOn: false, 1636 Template: false, 1637 }, 1638 CloneName: "dummy", 1639 Folder: &vmfr, 1640 } 1641 return sps 1642 } 1643 1644 // findDatastore finds Datastore object. 1645 func findDatastore(c *govmomi.Client, sps types.StoragePlacementSpec) (*object.Datastore, error) { 1646 var datastore *object.Datastore 1647 log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps) 1648 1649 srm := object.NewStorageResourceManager(c.Client) 1650 rds, err := srm.RecommendDatastores(context.TODO(), sps) 1651 if err != nil { 1652 return nil, err 1653 } 1654 log.Printf("[DEBUG] findDatastore: recommendDatastores: %#v\n", rds) 1655 1656 spa := rds.Recommendations[0].Action[0].(*types.StoragePlacementAction) 1657 datastore = object.NewDatastore(c.Client, spa.Destination) 1658 log.Printf("[DEBUG] findDatastore: datastore: %#v", datastore) 1659 1660 return datastore, nil 1661 } 1662 1663 // createCdroms is a helper function to attach virtual cdrom devices (and their attached disk images) to a virtual IDE controller. 1664 func createCdroms(client *govmomi.Client, vm *object.VirtualMachine, datacenter *object.Datacenter, cdroms []cdrom) error { 1665 log.Printf("[DEBUG] add cdroms: %v", cdroms) 1666 for _, cd := range cdroms { 1667 log.Printf("[DEBUG] add cdrom (datastore): %v", cd.datastore) 1668 log.Printf("[DEBUG] add cdrom (cd path): %v", cd.path) 1669 err := addCdrom(client, vm, datacenter, cd.datastore, cd.path) 1670 if err != nil { 1671 return err 1672 } 1673 } 1674 1675 return nil 1676 } 1677 1678 func (vm *virtualMachine) setupVirtualMachine(c *govmomi.Client) error { 1679 dc, err := getDatacenter(c, vm.datacenter) 1680 1681 if err != nil { 1682 return err 1683 } 1684 finder := find.NewFinder(c.Client, true) 1685 finder = finder.SetDatacenter(dc) 1686 1687 var template *object.VirtualMachine 1688 var template_mo mo.VirtualMachine 1689 var vm_mo mo.VirtualMachine 1690 if vm.template != "" { 1691 template, err = finder.VirtualMachine(context.TODO(), vm.template) 1692 if err != nil { 1693 return err 1694 } 1695 log.Printf("[DEBUG] template: %#v", template) 1696 1697 err = template.Properties(context.TODO(), template.Reference(), []string{"parent", "config.template", "config.guestId", "resourcePool", "snapshot", "guest.toolsVersionStatus2", "config.guestFullName"}, &template_mo) 1698 if err != nil { 1699 return err 1700 } 1701 } 1702 1703 var resourcePool *object.ResourcePool 1704 if vm.resourcePool == "" { 1705 if vm.cluster == "" { 1706 resourcePool, err = finder.DefaultResourcePool(context.TODO()) 1707 if err != nil { 1708 return err 1709 } 1710 } else { 1711 resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources") 1712 if err != nil { 1713 return err 1714 } 1715 } 1716 } else { 1717 resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool) 1718 if err != nil { 1719 return err 1720 } 1721 } 1722 log.Printf("[DEBUG] resource pool: %#v", resourcePool) 1723 1724 dcFolders, err := dc.Folders(context.TODO()) 1725 if err != nil { 1726 return err 1727 } 1728 log.Printf("[DEBUG] folder: %#v", vm.folder) 1729 1730 folder := dcFolders.VmFolder 1731 if len(vm.folder) > 0 { 1732 si := object.NewSearchIndex(c.Client) 1733 folderRef, err := si.FindByInventoryPath( 1734 context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder)) 1735 if err != nil { 1736 return fmt.Errorf("Error reading folder %s: %s", vm.folder, err) 1737 } else if folderRef == nil { 1738 return fmt.Errorf("Cannot find folder %s", vm.folder) 1739 } else { 1740 folder = folderRef.(*object.Folder) 1741 } 1742 } 1743 1744 // make config spec 1745 configSpec := types.VirtualMachineConfigSpec{ 1746 Name: vm.name, 1747 NumCPUs: vm.vcpu, 1748 NumCoresPerSocket: 1, 1749 MemoryMB: vm.memoryMb, 1750 MemoryAllocation: &types.ResourceAllocationInfo{ 1751 Reservation: vm.memoryAllocation.reservation, 1752 }, 1753 Flags: &types.VirtualMachineFlagInfo{ 1754 DiskUuidEnabled: &vm.enableDiskUUID, 1755 }, 1756 } 1757 if vm.template == "" { 1758 configSpec.GuestId = "otherLinux64Guest" 1759 } 1760 log.Printf("[DEBUG] virtual machine config spec: %v", configSpec) 1761 1762 // make ExtraConfig 1763 log.Printf("[DEBUG] virtual machine Extra Config spec start") 1764 if len(vm.customConfigurations) > 0 { 1765 var ov []types.BaseOptionValue 1766 for k, v := range vm.customConfigurations { 1767 key := k 1768 value := v 1769 o := types.OptionValue{ 1770 Key: key, 1771 Value: &value, 1772 } 1773 log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k, v) 1774 ov = append(ov, &o) 1775 } 1776 configSpec.ExtraConfig = ov 1777 log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig) 1778 } 1779 1780 var datastore *object.Datastore 1781 if vm.datastore == "" { 1782 datastore, err = finder.DefaultDatastore(context.TODO()) 1783 if err != nil { 1784 return err 1785 } 1786 } else { 1787 datastore, err = finder.Datastore(context.TODO(), vm.datastore) 1788 if err != nil { 1789 // TODO: datastore cluster support in govmomi finder function 1790 d, err := getDatastoreObject(c, dcFolders, vm.datastore) 1791 if err != nil { 1792 return err 1793 } 1794 1795 if d.Type == "StoragePod" { 1796 sp := object.StoragePod{ 1797 Folder: object.NewFolder(c.Client, d), 1798 } 1799 1800 var sps types.StoragePlacementSpec 1801 if vm.template != "" { 1802 sps = buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp) 1803 } else { 1804 sps = buildStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec) 1805 } 1806 1807 datastore, err = findDatastore(c, sps) 1808 if err != nil { 1809 return err 1810 } 1811 } else { 1812 datastore = object.NewDatastore(c.Client, d) 1813 } 1814 } 1815 } 1816 1817 log.Printf("[DEBUG] datastore: %#v", datastore) 1818 1819 // network 1820 networkDevices := []types.BaseVirtualDeviceConfigSpec{} 1821 networkConfigs := []types.CustomizationAdapterMapping{} 1822 for _, network := range vm.networkInterfaces { 1823 // network device 1824 var networkDeviceType string 1825 if vm.template == "" { 1826 networkDeviceType = "e1000" 1827 } else { 1828 networkDeviceType = "vmxnet3" 1829 } 1830 nd, err := buildNetworkDevice(finder, network.label, networkDeviceType, network.macAddress) 1831 if err != nil { 1832 return err 1833 } 1834 log.Printf("[DEBUG] network device: %+v", nd.Device) 1835 networkDevices = append(networkDevices, nd) 1836 1837 if vm.template != "" { 1838 var ipSetting types.CustomizationIPSettings 1839 if network.ipv4Address == "" { 1840 ipSetting.Ip = &types.CustomizationDhcpIpGenerator{} 1841 } else { 1842 if network.ipv4PrefixLength == 0 { 1843 return fmt.Errorf("Error: ipv4_prefix_length argument is empty.") 1844 } 1845 m := net.CIDRMask(network.ipv4PrefixLength, 32) 1846 sm := net.IPv4(m[0], m[1], m[2], m[3]) 1847 subnetMask := sm.String() 1848 log.Printf("[DEBUG] ipv4 gateway: %v\n", network.ipv4Gateway) 1849 log.Printf("[DEBUG] ipv4 address: %v\n", network.ipv4Address) 1850 log.Printf("[DEBUG] ipv4 prefix length: %v\n", network.ipv4PrefixLength) 1851 log.Printf("[DEBUG] ipv4 subnet mask: %v\n", subnetMask) 1852 ipSetting.Gateway = []string{ 1853 network.ipv4Gateway, 1854 } 1855 ipSetting.Ip = &types.CustomizationFixedIp{ 1856 IpAddress: network.ipv4Address, 1857 } 1858 ipSetting.SubnetMask = subnetMask 1859 } 1860 1861 ipv6Spec := &types.CustomizationIPSettingsIpV6AddressSpec{} 1862 if network.ipv6Address == "" { 1863 ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{ 1864 &types.CustomizationDhcpIpV6Generator{}, 1865 } 1866 } else { 1867 log.Printf("[DEBUG] ipv6 gateway: %v\n", network.ipv6Gateway) 1868 log.Printf("[DEBUG] ipv6 address: %v\n", network.ipv6Address) 1869 log.Printf("[DEBUG] ipv6 prefix length: %v\n", network.ipv6PrefixLength) 1870 1871 ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{ 1872 &types.CustomizationFixedIpV6{ 1873 IpAddress: network.ipv6Address, 1874 SubnetMask: int32(network.ipv6PrefixLength), 1875 }, 1876 } 1877 ipv6Spec.Gateway = []string{network.ipv6Gateway} 1878 } 1879 ipSetting.IpV6Spec = ipv6Spec 1880 1881 // network config 1882 config := types.CustomizationAdapterMapping{ 1883 Adapter: ipSetting, 1884 } 1885 networkConfigs = append(networkConfigs, config) 1886 } 1887 } 1888 log.Printf("[DEBUG] network devices: %#v", networkDevices) 1889 log.Printf("[DEBUG] network configs: %#v", networkConfigs) 1890 1891 var task *object.Task 1892 if vm.template == "" { 1893 var mds mo.Datastore 1894 if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil { 1895 return err 1896 } 1897 log.Printf("[DEBUG] datastore: %#v", mds.Name) 1898 scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi") 1899 if err != nil { 1900 log.Printf("[ERROR] %s", err) 1901 } 1902 1903 configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{ 1904 Operation: types.VirtualDeviceConfigSpecOperationAdd, 1905 Device: scsi, 1906 }) 1907 1908 configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)} 1909 1910 task, err = folder.CreateVM(context.TODO(), configSpec, resourcePool, nil) 1911 if err != nil { 1912 log.Printf("[ERROR] %s", err) 1913 } 1914 1915 err = task.Wait(context.TODO()) 1916 if err != nil { 1917 log.Printf("[ERROR] %s", err) 1918 } 1919 1920 } else { 1921 1922 relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template, vm.linkedClone, vm.hardDisks[0].initType) 1923 if err != nil { 1924 return err 1925 } 1926 1927 log.Printf("[DEBUG] relocate spec: %v", relocateSpec) 1928 1929 // make vm clone spec 1930 cloneSpec := types.VirtualMachineCloneSpec{ 1931 Location: relocateSpec, 1932 Template: false, 1933 Config: &configSpec, 1934 PowerOn: false, 1935 } 1936 if vm.linkedClone { 1937 if template_mo.Snapshot == nil { 1938 return fmt.Errorf("`linkedClone=true`, but image VM has no snapshots") 1939 } 1940 cloneSpec.Snapshot = template_mo.Snapshot.CurrentSnapshot 1941 } 1942 log.Printf("[DEBUG] clone spec: %v", cloneSpec) 1943 1944 task, err = template.Clone(context.TODO(), folder, vm.name, cloneSpec) 1945 if err != nil { 1946 return err 1947 } 1948 } 1949 1950 err = task.Wait(context.TODO()) 1951 if err != nil { 1952 log.Printf("[ERROR] %s", err) 1953 } 1954 1955 newVM, err := finder.VirtualMachine(context.TODO(), vm.Path()) 1956 if err != nil { 1957 return err 1958 } 1959 log.Printf("[DEBUG] new vm: %v", newVM) 1960 1961 devices, err := newVM.Device(context.TODO()) 1962 if err != nil { 1963 log.Printf("[DEBUG] Template devices can't be found") 1964 return err 1965 } 1966 1967 for _, dvc := range devices { 1968 // Issue 3559/3560: Delete all ethernet devices to add the correct ones later 1969 if devices.Type(dvc) == "ethernet" { 1970 err := newVM.RemoveDevice(context.TODO(), false, dvc) 1971 if err != nil { 1972 return err 1973 } 1974 } 1975 } 1976 // Add Network devices 1977 for _, dvc := range networkDevices { 1978 err := newVM.AddDevice( 1979 context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device) 1980 if err != nil { 1981 return err 1982 } 1983 } 1984 1985 // Create the cdroms if needed. 1986 if err := createCdroms(c, newVM, dc, vm.cdroms); err != nil { 1987 return err 1988 } 1989 1990 newVM.Properties(context.TODO(), newVM.Reference(), []string{"summary", "config"}, &vm_mo) 1991 firstDisk := 0 1992 if vm.template != "" { 1993 firstDisk++ 1994 } 1995 for i := firstDisk; i < len(vm.hardDisks); i++ { 1996 log.Printf("[DEBUG] disk index: %v", i) 1997 1998 var diskPath string 1999 switch { 2000 case vm.hardDisks[i].vmdkPath != "": 2001 diskPath = vm.hardDisks[i].vmdkPath 2002 case vm.hardDisks[i].name != "": 2003 snapshotFullDir := vm_mo.Config.Files.SnapshotDirectory 2004 split := strings.Split(snapshotFullDir, " ") 2005 if len(split) != 2 { 2006 return fmt.Errorf("[ERROR] setupVirtualMachine - failed to split snapshot directory: %v", snapshotFullDir) 2007 } 2008 vmWorkingPath := split[1] 2009 diskPath = vmWorkingPath + vm.hardDisks[i].name 2010 default: 2011 return fmt.Errorf("[ERROR] setupVirtualMachine - Neither vmdk path nor vmdk name was given: %#v", vm.hardDisks[i]) 2012 } 2013 err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, diskPath, vm.hardDisks[i].controller) 2014 if err != nil { 2015 err2 := addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, diskPath, vm.hardDisks[i].controller) 2016 if err2 != nil { 2017 return err2 2018 } 2019 return err 2020 } 2021 } 2022 2023 if vm.skipCustomization || vm.template == "" { 2024 log.Printf("[DEBUG] VM customization skipped") 2025 } else { 2026 var identity_options types.BaseCustomizationIdentitySettings 2027 if strings.HasPrefix(template_mo.Config.GuestId, "win") { 2028 var timeZone int 2029 if vm.timeZone == "Etc/UTC" { 2030 vm.timeZone = "085" 2031 } 2032 timeZone, err := strconv.Atoi(vm.timeZone) 2033 if err != nil { 2034 return fmt.Errorf("Error converting TimeZone: %s", err) 2035 } 2036 2037 guiUnattended := types.CustomizationGuiUnattended{ 2038 AutoLogon: false, 2039 AutoLogonCount: 1, 2040 TimeZone: int32(timeZone), 2041 } 2042 2043 customIdentification := types.CustomizationIdentification{} 2044 2045 userData := types.CustomizationUserData{ 2046 ComputerName: &types.CustomizationFixedName{ 2047 Name: strings.Split(vm.name, ".")[0], 2048 }, 2049 ProductId: vm.windowsOptionalConfig.productKey, 2050 FullName: "terraform", 2051 OrgName: "terraform", 2052 } 2053 2054 if vm.windowsOptionalConfig.domainUserPassword != "" && vm.windowsOptionalConfig.domainUser != "" && vm.windowsOptionalConfig.domain != "" { 2055 customIdentification.DomainAdminPassword = &types.CustomizationPassword{ 2056 PlainText: true, 2057 Value: vm.windowsOptionalConfig.domainUserPassword, 2058 } 2059 customIdentification.DomainAdmin = vm.windowsOptionalConfig.domainUser 2060 customIdentification.JoinDomain = vm.windowsOptionalConfig.domain 2061 } 2062 2063 if vm.windowsOptionalConfig.adminPassword != "" { 2064 guiUnattended.Password = &types.CustomizationPassword{ 2065 PlainText: true, 2066 Value: vm.windowsOptionalConfig.adminPassword, 2067 } 2068 } 2069 2070 identity_options = &types.CustomizationSysprep{ 2071 GuiUnattended: guiUnattended, 2072 Identification: customIdentification, 2073 UserData: userData, 2074 } 2075 } else { 2076 identity_options = &types.CustomizationLinuxPrep{ 2077 HostName: &types.CustomizationFixedName{ 2078 Name: strings.Split(vm.name, ".")[0], 2079 }, 2080 Domain: vm.domain, 2081 TimeZone: vm.timeZone, 2082 HwClockUTC: types.NewBool(true), 2083 } 2084 } 2085 2086 // create CustomizationSpec 2087 customSpec := types.CustomizationSpec{ 2088 Identity: identity_options, 2089 GlobalIPSettings: types.CustomizationGlobalIPSettings{ 2090 DnsSuffixList: vm.dnsSuffixes, 2091 DnsServerList: vm.dnsServers, 2092 }, 2093 NicSettingMap: networkConfigs, 2094 } 2095 log.Printf("[DEBUG] custom spec: %v", customSpec) 2096 2097 log.Printf("[DEBUG] VM customization starting") 2098 taskb, err := newVM.Customize(context.TODO(), customSpec) 2099 if err != nil { 2100 return err 2101 } 2102 _, err = taskb.WaitForResult(context.TODO(), nil) 2103 if err != nil { 2104 return err 2105 } 2106 log.Printf("[DEBUG] VM customization finished") 2107 } 2108 2109 if vm.hasBootableVmdk || vm.template != "" { 2110 newVM.PowerOn(context.TODO()) 2111 err = newVM.WaitForPowerState(context.TODO(), types.VirtualMachinePowerStatePoweredOn) 2112 if err != nil { 2113 return err 2114 } 2115 } 2116 return nil 2117 }