github.com/boyvanduuren/terraform@v0.7.0-rc2.0.20160805175930-de822d909c40/builtin/providers/vsphere/resource_vsphere_virtual_machine.go (about) 1 package vsphere 2 3 import ( 4 "fmt" 5 "log" 6 "net" 7 "strconv" 8 "strings" 9 10 "github.com/hashicorp/terraform/helper/schema" 11 "github.com/vmware/govmomi" 12 "github.com/vmware/govmomi/find" 13 "github.com/vmware/govmomi/object" 14 "github.com/vmware/govmomi/property" 15 "github.com/vmware/govmomi/vim25/mo" 16 "github.com/vmware/govmomi/vim25/types" 17 "golang.org/x/net/context" 18 ) 19 20 var DefaultDNSSuffixes = []string{ 21 "vsphere.local", 22 } 23 24 var DefaultDNSServers = []string{ 25 "8.8.8.8", 26 "8.8.4.4", 27 } 28 29 var DiskControllerTypes = []string{ 30 "scsi", 31 "scsi-lsi-parallel", 32 "scsi-buslogic", 33 "scsi-paravirtual", 34 "scsi-lsi-sas", 35 "ide", 36 } 37 38 type networkInterface struct { 39 deviceName string 40 label string 41 ipv4Address string 42 ipv4PrefixLength int 43 ipv4Gateway string 44 ipv6Address string 45 ipv6PrefixLength int 46 ipv6Gateway string 47 adapterType string // TODO: Make "adapter_type" argument 48 macAddress string 49 } 50 51 type hardDisk struct { 52 name string 53 size int64 54 iops int64 55 initType string 56 vmdkPath string 57 controller string 58 bootable bool 59 } 60 61 //Additional options Vsphere can use clones of windows machines 62 type windowsOptConfig struct { 63 productKey string 64 adminPassword string 65 domainUser string 66 domain string 67 domainUserPassword string 68 } 69 70 type cdrom struct { 71 datastore string 72 path string 73 } 74 75 type memoryAllocation struct { 76 reservation int64 77 } 78 79 type virtualMachine struct { 80 name string 81 folder string 82 datacenter string 83 cluster string 84 resourcePool string 85 datastore string 86 vcpu int32 87 memoryMb int64 88 memoryAllocation memoryAllocation 89 template string 90 networkInterfaces []networkInterface 91 hardDisks []hardDisk 92 cdroms []cdrom 93 domain string 94 timeZone string 95 dnsSuffixes []string 96 dnsServers []string 97 hasBootableVmdk bool 98 linkedClone bool 99 skipCustomization bool 100 enableDiskUUID bool 101 windowsOptionalConfig windowsOptConfig 102 customConfigurations map[string](types.AnyType) 103 } 104 105 func (v virtualMachine) Path() string { 106 return vmPath(v.folder, v.name) 107 } 108 109 func vmPath(folder string, name string) string { 110 var path string 111 if len(folder) > 0 { 112 path += folder + "/" 113 } 114 return path + name 115 } 116 117 func resourceVSphereVirtualMachine() *schema.Resource { 118 return &schema.Resource{ 119 Create: resourceVSphereVirtualMachineCreate, 120 Read: resourceVSphereVirtualMachineRead, 121 Update: resourceVSphereVirtualMachineUpdate, 122 Delete: resourceVSphereVirtualMachineDelete, 123 124 SchemaVersion: 1, 125 MigrateState: resourceVSphereVirtualMachineMigrateState, 126 127 Schema: map[string]*schema.Schema{ 128 "name": &schema.Schema{ 129 Type: schema.TypeString, 130 Required: true, 131 ForceNew: true, 132 }, 133 134 "folder": &schema.Schema{ 135 Type: schema.TypeString, 136 Optional: true, 137 ForceNew: true, 138 }, 139 140 "vcpu": &schema.Schema{ 141 Type: schema.TypeInt, 142 Required: true, 143 }, 144 145 "memory": &schema.Schema{ 146 Type: schema.TypeInt, 147 Required: true, 148 }, 149 150 "memory_reservation": &schema.Schema{ 151 Type: schema.TypeInt, 152 Optional: true, 153 Default: 0, 154 ForceNew: true, 155 }, 156 157 "datacenter": &schema.Schema{ 158 Type: schema.TypeString, 159 Optional: true, 160 ForceNew: true, 161 }, 162 163 "cluster": &schema.Schema{ 164 Type: schema.TypeString, 165 Optional: true, 166 ForceNew: true, 167 }, 168 169 "resource_pool": &schema.Schema{ 170 Type: schema.TypeString, 171 Optional: true, 172 ForceNew: true, 173 }, 174 175 "linked_clone": &schema.Schema{ 176 Type: schema.TypeBool, 177 Optional: true, 178 Default: false, 179 ForceNew: true, 180 }, 181 "gateway": &schema.Schema{ 182 Type: schema.TypeString, 183 Optional: true, 184 ForceNew: true, 185 Deprecated: "Please use network_interface.ipv4_gateway", 186 }, 187 188 "domain": &schema.Schema{ 189 Type: schema.TypeString, 190 Optional: true, 191 ForceNew: true, 192 Default: "vsphere.local", 193 }, 194 195 "time_zone": &schema.Schema{ 196 Type: schema.TypeString, 197 Optional: true, 198 ForceNew: true, 199 Default: "Etc/UTC", 200 }, 201 202 "dns_suffixes": &schema.Schema{ 203 Type: schema.TypeList, 204 Optional: true, 205 Elem: &schema.Schema{Type: schema.TypeString}, 206 ForceNew: true, 207 }, 208 209 "dns_servers": &schema.Schema{ 210 Type: schema.TypeList, 211 Optional: true, 212 Elem: &schema.Schema{Type: schema.TypeString}, 213 ForceNew: true, 214 }, 215 216 "skip_customization": &schema.Schema{ 217 Type: schema.TypeBool, 218 Optional: true, 219 ForceNew: true, 220 Default: false, 221 }, 222 223 "enable_disk_uuid": &schema.Schema{ 224 Type: schema.TypeBool, 225 Optional: true, 226 ForceNew: true, 227 Default: false, 228 }, 229 230 "uuid": &schema.Schema{ 231 Type: schema.TypeString, 232 Computed: true, 233 }, 234 235 "custom_configuration_parameters": &schema.Schema{ 236 Type: schema.TypeMap, 237 Optional: true, 238 ForceNew: true, 239 }, 240 241 "windows_opt_config": &schema.Schema{ 242 Type: schema.TypeList, 243 Optional: true, 244 ForceNew: true, 245 Elem: &schema.Resource{ 246 Schema: map[string]*schema.Schema{ 247 "product_key": &schema.Schema{ 248 Type: schema.TypeString, 249 Optional: true, 250 ForceNew: true, 251 }, 252 253 "admin_password": &schema.Schema{ 254 Type: schema.TypeString, 255 Optional: true, 256 ForceNew: true, 257 }, 258 259 "domain_user": &schema.Schema{ 260 Type: schema.TypeString, 261 Optional: true, 262 ForceNew: true, 263 }, 264 265 "domain": &schema.Schema{ 266 Type: schema.TypeString, 267 Optional: true, 268 ForceNew: true, 269 }, 270 271 "domain_user_password": &schema.Schema{ 272 Type: schema.TypeString, 273 Optional: true, 274 ForceNew: true, 275 }, 276 }, 277 }, 278 }, 279 280 "network_interface": &schema.Schema{ 281 Type: schema.TypeList, 282 Required: true, 283 ForceNew: true, 284 Elem: &schema.Resource{ 285 Schema: map[string]*schema.Schema{ 286 "label": &schema.Schema{ 287 Type: schema.TypeString, 288 Required: true, 289 ForceNew: true, 290 }, 291 292 "ip_address": &schema.Schema{ 293 Type: schema.TypeString, 294 Optional: true, 295 Computed: true, 296 Deprecated: "Please use ipv4_address", 297 }, 298 299 "subnet_mask": &schema.Schema{ 300 Type: schema.TypeString, 301 Optional: true, 302 Computed: true, 303 Deprecated: "Please use ipv4_prefix_length", 304 }, 305 306 "ipv4_address": &schema.Schema{ 307 Type: schema.TypeString, 308 Optional: true, 309 Computed: true, 310 }, 311 312 "ipv4_prefix_length": &schema.Schema{ 313 Type: schema.TypeInt, 314 Optional: true, 315 Computed: true, 316 }, 317 318 "ipv4_gateway": &schema.Schema{ 319 Type: schema.TypeString, 320 Optional: true, 321 Computed: true, 322 }, 323 324 "ipv6_address": &schema.Schema{ 325 Type: schema.TypeString, 326 Optional: true, 327 Computed: true, 328 }, 329 330 "ipv6_prefix_length": &schema.Schema{ 331 Type: schema.TypeInt, 332 Optional: true, 333 Computed: true, 334 }, 335 336 "ipv6_gateway": &schema.Schema{ 337 Type: schema.TypeString, 338 Optional: true, 339 Computed: true, 340 }, 341 342 "adapter_type": &schema.Schema{ 343 Type: schema.TypeString, 344 Optional: true, 345 ForceNew: true, 346 }, 347 348 "mac_address": &schema.Schema{ 349 Type: schema.TypeString, 350 Optional: true, 351 Computed: true, 352 }, 353 }, 354 }, 355 }, 356 357 "disk": &schema.Schema{ 358 Type: schema.TypeSet, 359 Required: true, 360 Elem: &schema.Resource{ 361 Schema: map[string]*schema.Schema{ 362 "uuid": &schema.Schema{ 363 Type: schema.TypeString, 364 Computed: true, 365 }, 366 367 "key": &schema.Schema{ 368 Type: schema.TypeInt, 369 Computed: true, 370 }, 371 372 "template": &schema.Schema{ 373 Type: schema.TypeString, 374 Optional: true, 375 }, 376 377 "type": &schema.Schema{ 378 Type: schema.TypeString, 379 Optional: true, 380 Default: "eager_zeroed", 381 ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { 382 value := v.(string) 383 if value != "thin" && value != "eager_zeroed" { 384 errors = append(errors, fmt.Errorf( 385 "only 'thin' and 'eager_zeroed' are supported values for 'type'")) 386 } 387 return 388 }, 389 }, 390 391 "datastore": &schema.Schema{ 392 Type: schema.TypeString, 393 Optional: true, 394 }, 395 396 "size": &schema.Schema{ 397 Type: schema.TypeInt, 398 Optional: true, 399 }, 400 401 "name": &schema.Schema{ 402 Type: schema.TypeString, 403 Optional: true, 404 }, 405 406 "iops": &schema.Schema{ 407 Type: schema.TypeInt, 408 Optional: true, 409 }, 410 411 "vmdk": &schema.Schema{ 412 // TODO: Add ValidateFunc to confirm path exists 413 Type: schema.TypeString, 414 Optional: true, 415 }, 416 417 "bootable": &schema.Schema{ 418 Type: schema.TypeBool, 419 Optional: true, 420 }, 421 422 "keep_on_remove": &schema.Schema{ 423 Type: schema.TypeBool, 424 Optional: true, 425 }, 426 427 "controller_type": &schema.Schema{ 428 Type: schema.TypeString, 429 Optional: true, 430 Default: "scsi", 431 ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { 432 value := v.(string) 433 found := false 434 for _, t := range DiskControllerTypes { 435 if t == value { 436 found = true 437 } 438 } 439 if !found { 440 errors = append(errors, fmt.Errorf( 441 "Supported values for 'controller_type' are %v", strings.Join(DiskControllerTypes, ", "))) 442 } 443 return 444 }, 445 }, 446 }, 447 }, 448 }, 449 450 "cdrom": &schema.Schema{ 451 Type: schema.TypeList, 452 Optional: true, 453 ForceNew: true, 454 Elem: &schema.Resource{ 455 Schema: map[string]*schema.Schema{ 456 "datastore": &schema.Schema{ 457 Type: schema.TypeString, 458 Required: true, 459 ForceNew: true, 460 }, 461 462 "path": &schema.Schema{ 463 Type: schema.TypeString, 464 Required: true, 465 ForceNew: true, 466 }, 467 }, 468 }, 469 }, 470 }, 471 } 472 } 473 474 func resourceVSphereVirtualMachineUpdate(d *schema.ResourceData, meta interface{}) error { 475 // flag if changes have to be applied 476 hasChanges := false 477 // flag if changes have to be done when powered off 478 rebootRequired := false 479 480 // make config spec 481 configSpec := types.VirtualMachineConfigSpec{} 482 483 if d.HasChange("vcpu") { 484 configSpec.NumCPUs = int32(d.Get("vcpu").(int)) 485 hasChanges = true 486 rebootRequired = true 487 } 488 489 if d.HasChange("memory") { 490 configSpec.MemoryMB = int64(d.Get("memory").(int)) 491 hasChanges = true 492 rebootRequired = true 493 } 494 495 client := meta.(*govmomi.Client) 496 dc, err := getDatacenter(client, d.Get("datacenter").(string)) 497 if err != nil { 498 return err 499 } 500 finder := find.NewFinder(client.Client, true) 501 finder = finder.SetDatacenter(dc) 502 503 vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string))) 504 if err != nil { 505 return err 506 } 507 508 if d.HasChange("disk") { 509 hasChanges = true 510 oldDisks, newDisks := d.GetChange("disk") 511 oldDiskSet := oldDisks.(*schema.Set) 512 newDiskSet := newDisks.(*schema.Set) 513 514 addedDisks := newDiskSet.Difference(oldDiskSet) 515 removedDisks := oldDiskSet.Difference(newDiskSet) 516 517 // Removed disks 518 for _, diskRaw := range removedDisks.List() { 519 if disk, ok := diskRaw.(map[string]interface{}); ok { 520 devices, err := vm.Device(context.TODO()) 521 if err != nil { 522 return fmt.Errorf("[ERROR] Update Remove Disk - Could not get virtual device list: %v", err) 523 } 524 virtualDisk := devices.FindByKey(int32(disk["key"].(int))) 525 526 keep := false 527 if v, ok := disk["keep_on_remove"].(bool); ok { 528 keep = v 529 } 530 531 err = vm.RemoveDevice(context.TODO(), keep, virtualDisk) 532 if err != nil { 533 return fmt.Errorf("[ERROR] Update Remove Disk - Error removing disk: %v", err) 534 } 535 } 536 } 537 // Added disks 538 for _, diskRaw := range addedDisks.List() { 539 if disk, ok := diskRaw.(map[string]interface{}); ok { 540 541 var datastore *object.Datastore 542 if disk["datastore"] == "" { 543 datastore, err = finder.DefaultDatastore(context.TODO()) 544 if err != nil { 545 return fmt.Errorf("[ERROR] Update Remove Disk - Error finding datastore: %v", err) 546 } 547 } else { 548 datastore, err = finder.Datastore(context.TODO(), disk["datastore"].(string)) 549 if err != nil { 550 log.Printf("[ERROR] Couldn't find datastore %v. %s", disk["datastore"].(string), err) 551 return err 552 } 553 } 554 555 var size int64 556 if disk["size"] == 0 { 557 size = 0 558 } else { 559 size = int64(disk["size"].(int)) 560 } 561 iops := int64(disk["iops"].(int)) 562 controller_type := disk["controller_type"].(string) 563 564 var mo mo.VirtualMachine 565 vm.Properties(context.TODO(), vm.Reference(), []string{"summary", "config"}, &mo) 566 567 var diskPath string 568 switch { 569 case disk["vmdk"] != "": 570 diskPath = disk["vmdk"].(string) 571 case disk["name"] != "": 572 snapshotFullDir := mo.Config.Files.SnapshotDirectory 573 split := strings.Split(snapshotFullDir, " ") 574 if len(split) != 2 { 575 return fmt.Errorf("[ERROR] createVirtualMachine - failed to split snapshot directory: %v", snapshotFullDir) 576 } 577 vmWorkingPath := split[1] 578 diskPath = vmWorkingPath + disk["name"].(string) 579 default: 580 return fmt.Errorf("[ERROR] resourceVSphereVirtualMachineUpdate - Neither vmdk path nor vmdk name was given") 581 } 582 583 log.Printf("[INFO] Attaching disk: %v", diskPath) 584 err = addHardDisk(vm, size, iops, "thin", datastore, diskPath, controller_type) 585 if err != nil { 586 log.Printf("[ERROR] Add Hard Disk Failed: %v", err) 587 return err 588 } 589 } 590 if err != nil { 591 return err 592 } 593 } 594 } 595 596 // do nothing if there are no changes 597 if !hasChanges { 598 return nil 599 } 600 601 log.Printf("[DEBUG] virtual machine config spec: %v", configSpec) 602 603 if rebootRequired { 604 log.Printf("[INFO] Shutting down virtual machine: %s", d.Id()) 605 606 task, err := vm.PowerOff(context.TODO()) 607 if err != nil { 608 return err 609 } 610 611 err = task.Wait(context.TODO()) 612 if err != nil { 613 return err 614 } 615 } 616 617 log.Printf("[INFO] Reconfiguring virtual machine: %s", d.Id()) 618 619 task, err := vm.Reconfigure(context.TODO(), configSpec) 620 if err != nil { 621 log.Printf("[ERROR] %s", err) 622 } 623 624 err = task.Wait(context.TODO()) 625 if err != nil { 626 log.Printf("[ERROR] %s", err) 627 } 628 629 if rebootRequired { 630 task, err = vm.PowerOn(context.TODO()) 631 if err != nil { 632 return err 633 } 634 635 err = task.Wait(context.TODO()) 636 if err != nil { 637 log.Printf("[ERROR] %s", err) 638 } 639 } 640 641 return resourceVSphereVirtualMachineRead(d, meta) 642 } 643 644 func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error { 645 client := meta.(*govmomi.Client) 646 647 vm := virtualMachine{ 648 name: d.Get("name").(string), 649 vcpu: int32(d.Get("vcpu").(int)), 650 memoryMb: int64(d.Get("memory").(int)), 651 memoryAllocation: memoryAllocation{ 652 reservation: int64(d.Get("memory_reservation").(int)), 653 }, 654 } 655 656 if v, ok := d.GetOk("folder"); ok { 657 vm.folder = v.(string) 658 } 659 660 if v, ok := d.GetOk("datacenter"); ok { 661 vm.datacenter = v.(string) 662 } 663 664 if v, ok := d.GetOk("cluster"); ok { 665 vm.cluster = v.(string) 666 } 667 668 if v, ok := d.GetOk("resource_pool"); ok { 669 vm.resourcePool = v.(string) 670 } 671 672 if v, ok := d.GetOk("domain"); ok { 673 vm.domain = v.(string) 674 } 675 676 if v, ok := d.GetOk("time_zone"); ok { 677 vm.timeZone = v.(string) 678 } 679 680 if v, ok := d.GetOk("linked_clone"); ok { 681 vm.linkedClone = v.(bool) 682 } 683 684 if v, ok := d.GetOk("skip_customization"); ok { 685 vm.skipCustomization = v.(bool) 686 } 687 688 if v, ok := d.GetOk("enable_disk_uuid"); ok { 689 vm.enableDiskUUID = v.(bool) 690 } 691 692 if raw, ok := d.GetOk("dns_suffixes"); ok { 693 for _, v := range raw.([]interface{}) { 694 vm.dnsSuffixes = append(vm.dnsSuffixes, v.(string)) 695 } 696 } else { 697 vm.dnsSuffixes = DefaultDNSSuffixes 698 } 699 700 if raw, ok := d.GetOk("dns_servers"); ok { 701 for _, v := range raw.([]interface{}) { 702 vm.dnsServers = append(vm.dnsServers, v.(string)) 703 } 704 } else { 705 vm.dnsServers = DefaultDNSServers 706 } 707 708 if vL, ok := d.GetOk("custom_configuration_parameters"); ok { 709 if custom_configs, ok := vL.(map[string]interface{}); ok { 710 custom := make(map[string]types.AnyType) 711 for k, v := range custom_configs { 712 custom[k] = v 713 } 714 vm.customConfigurations = custom 715 log.Printf("[DEBUG] custom_configuration_parameters init: %v", vm.customConfigurations) 716 } 717 } 718 719 if vL, ok := d.GetOk("network_interface"); ok { 720 networks := make([]networkInterface, len(vL.([]interface{}))) 721 for i, v := range vL.([]interface{}) { 722 network := v.(map[string]interface{}) 723 networks[i].label = network["label"].(string) 724 if v, ok := network["ip_address"].(string); ok && v != "" { 725 networks[i].ipv4Address = v 726 } 727 if v, ok := d.GetOk("gateway"); ok { 728 networks[i].ipv4Gateway = v.(string) 729 } 730 if v, ok := network["subnet_mask"].(string); ok && v != "" { 731 ip := net.ParseIP(v).To4() 732 if ip != nil { 733 mask := net.IPv4Mask(ip[0], ip[1], ip[2], ip[3]) 734 pl, _ := mask.Size() 735 networks[i].ipv4PrefixLength = pl 736 } else { 737 return fmt.Errorf("subnet_mask parameter is invalid.") 738 } 739 } 740 if v, ok := network["ipv4_address"].(string); ok && v != "" { 741 networks[i].ipv4Address = v 742 } 743 if v, ok := network["ipv4_prefix_length"].(int); ok && v != 0 { 744 networks[i].ipv4PrefixLength = v 745 } 746 if v, ok := network["ipv4_gateway"].(string); ok && v != "" { 747 networks[i].ipv4Gateway = v 748 } 749 if v, ok := network["ipv6_address"].(string); ok && v != "" { 750 networks[i].ipv6Address = v 751 } 752 if v, ok := network["ipv6_prefix_length"].(int); ok && v != 0 { 753 networks[i].ipv6PrefixLength = v 754 } 755 if v, ok := network["ipv6_gateway"].(string); ok && v != "" { 756 networks[i].ipv6Gateway = v 757 } 758 if v, ok := network["mac_address"].(string); ok && v != "" { 759 networks[i].macAddress = v 760 } 761 } 762 vm.networkInterfaces = networks 763 log.Printf("[DEBUG] network_interface init: %v", networks) 764 } 765 766 if vL, ok := d.GetOk("windows_opt_config"); ok { 767 var winOpt windowsOptConfig 768 custom_configs := (vL.([]interface{}))[0].(map[string]interface{}) 769 if v, ok := custom_configs["admin_password"].(string); ok && v != "" { 770 winOpt.adminPassword = v 771 } 772 if v, ok := custom_configs["domain"].(string); ok && v != "" { 773 winOpt.domain = v 774 } 775 if v, ok := custom_configs["domain_user"].(string); ok && v != "" { 776 winOpt.domainUser = v 777 } 778 if v, ok := custom_configs["product_key"].(string); ok && v != "" { 779 winOpt.productKey = v 780 } 781 if v, ok := custom_configs["domain_user_password"].(string); ok && v != "" { 782 winOpt.domainUserPassword = v 783 } 784 vm.windowsOptionalConfig = winOpt 785 log.Printf("[DEBUG] windows config init: %v", winOpt) 786 } 787 788 if vL, ok := d.GetOk("disk"); ok { 789 if diskSet, ok := vL.(*schema.Set); ok { 790 791 disks := []hardDisk{} 792 for _, value := range diskSet.List() { 793 disk := value.(map[string]interface{}) 794 newDisk := hardDisk{} 795 796 if v, ok := disk["template"].(string); ok && v != "" { 797 if v, ok := disk["name"].(string); ok && v != "" { 798 return fmt.Errorf("Cannot specify name of a template") 799 } 800 vm.template = v 801 if vm.hasBootableVmdk { 802 return fmt.Errorf("[ERROR] Only one bootable disk or template may be given") 803 } 804 vm.hasBootableVmdk = true 805 } 806 807 if v, ok := disk["type"].(string); ok && v != "" { 808 newDisk.initType = v 809 } 810 811 if v, ok := disk["datastore"].(string); ok && v != "" { 812 vm.datastore = v 813 } 814 815 if v, ok := disk["size"].(int); ok && v != 0 { 816 if v, ok := disk["template"].(string); ok && v != "" { 817 return fmt.Errorf("Cannot specify size of a template") 818 } 819 820 if v, ok := disk["name"].(string); ok && v != "" { 821 newDisk.name = v 822 } else { 823 return fmt.Errorf("[ERROR] Disk name must be provided when creating a new disk") 824 } 825 826 newDisk.size = int64(v) 827 } 828 829 if v, ok := disk["iops"].(int); ok && v != 0 { 830 newDisk.iops = int64(v) 831 } 832 833 if v, ok := disk["controller_type"].(string); ok && v != "" { 834 newDisk.controller = v 835 } 836 837 if vVmdk, ok := disk["vmdk"].(string); ok && vVmdk != "" { 838 if v, ok := disk["template"].(string); ok && v != "" { 839 return fmt.Errorf("Cannot specify a vmdk for a template") 840 } 841 if v, ok := disk["size"].(string); ok && v != "" { 842 return fmt.Errorf("Cannot specify size of a vmdk") 843 } 844 if v, ok := disk["name"].(string); ok && v != "" { 845 return fmt.Errorf("Cannot specify name of a vmdk") 846 } 847 if vBootable, ok := disk["bootable"].(bool); ok { 848 if vBootable && vm.hasBootableVmdk { 849 return fmt.Errorf("[ERROR] Only one bootable disk or template may be given") 850 } 851 newDisk.bootable = vBootable 852 vm.hasBootableVmdk = vm.hasBootableVmdk || vBootable 853 } 854 newDisk.vmdkPath = vVmdk 855 } 856 // Preserves order so bootable disk is first 857 if newDisk.bootable == true || disk["template"] != "" { 858 disks = append([]hardDisk{newDisk}, disks...) 859 } else { 860 disks = append(disks, newDisk) 861 } 862 } 863 vm.hardDisks = disks 864 log.Printf("[DEBUG] disk init: %v", disks) 865 } 866 } 867 868 if vL, ok := d.GetOk("cdrom"); ok { 869 cdroms := make([]cdrom, len(vL.([]interface{}))) 870 for i, v := range vL.([]interface{}) { 871 c := v.(map[string]interface{}) 872 if v, ok := c["datastore"].(string); ok && v != "" { 873 cdroms[i].datastore = v 874 } else { 875 return fmt.Errorf("Datastore argument must be specified when attaching a cdrom image.") 876 } 877 if v, ok := c["path"].(string); ok && v != "" { 878 cdroms[i].path = v 879 } else { 880 return fmt.Errorf("Path argument must be specified when attaching a cdrom image.") 881 } 882 } 883 vm.cdroms = cdroms 884 log.Printf("[DEBUG] cdrom init: %v", cdroms) 885 } 886 887 err := vm.setupVirtualMachine(client) 888 if err != nil { 889 return err 890 } 891 892 d.SetId(vm.Path()) 893 log.Printf("[INFO] Created virtual machine: %s", d.Id()) 894 895 return resourceVSphereVirtualMachineRead(d, meta) 896 } 897 898 func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{}) error { 899 log.Printf("[DEBUG] virtual machine resource data: %#v", d) 900 client := meta.(*govmomi.Client) 901 dc, err := getDatacenter(client, d.Get("datacenter").(string)) 902 if err != nil { 903 return err 904 } 905 finder := find.NewFinder(client.Client, true) 906 finder = finder.SetDatacenter(dc) 907 908 vm, err := finder.VirtualMachine(context.TODO(), d.Id()) 909 if err != nil { 910 d.SetId("") 911 return nil 912 } 913 914 state, err := vm.PowerState(context.TODO()) 915 if err != nil { 916 return err 917 } 918 919 if state == types.VirtualMachinePowerStatePoweredOn { 920 // wait for interfaces to appear 921 _, err = vm.WaitForNetIP(context.TODO(), true) 922 if err != nil { 923 return err 924 } 925 } 926 927 var mvm mo.VirtualMachine 928 collector := property.DefaultCollector(client.Client) 929 if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"guest", "summary", "datastore", "config"}, &mvm); err != nil { 930 return err 931 } 932 933 log.Printf("[DEBUG] Datacenter - %#v", dc) 934 log.Printf("[DEBUG] mvm.Summary.Config - %#v", mvm.Summary.Config) 935 log.Printf("[DEBUG] mvm.Summary.Config - %#v", mvm.Config) 936 log.Printf("[DEBUG] mvm.Guest.Net - %#v", mvm.Guest.Net) 937 938 disks := make([]map[string]interface{}, 0) 939 templateDisk := make(map[string]interface{}, 1) 940 for _, device := range mvm.Config.Hardware.Device { 941 if vd, ok := device.(*types.VirtualDisk); ok { 942 943 virtualDevice := vd.GetVirtualDevice() 944 945 backingInfo := virtualDevice.Backing 946 var diskFullPath string 947 var diskUuid string 948 if v, ok := backingInfo.(*types.VirtualDiskFlatVer2BackingInfo); ok { 949 diskFullPath = v.FileName 950 diskUuid = v.Uuid 951 } else if v, ok := backingInfo.(*types.VirtualDiskSparseVer2BackingInfo); ok { 952 diskFullPath = v.FileName 953 diskUuid = v.Uuid 954 } 955 log.Printf("[DEBUG] resourceVSphereVirtualMachineRead - Analyzing disk: %v", diskFullPath) 956 957 // Separate datastore and path 958 diskFullPathSplit := strings.Split(diskFullPath, " ") 959 if len(diskFullPathSplit) != 2 { 960 return fmt.Errorf("[ERROR] Failed trying to parse disk path: %v", diskFullPath) 961 } 962 diskPath := diskFullPathSplit[1] 963 // Isolate filename 964 diskNameSplit := strings.Split(diskPath, "/") 965 diskName := diskNameSplit[len(diskNameSplit)-1] 966 // Remove possible extension 967 diskName = strings.Split(diskName, ".")[0] 968 969 if prevDisks, ok := d.GetOk("disk"); ok { 970 if prevDiskSet, ok := prevDisks.(*schema.Set); ok { 971 for _, v := range prevDiskSet.List() { 972 prevDisk := v.(map[string]interface{}) 973 974 // We're guaranteed only one template disk. Passing value directly through since templates should be immutable 975 if prevDisk["template"] != "" { 976 if len(templateDisk) == 0 { 977 templateDisk = prevDisk 978 disks = append(disks, templateDisk) 979 break 980 } 981 } 982 983 // It is enforced that prevDisk["name"] should only be set in the case 984 // of creating a new disk for the user. 985 // size case: name was set by user, compare parsed filename from mo.filename (without path or .vmdk extension) with name 986 // vmdk case: compare prevDisk["vmdk"] and mo.Filename 987 if diskName == prevDisk["name"] || diskPath == prevDisk["vmdk"] { 988 989 prevDisk["key"] = virtualDevice.Key 990 prevDisk["uuid"] = diskUuid 991 992 disks = append(disks, prevDisk) 993 break 994 } 995 } 996 } 997 } 998 log.Printf("[DEBUG] disks: %#v", disks) 999 } 1000 } 1001 err = d.Set("disk", disks) 1002 if err != nil { 1003 return fmt.Errorf("Invalid disks to set: %#v", disks) 1004 } 1005 1006 networkInterfaces := make([]map[string]interface{}, 0) 1007 for _, v := range mvm.Guest.Net { 1008 if v.DeviceConfigId >= 0 { 1009 log.Printf("[DEBUG] v.Network - %#v", v.Network) 1010 networkInterface := make(map[string]interface{}) 1011 networkInterface["label"] = v.Network 1012 networkInterface["mac_address"] = v.MacAddress 1013 for _, ip := range v.IpConfig.IpAddress { 1014 p := net.ParseIP(ip.IpAddress) 1015 if p.To4() != nil { 1016 log.Printf("[DEBUG] p.String - %#v", p.String()) 1017 log.Printf("[DEBUG] ip.PrefixLength - %#v", ip.PrefixLength) 1018 networkInterface["ipv4_address"] = p.String() 1019 networkInterface["ipv4_prefix_length"] = ip.PrefixLength 1020 } else if p.To16() != nil { 1021 log.Printf("[DEBUG] p.String - %#v", p.String()) 1022 log.Printf("[DEBUG] ip.PrefixLength - %#v", ip.PrefixLength) 1023 networkInterface["ipv6_address"] = p.String() 1024 networkInterface["ipv6_prefix_length"] = ip.PrefixLength 1025 } 1026 log.Printf("[DEBUG] networkInterface: %#v", networkInterface) 1027 } 1028 log.Printf("[DEBUG] networkInterface: %#v", networkInterface) 1029 networkInterfaces = append(networkInterfaces, networkInterface) 1030 } 1031 } 1032 if mvm.Guest.IpStack != nil { 1033 for _, v := range mvm.Guest.IpStack { 1034 if v.IpRouteConfig != nil && v.IpRouteConfig.IpRoute != nil { 1035 for _, route := range v.IpRouteConfig.IpRoute { 1036 if route.Gateway.Device != "" { 1037 gatewaySetting := "" 1038 if route.Network == "::" { 1039 gatewaySetting = "ipv6_gateway" 1040 } else if route.Network == "0.0.0.0" { 1041 gatewaySetting = "ipv4_gateway" 1042 } 1043 if gatewaySetting != "" { 1044 deviceID, err := strconv.Atoi(route.Gateway.Device) 1045 if err != nil { 1046 log.Printf("[WARN] error at processing %s of device id %#v: %#v", gatewaySetting, route.Gateway.Device, err) 1047 } else { 1048 log.Printf("[DEBUG] %s of device id %d: %s", gatewaySetting, deviceID, route.Gateway.IpAddress) 1049 networkInterfaces[deviceID][gatewaySetting] = route.Gateway.IpAddress 1050 } 1051 } 1052 } 1053 } 1054 } 1055 } 1056 } 1057 log.Printf("[DEBUG] networkInterfaces: %#v", networkInterfaces) 1058 err = d.Set("network_interface", networkInterfaces) 1059 if err != nil { 1060 return fmt.Errorf("Invalid network interfaces to set: %#v", networkInterfaces) 1061 } 1062 1063 if len(networkInterfaces) > 0 { 1064 if _, ok := networkInterfaces[0]["ipv4_address"]; ok { 1065 log.Printf("[DEBUG] ip address: %v", networkInterfaces[0]["ipv4_address"].(string)) 1066 d.SetConnInfo(map[string]string{ 1067 "type": "ssh", 1068 "host": networkInterfaces[0]["ipv4_address"].(string), 1069 }) 1070 } 1071 } 1072 1073 var rootDatastore string 1074 for _, v := range mvm.Datastore { 1075 var md mo.Datastore 1076 if err := collector.RetrieveOne(context.TODO(), v, []string{"name", "parent"}, &md); err != nil { 1077 return err 1078 } 1079 if md.Parent.Type == "StoragePod" { 1080 var msp mo.StoragePod 1081 if err := collector.RetrieveOne(context.TODO(), *md.Parent, []string{"name"}, &msp); err != nil { 1082 return err 1083 } 1084 rootDatastore = msp.Name 1085 log.Printf("[DEBUG] %#v", msp.Name) 1086 } else { 1087 rootDatastore = md.Name 1088 log.Printf("[DEBUG] %#v", md.Name) 1089 } 1090 break 1091 } 1092 1093 d.Set("datacenter", dc) 1094 d.Set("memory", mvm.Summary.Config.MemorySizeMB) 1095 d.Set("memory_reservation", mvm.Summary.Config.MemoryReservation) 1096 d.Set("cpu", mvm.Summary.Config.NumCpu) 1097 d.Set("datastore", rootDatastore) 1098 d.Set("uuid", mvm.Summary.Config.Uuid) 1099 1100 return nil 1101 } 1102 1103 func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error { 1104 client := meta.(*govmomi.Client) 1105 dc, err := getDatacenter(client, d.Get("datacenter").(string)) 1106 if err != nil { 1107 return err 1108 } 1109 finder := find.NewFinder(client.Client, true) 1110 finder = finder.SetDatacenter(dc) 1111 1112 vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string))) 1113 if err != nil { 1114 return err 1115 } 1116 devices, err := vm.Device(context.TODO()) 1117 if err != nil { 1118 log.Printf("[DEBUG] resourceVSphereVirtualMachineDelete - Failed to get device list: %v", err) 1119 return err 1120 } 1121 1122 log.Printf("[INFO] Deleting virtual machine: %s", d.Id()) 1123 state, err := vm.PowerState(context.TODO()) 1124 if err != nil { 1125 return err 1126 } 1127 1128 if state == types.VirtualMachinePowerStatePoweredOn { 1129 task, err := vm.PowerOff(context.TODO()) 1130 if err != nil { 1131 return err 1132 } 1133 1134 err = task.Wait(context.TODO()) 1135 if err != nil { 1136 return err 1137 } 1138 } 1139 1140 // Safely eject any disks the user marked as keep_on_remove 1141 if vL, ok := d.GetOk("disk"); ok { 1142 if diskSet, ok := vL.(*schema.Set); ok { 1143 1144 for _, value := range diskSet.List() { 1145 disk := value.(map[string]interface{}) 1146 1147 if v, ok := disk["keep_on_remove"].(bool); ok && v == true { 1148 log.Printf("[DEBUG] not destroying %v", disk["name"]) 1149 virtualDisk := devices.FindByKey(int32(disk["key"].(int))) 1150 err = vm.RemoveDevice(context.TODO(), true, virtualDisk) 1151 if err != nil { 1152 log.Printf("[ERROR] Update Remove Disk - Error removing disk: %v", err) 1153 return err 1154 } 1155 } 1156 } 1157 } 1158 } 1159 1160 task, err := vm.Destroy(context.TODO()) 1161 if err != nil { 1162 return err 1163 } 1164 1165 err = task.Wait(context.TODO()) 1166 if err != nil { 1167 return err 1168 } 1169 1170 d.SetId("") 1171 return nil 1172 } 1173 1174 // addHardDisk adds a new Hard Disk to the VirtualMachine. 1175 func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string, datastore *object.Datastore, diskPath string, controller_type string) error { 1176 devices, err := vm.Device(context.TODO()) 1177 if err != nil { 1178 return err 1179 } 1180 log.Printf("[DEBUG] vm devices: %#v\n", devices) 1181 1182 var controller types.BaseVirtualController 1183 switch controller_type { 1184 case "scsi": 1185 controller, err = devices.FindDiskController(controller_type) 1186 case "scsi-lsi-parallel": 1187 controller = devices.PickController(&types.VirtualLsiLogicController{}) 1188 case "scsi-buslogic": 1189 controller = devices.PickController(&types.VirtualBusLogicController{}) 1190 case "scsi-paravirtual": 1191 controller = devices.PickController(&types.ParaVirtualSCSIController{}) 1192 case "scsi-lsi-sas": 1193 controller = devices.PickController(&types.VirtualLsiLogicSASController{}) 1194 case "ide": 1195 controller, err = devices.FindDiskController(controller_type) 1196 default: 1197 return fmt.Errorf("[ERROR] Unsupported disk controller provided: %v", controller_type) 1198 } 1199 1200 if err != nil || controller == nil { 1201 // Check if max number of scsi controller are already used 1202 diskControllers := getSCSIControllers(devices) 1203 if len(diskControllers) >= 4 { 1204 return fmt.Errorf("[ERROR] Maximum number of SCSI controllers created") 1205 } 1206 1207 log.Printf("[DEBUG] Couldn't find a %v controller. Creating one..", controller_type) 1208 1209 var c types.BaseVirtualDevice 1210 switch controller_type { 1211 case "scsi": 1212 // Create scsi controller 1213 c, err = devices.CreateSCSIController("scsi") 1214 if err != nil { 1215 return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err) 1216 } 1217 case "scsi-lsi-parallel": 1218 // Create scsi controller 1219 c, err = devices.CreateSCSIController("lsilogic") 1220 if err != nil { 1221 return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err) 1222 } 1223 case "scsi-buslogic": 1224 // Create scsi controller 1225 c, err = devices.CreateSCSIController("buslogic") 1226 if err != nil { 1227 return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err) 1228 } 1229 case "scsi-paravirtual": 1230 // Create scsi controller 1231 c, err = devices.CreateSCSIController("pvscsi") 1232 if err != nil { 1233 return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err) 1234 } 1235 case "scsi-lsi-sas": 1236 // Create scsi controller 1237 c, err = devices.CreateSCSIController("lsilogic-sas") 1238 if err != nil { 1239 return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err) 1240 } 1241 case "ide": 1242 // Create ide controller 1243 c, err = devices.CreateIDEController() 1244 if err != nil { 1245 return fmt.Errorf("[ERROR] Failed creating IDE controller: %v", err) 1246 } 1247 default: 1248 return fmt.Errorf("[ERROR] Unsupported disk controller provided: %v", controller_type) 1249 } 1250 1251 vm.AddDevice(context.TODO(), c) 1252 // Update our devices list 1253 devices, err := vm.Device(context.TODO()) 1254 if err != nil { 1255 return err 1256 } 1257 controller = devices.PickController(c.(types.BaseVirtualController)) 1258 if controller == nil { 1259 log.Printf("[ERROR] Could not find the new %v controller", controller_type) 1260 return fmt.Errorf("Could not find the new %v controller", controller_type) 1261 } 1262 } 1263 1264 log.Printf("[DEBUG] disk controller: %#v\n", controller) 1265 1266 // TODO Check if diskPath & datastore exist 1267 // If diskPath is not specified, pass empty string to CreateDisk() 1268 if diskPath == "" { 1269 return fmt.Errorf("[ERROR] addHardDisk - No path proided") 1270 } else { 1271 // TODO Check if diskPath & datastore exist 1272 diskPath = fmt.Sprintf("[%v] %v", datastore.Name(), diskPath) 1273 } 1274 log.Printf("[DEBUG] addHardDisk - diskPath: %v", diskPath) 1275 disk := devices.CreateDisk(controller, datastore.Reference(), diskPath) 1276 1277 if strings.Contains(controller_type, "scsi") { 1278 unitNumber, err := getNextUnitNumber(devices, controller) 1279 if err != nil { 1280 return err 1281 } 1282 *disk.UnitNumber = unitNumber 1283 } 1284 1285 existing := devices.SelectByBackingInfo(disk.Backing) 1286 log.Printf("[DEBUG] disk: %#v\n", disk) 1287 1288 if len(existing) == 0 { 1289 disk.CapacityInKB = int64(size * 1024 * 1024) 1290 if iops != 0 { 1291 disk.StorageIOAllocation = &types.StorageIOAllocationInfo{ 1292 Limit: iops, 1293 } 1294 } 1295 backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo) 1296 1297 if diskType == "eager_zeroed" { 1298 // eager zeroed thick virtual disk 1299 backing.ThinProvisioned = types.NewBool(false) 1300 backing.EagerlyScrub = types.NewBool(true) 1301 } else if diskType == "thin" { 1302 // thin provisioned virtual disk 1303 backing.ThinProvisioned = types.NewBool(true) 1304 } 1305 1306 log.Printf("[DEBUG] addHardDisk: %#v\n", disk) 1307 log.Printf("[DEBUG] addHardDisk capacity: %#v\n", disk.CapacityInKB) 1308 1309 return vm.AddDevice(context.TODO(), disk) 1310 } else { 1311 log.Printf("[DEBUG] addHardDisk: Disk already present.\n") 1312 1313 return nil 1314 } 1315 } 1316 1317 func getSCSIControllers(vmDevices object.VirtualDeviceList) []*types.VirtualController { 1318 // get virtual scsi controllers of all supported types 1319 var scsiControllers []*types.VirtualController 1320 for _, device := range vmDevices { 1321 devType := vmDevices.Type(device) 1322 switch devType { 1323 case "scsi", "lsilogic", "buslogic", "pvscsi", "lsilogic-sas": 1324 if c, ok := device.(types.BaseVirtualController); ok { 1325 scsiControllers = append(scsiControllers, c.GetVirtualController()) 1326 } 1327 } 1328 } 1329 return scsiControllers 1330 } 1331 1332 func getNextUnitNumber(devices object.VirtualDeviceList, c types.BaseVirtualController) (int32, error) { 1333 key := c.GetVirtualController().Key 1334 1335 var unitNumbers [16]bool 1336 unitNumbers[7] = true 1337 1338 for _, device := range devices { 1339 d := device.GetVirtualDevice() 1340 1341 if d.ControllerKey == key { 1342 if d.UnitNumber != nil { 1343 unitNumbers[*d.UnitNumber] = true 1344 } 1345 } 1346 } 1347 for i, taken := range unitNumbers { 1348 if !taken { 1349 return int32(i), nil 1350 } 1351 } 1352 return -1, fmt.Errorf("[ERROR] getNextUnitNumber - controller is full") 1353 } 1354 1355 // addCdrom adds a new virtual cdrom drive to the VirtualMachine and attaches an image (ISO) to it from a datastore path. 1356 func addCdrom(vm *object.VirtualMachine, datastore, path string) error { 1357 devices, err := vm.Device(context.TODO()) 1358 if err != nil { 1359 return err 1360 } 1361 log.Printf("[DEBUG] vm devices: %#v", devices) 1362 1363 var controller *types.VirtualIDEController 1364 controller, err = devices.FindIDEController("") 1365 if err != nil { 1366 log.Printf("[DEBUG] Couldn't find a ide controller. Creating one..") 1367 1368 var c types.BaseVirtualDevice 1369 c, err := devices.CreateIDEController() 1370 if err != nil { 1371 return fmt.Errorf("[ERROR] Failed creating IDE controller: %v", err) 1372 } 1373 1374 if v, ok := c.(*types.VirtualIDEController); ok { 1375 controller = v 1376 } else { 1377 return fmt.Errorf("[ERROR] Controller type could not be asserted") 1378 } 1379 vm.AddDevice(context.TODO(), c) 1380 // Update our devices list 1381 devices, err := vm.Device(context.TODO()) 1382 if err != nil { 1383 return err 1384 } 1385 controller, err = devices.FindIDEController("") 1386 if err != nil { 1387 log.Printf("[ERROR] Could not find the new disk IDE controller: %v", err) 1388 return err 1389 } 1390 } 1391 log.Printf("[DEBUG] ide controller: %#v", controller) 1392 1393 c, err := devices.CreateCdrom(controller) 1394 if err != nil { 1395 return err 1396 } 1397 1398 c = devices.InsertIso(c, fmt.Sprintf("[%s] %s", datastore, path)) 1399 log.Printf("[DEBUG] addCdrom: %#v", c) 1400 1401 return vm.AddDevice(context.TODO(), c) 1402 } 1403 1404 // buildNetworkDevice builds VirtualDeviceConfigSpec for Network Device. 1405 func buildNetworkDevice(f *find.Finder, label, adapterType string, macAddress string) (*types.VirtualDeviceConfigSpec, error) { 1406 network, err := f.Network(context.TODO(), "*"+label) 1407 if err != nil { 1408 return nil, err 1409 } 1410 1411 backing, err := network.EthernetCardBackingInfo(context.TODO()) 1412 if err != nil { 1413 return nil, err 1414 } 1415 1416 var address_type string 1417 if macAddress == "" { 1418 address_type = string(types.VirtualEthernetCardMacTypeGenerated) 1419 } else { 1420 address_type = string(types.VirtualEthernetCardMacTypeManual) 1421 } 1422 1423 if adapterType == "vmxnet3" { 1424 return &types.VirtualDeviceConfigSpec{ 1425 Operation: types.VirtualDeviceConfigSpecOperationAdd, 1426 Device: &types.VirtualVmxnet3{ 1427 VirtualVmxnet: types.VirtualVmxnet{ 1428 VirtualEthernetCard: types.VirtualEthernetCard{ 1429 VirtualDevice: types.VirtualDevice{ 1430 Key: -1, 1431 Backing: backing, 1432 }, 1433 AddressType: address_type, 1434 MacAddress: macAddress, 1435 }, 1436 }, 1437 }, 1438 }, nil 1439 } else if adapterType == "e1000" { 1440 return &types.VirtualDeviceConfigSpec{ 1441 Operation: types.VirtualDeviceConfigSpecOperationAdd, 1442 Device: &types.VirtualE1000{ 1443 VirtualEthernetCard: types.VirtualEthernetCard{ 1444 VirtualDevice: types.VirtualDevice{ 1445 Key: -1, 1446 Backing: backing, 1447 }, 1448 AddressType: address_type, 1449 MacAddress: macAddress, 1450 }, 1451 }, 1452 }, nil 1453 } else { 1454 return nil, fmt.Errorf("Invalid network adapter type.") 1455 } 1456 } 1457 1458 // buildVMRelocateSpec builds VirtualMachineRelocateSpec to set a place for a new VirtualMachine. 1459 func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine, linkedClone bool, initType string) (types.VirtualMachineRelocateSpec, error) { 1460 var key int32 1461 var moveType string 1462 if linkedClone { 1463 moveType = "createNewChildDiskBacking" 1464 } else { 1465 moveType = "moveAllDiskBackingsAndDisallowSharing" 1466 } 1467 log.Printf("[DEBUG] relocate type: [%s]", moveType) 1468 1469 devices, err := vm.Device(context.TODO()) 1470 if err != nil { 1471 return types.VirtualMachineRelocateSpec{}, err 1472 } 1473 for _, d := range devices { 1474 if devices.Type(d) == "disk" { 1475 key = int32(d.GetVirtualDevice().Key) 1476 } 1477 } 1478 1479 isThin := initType == "thin" 1480 rpr := rp.Reference() 1481 dsr := ds.Reference() 1482 return types.VirtualMachineRelocateSpec{ 1483 Datastore: &dsr, 1484 Pool: &rpr, 1485 DiskMoveType: moveType, 1486 Disk: []types.VirtualMachineRelocateSpecDiskLocator{ 1487 { 1488 Datastore: dsr, 1489 DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{ 1490 DiskMode: "persistent", 1491 ThinProvisioned: types.NewBool(isThin), 1492 EagerlyScrub: types.NewBool(!isThin), 1493 }, 1494 DiskId: key, 1495 }, 1496 }, 1497 }, nil 1498 } 1499 1500 // getDatastoreObject gets datastore object. 1501 func getDatastoreObject(client *govmomi.Client, f *object.DatacenterFolders, name string) (types.ManagedObjectReference, error) { 1502 s := object.NewSearchIndex(client.Client) 1503 ref, err := s.FindChild(context.TODO(), f.DatastoreFolder, name) 1504 if err != nil { 1505 return types.ManagedObjectReference{}, err 1506 } 1507 if ref == nil { 1508 return types.ManagedObjectReference{}, fmt.Errorf("Datastore '%s' not found.", name) 1509 } 1510 log.Printf("[DEBUG] getDatastoreObject: reference: %#v", ref) 1511 return ref.Reference(), nil 1512 } 1513 1514 // buildStoragePlacementSpecCreate builds StoragePlacementSpec for create action. 1515 func buildStoragePlacementSpecCreate(f *object.DatacenterFolders, rp *object.ResourcePool, storagePod object.StoragePod, configSpec types.VirtualMachineConfigSpec) types.StoragePlacementSpec { 1516 vmfr := f.VmFolder.Reference() 1517 rpr := rp.Reference() 1518 spr := storagePod.Reference() 1519 1520 sps := types.StoragePlacementSpec{ 1521 Type: "create", 1522 ConfigSpec: &configSpec, 1523 PodSelectionSpec: types.StorageDrsPodSelectionSpec{ 1524 StoragePod: &spr, 1525 }, 1526 Folder: &vmfr, 1527 ResourcePool: &rpr, 1528 } 1529 log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps) 1530 return sps 1531 } 1532 1533 // buildStoragePlacementSpecClone builds StoragePlacementSpec for clone action. 1534 func buildStoragePlacementSpecClone(c *govmomi.Client, f *object.DatacenterFolders, vm *object.VirtualMachine, rp *object.ResourcePool, storagePod object.StoragePod) types.StoragePlacementSpec { 1535 vmr := vm.Reference() 1536 vmfr := f.VmFolder.Reference() 1537 rpr := rp.Reference() 1538 spr := storagePod.Reference() 1539 1540 var o mo.VirtualMachine 1541 err := vm.Properties(context.TODO(), vmr, []string{"datastore"}, &o) 1542 if err != nil { 1543 return types.StoragePlacementSpec{} 1544 } 1545 ds := object.NewDatastore(c.Client, o.Datastore[0]) 1546 log.Printf("[DEBUG] findDatastore: datastore: %#v\n", ds) 1547 1548 devices, err := vm.Device(context.TODO()) 1549 if err != nil { 1550 return types.StoragePlacementSpec{} 1551 } 1552 1553 var key int32 1554 for _, d := range devices.SelectByType((*types.VirtualDisk)(nil)) { 1555 key = int32(d.GetVirtualDevice().Key) 1556 log.Printf("[DEBUG] findDatastore: virtual devices: %#v\n", d.GetVirtualDevice()) 1557 } 1558 1559 sps := types.StoragePlacementSpec{ 1560 Type: "clone", 1561 Vm: &vmr, 1562 PodSelectionSpec: types.StorageDrsPodSelectionSpec{ 1563 StoragePod: &spr, 1564 }, 1565 CloneSpec: &types.VirtualMachineCloneSpec{ 1566 Location: types.VirtualMachineRelocateSpec{ 1567 Disk: []types.VirtualMachineRelocateSpecDiskLocator{ 1568 { 1569 Datastore: ds.Reference(), 1570 DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{}, 1571 DiskId: key, 1572 }, 1573 }, 1574 Pool: &rpr, 1575 }, 1576 PowerOn: false, 1577 Template: false, 1578 }, 1579 CloneName: "dummy", 1580 Folder: &vmfr, 1581 } 1582 return sps 1583 } 1584 1585 // findDatastore finds Datastore object. 1586 func findDatastore(c *govmomi.Client, sps types.StoragePlacementSpec) (*object.Datastore, error) { 1587 var datastore *object.Datastore 1588 log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps) 1589 1590 srm := object.NewStorageResourceManager(c.Client) 1591 rds, err := srm.RecommendDatastores(context.TODO(), sps) 1592 if err != nil { 1593 return nil, err 1594 } 1595 log.Printf("[DEBUG] findDatastore: recommendDatastores: %#v\n", rds) 1596 1597 spa := rds.Recommendations[0].Action[0].(*types.StoragePlacementAction) 1598 datastore = object.NewDatastore(c.Client, spa.Destination) 1599 log.Printf("[DEBUG] findDatastore: datastore: %#v", datastore) 1600 1601 return datastore, nil 1602 } 1603 1604 // createCdroms is a helper function to attach virtual cdrom devices (and their attached disk images) to a virtual IDE controller. 1605 func createCdroms(vm *object.VirtualMachine, cdroms []cdrom) error { 1606 log.Printf("[DEBUG] add cdroms: %v", cdroms) 1607 for _, cd := range cdroms { 1608 log.Printf("[DEBUG] add cdrom (datastore): %v", cd.datastore) 1609 log.Printf("[DEBUG] add cdrom (cd path): %v", cd.path) 1610 err := addCdrom(vm, cd.datastore, cd.path) 1611 if err != nil { 1612 return err 1613 } 1614 } 1615 1616 return nil 1617 } 1618 1619 func (vm *virtualMachine) setupVirtualMachine(c *govmomi.Client) error { 1620 dc, err := getDatacenter(c, vm.datacenter) 1621 1622 if err != nil { 1623 return err 1624 } 1625 finder := find.NewFinder(c.Client, true) 1626 finder = finder.SetDatacenter(dc) 1627 1628 var template *object.VirtualMachine 1629 var template_mo mo.VirtualMachine 1630 var vm_mo mo.VirtualMachine 1631 if vm.template != "" { 1632 template, err = finder.VirtualMachine(context.TODO(), vm.template) 1633 if err != nil { 1634 return err 1635 } 1636 log.Printf("[DEBUG] template: %#v", template) 1637 1638 err = template.Properties(context.TODO(), template.Reference(), []string{"parent", "config.template", "config.guestId", "resourcePool", "snapshot", "guest.toolsVersionStatus2", "config.guestFullName"}, &template_mo) 1639 if err != nil { 1640 return err 1641 } 1642 } 1643 1644 var resourcePool *object.ResourcePool 1645 if vm.resourcePool == "" { 1646 if vm.cluster == "" { 1647 resourcePool, err = finder.DefaultResourcePool(context.TODO()) 1648 if err != nil { 1649 return err 1650 } 1651 } else { 1652 resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources") 1653 if err != nil { 1654 return err 1655 } 1656 } 1657 } else { 1658 resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool) 1659 if err != nil { 1660 return err 1661 } 1662 } 1663 log.Printf("[DEBUG] resource pool: %#v", resourcePool) 1664 1665 dcFolders, err := dc.Folders(context.TODO()) 1666 if err != nil { 1667 return err 1668 } 1669 log.Printf("[DEBUG] folder: %#v", vm.folder) 1670 1671 folder := dcFolders.VmFolder 1672 if len(vm.folder) > 0 { 1673 si := object.NewSearchIndex(c.Client) 1674 folderRef, err := si.FindByInventoryPath( 1675 context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder)) 1676 if err != nil { 1677 return fmt.Errorf("Error reading folder %s: %s", vm.folder, err) 1678 } else if folderRef == nil { 1679 return fmt.Errorf("Cannot find folder %s", vm.folder) 1680 } else { 1681 folder = folderRef.(*object.Folder) 1682 } 1683 } 1684 1685 // make config spec 1686 configSpec := types.VirtualMachineConfigSpec{ 1687 Name: vm.name, 1688 NumCPUs: vm.vcpu, 1689 NumCoresPerSocket: 1, 1690 MemoryMB: vm.memoryMb, 1691 MemoryAllocation: &types.ResourceAllocationInfo{ 1692 Reservation: vm.memoryAllocation.reservation, 1693 }, 1694 Flags: &types.VirtualMachineFlagInfo{ 1695 DiskUuidEnabled: &vm.enableDiskUUID, 1696 }, 1697 } 1698 if vm.template == "" { 1699 configSpec.GuestId = "otherLinux64Guest" 1700 } 1701 log.Printf("[DEBUG] virtual machine config spec: %v", configSpec) 1702 1703 // make ExtraConfig 1704 log.Printf("[DEBUG] virtual machine Extra Config spec start") 1705 if len(vm.customConfigurations) > 0 { 1706 var ov []types.BaseOptionValue 1707 for k, v := range vm.customConfigurations { 1708 key := k 1709 value := v 1710 o := types.OptionValue{ 1711 Key: key, 1712 Value: &value, 1713 } 1714 log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k, v) 1715 ov = append(ov, &o) 1716 } 1717 configSpec.ExtraConfig = ov 1718 log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig) 1719 } 1720 1721 var datastore *object.Datastore 1722 if vm.datastore == "" { 1723 datastore, err = finder.DefaultDatastore(context.TODO()) 1724 if err != nil { 1725 return err 1726 } 1727 } else { 1728 datastore, err = finder.Datastore(context.TODO(), vm.datastore) 1729 if err != nil { 1730 // TODO: datastore cluster support in govmomi finder function 1731 d, err := getDatastoreObject(c, dcFolders, vm.datastore) 1732 if err != nil { 1733 return err 1734 } 1735 1736 if d.Type == "StoragePod" { 1737 sp := object.StoragePod{ 1738 Folder: object.NewFolder(c.Client, d), 1739 } 1740 1741 var sps types.StoragePlacementSpec 1742 if vm.template != "" { 1743 sps = buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp) 1744 } else { 1745 sps = buildStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec) 1746 } 1747 1748 datastore, err = findDatastore(c, sps) 1749 if err != nil { 1750 return err 1751 } 1752 } else { 1753 datastore = object.NewDatastore(c.Client, d) 1754 } 1755 } 1756 } 1757 1758 log.Printf("[DEBUG] datastore: %#v", datastore) 1759 1760 // network 1761 networkDevices := []types.BaseVirtualDeviceConfigSpec{} 1762 networkConfigs := []types.CustomizationAdapterMapping{} 1763 for _, network := range vm.networkInterfaces { 1764 // network device 1765 var networkDeviceType string 1766 if vm.template == "" { 1767 networkDeviceType = "e1000" 1768 } else { 1769 networkDeviceType = "vmxnet3" 1770 } 1771 nd, err := buildNetworkDevice(finder, network.label, networkDeviceType, network.macAddress) 1772 if err != nil { 1773 return err 1774 } 1775 log.Printf("[DEBUG] network device: %+v", nd.Device) 1776 networkDevices = append(networkDevices, nd) 1777 1778 if vm.template != "" { 1779 var ipSetting types.CustomizationIPSettings 1780 if network.ipv4Address == "" { 1781 ipSetting.Ip = &types.CustomizationDhcpIpGenerator{} 1782 } else { 1783 if network.ipv4PrefixLength == 0 { 1784 return fmt.Errorf("Error: ipv4_prefix_length argument is empty.") 1785 } 1786 m := net.CIDRMask(network.ipv4PrefixLength, 32) 1787 sm := net.IPv4(m[0], m[1], m[2], m[3]) 1788 subnetMask := sm.String() 1789 log.Printf("[DEBUG] ipv4 gateway: %v\n", network.ipv4Gateway) 1790 log.Printf("[DEBUG] ipv4 address: %v\n", network.ipv4Address) 1791 log.Printf("[DEBUG] ipv4 prefix length: %v\n", network.ipv4PrefixLength) 1792 log.Printf("[DEBUG] ipv4 subnet mask: %v\n", subnetMask) 1793 ipSetting.Gateway = []string{ 1794 network.ipv4Gateway, 1795 } 1796 ipSetting.Ip = &types.CustomizationFixedIp{ 1797 IpAddress: network.ipv4Address, 1798 } 1799 ipSetting.SubnetMask = subnetMask 1800 } 1801 1802 ipv6Spec := &types.CustomizationIPSettingsIpV6AddressSpec{} 1803 if network.ipv6Address == "" { 1804 ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{ 1805 &types.CustomizationDhcpIpV6Generator{}, 1806 } 1807 } else { 1808 log.Printf("[DEBUG] ipv6 gateway: %v\n", network.ipv6Gateway) 1809 log.Printf("[DEBUG] ipv6 address: %v\n", network.ipv6Address) 1810 log.Printf("[DEBUG] ipv6 prefix length: %v\n", network.ipv6PrefixLength) 1811 1812 ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{ 1813 &types.CustomizationFixedIpV6{ 1814 IpAddress: network.ipv6Address, 1815 SubnetMask: int32(network.ipv6PrefixLength), 1816 }, 1817 } 1818 ipv6Spec.Gateway = []string{network.ipv6Gateway} 1819 } 1820 ipSetting.IpV6Spec = ipv6Spec 1821 1822 // network config 1823 config := types.CustomizationAdapterMapping{ 1824 Adapter: ipSetting, 1825 } 1826 networkConfigs = append(networkConfigs, config) 1827 } 1828 } 1829 log.Printf("[DEBUG] network devices: %#v", networkDevices) 1830 log.Printf("[DEBUG] network configs: %#v", networkConfigs) 1831 1832 var task *object.Task 1833 if vm.template == "" { 1834 var mds mo.Datastore 1835 if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil { 1836 return err 1837 } 1838 log.Printf("[DEBUG] datastore: %#v", mds.Name) 1839 scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi") 1840 if err != nil { 1841 log.Printf("[ERROR] %s", err) 1842 } 1843 1844 configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{ 1845 Operation: types.VirtualDeviceConfigSpecOperationAdd, 1846 Device: scsi, 1847 }) 1848 1849 configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)} 1850 1851 task, err = folder.CreateVM(context.TODO(), configSpec, resourcePool, nil) 1852 if err != nil { 1853 log.Printf("[ERROR] %s", err) 1854 } 1855 1856 err = task.Wait(context.TODO()) 1857 if err != nil { 1858 log.Printf("[ERROR] %s", err) 1859 } 1860 1861 } else { 1862 1863 relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template, vm.linkedClone, vm.hardDisks[0].initType) 1864 if err != nil { 1865 return err 1866 } 1867 1868 log.Printf("[DEBUG] relocate spec: %v", relocateSpec) 1869 1870 // make vm clone spec 1871 cloneSpec := types.VirtualMachineCloneSpec{ 1872 Location: relocateSpec, 1873 Template: false, 1874 Config: &configSpec, 1875 PowerOn: false, 1876 } 1877 if vm.linkedClone { 1878 if template_mo.Snapshot == nil { 1879 return fmt.Errorf("`linkedClone=true`, but image VM has no snapshots") 1880 } 1881 cloneSpec.Snapshot = template_mo.Snapshot.CurrentSnapshot 1882 } 1883 log.Printf("[DEBUG] clone spec: %v", cloneSpec) 1884 1885 task, err = template.Clone(context.TODO(), folder, vm.name, cloneSpec) 1886 if err != nil { 1887 return err 1888 } 1889 } 1890 1891 err = task.Wait(context.TODO()) 1892 if err != nil { 1893 log.Printf("[ERROR] %s", err) 1894 } 1895 1896 newVM, err := finder.VirtualMachine(context.TODO(), vm.Path()) 1897 if err != nil { 1898 return err 1899 } 1900 log.Printf("[DEBUG] new vm: %v", newVM) 1901 1902 devices, err := newVM.Device(context.TODO()) 1903 if err != nil { 1904 log.Printf("[DEBUG] Template devices can't be found") 1905 return err 1906 } 1907 1908 for _, dvc := range devices { 1909 // Issue 3559/3560: Delete all ethernet devices to add the correct ones later 1910 if devices.Type(dvc) == "ethernet" { 1911 err := newVM.RemoveDevice(context.TODO(), false, dvc) 1912 if err != nil { 1913 return err 1914 } 1915 } 1916 } 1917 // Add Network devices 1918 for _, dvc := range networkDevices { 1919 err := newVM.AddDevice( 1920 context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device) 1921 if err != nil { 1922 return err 1923 } 1924 } 1925 1926 // Create the cdroms if needed. 1927 if err := createCdroms(newVM, vm.cdroms); err != nil { 1928 return err 1929 } 1930 1931 newVM.Properties(context.TODO(), newVM.Reference(), []string{"summary", "config"}, &vm_mo) 1932 firstDisk := 0 1933 if vm.template != "" { 1934 firstDisk++ 1935 } 1936 for i := firstDisk; i < len(vm.hardDisks); i++ { 1937 log.Printf("[DEBUG] disk index: %v", i) 1938 1939 var diskPath string 1940 switch { 1941 case vm.hardDisks[i].vmdkPath != "": 1942 diskPath = vm.hardDisks[i].vmdkPath 1943 case vm.hardDisks[i].name != "": 1944 snapshotFullDir := vm_mo.Config.Files.SnapshotDirectory 1945 split := strings.Split(snapshotFullDir, " ") 1946 if len(split) != 2 { 1947 return fmt.Errorf("[ERROR] setupVirtualMachine - failed to split snapshot directory: %v", snapshotFullDir) 1948 } 1949 vmWorkingPath := split[1] 1950 diskPath = vmWorkingPath + vm.hardDisks[i].name 1951 default: 1952 return fmt.Errorf("[ERROR] setupVirtualMachine - Neither vmdk path nor vmdk name was given: %#v", vm.hardDisks[i]) 1953 } 1954 1955 err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, diskPath, vm.hardDisks[i].controller) 1956 if err != nil { 1957 err2 := addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, diskPath, vm.hardDisks[i].controller) 1958 if err2 != nil { 1959 return err2 1960 } 1961 return err 1962 } 1963 } 1964 1965 if vm.skipCustomization || vm.template == "" { 1966 log.Printf("[DEBUG] VM customization skipped") 1967 } else { 1968 var identity_options types.BaseCustomizationIdentitySettings 1969 if strings.HasPrefix(template_mo.Config.GuestId, "win") { 1970 var timeZone int 1971 if vm.timeZone == "Etc/UTC" { 1972 vm.timeZone = "085" 1973 } 1974 timeZone, err := strconv.Atoi(vm.timeZone) 1975 if err != nil { 1976 return fmt.Errorf("Error converting TimeZone: %s", err) 1977 } 1978 1979 guiUnattended := types.CustomizationGuiUnattended{ 1980 AutoLogon: false, 1981 AutoLogonCount: 1, 1982 TimeZone: int32(timeZone), 1983 } 1984 1985 customIdentification := types.CustomizationIdentification{} 1986 1987 userData := types.CustomizationUserData{ 1988 ComputerName: &types.CustomizationFixedName{ 1989 Name: strings.Split(vm.name, ".")[0], 1990 }, 1991 ProductId: vm.windowsOptionalConfig.productKey, 1992 FullName: "terraform", 1993 OrgName: "terraform", 1994 } 1995 1996 if vm.windowsOptionalConfig.domainUserPassword != "" && vm.windowsOptionalConfig.domainUser != "" && vm.windowsOptionalConfig.domain != "" { 1997 customIdentification.DomainAdminPassword = &types.CustomizationPassword{ 1998 PlainText: true, 1999 Value: vm.windowsOptionalConfig.domainUserPassword, 2000 } 2001 customIdentification.DomainAdmin = vm.windowsOptionalConfig.domainUser 2002 customIdentification.JoinDomain = vm.windowsOptionalConfig.domain 2003 } 2004 2005 if vm.windowsOptionalConfig.adminPassword != "" { 2006 guiUnattended.Password = &types.CustomizationPassword{ 2007 PlainText: true, 2008 Value: vm.windowsOptionalConfig.adminPassword, 2009 } 2010 } 2011 2012 identity_options = &types.CustomizationSysprep{ 2013 GuiUnattended: guiUnattended, 2014 Identification: customIdentification, 2015 UserData: userData, 2016 } 2017 } else { 2018 identity_options = &types.CustomizationLinuxPrep{ 2019 HostName: &types.CustomizationFixedName{ 2020 Name: strings.Split(vm.name, ".")[0], 2021 }, 2022 Domain: vm.domain, 2023 TimeZone: vm.timeZone, 2024 HwClockUTC: types.NewBool(true), 2025 } 2026 } 2027 2028 // create CustomizationSpec 2029 customSpec := types.CustomizationSpec{ 2030 Identity: identity_options, 2031 GlobalIPSettings: types.CustomizationGlobalIPSettings{ 2032 DnsSuffixList: vm.dnsSuffixes, 2033 DnsServerList: vm.dnsServers, 2034 }, 2035 NicSettingMap: networkConfigs, 2036 } 2037 log.Printf("[DEBUG] custom spec: %v", customSpec) 2038 2039 log.Printf("[DEBUG] VM customization starting") 2040 taskb, err := newVM.Customize(context.TODO(), customSpec) 2041 if err != nil { 2042 return err 2043 } 2044 _, err = taskb.WaitForResult(context.TODO(), nil) 2045 if err != nil { 2046 return err 2047 } 2048 log.Printf("[DEBUG] VM customization finished") 2049 } 2050 2051 if vm.hasBootableVmdk || vm.template != "" { 2052 newVM.PowerOn(context.TODO()) 2053 err = newVM.WaitForPowerState(context.TODO(), types.VirtualMachinePowerStatePoweredOn) 2054 if err != nil { 2055 return err 2056 } 2057 } 2058 return nil 2059 }