github.com/anfernee/terraform@v0.6.16-0.20160430000239-06e5085a92f2/builtin/providers/vsphere/resource_vsphere_virtual_machine.go (about) 1 package vsphere 2 3 import ( 4 "fmt" 5 "log" 6 "net" 7 "strconv" 8 "strings" 9 "time" 10 11 "github.com/hashicorp/terraform/helper/resource" 12 "github.com/hashicorp/terraform/helper/schema" 13 "github.com/vmware/govmomi" 14 "github.com/vmware/govmomi/find" 15 "github.com/vmware/govmomi/object" 16 "github.com/vmware/govmomi/property" 17 "github.com/vmware/govmomi/vim25/mo" 18 "github.com/vmware/govmomi/vim25/types" 19 "golang.org/x/net/context" 20 ) 21 22 var DefaultDNSSuffixes = []string{ 23 "vsphere.local", 24 } 25 26 var DefaultDNSServers = []string{ 27 "8.8.8.8", 28 "8.8.4.4", 29 } 30 31 type networkInterface struct { 32 deviceName string 33 label string 34 ipv4Address string 35 ipv4PrefixLength int 36 ipv6Address string 37 ipv6PrefixLength int 38 adapterType string // TODO: Make "adapter_type" argument 39 } 40 41 type hardDisk struct { 42 size int64 43 iops int64 44 initType string 45 vmdkPath string 46 } 47 48 //Additional options Vsphere can use clones of windows machines 49 type windowsOptConfig struct { 50 productKey string 51 adminPassword string 52 domainUser string 53 domain string 54 domainUserPassword string 55 } 56 57 type cdrom struct { 58 datastore string 59 path string 60 } 61 62 type memoryAllocation struct { 63 reservation int64 64 } 65 66 type virtualMachine struct { 67 name string 68 folder string 69 datacenter string 70 cluster string 71 resourcePool string 72 datastore string 73 vcpu int 74 memoryMb int64 75 memoryAllocation memoryAllocation 76 template string 77 networkInterfaces []networkInterface 78 hardDisks []hardDisk 79 cdroms []cdrom 80 gateway string 81 domain string 82 timeZone string 83 dnsSuffixes []string 84 dnsServers []string 85 bootableVmdk bool 86 linkedClone bool 87 skipCustomization bool 88 windowsOptionalConfig windowsOptConfig 89 customConfigurations map[string](types.AnyType) 90 } 91 92 func (v virtualMachine) Path() string { 93 return vmPath(v.folder, v.name) 94 } 95 96 func vmPath(folder string, name string) string { 97 var path string 98 if len(folder) > 0 { 99 path += folder + "/" 100 } 101 return path + name 102 } 103 104 func resourceVSphereVirtualMachine() *schema.Resource { 105 return &schema.Resource{ 106 Create: resourceVSphereVirtualMachineCreate, 107 Read: resourceVSphereVirtualMachineRead, 108 Delete: resourceVSphereVirtualMachineDelete, 109 110 Schema: map[string]*schema.Schema{ 111 "name": &schema.Schema{ 112 Type: schema.TypeString, 113 Required: true, 114 ForceNew: true, 115 }, 116 117 "folder": &schema.Schema{ 118 Type: schema.TypeString, 119 Optional: true, 120 ForceNew: true, 121 }, 122 123 "vcpu": &schema.Schema{ 124 Type: schema.TypeInt, 125 Required: true, 126 ForceNew: true, 127 }, 128 129 "memory": &schema.Schema{ 130 Type: schema.TypeInt, 131 Required: true, 132 ForceNew: true, 133 }, 134 135 "memory_reservation": &schema.Schema{ 136 Type: schema.TypeInt, 137 Optional: true, 138 Default: 0, 139 ForceNew: true, 140 }, 141 142 "datacenter": &schema.Schema{ 143 Type: schema.TypeString, 144 Optional: true, 145 ForceNew: true, 146 }, 147 148 "cluster": &schema.Schema{ 149 Type: schema.TypeString, 150 Optional: true, 151 ForceNew: true, 152 }, 153 154 "resource_pool": &schema.Schema{ 155 Type: schema.TypeString, 156 Optional: true, 157 ForceNew: true, 158 }, 159 160 "linked_clone": &schema.Schema{ 161 Type: schema.TypeBool, 162 Optional: true, 163 Default: false, 164 ForceNew: true, 165 }, 166 "gateway": &schema.Schema{ 167 Type: schema.TypeString, 168 Optional: true, 169 ForceNew: true, 170 }, 171 172 "domain": &schema.Schema{ 173 Type: schema.TypeString, 174 Optional: true, 175 ForceNew: true, 176 Default: "vsphere.local", 177 }, 178 179 "time_zone": &schema.Schema{ 180 Type: schema.TypeString, 181 Optional: true, 182 ForceNew: true, 183 Default: "Etc/UTC", 184 }, 185 186 "dns_suffixes": &schema.Schema{ 187 Type: schema.TypeList, 188 Optional: true, 189 Elem: &schema.Schema{Type: schema.TypeString}, 190 ForceNew: true, 191 }, 192 193 "dns_servers": &schema.Schema{ 194 Type: schema.TypeList, 195 Optional: true, 196 Elem: &schema.Schema{Type: schema.TypeString}, 197 ForceNew: true, 198 }, 199 200 "skip_customization": &schema.Schema{ 201 Type: schema.TypeBool, 202 Optional: true, 203 ForceNew: true, 204 Default: false, 205 }, 206 207 "custom_configuration_parameters": &schema.Schema{ 208 Type: schema.TypeMap, 209 Optional: true, 210 ForceNew: true, 211 }, 212 "windows_opt_config": &schema.Schema{ 213 Type: schema.TypeList, 214 Optional: true, 215 ForceNew: true, 216 Elem: &schema.Resource{ 217 Schema: map[string]*schema.Schema{ 218 "product_key": &schema.Schema{ 219 Type: schema.TypeString, 220 Required: true, 221 ForceNew: true, 222 }, 223 224 "admin_password": &schema.Schema{ 225 Type: schema.TypeString, 226 Optional: true, 227 ForceNew: true, 228 }, 229 230 "domain_user": &schema.Schema{ 231 Type: schema.TypeString, 232 Optional: true, 233 ForceNew: true, 234 }, 235 236 "domain": &schema.Schema{ 237 Type: schema.TypeString, 238 Optional: true, 239 ForceNew: true, 240 }, 241 242 "domain_user_password": &schema.Schema{ 243 Type: schema.TypeString, 244 Optional: true, 245 ForceNew: true, 246 }, 247 }, 248 }, 249 }, 250 251 "network_interface": &schema.Schema{ 252 Type: schema.TypeList, 253 Required: true, 254 ForceNew: true, 255 Elem: &schema.Resource{ 256 Schema: map[string]*schema.Schema{ 257 "label": &schema.Schema{ 258 Type: schema.TypeString, 259 Required: true, 260 ForceNew: true, 261 }, 262 263 "ip_address": &schema.Schema{ 264 Type: schema.TypeString, 265 Optional: true, 266 Computed: true, 267 Deprecated: "Please use ipv4_address", 268 }, 269 270 "subnet_mask": &schema.Schema{ 271 Type: schema.TypeString, 272 Optional: true, 273 Computed: true, 274 Deprecated: "Please use ipv4_prefix_length", 275 }, 276 277 "ipv4_address": &schema.Schema{ 278 Type: schema.TypeString, 279 Optional: true, 280 Computed: true, 281 }, 282 283 "ipv4_prefix_length": &schema.Schema{ 284 Type: schema.TypeInt, 285 Optional: true, 286 Computed: true, 287 }, 288 289 // TODO: Imprement ipv6 parameters to be optional 290 "ipv6_address": &schema.Schema{ 291 Type: schema.TypeString, 292 Computed: true, 293 ForceNew: true, 294 }, 295 296 "ipv6_prefix_length": &schema.Schema{ 297 Type: schema.TypeInt, 298 Computed: true, 299 ForceNew: true, 300 }, 301 302 "adapter_type": &schema.Schema{ 303 Type: schema.TypeString, 304 Optional: true, 305 ForceNew: true, 306 }, 307 }, 308 }, 309 }, 310 311 "disk": &schema.Schema{ 312 Type: schema.TypeList, 313 Required: true, 314 ForceNew: true, 315 Elem: &schema.Resource{ 316 Schema: map[string]*schema.Schema{ 317 "template": &schema.Schema{ 318 Type: schema.TypeString, 319 Optional: true, 320 ForceNew: true, 321 }, 322 323 "type": &schema.Schema{ 324 Type: schema.TypeString, 325 Optional: true, 326 ForceNew: true, 327 Default: "eager_zeroed", 328 ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { 329 value := v.(string) 330 if value != "thin" && value != "eager_zeroed" { 331 errors = append(errors, fmt.Errorf( 332 "only 'thin' and 'eager_zeroed' are supported values for 'type'")) 333 } 334 return 335 }, 336 }, 337 338 "datastore": &schema.Schema{ 339 Type: schema.TypeString, 340 Optional: true, 341 ForceNew: true, 342 }, 343 344 "size": &schema.Schema{ 345 Type: schema.TypeInt, 346 Optional: true, 347 ForceNew: true, 348 }, 349 350 "iops": &schema.Schema{ 351 Type: schema.TypeInt, 352 Optional: true, 353 ForceNew: true, 354 }, 355 356 "vmdk": &schema.Schema{ 357 // TODO: Add ValidateFunc to confirm path exists 358 Type: schema.TypeString, 359 Optional: true, 360 ForceNew: true, 361 Default: "", 362 }, 363 364 "bootable": &schema.Schema{ 365 Type: schema.TypeBool, 366 Optional: true, 367 Default: false, 368 ForceNew: true, 369 }, 370 }, 371 }, 372 }, 373 374 "cdrom": &schema.Schema{ 375 Type: schema.TypeList, 376 Optional: true, 377 ForceNew: true, 378 Elem: &schema.Resource{ 379 Schema: map[string]*schema.Schema{ 380 "datastore": &schema.Schema{ 381 Type: schema.TypeString, 382 Required: true, 383 ForceNew: true, 384 }, 385 386 "path": &schema.Schema{ 387 Type: schema.TypeString, 388 Required: true, 389 ForceNew: true, 390 }, 391 }, 392 }, 393 }, 394 395 "boot_delay": &schema.Schema{ 396 Type: schema.TypeInt, 397 Optional: true, 398 ForceNew: true, 399 }, 400 }, 401 } 402 } 403 404 func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error { 405 client := meta.(*govmomi.Client) 406 407 vm := virtualMachine{ 408 name: d.Get("name").(string), 409 vcpu: d.Get("vcpu").(int), 410 memoryMb: int64(d.Get("memory").(int)), 411 memoryAllocation: memoryAllocation{ 412 reservation: int64(d.Get("memory_reservation").(int)), 413 }, 414 } 415 416 if v, ok := d.GetOk("folder"); ok { 417 vm.folder = v.(string) 418 } 419 420 if v, ok := d.GetOk("datacenter"); ok { 421 vm.datacenter = v.(string) 422 } 423 424 if v, ok := d.GetOk("cluster"); ok { 425 vm.cluster = v.(string) 426 } 427 428 if v, ok := d.GetOk("resource_pool"); ok { 429 vm.resourcePool = v.(string) 430 } 431 432 if v, ok := d.GetOk("gateway"); ok { 433 vm.gateway = v.(string) 434 } 435 436 if v, ok := d.GetOk("domain"); ok { 437 vm.domain = v.(string) 438 } 439 440 if v, ok := d.GetOk("time_zone"); ok { 441 vm.timeZone = v.(string) 442 } 443 444 if v, ok := d.GetOk("linked_clone"); ok { 445 vm.linkedClone = v.(bool) 446 } 447 448 if v, ok := d.GetOk("skip_customization"); ok { 449 vm.skipCustomization = v.(bool) 450 } 451 452 if raw, ok := d.GetOk("dns_suffixes"); ok { 453 for _, v := range raw.([]interface{}) { 454 vm.dnsSuffixes = append(vm.dnsSuffixes, v.(string)) 455 } 456 } else { 457 vm.dnsSuffixes = DefaultDNSSuffixes 458 } 459 460 if raw, ok := d.GetOk("dns_servers"); ok { 461 for _, v := range raw.([]interface{}) { 462 vm.dnsServers = append(vm.dnsServers, v.(string)) 463 } 464 } else { 465 vm.dnsServers = DefaultDNSServers 466 } 467 468 if vL, ok := d.GetOk("custom_configuration_parameters"); ok { 469 if custom_configs, ok := vL.(map[string]interface{}); ok { 470 custom := make(map[string]types.AnyType) 471 for k, v := range custom_configs { 472 custom[k] = v 473 } 474 vm.customConfigurations = custom 475 log.Printf("[DEBUG] custom_configuration_parameters init: %v", vm.customConfigurations) 476 } 477 } 478 479 if vL, ok := d.GetOk("network_interface"); ok { 480 networks := make([]networkInterface, len(vL.([]interface{}))) 481 for i, v := range vL.([]interface{}) { 482 network := v.(map[string]interface{}) 483 networks[i].label = network["label"].(string) 484 if v, ok := network["ip_address"].(string); ok && v != "" { 485 networks[i].ipv4Address = v 486 } 487 if v, ok := network["subnet_mask"].(string); ok && v != "" { 488 ip := net.ParseIP(v).To4() 489 if ip != nil { 490 mask := net.IPv4Mask(ip[0], ip[1], ip[2], ip[3]) 491 pl, _ := mask.Size() 492 networks[i].ipv4PrefixLength = pl 493 } else { 494 return fmt.Errorf("subnet_mask parameter is invalid.") 495 } 496 } 497 if v, ok := network["ipv4_address"].(string); ok && v != "" { 498 networks[i].ipv4Address = v 499 } 500 if v, ok := network["ipv4_prefix_length"].(int); ok && v != 0 { 501 networks[i].ipv4PrefixLength = v 502 } 503 } 504 vm.networkInterfaces = networks 505 log.Printf("[DEBUG] network_interface init: %v", networks) 506 } 507 508 if vL, ok := d.GetOk("windows_opt_config"); ok { 509 var winOpt windowsOptConfig 510 custom_configs := (vL.([]interface{}))[0].(map[string]interface{}) 511 if v, ok := custom_configs["admin_password"].(string); ok && v != "" { 512 winOpt.adminPassword = v 513 } 514 if v, ok := custom_configs["domain"].(string); ok && v != "" { 515 winOpt.domain = v 516 } 517 if v, ok := custom_configs["domain_user"].(string); ok && v != "" { 518 winOpt.domainUser = v 519 } 520 if v, ok := custom_configs["product_key"].(string); ok && v != "" { 521 winOpt.productKey = v 522 } 523 if v, ok := custom_configs["domain_user_password"].(string); ok && v != "" { 524 winOpt.domainUserPassword = v 525 } 526 vm.windowsOptionalConfig = winOpt 527 log.Printf("[DEBUG] windows config init: %v", winOpt) 528 } 529 530 if vL, ok := d.GetOk("disk"); ok { 531 disks := make([]hardDisk, len(vL.([]interface{}))) 532 for i, v := range vL.([]interface{}) { 533 disk := v.(map[string]interface{}) 534 if i == 0 { 535 if v, ok := disk["template"].(string); ok && v != "" { 536 vm.template = v 537 } else { 538 if v, ok := disk["size"].(int); ok && v != 0 { 539 disks[i].size = int64(v) 540 } else if v, ok := disk["vmdk"].(string); ok && v != "" { 541 disks[i].vmdkPath = v 542 if v, ok := disk["bootable"].(bool); ok { 543 vm.bootableVmdk = v 544 } 545 } else { 546 return fmt.Errorf("template, size, or vmdk argument is required") 547 } 548 } 549 if v, ok := disk["datastore"].(string); ok && v != "" { 550 vm.datastore = v 551 } 552 } else { 553 if v, ok := disk["size"].(int); ok && v != 0 { 554 disks[i].size = int64(v) 555 } else if v, ok := disk["vmdk"].(string); ok && v != "" { 556 disks[i].vmdkPath = v 557 } else { 558 return fmt.Errorf("size or vmdk argument is required") 559 } 560 561 } 562 if v, ok := disk["iops"].(int); ok && v != 0 { 563 disks[i].iops = int64(v) 564 } 565 if v, ok := disk["type"].(string); ok && v != "" { 566 disks[i].initType = v 567 } 568 } 569 vm.hardDisks = disks 570 log.Printf("[DEBUG] disk init: %v", disks) 571 } 572 573 if vL, ok := d.GetOk("cdrom"); ok { 574 cdroms := make([]cdrom, len(vL.([]interface{}))) 575 for i, v := range vL.([]interface{}) { 576 c := v.(map[string]interface{}) 577 if v, ok := c["datastore"].(string); ok && v != "" { 578 cdroms[i].datastore = v 579 } else { 580 return fmt.Errorf("Datastore argument must be specified when attaching a cdrom image.") 581 } 582 if v, ok := c["path"].(string); ok && v != "" { 583 cdroms[i].path = v 584 } else { 585 return fmt.Errorf("Path argument must be specified when attaching a cdrom image.") 586 } 587 } 588 vm.cdroms = cdroms 589 log.Printf("[DEBUG] cdrom init: %v", cdroms) 590 } 591 592 if vm.template != "" { 593 err := vm.deployVirtualMachine(client) 594 if err != nil { 595 return err 596 } 597 } else { 598 err := vm.createVirtualMachine(client) 599 if err != nil { 600 return err 601 } 602 } 603 604 if _, ok := d.GetOk("network_interface.0.ipv4_address"); !ok { 605 if v, ok := d.GetOk("boot_delay"); ok { 606 stateConf := &resource.StateChangeConf{ 607 Pending: []string{"pending"}, 608 Target: []string{"active"}, 609 Refresh: waitForNetworkingActive(client, vm.datacenter, vm.Path()), 610 Timeout: 600 * time.Second, 611 Delay: time.Duration(v.(int)) * time.Second, 612 MinTimeout: 2 * time.Second, 613 } 614 615 _, err := stateConf.WaitForState() 616 if err != nil { 617 return err 618 } 619 } 620 } 621 622 if ip, ok := d.GetOk("network_interface.0.ipv4_address"); ok { 623 d.SetConnInfo(map[string]string{ 624 "host": ip.(string), 625 }) 626 } else { 627 log.Printf("[DEBUG] Could not get IP address for %s", d.Id()) 628 } 629 630 d.SetId(vm.Path()) 631 log.Printf("[INFO] Created virtual machine: %s", d.Id()) 632 633 return resourceVSphereVirtualMachineRead(d, meta) 634 } 635 636 func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{}) error { 637 log.Printf("[DEBUG] reading virtual machine: %#v", d) 638 client := meta.(*govmomi.Client) 639 dc, err := getDatacenter(client, d.Get("datacenter").(string)) 640 if err != nil { 641 return err 642 } 643 finder := find.NewFinder(client.Client, true) 644 finder = finder.SetDatacenter(dc) 645 646 vm, err := finder.VirtualMachine(context.TODO(), d.Id()) 647 if err != nil { 648 d.SetId("") 649 return nil 650 } 651 652 var mvm mo.VirtualMachine 653 654 collector := property.DefaultCollector(client.Client) 655 if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"guest", "summary", "datastore"}, &mvm); err != nil { 656 return err 657 } 658 659 log.Printf("[DEBUG] %#v", dc) 660 log.Printf("[DEBUG] %#v", mvm.Summary.Config) 661 log.Printf("[DEBUG] %#v", mvm.Guest.Net) 662 663 networkInterfaces := make([]map[string]interface{}, 0) 664 for _, v := range mvm.Guest.Net { 665 if v.DeviceConfigId >= 0 { 666 log.Printf("[DEBUG] %#v", v.Network) 667 networkInterface := make(map[string]interface{}) 668 networkInterface["label"] = v.Network 669 for _, ip := range v.IpConfig.IpAddress { 670 p := net.ParseIP(ip.IpAddress) 671 if p.To4() != nil { 672 log.Printf("[DEBUG] %#v", p.String()) 673 log.Printf("[DEBUG] %#v", ip.PrefixLength) 674 networkInterface["ipv4_address"] = p.String() 675 networkInterface["ipv4_prefix_length"] = ip.PrefixLength 676 } else if p.To16() != nil { 677 log.Printf("[DEBUG] %#v", p.String()) 678 log.Printf("[DEBUG] %#v", ip.PrefixLength) 679 networkInterface["ipv6_address"] = p.String() 680 networkInterface["ipv6_prefix_length"] = ip.PrefixLength 681 } 682 log.Printf("[DEBUG] networkInterface: %#v", networkInterface) 683 } 684 log.Printf("[DEBUG] networkInterface: %#v", networkInterface) 685 networkInterfaces = append(networkInterfaces, networkInterface) 686 } 687 } 688 log.Printf("[DEBUG] networkInterfaces: %#v", networkInterfaces) 689 err = d.Set("network_interface", networkInterfaces) 690 if err != nil { 691 return fmt.Errorf("Invalid network interfaces to set: %#v", networkInterfaces) 692 } 693 694 ip, err := vm.WaitForIP(context.TODO()) 695 if err != nil { 696 return err 697 } 698 log.Printf("[DEBUG] ip address: %v", ip) 699 d.SetConnInfo(map[string]string{ 700 "type": "ssh", 701 "host": ip, 702 }) 703 704 var rootDatastore string 705 for _, v := range mvm.Datastore { 706 var md mo.Datastore 707 if err := collector.RetrieveOne(context.TODO(), v, []string{"name", "parent"}, &md); err != nil { 708 return err 709 } 710 if md.Parent.Type == "StoragePod" { 711 var msp mo.StoragePod 712 if err := collector.RetrieveOne(context.TODO(), *md.Parent, []string{"name"}, &msp); err != nil { 713 return err 714 } 715 rootDatastore = msp.Name 716 log.Printf("[DEBUG] %#v", msp.Name) 717 } else { 718 rootDatastore = md.Name 719 log.Printf("[DEBUG] %#v", md.Name) 720 } 721 break 722 } 723 724 d.Set("datacenter", dc) 725 d.Set("memory", mvm.Summary.Config.MemorySizeMB) 726 d.Set("memory_reservation", mvm.Summary.Config.MemoryReservation) 727 d.Set("cpu", mvm.Summary.Config.NumCpu) 728 d.Set("datastore", rootDatastore) 729 730 return nil 731 } 732 733 func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error { 734 client := meta.(*govmomi.Client) 735 dc, err := getDatacenter(client, d.Get("datacenter").(string)) 736 if err != nil { 737 return err 738 } 739 finder := find.NewFinder(client.Client, true) 740 finder = finder.SetDatacenter(dc) 741 742 vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string))) 743 if err != nil { 744 return err 745 } 746 747 log.Printf("[INFO] Deleting virtual machine: %s", d.Id()) 748 state, err := vm.PowerState(context.TODO()) 749 if err != nil { 750 return err 751 } 752 753 if state == types.VirtualMachinePowerStatePoweredOn { 754 task, err := vm.PowerOff(context.TODO()) 755 if err != nil { 756 return err 757 } 758 759 err = task.Wait(context.TODO()) 760 if err != nil { 761 return err 762 } 763 } 764 765 task, err := vm.Destroy(context.TODO()) 766 if err != nil { 767 return err 768 } 769 770 err = task.Wait(context.TODO()) 771 if err != nil { 772 return err 773 } 774 775 d.SetId("") 776 return nil 777 } 778 779 func waitForNetworkingActive(client *govmomi.Client, datacenter, name string) resource.StateRefreshFunc { 780 return func() (interface{}, string, error) { 781 dc, err := getDatacenter(client, datacenter) 782 if err != nil { 783 log.Printf("[ERROR] %#v", err) 784 return nil, "", err 785 } 786 finder := find.NewFinder(client.Client, true) 787 finder = finder.SetDatacenter(dc) 788 789 vm, err := finder.VirtualMachine(context.TODO(), name) 790 if err != nil { 791 log.Printf("[ERROR] %#v", err) 792 return nil, "", err 793 } 794 795 var mvm mo.VirtualMachine 796 collector := property.DefaultCollector(client.Client) 797 if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"summary"}, &mvm); err != nil { 798 log.Printf("[ERROR] %#v", err) 799 return nil, "", err 800 } 801 802 if mvm.Summary.Guest.IpAddress != "" { 803 log.Printf("[DEBUG] IP address with DHCP: %v", mvm.Summary.Guest.IpAddress) 804 return mvm.Summary, "active", err 805 } else { 806 log.Printf("[DEBUG] Waiting for IP address") 807 return nil, "pending", err 808 } 809 } 810 } 811 812 // addHardDisk adds a new Hard Disk to the VirtualMachine. 813 func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string, datastore *object.Datastore, diskPath string) error { 814 devices, err := vm.Device(context.TODO()) 815 if err != nil { 816 return err 817 } 818 log.Printf("[DEBUG] vm devices: %#v\n", devices) 819 820 controller, err := devices.FindDiskController("scsi") 821 if err != nil { 822 return err 823 } 824 log.Printf("[DEBUG] disk controller: %#v\n", controller) 825 826 // If diskPath is not specified, pass empty string to CreateDisk() 827 var newDiskPath string 828 if diskPath == "" { 829 newDiskPath = "" 830 } else { 831 // TODO Check if diskPath & datastore exist 832 newDiskPath = fmt.Sprintf("[%v] %v", datastore.Name(), diskPath) 833 } 834 disk := devices.CreateDisk(controller, newDiskPath) 835 existing := devices.SelectByBackingInfo(disk.Backing) 836 log.Printf("[DEBUG] disk: %#v\n", disk) 837 838 if len(existing) == 0 { 839 disk.CapacityInKB = int64(size * 1024 * 1024) 840 if iops != 0 { 841 disk.StorageIOAllocation = &types.StorageIOAllocationInfo{ 842 Limit: iops, 843 } 844 } 845 backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo) 846 847 if diskType == "eager_zeroed" { 848 // eager zeroed thick virtual disk 849 backing.ThinProvisioned = types.NewBool(false) 850 backing.EagerlyScrub = types.NewBool(true) 851 } else if diskType == "thin" { 852 // thin provisioned virtual disk 853 backing.ThinProvisioned = types.NewBool(true) 854 } 855 856 log.Printf("[DEBUG] addHardDisk: %#v\n", disk) 857 log.Printf("[DEBUG] addHardDisk: %#v\n", disk.CapacityInKB) 858 859 return vm.AddDevice(context.TODO(), disk) 860 } else { 861 log.Printf("[DEBUG] addHardDisk: Disk already present.\n") 862 863 return nil 864 } 865 } 866 867 // addCdrom adds a new virtual cdrom drive to the VirtualMachine and attaches an image (ISO) to it from a datastore path. 868 func addCdrom(vm *object.VirtualMachine, datastore, path string) error { 869 devices, err := vm.Device(context.TODO()) 870 if err != nil { 871 return err 872 } 873 log.Printf("[DEBUG] vm devices: %#v", devices) 874 875 controller, err := devices.FindIDEController("") 876 if err != nil { 877 return err 878 } 879 log.Printf("[DEBUG] ide controller: %#v", controller) 880 881 c, err := devices.CreateCdrom(controller) 882 if err != nil { 883 return err 884 } 885 886 c = devices.InsertIso(c, fmt.Sprintf("[%s] %s", datastore, path)) 887 log.Printf("[DEBUG] addCdrom: %#v", c) 888 889 return vm.AddDevice(context.TODO(), c) 890 } 891 892 // buildNetworkDevice builds VirtualDeviceConfigSpec for Network Device. 893 func buildNetworkDevice(f *find.Finder, label, adapterType string) (*types.VirtualDeviceConfigSpec, error) { 894 network, err := f.Network(context.TODO(), "*"+label) 895 if err != nil { 896 return nil, err 897 } 898 899 backing, err := network.EthernetCardBackingInfo(context.TODO()) 900 if err != nil { 901 return nil, err 902 } 903 904 if adapterType == "vmxnet3" { 905 return &types.VirtualDeviceConfigSpec{ 906 Operation: types.VirtualDeviceConfigSpecOperationAdd, 907 Device: &types.VirtualVmxnet3{ 908 VirtualVmxnet: types.VirtualVmxnet{ 909 VirtualEthernetCard: types.VirtualEthernetCard{ 910 VirtualDevice: types.VirtualDevice{ 911 Key: -1, 912 Backing: backing, 913 }, 914 AddressType: string(types.VirtualEthernetCardMacTypeGenerated), 915 }, 916 }, 917 }, 918 }, nil 919 } else if adapterType == "e1000" { 920 return &types.VirtualDeviceConfigSpec{ 921 Operation: types.VirtualDeviceConfigSpecOperationAdd, 922 Device: &types.VirtualE1000{ 923 VirtualEthernetCard: types.VirtualEthernetCard{ 924 VirtualDevice: types.VirtualDevice{ 925 Key: -1, 926 Backing: backing, 927 }, 928 AddressType: string(types.VirtualEthernetCardMacTypeGenerated), 929 }, 930 }, 931 }, nil 932 } else { 933 return nil, fmt.Errorf("Invalid network adapter type.") 934 } 935 } 936 937 // buildVMRelocateSpec builds VirtualMachineRelocateSpec to set a place for a new VirtualMachine. 938 func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine, linkedClone bool, initType string) (types.VirtualMachineRelocateSpec, error) { 939 var key int 940 var moveType string 941 if linkedClone { 942 moveType = "createNewChildDiskBacking" 943 } else { 944 moveType = "moveAllDiskBackingsAndDisallowSharing" 945 } 946 log.Printf("[DEBUG] relocate type: [%s]", moveType) 947 948 devices, err := vm.Device(context.TODO()) 949 if err != nil { 950 return types.VirtualMachineRelocateSpec{}, err 951 } 952 for _, d := range devices { 953 if devices.Type(d) == "disk" { 954 key = d.GetVirtualDevice().Key 955 } 956 } 957 958 isThin := initType == "thin" 959 rpr := rp.Reference() 960 dsr := ds.Reference() 961 return types.VirtualMachineRelocateSpec{ 962 Datastore: &dsr, 963 Pool: &rpr, 964 DiskMoveType: moveType, 965 Disk: []types.VirtualMachineRelocateSpecDiskLocator{ 966 types.VirtualMachineRelocateSpecDiskLocator{ 967 Datastore: dsr, 968 DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{ 969 DiskMode: "persistent", 970 ThinProvisioned: types.NewBool(isThin), 971 EagerlyScrub: types.NewBool(!isThin), 972 }, 973 DiskId: key, 974 }, 975 }, 976 }, nil 977 } 978 979 // getDatastoreObject gets datastore object. 980 func getDatastoreObject(client *govmomi.Client, f *object.DatacenterFolders, name string) (types.ManagedObjectReference, error) { 981 s := object.NewSearchIndex(client.Client) 982 ref, err := s.FindChild(context.TODO(), f.DatastoreFolder, name) 983 if err != nil { 984 return types.ManagedObjectReference{}, err 985 } 986 if ref == nil { 987 return types.ManagedObjectReference{}, fmt.Errorf("Datastore '%s' not found.", name) 988 } 989 log.Printf("[DEBUG] getDatastoreObject: reference: %#v", ref) 990 return ref.Reference(), nil 991 } 992 993 // buildStoragePlacementSpecCreate builds StoragePlacementSpec for create action. 994 func buildStoragePlacementSpecCreate(f *object.DatacenterFolders, rp *object.ResourcePool, storagePod object.StoragePod, configSpec types.VirtualMachineConfigSpec) types.StoragePlacementSpec { 995 vmfr := f.VmFolder.Reference() 996 rpr := rp.Reference() 997 spr := storagePod.Reference() 998 999 sps := types.StoragePlacementSpec{ 1000 Type: "create", 1001 ConfigSpec: &configSpec, 1002 PodSelectionSpec: types.StorageDrsPodSelectionSpec{ 1003 StoragePod: &spr, 1004 }, 1005 Folder: &vmfr, 1006 ResourcePool: &rpr, 1007 } 1008 log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps) 1009 return sps 1010 } 1011 1012 // buildStoragePlacementSpecClone builds StoragePlacementSpec for clone action. 1013 func buildStoragePlacementSpecClone(c *govmomi.Client, f *object.DatacenterFolders, vm *object.VirtualMachine, rp *object.ResourcePool, storagePod object.StoragePod) types.StoragePlacementSpec { 1014 vmr := vm.Reference() 1015 vmfr := f.VmFolder.Reference() 1016 rpr := rp.Reference() 1017 spr := storagePod.Reference() 1018 1019 var o mo.VirtualMachine 1020 err := vm.Properties(context.TODO(), vmr, []string{"datastore"}, &o) 1021 if err != nil { 1022 return types.StoragePlacementSpec{} 1023 } 1024 ds := object.NewDatastore(c.Client, o.Datastore[0]) 1025 log.Printf("[DEBUG] findDatastore: datastore: %#v\n", ds) 1026 1027 devices, err := vm.Device(context.TODO()) 1028 if err != nil { 1029 return types.StoragePlacementSpec{} 1030 } 1031 1032 var key int 1033 for _, d := range devices.SelectByType((*types.VirtualDisk)(nil)) { 1034 key = d.GetVirtualDevice().Key 1035 log.Printf("[DEBUG] findDatastore: virtual devices: %#v\n", d.GetVirtualDevice()) 1036 } 1037 1038 sps := types.StoragePlacementSpec{ 1039 Type: "clone", 1040 Vm: &vmr, 1041 PodSelectionSpec: types.StorageDrsPodSelectionSpec{ 1042 StoragePod: &spr, 1043 }, 1044 CloneSpec: &types.VirtualMachineCloneSpec{ 1045 Location: types.VirtualMachineRelocateSpec{ 1046 Disk: []types.VirtualMachineRelocateSpecDiskLocator{ 1047 types.VirtualMachineRelocateSpecDiskLocator{ 1048 Datastore: ds.Reference(), 1049 DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{}, 1050 DiskId: key, 1051 }, 1052 }, 1053 Pool: &rpr, 1054 }, 1055 PowerOn: false, 1056 Template: false, 1057 }, 1058 CloneName: "dummy", 1059 Folder: &vmfr, 1060 } 1061 return sps 1062 } 1063 1064 // findDatastore finds Datastore object. 1065 func findDatastore(c *govmomi.Client, sps types.StoragePlacementSpec) (*object.Datastore, error) { 1066 var datastore *object.Datastore 1067 log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps) 1068 1069 srm := object.NewStorageResourceManager(c.Client) 1070 rds, err := srm.RecommendDatastores(context.TODO(), sps) 1071 if err != nil { 1072 return nil, err 1073 } 1074 log.Printf("[DEBUG] findDatastore: recommendDatastores: %#v\n", rds) 1075 1076 spa := rds.Recommendations[0].Action[0].(*types.StoragePlacementAction) 1077 datastore = object.NewDatastore(c.Client, spa.Destination) 1078 log.Printf("[DEBUG] findDatastore: datastore: %#v", datastore) 1079 1080 return datastore, nil 1081 } 1082 1083 // createCdroms is a helper function to attach virtual cdrom devices (and their attached disk images) to a virtual IDE controller. 1084 func createCdroms(vm *object.VirtualMachine, cdroms []cdrom) error { 1085 log.Printf("[DEBUG] add cdroms: %v", cdroms) 1086 for _, cd := range cdroms { 1087 log.Printf("[DEBUG] add cdrom (datastore): %v", cd.datastore) 1088 log.Printf("[DEBUG] add cdrom (cd path): %v", cd.path) 1089 err := addCdrom(vm, cd.datastore, cd.path) 1090 if err != nil { 1091 return err 1092 } 1093 } 1094 1095 return nil 1096 } 1097 1098 // createVirtualMachine creates a new VirtualMachine. 1099 func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error { 1100 dc, err := getDatacenter(c, vm.datacenter) 1101 1102 if err != nil { 1103 return err 1104 } 1105 finder := find.NewFinder(c.Client, true) 1106 finder = finder.SetDatacenter(dc) 1107 1108 var resourcePool *object.ResourcePool 1109 if vm.resourcePool == "" { 1110 if vm.cluster == "" { 1111 resourcePool, err = finder.DefaultResourcePool(context.TODO()) 1112 if err != nil { 1113 return err 1114 } 1115 } else { 1116 resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources") 1117 if err != nil { 1118 return err 1119 } 1120 } 1121 } else { 1122 resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool) 1123 if err != nil { 1124 return err 1125 } 1126 } 1127 log.Printf("[DEBUG] resource pool: %#v", resourcePool) 1128 1129 dcFolders, err := dc.Folders(context.TODO()) 1130 if err != nil { 1131 return err 1132 } 1133 1134 log.Printf("[DEBUG] folder: %#v", vm.folder) 1135 folder := dcFolders.VmFolder 1136 if len(vm.folder) > 0 { 1137 si := object.NewSearchIndex(c.Client) 1138 folderRef, err := si.FindByInventoryPath( 1139 context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder)) 1140 if err != nil { 1141 return fmt.Errorf("Error reading folder %s: %s", vm.folder, err) 1142 } else if folderRef == nil { 1143 return fmt.Errorf("Cannot find folder %s", vm.folder) 1144 } else { 1145 folder = folderRef.(*object.Folder) 1146 } 1147 } 1148 1149 // network 1150 networkDevices := []types.BaseVirtualDeviceConfigSpec{} 1151 for _, network := range vm.networkInterfaces { 1152 // network device 1153 nd, err := buildNetworkDevice(finder, network.label, "e1000") 1154 if err != nil { 1155 return err 1156 } 1157 networkDevices = append(networkDevices, nd) 1158 } 1159 1160 // make config spec 1161 configSpec := types.VirtualMachineConfigSpec{ 1162 GuestId: "otherLinux64Guest", 1163 Name: vm.name, 1164 NumCPUs: vm.vcpu, 1165 NumCoresPerSocket: 1, 1166 MemoryMB: vm.memoryMb, 1167 MemoryAllocation: &types.ResourceAllocationInfo{ 1168 Reservation: vm.memoryAllocation.reservation, 1169 }, 1170 DeviceChange: networkDevices, 1171 } 1172 log.Printf("[DEBUG] virtual machine config spec: %v", configSpec) 1173 1174 // make ExtraConfig 1175 log.Printf("[DEBUG] virtual machine Extra Config spec start") 1176 if len(vm.customConfigurations) > 0 { 1177 var ov []types.BaseOptionValue 1178 for k, v := range vm.customConfigurations { 1179 key := k 1180 value := v 1181 o := types.OptionValue{ 1182 Key: key, 1183 Value: &value, 1184 } 1185 log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k, v) 1186 ov = append(ov, &o) 1187 } 1188 configSpec.ExtraConfig = ov 1189 log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig) 1190 } 1191 1192 var datastore *object.Datastore 1193 if vm.datastore == "" { 1194 datastore, err = finder.DefaultDatastore(context.TODO()) 1195 if err != nil { 1196 return err 1197 } 1198 } else { 1199 datastore, err = finder.Datastore(context.TODO(), vm.datastore) 1200 if err != nil { 1201 // TODO: datastore cluster support in govmomi finder function 1202 d, err := getDatastoreObject(c, dcFolders, vm.datastore) 1203 if err != nil { 1204 return err 1205 } 1206 1207 if d.Type == "StoragePod" { 1208 sp := object.StoragePod{ 1209 Folder: object.NewFolder(c.Client, d), 1210 } 1211 sps := buildStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec) 1212 datastore, err = findDatastore(c, sps) 1213 if err != nil { 1214 return err 1215 } 1216 } else { 1217 datastore = object.NewDatastore(c.Client, d) 1218 } 1219 } 1220 } 1221 1222 log.Printf("[DEBUG] datastore: %#v", datastore) 1223 1224 var mds mo.Datastore 1225 if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil { 1226 return err 1227 } 1228 log.Printf("[DEBUG] datastore: %#v", mds.Name) 1229 scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi") 1230 if err != nil { 1231 log.Printf("[ERROR] %s", err) 1232 } 1233 1234 configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{ 1235 Operation: types.VirtualDeviceConfigSpecOperationAdd, 1236 Device: scsi, 1237 }) 1238 1239 configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)} 1240 1241 task, err := folder.CreateVM(context.TODO(), configSpec, resourcePool, nil) 1242 if err != nil { 1243 log.Printf("[ERROR] %s", err) 1244 } 1245 1246 err = task.Wait(context.TODO()) 1247 if err != nil { 1248 log.Printf("[ERROR] %s", err) 1249 } 1250 1251 newVM, err := finder.VirtualMachine(context.TODO(), vm.Path()) 1252 if err != nil { 1253 return err 1254 } 1255 log.Printf("[DEBUG] new vm: %v", newVM) 1256 1257 log.Printf("[DEBUG] add hard disk: %v", vm.hardDisks) 1258 for _, hd := range vm.hardDisks { 1259 log.Printf("[DEBUG] add hard disk: %v", hd.size) 1260 log.Printf("[DEBUG] add hard disk: %v", hd.iops) 1261 err = addHardDisk(newVM, hd.size, hd.iops, "thin", datastore, hd.vmdkPath) 1262 if err != nil { 1263 return err 1264 } 1265 } 1266 1267 // Create the cdroms if needed. 1268 if err := createCdroms(newVM, vm.cdroms); err != nil { 1269 return err 1270 } 1271 1272 if vm.bootableVmdk { 1273 newVM.PowerOn(context.TODO()) 1274 ip, err := newVM.WaitForIP(context.TODO()) 1275 if err != nil { 1276 return err 1277 } 1278 log.Printf("[DEBUG] ip address: %v", ip) 1279 } 1280 1281 return nil 1282 } 1283 1284 // deployVirtualMachine deploys a new VirtualMachine. 1285 func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error { 1286 dc, err := getDatacenter(c, vm.datacenter) 1287 if err != nil { 1288 return err 1289 } 1290 finder := find.NewFinder(c.Client, true) 1291 finder = finder.SetDatacenter(dc) 1292 1293 template, err := finder.VirtualMachine(context.TODO(), vm.template) 1294 if err != nil { 1295 return err 1296 } 1297 log.Printf("[DEBUG] template: %#v", template) 1298 1299 var resourcePool *object.ResourcePool 1300 if vm.resourcePool == "" { 1301 if vm.cluster == "" { 1302 resourcePool, err = finder.DefaultResourcePool(context.TODO()) 1303 if err != nil { 1304 return err 1305 } 1306 } else { 1307 resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources") 1308 if err != nil { 1309 return err 1310 } 1311 } 1312 } else { 1313 resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool) 1314 if err != nil { 1315 return err 1316 } 1317 } 1318 log.Printf("[DEBUG] resource pool: %#v", resourcePool) 1319 1320 dcFolders, err := dc.Folders(context.TODO()) 1321 if err != nil { 1322 return err 1323 } 1324 1325 log.Printf("[DEBUG] folder: %#v", vm.folder) 1326 folder := dcFolders.VmFolder 1327 if len(vm.folder) > 0 { 1328 si := object.NewSearchIndex(c.Client) 1329 folderRef, err := si.FindByInventoryPath( 1330 context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder)) 1331 if err != nil { 1332 return fmt.Errorf("Error reading folder %s: %s", vm.folder, err) 1333 } else if folderRef == nil { 1334 return fmt.Errorf("Cannot find folder %s", vm.folder) 1335 } else { 1336 folder = folderRef.(*object.Folder) 1337 } 1338 } 1339 1340 var datastore *object.Datastore 1341 if vm.datastore == "" { 1342 datastore, err = finder.DefaultDatastore(context.TODO()) 1343 if err != nil { 1344 return err 1345 } 1346 } else { 1347 datastore, err = finder.Datastore(context.TODO(), vm.datastore) 1348 if err != nil { 1349 // TODO: datastore cluster support in govmomi finder function 1350 d, err := getDatastoreObject(c, dcFolders, vm.datastore) 1351 if err != nil { 1352 return err 1353 } 1354 1355 if d.Type == "StoragePod" { 1356 sp := object.StoragePod{ 1357 Folder: object.NewFolder(c.Client, d), 1358 } 1359 sps := buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp) 1360 1361 datastore, err = findDatastore(c, sps) 1362 if err != nil { 1363 return err 1364 } 1365 } else { 1366 datastore = object.NewDatastore(c.Client, d) 1367 } 1368 } 1369 } 1370 log.Printf("[DEBUG] datastore: %#v", datastore) 1371 1372 relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template, vm.linkedClone, vm.hardDisks[0].initType) 1373 if err != nil { 1374 return err 1375 } 1376 1377 log.Printf("[DEBUG] relocate spec: %v", relocateSpec) 1378 1379 // network 1380 networkDevices := []types.BaseVirtualDeviceConfigSpec{} 1381 networkConfigs := []types.CustomizationAdapterMapping{} 1382 for _, network := range vm.networkInterfaces { 1383 // network device 1384 nd, err := buildNetworkDevice(finder, network.label, "vmxnet3") 1385 if err != nil { 1386 return err 1387 } 1388 networkDevices = append(networkDevices, nd) 1389 1390 // TODO: IPv6 support 1391 var ipSetting types.CustomizationIPSettings 1392 if network.ipv4Address == "" { 1393 ipSetting = types.CustomizationIPSettings{ 1394 Ip: &types.CustomizationDhcpIpGenerator{}, 1395 } 1396 } else { 1397 if network.ipv4PrefixLength == 0 { 1398 return fmt.Errorf("Error: ipv4_prefix_length argument is empty.") 1399 } 1400 m := net.CIDRMask(network.ipv4PrefixLength, 32) 1401 sm := net.IPv4(m[0], m[1], m[2], m[3]) 1402 subnetMask := sm.String() 1403 log.Printf("[DEBUG] gateway: %v", vm.gateway) 1404 log.Printf("[DEBUG] ipv4 address: %v", network.ipv4Address) 1405 log.Printf("[DEBUG] ipv4 prefix length: %v", network.ipv4PrefixLength) 1406 log.Printf("[DEBUG] ipv4 subnet mask: %v", subnetMask) 1407 ipSetting = types.CustomizationIPSettings{ 1408 Gateway: []string{ 1409 vm.gateway, 1410 }, 1411 Ip: &types.CustomizationFixedIp{ 1412 IpAddress: network.ipv4Address, 1413 }, 1414 SubnetMask: subnetMask, 1415 } 1416 } 1417 1418 // network config 1419 config := types.CustomizationAdapterMapping{ 1420 Adapter: ipSetting, 1421 } 1422 networkConfigs = append(networkConfigs, config) 1423 } 1424 log.Printf("[DEBUG] network configs: %v", networkConfigs[0].Adapter) 1425 1426 // make config spec 1427 configSpec := types.VirtualMachineConfigSpec{ 1428 NumCPUs: vm.vcpu, 1429 NumCoresPerSocket: 1, 1430 MemoryMB: vm.memoryMb, 1431 } 1432 1433 log.Printf("[DEBUG] virtual machine config spec: %v", configSpec) 1434 1435 log.Printf("[DEBUG] starting extra custom config spec: %v", vm.customConfigurations) 1436 1437 // make ExtraConfig 1438 if len(vm.customConfigurations) > 0 { 1439 var ov []types.BaseOptionValue 1440 for k, v := range vm.customConfigurations { 1441 key := k 1442 value := v 1443 o := types.OptionValue{ 1444 Key: key, 1445 Value: &value, 1446 } 1447 ov = append(ov, &o) 1448 } 1449 configSpec.ExtraConfig = ov 1450 log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig) 1451 } 1452 1453 var template_mo mo.VirtualMachine 1454 err = template.Properties(context.TODO(), template.Reference(), []string{"parent", "config.template", "config.guestId", "resourcePool", "snapshot", "guest.toolsVersionStatus2", "config.guestFullName"}, &template_mo) 1455 1456 var identity_options types.BaseCustomizationIdentitySettings 1457 if strings.HasPrefix(template_mo.Config.GuestId, "win") { 1458 var timeZone int 1459 if vm.timeZone == "Etc/UTC" { 1460 vm.timeZone = "085" 1461 } 1462 timeZone, err := strconv.Atoi(vm.timeZone) 1463 if err != nil { 1464 return fmt.Errorf("Error converting TimeZone: %s", err) 1465 } 1466 1467 guiUnattended := types.CustomizationGuiUnattended{ 1468 AutoLogon: false, 1469 AutoLogonCount: 1, 1470 TimeZone: timeZone, 1471 } 1472 1473 customIdentification := types.CustomizationIdentification{} 1474 1475 userData := types.CustomizationUserData{ 1476 ComputerName: &types.CustomizationFixedName{ 1477 Name: strings.Split(vm.name, ".")[0], 1478 }, 1479 ProductId: vm.windowsOptionalConfig.productKey, 1480 FullName: "terraform", 1481 OrgName: "terraform", 1482 } 1483 1484 if vm.windowsOptionalConfig.domainUserPassword != "" && vm.windowsOptionalConfig.domainUser != "" && vm.windowsOptionalConfig.domain != "" { 1485 customIdentification.DomainAdminPassword = &types.CustomizationPassword{ 1486 PlainText: true, 1487 Value: vm.windowsOptionalConfig.domainUserPassword, 1488 } 1489 customIdentification.DomainAdmin = vm.windowsOptionalConfig.domainUser 1490 customIdentification.JoinDomain = vm.windowsOptionalConfig.domain 1491 } 1492 1493 if vm.windowsOptionalConfig.adminPassword != "" { 1494 guiUnattended.Password = &types.CustomizationPassword{ 1495 PlainText: true, 1496 Value: vm.windowsOptionalConfig.adminPassword, 1497 } 1498 } 1499 1500 identity_options = &types.CustomizationSysprep{ 1501 GuiUnattended: guiUnattended, 1502 Identification: customIdentification, 1503 UserData: userData, 1504 } 1505 } else { 1506 identity_options = &types.CustomizationLinuxPrep{ 1507 HostName: &types.CustomizationFixedName{ 1508 Name: strings.Split(vm.name, ".")[0], 1509 }, 1510 Domain: vm.domain, 1511 TimeZone: vm.timeZone, 1512 HwClockUTC: types.NewBool(true), 1513 } 1514 } 1515 1516 // create CustomizationSpec 1517 customSpec := types.CustomizationSpec{ 1518 Identity: identity_options, 1519 GlobalIPSettings: types.CustomizationGlobalIPSettings{ 1520 DnsSuffixList: vm.dnsSuffixes, 1521 DnsServerList: vm.dnsServers, 1522 }, 1523 NicSettingMap: networkConfigs, 1524 } 1525 log.Printf("[DEBUG] custom spec: %v", customSpec) 1526 1527 // make vm clone spec 1528 cloneSpec := types.VirtualMachineCloneSpec{ 1529 Location: relocateSpec, 1530 Template: false, 1531 Config: &configSpec, 1532 PowerOn: false, 1533 } 1534 if vm.linkedClone { 1535 if err != nil { 1536 return fmt.Errorf("Error reading base VM properties: %s", err) 1537 } 1538 if template_mo.Snapshot == nil { 1539 return fmt.Errorf("`linkedClone=true`, but image VM has no snapshots") 1540 } 1541 cloneSpec.Snapshot = template_mo.Snapshot.CurrentSnapshot 1542 } 1543 log.Printf("[DEBUG] clone spec: %v", cloneSpec) 1544 1545 task, err := template.Clone(context.TODO(), folder, vm.name, cloneSpec) 1546 if err != nil { 1547 return err 1548 } 1549 1550 _, err = task.WaitForResult(context.TODO(), nil) 1551 if err != nil { 1552 return err 1553 } 1554 1555 newVM, err := finder.VirtualMachine(context.TODO(), vm.Path()) 1556 if err != nil { 1557 return err 1558 } 1559 log.Printf("[DEBUG] new vm: %v", newVM) 1560 1561 devices, err := newVM.Device(context.TODO()) 1562 if err != nil { 1563 log.Printf("[DEBUG] Template devices can't be found") 1564 return err 1565 } 1566 1567 for _, dvc := range devices { 1568 // Issue 3559/3560: Delete all ethernet devices to add the correct ones later 1569 if devices.Type(dvc) == "ethernet" { 1570 err := newVM.RemoveDevice(context.TODO(), dvc) 1571 if err != nil { 1572 return err 1573 } 1574 } 1575 } 1576 // Add Network devices 1577 for _, dvc := range networkDevices { 1578 err := newVM.AddDevice( 1579 context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device) 1580 if err != nil { 1581 return err 1582 } 1583 } 1584 1585 // Create the cdroms if needed. 1586 if err := createCdroms(newVM, vm.cdroms); err != nil { 1587 return err 1588 } 1589 1590 if vm.skipCustomization { 1591 log.Printf("[DEBUG] VM customization skipped") 1592 } else { 1593 log.Printf("[DEBUG] VM customization starting") 1594 taskb, err := newVM.Customize(context.TODO(), customSpec) 1595 if err != nil { 1596 return err 1597 } 1598 _, err = taskb.WaitForResult(context.TODO(), nil) 1599 if err != nil { 1600 return err 1601 } 1602 log.Printf("[DEBUG] VM customization finished") 1603 } 1604 1605 for i := 1; i < len(vm.hardDisks); i++ { 1606 err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, vm.hardDisks[i].vmdkPath) 1607 if err != nil { 1608 return err 1609 } 1610 } 1611 1612 log.Printf("[DEBUG] virtual machine config spec: %v", configSpec) 1613 1614 newVM.PowerOn(context.TODO()) 1615 1616 ip, err := newVM.WaitForIP(context.TODO()) 1617 if err != nil { 1618 return err 1619 } 1620 log.Printf("[DEBUG] ip address: %v", ip) 1621 1622 return nil 1623 }