github.com/danrjohnson/terraform@v0.7.0-rc2.0.20160627135212-d0fc1fa086ff/builtin/providers/vsphere/resource_vsphere_virtual_machine.go

     1  package vsphere
     2  
     3  import (
     4  	"fmt"
     5  	"log"
     6  	"net"
     7  	"strconv"
     8  	"strings"
     9  
    10  	"github.com/hashicorp/terraform/helper/schema"
    11  	"github.com/vmware/govmomi"
    12  	"github.com/vmware/govmomi/find"
    13  	"github.com/vmware/govmomi/object"
    14  	"github.com/vmware/govmomi/property"
    15  	"github.com/vmware/govmomi/vim25/mo"
    16  	"github.com/vmware/govmomi/vim25/types"
    17  	"golang.org/x/net/context"
    18  )
    19  
    20  var DefaultDNSSuffixes = []string{
    21  	"vsphere.local",
    22  }
    23  
    24  var DefaultDNSServers = []string{
    25  	"8.8.8.8",
    26  	"8.8.4.4",
    27  }
    28  
    29  type networkInterface struct {
    30  	deviceName       string
    31  	label            string
    32  	ipv4Address      string
    33  	ipv4PrefixLength int
    34  	ipv4Gateway      string
    35  	ipv6Address      string
    36  	ipv6PrefixLength int
    37  	ipv6Gateway      string
    38  	adapterType      string // TODO: Make "adapter_type" argument
    39  	macAddress       string
    40  }
    41  
    42  type hardDisk struct {
    43  	name       string
    44  	size       int64
    45  	iops       int64
    46  	initType   string
    47  	vmdkPath   string
    48  	controller string
    49  	bootable   bool
    50  }
    51  
     52  // windowsOptConfig holds additional options vSphere can use when cloning Windows machines.
    53  type windowsOptConfig struct {
    54  	productKey         string
    55  	adminPassword      string
    56  	domainUser         string
    57  	domain             string
    58  	domainUserPassword string
    59  }
    60  
    61  type cdrom struct {
    62  	datastore string
    63  	path      string
    64  }
    65  
    66  type memoryAllocation struct {
    67  	reservation int64
    68  }
    69  
    70  type virtualMachine struct {
    71  	name                  string
    72  	folder                string
    73  	datacenter            string
    74  	cluster               string
    75  	resourcePool          string
    76  	datastore             string
    77  	vcpu                  int32
    78  	memoryMb              int64
    79  	memoryAllocation      memoryAllocation
    80  	template              string
    81  	networkInterfaces     []networkInterface
    82  	hardDisks             []hardDisk
    83  	cdroms                []cdrom
    84  	domain                string
    85  	timeZone              string
    86  	dnsSuffixes           []string
    87  	dnsServers            []string
    88  	hasBootableVmdk       bool
    89  	linkedClone           bool
    90  	skipCustomization     bool
    91  	enableDiskUUID        bool
    92  	windowsOptionalConfig windowsOptConfig
    93  	customConfigurations  map[string](types.AnyType)
    94  }
    95  
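         // Path returns the VM's inventory path, prefixing the name with its folder when one is set.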
    96  func (v virtualMachine) Path() string {
    97  	return vmPath(v.folder, v.name)
    98  }
    99  
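         // vmPath joins an optional folder and the VM name into an inventory path.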
   100  func vmPath(folder string, name string) string {
   101  	var path string
   102  	if len(folder) > 0 {
   103  		path += folder + "/"
   104  	}
   105  	return path + name
   106  }
   107  
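         // resourceVSphereVirtualMachine defines the schema and CRUD functions for the
         // vsphere_virtual_machine resource.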
   108  func resourceVSphereVirtualMachine() *schema.Resource {
   109  	return &schema.Resource{
   110  		Create: resourceVSphereVirtualMachineCreate,
   111  		Read:   resourceVSphereVirtualMachineRead,
   112  		Update: resourceVSphereVirtualMachineUpdate,
   113  		Delete: resourceVSphereVirtualMachineDelete,
   114  
   115  		SchemaVersion: 1,
   116  		MigrateState:  resourceVSphereVirtualMachineMigrateState,
   117  
   118  		Schema: map[string]*schema.Schema{
   119  			"name": &schema.Schema{
   120  				Type:     schema.TypeString,
   121  				Required: true,
   122  				ForceNew: true,
   123  			},
   124  
   125  			"folder": &schema.Schema{
   126  				Type:     schema.TypeString,
   127  				Optional: true,
   128  				ForceNew: true,
   129  			},
   130  
   131  			"vcpu": &schema.Schema{
   132  				Type:     schema.TypeInt,
   133  				Required: true,
   134  			},
   135  
   136  			"memory": &schema.Schema{
   137  				Type:     schema.TypeInt,
   138  				Required: true,
   139  			},
   140  
   141  			"memory_reservation": &schema.Schema{
   142  				Type:     schema.TypeInt,
   143  				Optional: true,
   144  				Default:  0,
   145  				ForceNew: true,
   146  			},
   147  
   148  			"datacenter": &schema.Schema{
   149  				Type:     schema.TypeString,
   150  				Optional: true,
   151  				ForceNew: true,
   152  			},
   153  
   154  			"cluster": &schema.Schema{
   155  				Type:     schema.TypeString,
   156  				Optional: true,
   157  				ForceNew: true,
   158  			},
   159  
   160  			"resource_pool": &schema.Schema{
   161  				Type:     schema.TypeString,
   162  				Optional: true,
   163  				ForceNew: true,
   164  			},
   165  
   166  			"linked_clone": &schema.Schema{
   167  				Type:     schema.TypeBool,
   168  				Optional: true,
   169  				Default:  false,
   170  				ForceNew: true,
   171  			},
   172  			"gateway": &schema.Schema{
   173  				Type:       schema.TypeString,
   174  				Optional:   true,
   175  				ForceNew:   true,
   176  				Deprecated: "Please use network_interface.ipv4_gateway",
   177  			},
   178  
   179  			"domain": &schema.Schema{
   180  				Type:     schema.TypeString,
   181  				Optional: true,
   182  				ForceNew: true,
   183  				Default:  "vsphere.local",
   184  			},
   185  
   186  			"time_zone": &schema.Schema{
   187  				Type:     schema.TypeString,
   188  				Optional: true,
   189  				ForceNew: true,
   190  				Default:  "Etc/UTC",
   191  			},
   192  
   193  			"dns_suffixes": &schema.Schema{
   194  				Type:     schema.TypeList,
   195  				Optional: true,
   196  				Elem:     &schema.Schema{Type: schema.TypeString},
   197  				ForceNew: true,
   198  			},
   199  
   200  			"dns_servers": &schema.Schema{
   201  				Type:     schema.TypeList,
   202  				Optional: true,
   203  				Elem:     &schema.Schema{Type: schema.TypeString},
   204  				ForceNew: true,
   205  			},
   206  
   207  			"skip_customization": &schema.Schema{
   208  				Type:     schema.TypeBool,
   209  				Optional: true,
   210  				ForceNew: true,
   211  				Default:  false,
   212  			},
   213  
   214  			"enable_disk_uuid": &schema.Schema{
   215  				Type:     schema.TypeBool,
   216  				Optional: true,
   217  				ForceNew: true,
   218  				Default:  false,
   219  			},
   220  
   221  			"custom_configuration_parameters": &schema.Schema{
   222  				Type:     schema.TypeMap,
   223  				Optional: true,
   224  				ForceNew: true,
   225  			},
   226  
   227  			"windows_opt_config": &schema.Schema{
   228  				Type:     schema.TypeList,
   229  				Optional: true,
   230  				ForceNew: true,
   231  				Elem: &schema.Resource{
   232  					Schema: map[string]*schema.Schema{
   233  						"product_key": &schema.Schema{
   234  							Type:     schema.TypeString,
   235  							Required: true,
   236  							ForceNew: true,
   237  						},
   238  
   239  						"admin_password": &schema.Schema{
   240  							Type:     schema.TypeString,
   241  							Optional: true,
   242  							ForceNew: true,
   243  						},
   244  
   245  						"domain_user": &schema.Schema{
   246  							Type:     schema.TypeString,
   247  							Optional: true,
   248  							ForceNew: true,
   249  						},
   250  
   251  						"domain": &schema.Schema{
   252  							Type:     schema.TypeString,
   253  							Optional: true,
   254  							ForceNew: true,
   255  						},
   256  
   257  						"domain_user_password": &schema.Schema{
   258  							Type:     schema.TypeString,
   259  							Optional: true,
   260  							ForceNew: true,
   261  						},
   262  					},
   263  				},
   264  			},
   265  
   266  			"network_interface": &schema.Schema{
   267  				Type:     schema.TypeList,
   268  				Required: true,
   269  				ForceNew: true,
   270  				Elem: &schema.Resource{
   271  					Schema: map[string]*schema.Schema{
   272  						"label": &schema.Schema{
   273  							Type:     schema.TypeString,
   274  							Required: true,
   275  							ForceNew: true,
   276  						},
   277  
   278  						"ip_address": &schema.Schema{
   279  							Type:       schema.TypeString,
   280  							Optional:   true,
   281  							Computed:   true,
   282  							Deprecated: "Please use ipv4_address",
   283  						},
   284  
   285  						"subnet_mask": &schema.Schema{
   286  							Type:       schema.TypeString,
   287  							Optional:   true,
   288  							Computed:   true,
   289  							Deprecated: "Please use ipv4_prefix_length",
   290  						},
   291  
   292  						"ipv4_address": &schema.Schema{
   293  							Type:     schema.TypeString,
   294  							Optional: true,
   295  							Computed: true,
   296  						},
   297  
   298  						"ipv4_prefix_length": &schema.Schema{
   299  							Type:     schema.TypeInt,
   300  							Optional: true,
   301  							Computed: true,
   302  						},
   303  
   304  						"ipv4_gateway": &schema.Schema{
   305  							Type:     schema.TypeString,
   306  							Optional: true,
   307  							Computed: true,
   308  						},
   309  
   310  						"ipv6_address": &schema.Schema{
   311  							Type:     schema.TypeString,
   312  							Optional: true,
   313  							Computed: true,
   314  						},
   315  
   316  						"ipv6_prefix_length": &schema.Schema{
   317  							Type:     schema.TypeInt,
   318  							Optional: true,
   319  							Computed: true,
   320  						},
   321  
   322  						"ipv6_gateway": &schema.Schema{
   323  							Type:     schema.TypeString,
   324  							Optional: true,
   325  							Computed: true,
   326  						},
   327  
   328  						"adapter_type": &schema.Schema{
   329  							Type:     schema.TypeString,
   330  							Optional: true,
   331  							ForceNew: true,
   332  						},
   333  
   334  						"mac_address": &schema.Schema{
   335  							Type:     schema.TypeString,
   336  							Optional: true,
   337  							Computed: true,
   338  						},
   339  					},
   340  				},
   341  			},
   342  
   343  			"disk": &schema.Schema{
   344  				Type:     schema.TypeSet,
   345  				Required: true,
   346  				Elem: &schema.Resource{
   347  					Schema: map[string]*schema.Schema{
   348  						"uuid": &schema.Schema{
   349  							Type:     schema.TypeString,
   350  							Computed: true,
   351  						},
   352  
   353  						"key": &schema.Schema{
   354  							Type:     schema.TypeInt,
   355  							Computed: true,
   356  						},
   357  
   358  						"template": &schema.Schema{
   359  							Type:     schema.TypeString,
   360  							Optional: true,
   361  						},
   362  
   363  						"type": &schema.Schema{
   364  							Type:     schema.TypeString,
   365  							Optional: true,
   366  							Default:  "eager_zeroed",
   367  							ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
   368  								value := v.(string)
   369  								if value != "thin" && value != "eager_zeroed" {
   370  									errors = append(errors, fmt.Errorf(
   371  										"only 'thin' and 'eager_zeroed' are supported values for 'type'"))
   372  								}
   373  								return
   374  							},
   375  						},
   376  
   377  						"datastore": &schema.Schema{
   378  							Type:     schema.TypeString,
   379  							Optional: true,
   380  						},
   381  
   382  						"size": &schema.Schema{
   383  							Type:     schema.TypeInt,
   384  							Optional: true,
   385  						},
   386  
   387  						"name": &schema.Schema{
   388  							Type:     schema.TypeString,
   389  							Optional: true,
   390  						},
   391  
   392  						"iops": &schema.Schema{
   393  							Type:     schema.TypeInt,
   394  							Optional: true,
   395  						},
   396  
   397  						"vmdk": &schema.Schema{
   398  							// TODO: Add ValidateFunc to confirm path exists
   399  							Type:     schema.TypeString,
   400  							Optional: true,
   401  						},
   402  
   403  						"bootable": &schema.Schema{
   404  							Type:     schema.TypeBool,
   405  							Optional: true,
   406  						},
   407  
   408  						"keep_on_remove": &schema.Schema{
   409  							Type:     schema.TypeBool,
   410  							Optional: true,
   411  						},
   412  
   413  						"controller_type": &schema.Schema{
   414  							Type:     schema.TypeString,
   415  							Optional: true,
   416  							Default:  "scsi",
   417  							ForceNew: true,
   418  							ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
   419  								value := v.(string)
   420  								if value != "scsi" && value != "ide" {
   421  									errors = append(errors, fmt.Errorf(
   422  										"only 'scsi' and 'ide' are supported values for 'controller_type'"))
   423  								}
   424  								return
   425  							},
   426  						},
   427  					},
   428  				},
   429  			},
   430  
   431  			"cdrom": &schema.Schema{
   432  				Type:     schema.TypeList,
   433  				Optional: true,
   434  				ForceNew: true,
   435  				Elem: &schema.Resource{
   436  					Schema: map[string]*schema.Schema{
   437  						"datastore": &schema.Schema{
   438  							Type:     schema.TypeString,
   439  							Required: true,
   440  							ForceNew: true,
   441  						},
   442  
   443  						"path": &schema.Schema{
   444  							Type:     schema.TypeString,
   445  							Required: true,
   446  							ForceNew: true,
   447  						},
   448  					},
   449  				},
   450  			},
   451  		},
   452  	}
   453  }
   454  
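         // resourceVSphereVirtualMachineUpdate applies in-place changes: vcpu and memory
         // edits (which require the VM to be powered off) and disk additions/removals
         // computed from the diff of the "disk" set; most other arguments are ForceNew
         // and are handled by recreating the resource.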
   455  func resourceVSphereVirtualMachineUpdate(d *schema.ResourceData, meta interface{}) error {
   456  	// flag if changes have to be applied
   457  	hasChanges := false
   458  	// flag if changes have to be done when powered off
   459  	rebootRequired := false
   460  
   461  	// make config spec
   462  	configSpec := types.VirtualMachineConfigSpec{}
   463  
   464  	if d.HasChange("vcpu") {
   465  		configSpec.NumCPUs = int32(d.Get("vcpu").(int))
   466  		hasChanges = true
   467  		rebootRequired = true
   468  	}
   469  
   470  	if d.HasChange("memory") {
   471  		configSpec.MemoryMB = int64(d.Get("memory").(int))
   472  		hasChanges = true
   473  		rebootRequired = true
   474  	}
   475  
   476  	client := meta.(*govmomi.Client)
   477  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
   478  	if err != nil {
   479  		return err
   480  	}
   481  	finder := find.NewFinder(client.Client, true)
   482  	finder = finder.SetDatacenter(dc)
   483  
   484  	vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string)))
   485  	if err != nil {
   486  		return err
   487  	}
   488  
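         	// Diff the old and new disk sets: disks present only in the old set are
         	// detached (optionally keeping the backing file), disks present only in the
         	// new set are attached.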
   489  	if d.HasChange("disk") {
   490  		hasChanges = true
   491  		oldDisks, newDisks := d.GetChange("disk")
   492  		oldDiskSet := oldDisks.(*schema.Set)
   493  		newDiskSet := newDisks.(*schema.Set)
   494  
   495  		addedDisks := newDiskSet.Difference(oldDiskSet)
   496  		removedDisks := oldDiskSet.Difference(newDiskSet)
   497  
   498  		// Removed disks
   499  		for _, diskRaw := range removedDisks.List() {
   500  			if disk, ok := diskRaw.(map[string]interface{}); ok {
   501  				devices, err := vm.Device(context.TODO())
   502  				if err != nil {
   503  					return fmt.Errorf("[ERROR] Update Remove Disk - Could not get virtual device list: %v", err)
   504  				}
   505  				virtualDisk := devices.FindByKey(int32(disk["key"].(int)))
   506  
   507  				keep := false
    508  				if v, ok := disk["keep_on_remove"].(bool); ok {
    509  					keep = v
   510  				}
   511  
   512  				err = vm.RemoveDevice(context.TODO(), keep, virtualDisk)
   513  				if err != nil {
   514  					return fmt.Errorf("[ERROR] Update Remove Disk - Error removing disk: %v", err)
   515  				}
   516  			}
   517  		}
   518  		// Added disks
   519  		for _, diskRaw := range addedDisks.List() {
   520  			if disk, ok := diskRaw.(map[string]interface{}); ok {
   521  
   522  				var datastore *object.Datastore
   523  				if disk["datastore"] == "" {
   524  					datastore, err = finder.DefaultDatastore(context.TODO())
   525  					if err != nil {
    526  						return fmt.Errorf("[ERROR] Update Add Disk - Error finding default datastore: %v", err)
   527  					}
   528  				} else {
   529  					datastore, err = finder.Datastore(context.TODO(), disk["datastore"].(string))
   530  					if err != nil {
   531  						log.Printf("[ERROR] Couldn't find datastore %v.  %s", disk["datastore"].(string), err)
   532  						return err
   533  					}
   534  				}
   535  
   536  				var size int64
   537  				if disk["size"] == 0 {
   538  					size = 0
   539  				} else {
   540  					size = int64(disk["size"].(int))
   541  				}
   542  				iops := int64(disk["iops"].(int))
    543  				controller_type := disk["controller_type"].(string)
   544  
   545  				var mo mo.VirtualMachine
   546  				vm.Properties(context.TODO(), vm.Reference(), []string{"summary", "config"}, &mo)
   547  
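         				// Use an explicit vmdk path as-is; otherwise place the new disk in the
         				// VM's working directory, derived from its snapshot directory.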
   548  				var diskPath string
   549  				switch {
   550  				case disk["vmdk"] != "":
   551  					diskPath = disk["vmdk"].(string)
   552  				case disk["name"] != "":
   553  					snapshotFullDir := mo.Config.Files.SnapshotDirectory
   554  					split := strings.Split(snapshotFullDir, " ")
   555  					if len(split) != 2 {
    556  						return fmt.Errorf("[ERROR] resourceVSphereVirtualMachineUpdate - failed to split snapshot directory: %v", snapshotFullDir)
   557  					}
   558  					vmWorkingPath := split[1]
   559  					diskPath = vmWorkingPath + disk["name"].(string)
   560  				default:
   561  					return fmt.Errorf("[ERROR] resourceVSphereVirtualMachineUpdate - Neither vmdk path nor vmdk name was given")
   562  				}
   563  
   564  				log.Printf("[INFO] Attaching disk: %v", diskPath)
   565  				err = addHardDisk(vm, size, iops, "thin", datastore, diskPath, controller_type)
   566  				if err != nil {
   567  					log.Printf("[ERROR] Add Hard Disk Failed: %v", err)
   568  					return err
   569  				}
   570  			}
   571  			if err != nil {
   572  				return err
   573  			}
   574  		}
   575  	}
   576  
   577  	// do nothing if there are no changes
   578  	if !hasChanges {
   579  		return nil
   580  	}
   581  
   582  	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
   583  
   584  	if rebootRequired {
   585  		log.Printf("[INFO] Shutting down virtual machine: %s", d.Id())
   586  
   587  		task, err := vm.PowerOff(context.TODO())
   588  		if err != nil {
   589  			return err
   590  		}
   591  
   592  		err = task.Wait(context.TODO())
   593  		if err != nil {
   594  			return err
   595  		}
   596  	}
   597  
   598  	log.Printf("[INFO] Reconfiguring virtual machine: %s", d.Id())
   599  
    600  	task, err := vm.Reconfigure(context.TODO(), configSpec)
    601  	if err != nil {
    602  		return fmt.Errorf("[ERROR] Reconfigure of virtual machine failed: %v", err)
    603  	}
    604  
    605  	err = task.Wait(context.TODO())
    606  	if err != nil {
    607  		log.Printf("[ERROR] %s", err)
    608  	}
   609  
   610  	if rebootRequired {
   611  		task, err = vm.PowerOn(context.TODO())
   612  		if err != nil {
   613  			return err
   614  		}
   615  
   616  		err = task.Wait(context.TODO())
   617  		if err != nil {
   618  			log.Printf("[ERROR] %s", err)
   619  		}
   620  	}
   621  
   622  	ip, err := vm.WaitForIP(context.TODO())
   623  	if err != nil {
   624  		return err
   625  	}
   626  	log.Printf("[DEBUG] ip address: %v", ip)
   627  
   628  	return resourceVSphereVirtualMachineRead(d, meta)
   629  }
   630  
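         // resourceVSphereVirtualMachineCreate translates the resource arguments into a
         // virtualMachine struct (networks, disks, cdroms, customization options) and
         // delegates provisioning to setupVirtualMachine.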
   631  func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error {
   632  	client := meta.(*govmomi.Client)
   633  
   634  	vm := virtualMachine{
   635  		name:     d.Get("name").(string),
   636  		vcpu:     int32(d.Get("vcpu").(int)),
   637  		memoryMb: int64(d.Get("memory").(int)),
   638  		memoryAllocation: memoryAllocation{
   639  			reservation: int64(d.Get("memory_reservation").(int)),
   640  		},
   641  	}
   642  
   643  	if v, ok := d.GetOk("folder"); ok {
   644  		vm.folder = v.(string)
   645  	}
   646  
   647  	if v, ok := d.GetOk("datacenter"); ok {
   648  		vm.datacenter = v.(string)
   649  	}
   650  
   651  	if v, ok := d.GetOk("cluster"); ok {
   652  		vm.cluster = v.(string)
   653  	}
   654  
   655  	if v, ok := d.GetOk("resource_pool"); ok {
   656  		vm.resourcePool = v.(string)
   657  	}
   658  
   659  	if v, ok := d.GetOk("domain"); ok {
   660  		vm.domain = v.(string)
   661  	}
   662  
   663  	if v, ok := d.GetOk("time_zone"); ok {
   664  		vm.timeZone = v.(string)
   665  	}
   666  
   667  	if v, ok := d.GetOk("linked_clone"); ok {
   668  		vm.linkedClone = v.(bool)
   669  	}
   670  
   671  	if v, ok := d.GetOk("skip_customization"); ok {
   672  		vm.skipCustomization = v.(bool)
   673  	}
   674  
   675  	if v, ok := d.GetOk("enable_disk_uuid"); ok {
   676  		vm.enableDiskUUID = v.(bool)
   677  	}
   678  
   679  	if raw, ok := d.GetOk("dns_suffixes"); ok {
   680  		for _, v := range raw.([]interface{}) {
   681  			vm.dnsSuffixes = append(vm.dnsSuffixes, v.(string))
   682  		}
   683  	} else {
   684  		vm.dnsSuffixes = DefaultDNSSuffixes
   685  	}
   686  
   687  	if raw, ok := d.GetOk("dns_servers"); ok {
   688  		for _, v := range raw.([]interface{}) {
   689  			vm.dnsServers = append(vm.dnsServers, v.(string))
   690  		}
   691  	} else {
   692  		vm.dnsServers = DefaultDNSServers
   693  	}
   694  
   695  	if vL, ok := d.GetOk("custom_configuration_parameters"); ok {
   696  		if custom_configs, ok := vL.(map[string]interface{}); ok {
   697  			custom := make(map[string]types.AnyType)
   698  			for k, v := range custom_configs {
   699  				custom[k] = v
   700  			}
   701  			vm.customConfigurations = custom
   702  			log.Printf("[DEBUG] custom_configuration_parameters init: %v", vm.customConfigurations)
   703  		}
   704  	}
   705  
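         	// Build the network interface list, mapping the deprecated ip_address and
         	// subnet_mask arguments onto their ipv4_* equivalents.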
   706  	if vL, ok := d.GetOk("network_interface"); ok {
   707  		networks := make([]networkInterface, len(vL.([]interface{})))
   708  		for i, v := range vL.([]interface{}) {
   709  			network := v.(map[string]interface{})
   710  			networks[i].label = network["label"].(string)
   711  			if v, ok := network["ip_address"].(string); ok && v != "" {
   712  				networks[i].ipv4Address = v
   713  			}
   714  			if v, ok := d.GetOk("gateway"); ok {
   715  				networks[i].ipv4Gateway = v.(string)
   716  			}
   717  			if v, ok := network["subnet_mask"].(string); ok && v != "" {
   718  				ip := net.ParseIP(v).To4()
   719  				if ip != nil {
   720  					mask := net.IPv4Mask(ip[0], ip[1], ip[2], ip[3])
   721  					pl, _ := mask.Size()
   722  					networks[i].ipv4PrefixLength = pl
   723  				} else {
   724  					return fmt.Errorf("subnet_mask parameter is invalid.")
   725  				}
   726  			}
   727  			if v, ok := network["ipv4_address"].(string); ok && v != "" {
   728  				networks[i].ipv4Address = v
   729  			}
   730  			if v, ok := network["ipv4_prefix_length"].(int); ok && v != 0 {
   731  				networks[i].ipv4PrefixLength = v
   732  			}
   733  			if v, ok := network["ipv4_gateway"].(string); ok && v != "" {
   734  				networks[i].ipv4Gateway = v
   735  			}
   736  			if v, ok := network["ipv6_address"].(string); ok && v != "" {
   737  				networks[i].ipv6Address = v
   738  			}
   739  			if v, ok := network["ipv6_prefix_length"].(int); ok && v != 0 {
   740  				networks[i].ipv6PrefixLength = v
   741  			}
   742  			if v, ok := network["ipv6_gateway"].(string); ok && v != "" {
   743  				networks[i].ipv6Gateway = v
   744  			}
   745  			if v, ok := network["mac_address"].(string); ok && v != "" {
   746  				networks[i].macAddress = v
   747  			}
   748  		}
   749  		vm.networkInterfaces = networks
   750  		log.Printf("[DEBUG] network_interface init: %v", networks)
   751  	}
   752  
   753  	if vL, ok := d.GetOk("windows_opt_config"); ok {
   754  		var winOpt windowsOptConfig
   755  		custom_configs := (vL.([]interface{}))[0].(map[string]interface{})
   756  		if v, ok := custom_configs["admin_password"].(string); ok && v != "" {
   757  			winOpt.adminPassword = v
   758  		}
   759  		if v, ok := custom_configs["domain"].(string); ok && v != "" {
   760  			winOpt.domain = v
   761  		}
   762  		if v, ok := custom_configs["domain_user"].(string); ok && v != "" {
   763  			winOpt.domainUser = v
   764  		}
   765  		if v, ok := custom_configs["product_key"].(string); ok && v != "" {
   766  			winOpt.productKey = v
   767  		}
   768  		if v, ok := custom_configs["domain_user_password"].(string); ok && v != "" {
   769  			winOpt.domainUserPassword = v
   770  		}
   771  		vm.windowsOptionalConfig = winOpt
   772  		log.Printf("[DEBUG] windows config init: %v", winOpt)
   773  	}
   774  
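         	// Build the disk list. Each entry is either a template, a new disk (name and
         	// size), or an existing vmdk; the bootable or template disk is moved to the
         	// front of the slice so it is attached first.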
   775  	if vL, ok := d.GetOk("disk"); ok {
   776  		if diskSet, ok := vL.(*schema.Set); ok {
   777  
   778  			disks := []hardDisk{}
   779  			hasBootableDisk := false
   780  			for _, value := range diskSet.List() {
   781  				disk := value.(map[string]interface{})
   782  				newDisk := hardDisk{}
   783  
   784  				if v, ok := disk["template"].(string); ok && v != "" {
   785  					if v, ok := disk["name"].(string); ok && v != "" {
   786  						return fmt.Errorf("Cannot specify name of a template")
   787  					}
   788  					vm.template = v
   789  					if hasBootableDisk {
   790  						return fmt.Errorf("[ERROR] Only one bootable disk or template may be given")
   791  					}
   792  					hasBootableDisk = true
   793  				}
   794  
   795  				if v, ok := disk["type"].(string); ok && v != "" {
   796  					newDisk.initType = v
   797  				}
   798  
   799  				if v, ok := disk["datastore"].(string); ok && v != "" {
   800  					vm.datastore = v
   801  				}
   802  
   803  				if v, ok := disk["size"].(int); ok && v != 0 {
   804  					if v, ok := disk["template"].(string); ok && v != "" {
   805  						return fmt.Errorf("Cannot specify size of a template")
   806  					}
   807  
   808  					if v, ok := disk["name"].(string); ok && v != "" {
   809  						newDisk.name = v
   810  					} else {
   811  						return fmt.Errorf("[ERROR] Disk name must be provided when creating a new disk")
   812  					}
   813  
   814  					newDisk.size = int64(v)
   815  				}
   816  
   817  				if v, ok := disk["iops"].(int); ok && v != 0 {
   818  					newDisk.iops = int64(v)
   819  				}
   820  
   821  				if v, ok := disk["controller_type"].(string); ok && v != "" {
   822  					newDisk.controller = v
   823  				}
   824  
   825  				if vVmdk, ok := disk["vmdk"].(string); ok && vVmdk != "" {
   826  					if v, ok := disk["template"].(string); ok && v != "" {
   827  						return fmt.Errorf("Cannot specify a vmdk for a template")
   828  					}
    829  					if v, ok := disk["size"].(int); ok && v != 0 {
   830  						return fmt.Errorf("Cannot specify size of a vmdk")
   831  					}
   832  					if v, ok := disk["name"].(string); ok && v != "" {
   833  						return fmt.Errorf("Cannot specify name of a vmdk")
   834  					}
   835  					if vBootable, ok := disk["bootable"].(bool); ok {
   836  						hasBootableDisk = true
   837  						newDisk.bootable = vBootable
   838  						vm.hasBootableVmdk = vBootable
   839  					}
   840  					newDisk.vmdkPath = vVmdk
   841  				}
   842  				// Preserves order so bootable disk is first
    843  				if newDisk.bootable || disk["template"] != "" {
   844  					disks = append([]hardDisk{newDisk}, disks...)
   845  				} else {
   846  					disks = append(disks, newDisk)
   847  				}
   848  			}
   849  			vm.hardDisks = disks
   850  			log.Printf("[DEBUG] disk init: %v", disks)
   851  		}
   852  	}
   853  
   854  	if vL, ok := d.GetOk("cdrom"); ok {
   855  		cdroms := make([]cdrom, len(vL.([]interface{})))
   856  		for i, v := range vL.([]interface{}) {
   857  			c := v.(map[string]interface{})
   858  			if v, ok := c["datastore"].(string); ok && v != "" {
   859  				cdroms[i].datastore = v
   860  			} else {
   861  				return fmt.Errorf("Datastore argument must be specified when attaching a cdrom image.")
   862  			}
   863  			if v, ok := c["path"].(string); ok && v != "" {
   864  				cdroms[i].path = v
   865  			} else {
   866  				return fmt.Errorf("Path argument must be specified when attaching a cdrom image.")
   867  			}
   868  		}
   869  		vm.cdroms = cdroms
   870  		log.Printf("[DEBUG] cdrom init: %v", cdroms)
   871  	}
   872  
   873  	err := vm.setupVirtualMachine(client)
   874  	if err != nil {
   875  		return err
   876  	}
   877  
   878  	d.SetId(vm.Path())
   879  	log.Printf("[INFO] Created virtual machine: %s", d.Id())
   880  
   881  	return resourceVSphereVirtualMachineRead(d, meta)
   882  }
   883  
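         // resourceVSphereVirtualMachineRead refreshes state from vSphere: it waits for
         // guest networking, then maps the VM's virtual disks and network interfaces
         // (including gateways read from the guest routing table) back into state.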
   884  func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{}) error {
   885  	log.Printf("[DEBUG] virtual machine resource data: %#v", d)
   886  	client := meta.(*govmomi.Client)
   887  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
   888  	if err != nil {
   889  		return err
   890  	}
   891  	finder := find.NewFinder(client.Client, true)
   892  	finder = finder.SetDatacenter(dc)
   893  
   894  	vm, err := finder.VirtualMachine(context.TODO(), d.Id())
   895  	if err != nil {
   896  		d.SetId("")
   897  		return nil
   898  	}
   899  
   900  	var mvm mo.VirtualMachine
   901  
   902  	// wait for interfaces to appear
   903  	_, err = vm.WaitForNetIP(context.TODO(), true)
   904  	if err != nil {
   905  		return err
   906  	}
   907  
   908  	collector := property.DefaultCollector(client.Client)
   909  	if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"guest", "summary", "datastore", "config"}, &mvm); err != nil {
   910  		return err
   911  	}
   912  
   913  	log.Printf("[DEBUG] Datacenter - %#v", dc)
   914  	log.Printf("[DEBUG] mvm.Summary.Config - %#v", mvm.Summary.Config)
    915  	log.Printf("[DEBUG] mvm.Config - %#v", mvm.Config)
   916  	log.Printf("[DEBUG] mvm.Guest.Net - %#v", mvm.Guest.Net)
   917  
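         	// Walk the VM's hardware and match each VirtualDisk back to an entry in the
         	// prior "disk" set (by template, by name for new disks, or by vmdk path) so
         	// its key and uuid can be recorded in state.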
   918  	disks := make([]map[string]interface{}, 0)
   919  	templateDisk := make(map[string]interface{}, 1)
   920  	for _, device := range mvm.Config.Hardware.Device {
   921  		if vd, ok := device.(*types.VirtualDisk); ok {
   922  
   923  			virtualDevice := vd.GetVirtualDevice()
   924  
   925  			backingInfo := virtualDevice.Backing
   926  			var diskFullPath string
   927  			var diskUuid string
   928  			if v, ok := backingInfo.(*types.VirtualDiskFlatVer2BackingInfo); ok {
   929  				diskFullPath = v.FileName
   930  				diskUuid = v.Uuid
   931  			} else if v, ok := backingInfo.(*types.VirtualDiskSparseVer2BackingInfo); ok {
   932  				diskFullPath = v.FileName
   933  				diskUuid = v.Uuid
   934  			}
   935  			log.Printf("[DEBUG] resourceVSphereVirtualMachineRead - Analyzing disk: %v", diskFullPath)
   936  
   937  			// Separate datastore and path
   938  			diskFullPathSplit := strings.Split(diskFullPath, " ")
   939  			if len(diskFullPathSplit) != 2 {
   940  				return fmt.Errorf("[ERROR] Failed trying to parse disk path: %v", diskFullPath)
   941  			}
   942  			diskPath := diskFullPathSplit[1]
   943  			// Isolate filename
   944  			diskNameSplit := strings.Split(diskPath, "/")
   945  			diskName := diskNameSplit[len(diskNameSplit)-1]
   946  			// Remove possible extension
   947  			diskName = strings.Split(diskName, ".")[0]
   948  
   949  			if prevDisks, ok := d.GetOk("disk"); ok {
   950  				if prevDiskSet, ok := prevDisks.(*schema.Set); ok {
   951  					for _, v := range prevDiskSet.List() {
   952  						prevDisk := v.(map[string]interface{})
   953  
   954  						// We're guaranteed only one template disk.  Passing value directly through since templates should be immutable
   955  						if prevDisk["template"] != "" {
   956  							if len(templateDisk) == 0 {
   957  								templateDisk = prevDisk
   958  								disks = append(disks, templateDisk)
   959  								break
   960  							}
   961  						}
   962  
   963  						// It is enforced that prevDisk["name"] should only be set in the case
   964  						// of creating a new disk for the user.
   965  						// size case:  name was set by user, compare parsed filename from mo.filename (without path or .vmdk extension) with name
   966  						// vmdk case:  compare prevDisk["vmdk"] and mo.Filename
   967  						if diskName == prevDisk["name"] || diskPath == prevDisk["vmdk"] {
   968  
   969  							prevDisk["key"] = virtualDevice.Key
   970  							prevDisk["uuid"] = diskUuid
   971  
   972  							disks = append(disks, prevDisk)
   973  							break
   974  						}
   975  					}
   976  				}
   977  			}
   978  			log.Printf("[DEBUG] disks: %#v", disks)
   979  		}
   980  	}
   981  	err = d.Set("disk", disks)
   982  	if err != nil {
   983  		return fmt.Errorf("Invalid disks to set: %#v", disks)
   984  	}
   985  
   986  	networkInterfaces := make([]map[string]interface{}, 0)
   987  	for _, v := range mvm.Guest.Net {
   988  		if v.DeviceConfigId >= 0 {
   989  			log.Printf("[DEBUG] v.Network - %#v", v.Network)
   990  			networkInterface := make(map[string]interface{})
   991  			networkInterface["label"] = v.Network
   992  			networkInterface["mac_address"] = v.MacAddress
   993  			for _, ip := range v.IpConfig.IpAddress {
   994  				p := net.ParseIP(ip.IpAddress)
   995  				if p.To4() != nil {
   996  					log.Printf("[DEBUG] p.String - %#v", p.String())
   997  					log.Printf("[DEBUG] ip.PrefixLength - %#v", ip.PrefixLength)
   998  					networkInterface["ipv4_address"] = p.String()
   999  					networkInterface["ipv4_prefix_length"] = ip.PrefixLength
  1000  				} else if p.To16() != nil {
  1001  					log.Printf("[DEBUG] p.String - %#v", p.String())
  1002  					log.Printf("[DEBUG] ip.PrefixLength - %#v", ip.PrefixLength)
  1003  					networkInterface["ipv6_address"] = p.String()
  1004  					networkInterface["ipv6_prefix_length"] = ip.PrefixLength
  1005  				}
  1006  				log.Printf("[DEBUG] networkInterface: %#v", networkInterface)
  1007  			}
  1008  			log.Printf("[DEBUG] networkInterface: %#v", networkInterface)
  1009  			networkInterfaces = append(networkInterfaces, networkInterface)
  1010  		}
  1011  	}
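         	// Map default routes from the guest routing table (0.0.0.0 for IPv4, :: for
         	// IPv6) onto the gateway attributes of the interface they belong to.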
  1012  	if mvm.Guest.IpStack != nil {
  1013  		for _, v := range mvm.Guest.IpStack {
  1014  			if v.IpRouteConfig != nil && v.IpRouteConfig.IpRoute != nil {
  1015  				for _, route := range v.IpRouteConfig.IpRoute {
  1016  					if route.Gateway.Device != "" {
  1017  						gatewaySetting := ""
  1018  						if route.Network == "::" {
  1019  							gatewaySetting = "ipv6_gateway"
  1020  						} else if route.Network == "0.0.0.0" {
  1021  							gatewaySetting = "ipv4_gateway"
  1022  						}
  1023  						if gatewaySetting != "" {
  1024  							deviceID, err := strconv.Atoi(route.Gateway.Device)
  1025  							if err != nil {
  1026  								log.Printf("[WARN] error at processing %s of device id %#v: %#v", gatewaySetting, route.Gateway.Device, err)
  1027  							} else {
  1028  								log.Printf("[DEBUG] %s of device id %d: %s", gatewaySetting, deviceID, route.Gateway.IpAddress)
  1029  								networkInterfaces[deviceID][gatewaySetting] = route.Gateway.IpAddress
  1030  							}
  1031  						}
  1032  					}
  1033  				}
  1034  			}
  1035  		}
  1036  	}
  1037  	log.Printf("[DEBUG] networkInterfaces: %#v", networkInterfaces)
  1038  	err = d.Set("network_interface", networkInterfaces)
  1039  	if err != nil {
  1040  		return fmt.Errorf("Invalid network interfaces to set: %#v", networkInterfaces)
  1041  	}
  1042  
  1043  	log.Printf("[DEBUG] ip address: %v", networkInterfaces[0]["ipv4_address"].(string))
  1044  	d.SetConnInfo(map[string]string{
  1045  		"type": "ssh",
  1046  		"host": networkInterfaces[0]["ipv4_address"].(string),
  1047  	})
  1048  
  1049  	var rootDatastore string
  1050  	for _, v := range mvm.Datastore {
  1051  		var md mo.Datastore
  1052  		if err := collector.RetrieveOne(context.TODO(), v, []string{"name", "parent"}, &md); err != nil {
  1053  			return err
  1054  		}
  1055  		if md.Parent.Type == "StoragePod" {
  1056  			var msp mo.StoragePod
  1057  			if err := collector.RetrieveOne(context.TODO(), *md.Parent, []string{"name"}, &msp); err != nil {
  1058  				return err
  1059  			}
  1060  			rootDatastore = msp.Name
  1061  			log.Printf("[DEBUG] %#v", msp.Name)
  1062  		} else {
  1063  			rootDatastore = md.Name
  1064  			log.Printf("[DEBUG] %#v", md.Name)
  1065  		}
  1066  		break
  1067  	}
  1068  
  1069  	d.Set("datacenter", dc)
  1070  	d.Set("memory", mvm.Summary.Config.MemorySizeMB)
  1071  	d.Set("memory_reservation", mvm.Summary.Config.MemoryReservation)
   1072  	d.Set("vcpu", mvm.Summary.Config.NumCpu)
  1073  	d.Set("datastore", rootDatastore)
  1074  
  1075  	return nil
  1076  }
  1077  
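         // resourceVSphereVirtualMachineDelete powers the VM off if necessary, destroys
         // it, and clears the resource ID.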
  1078  func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error {
  1079  	client := meta.(*govmomi.Client)
  1080  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
  1081  	if err != nil {
  1082  		return err
  1083  	}
  1084  	finder := find.NewFinder(client.Client, true)
  1085  	finder = finder.SetDatacenter(dc)
  1086  
  1087  	vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string)))
  1088  	if err != nil {
  1089  		return err
  1090  	}
  1091  
  1092  	log.Printf("[INFO] Deleting virtual machine: %s", d.Id())
  1093  	state, err := vm.PowerState(context.TODO())
  1094  	if err != nil {
  1095  		return err
  1096  	}
  1097  
  1098  	if state == types.VirtualMachinePowerStatePoweredOn {
  1099  		task, err := vm.PowerOff(context.TODO())
  1100  		if err != nil {
  1101  			return err
  1102  		}
  1103  
  1104  		err = task.Wait(context.TODO())
  1105  		if err != nil {
  1106  			return err
  1107  		}
  1108  	}
  1109  
  1110  	task, err := vm.Destroy(context.TODO())
  1111  	if err != nil {
  1112  		return err
  1113  	}
  1114  
  1115  	err = task.Wait(context.TODO())
  1116  	if err != nil {
  1117  		return err
  1118  	}
  1119  
  1120  	d.SetId("")
  1121  	return nil
  1122  }
  1123  
  1124  // addHardDisk adds a new Hard Disk to the VirtualMachine.
  1125  func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string, datastore *object.Datastore, diskPath string, controller_type string) error {
  1126  	devices, err := vm.Device(context.TODO())
  1127  	if err != nil {
  1128  		return err
  1129  	}
  1130  	log.Printf("[DEBUG] vm devices: %#v\n", devices)
  1131  
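         	// Reuse an existing controller of the requested type; if none is present,
         	// create a SCSI or IDE controller and look it up again.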
  1132  	var controller types.BaseVirtualController
  1133  	controller, err = devices.FindDiskController(controller_type)
  1134  	if err != nil {
   1135  		log.Printf("[DEBUG] Couldn't find a %v controller. Creating one...", controller_type)
  1136  
  1137  		var c types.BaseVirtualDevice
  1138  		switch controller_type {
  1139  		case "scsi":
  1140  			// Create scsi controller
  1141  			c, err = devices.CreateSCSIController("scsi")
  1142  			if err != nil {
  1143  				return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err)
  1144  			}
  1145  		case "ide":
  1146  			// Create ide controller
  1147  			c, err = devices.CreateIDEController()
  1148  			if err != nil {
  1149  				return fmt.Errorf("[ERROR] Failed creating IDE controller: %v", err)
  1150  			}
  1151  		default:
  1152  			return fmt.Errorf("[ERROR] Unsupported disk controller provided: %v", controller_type)
  1153  		}
  1154  
  1155  		vm.AddDevice(context.TODO(), c)
  1156  		controller, err = devices.FindDiskController(controller_type)
  1157  		if err != nil {
  1158  			return fmt.Errorf("[ERROR] Could not find the controller we just created")
  1159  		}
  1160  	}
  1161  
  1162  	log.Printf("[DEBUG] disk controller: %#v\n", controller)
  1163  
   1164  	// TODO Check if diskPath & datastore exist
   1165  	// A non-empty diskPath is required; prefix it with the datastore in vSphere's "[datastore] path" notation.
   1166  	if diskPath == "" {
   1167  		return fmt.Errorf("[ERROR] addHardDisk - No path provided")
  1168  	} else {
  1169  		// TODO Check if diskPath & datastore exist
  1170  		diskPath = fmt.Sprintf("[%v] %v", datastore.Name(), diskPath)
  1171  	}
  1172  	log.Printf("[DEBUG] addHardDisk - diskPath: %v", diskPath)
  1173  	disk := devices.CreateDisk(controller, datastore.Reference(), diskPath)
  1174  
  1175  	existing := devices.SelectByBackingInfo(disk.Backing)
  1176  	log.Printf("[DEBUG] disk: %#v\n", disk)
  1177  
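         	// Only create a new disk when no existing device already uses this backing
         	// file. The size argument is treated as GiB here, hence the conversion to
         	// KiB for CapacityInKB.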
  1178  	if len(existing) == 0 {
  1179  		disk.CapacityInKB = int64(size * 1024 * 1024)
  1180  		if iops != 0 {
  1181  			disk.StorageIOAllocation = &types.StorageIOAllocationInfo{
  1182  				Limit: iops,
  1183  			}
  1184  		}
  1185  		backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
  1186  
  1187  		if diskType == "eager_zeroed" {
  1188  			// eager zeroed thick virtual disk
  1189  			backing.ThinProvisioned = types.NewBool(false)
  1190  			backing.EagerlyScrub = types.NewBool(true)
  1191  		} else if diskType == "thin" {
  1192  			// thin provisioned virtual disk
  1193  			backing.ThinProvisioned = types.NewBool(true)
  1194  		}
  1195  
  1196  		log.Printf("[DEBUG] addHardDisk: %#v\n", disk)
  1197  		log.Printf("[DEBUG] addHardDisk capacity: %#v\n", disk.CapacityInKB)
  1198  
  1199  		return vm.AddDevice(context.TODO(), disk)
  1200  	} else {
  1201  		log.Printf("[DEBUG] addHardDisk: Disk already present.\n")
  1202  
  1203  		return nil
  1204  	}
  1205  }
  1206  
  1207  // addCdrom adds a new virtual cdrom drive to the VirtualMachine and attaches an image (ISO) to it from a datastore path.
  1208  func addCdrom(vm *object.VirtualMachine, datastore, path string) error {
  1209  	devices, err := vm.Device(context.TODO())
  1210  	if err != nil {
  1211  		return err
  1212  	}
  1213  	log.Printf("[DEBUG] vm devices: %#v", devices)
  1214  
  1215  	var controller *types.VirtualIDEController
  1216  	controller, err = devices.FindIDEController("")
  1217  	if err != nil {
   1218  		log.Printf("[DEBUG] Couldn't find an IDE controller. Creating one...")
  1219  
  1220  		var c types.BaseVirtualDevice
  1221  		c, err := devices.CreateIDEController()
  1222  		if err != nil {
  1223  			return fmt.Errorf("[ERROR] Failed creating IDE controller: %v", err)
  1224  		}
  1225  
  1226  		if v, ok := c.(*types.VirtualIDEController); ok {
  1227  			controller = v
  1228  		} else {
  1229  			return fmt.Errorf("[ERROR] Controller type could not be asserted")
  1230  		}
  1231  		vm.AddDevice(context.TODO(), c)
  1232  		controller, err = devices.FindIDEController("")
  1233  		if err != nil {
  1234  			return fmt.Errorf("[ERROR] Could not find the controller we just created")
  1235  		}
  1236  	}
  1237  	log.Printf("[DEBUG] ide controller: %#v", controller)
  1238  
  1239  	c, err := devices.CreateCdrom(controller)
  1240  	if err != nil {
  1241  		return err
  1242  	}
  1243  
  1244  	c = devices.InsertIso(c, fmt.Sprintf("[%s] %s", datastore, path))
  1245  	log.Printf("[DEBUG] addCdrom: %#v", c)
  1246  
  1247  	return vm.AddDevice(context.TODO(), c)
  1248  }
  1249  
  1250  // buildNetworkDevice builds VirtualDeviceConfigSpec for Network Device.
  1251  func buildNetworkDevice(f *find.Finder, label, adapterType string, macAddress string) (*types.VirtualDeviceConfigSpec, error) {
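         	// The label is prefixed with a wildcard so the finder matches the network
         	// regardless of where it sits in the inventory path.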
  1252  	network, err := f.Network(context.TODO(), "*"+label)
  1253  	if err != nil {
  1254  		return nil, err
  1255  	}
  1256  
  1257  	backing, err := network.EthernetCardBackingInfo(context.TODO())
  1258  	if err != nil {
  1259  		return nil, err
  1260  	}
  1261  
  1262  	var address_type string
  1263  	if macAddress == "" {
  1264  		address_type = string(types.VirtualEthernetCardMacTypeGenerated)
  1265  	} else {
  1266  		address_type = string(types.VirtualEthernetCardMacTypeManual)
  1267  	}
  1268  
  1269  	if adapterType == "vmxnet3" {
  1270  		return &types.VirtualDeviceConfigSpec{
  1271  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
  1272  			Device: &types.VirtualVmxnet3{
  1273  				VirtualVmxnet: types.VirtualVmxnet{
  1274  					VirtualEthernetCard: types.VirtualEthernetCard{
  1275  						VirtualDevice: types.VirtualDevice{
  1276  							Key:     -1,
  1277  							Backing: backing,
  1278  						},
  1279  						AddressType: address_type,
  1280  						MacAddress:  macAddress,
  1281  					},
  1282  				},
  1283  			},
  1284  		}, nil
  1285  	} else if adapterType == "e1000" {
  1286  		return &types.VirtualDeviceConfigSpec{
  1287  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
  1288  			Device: &types.VirtualE1000{
  1289  				VirtualEthernetCard: types.VirtualEthernetCard{
  1290  					VirtualDevice: types.VirtualDevice{
  1291  						Key:     -1,
  1292  						Backing: backing,
  1293  					},
  1294  					AddressType: address_type,
  1295  					MacAddress:  macAddress,
  1296  				},
  1297  			},
  1298  		}, nil
  1299  	} else {
   1300  		return nil, fmt.Errorf("Invalid network adapter type: %s", adapterType)
  1301  	}
  1302  }
  1303  
  1304  // buildVMRelocateSpec builds VirtualMachineRelocateSpec to set a place for a new VirtualMachine.
  1305  func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine, linkedClone bool, initType string) (types.VirtualMachineRelocateSpec, error) {
  1306  	var key int32
  1307  	var moveType string
  1308  	if linkedClone {
  1309  		moveType = "createNewChildDiskBacking"
  1310  	} else {
  1311  		moveType = "moveAllDiskBackingsAndDisallowSharing"
  1312  	}
  1313  	log.Printf("[DEBUG] relocate type: [%s]", moveType)
  1314  
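         	// Record the key of the source VM's virtual disk so the disk locator below
         	// targets it; if several disks exist, the last one found wins.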
  1315  	devices, err := vm.Device(context.TODO())
  1316  	if err != nil {
  1317  		return types.VirtualMachineRelocateSpec{}, err
  1318  	}
  1319  	for _, d := range devices {
  1320  		if devices.Type(d) == "disk" {
  1321  			key = int32(d.GetVirtualDevice().Key)
  1322  		}
  1323  	}
  1324  
  1325  	isThin := initType == "thin"
  1326  	rpr := rp.Reference()
  1327  	dsr := ds.Reference()
  1328  	return types.VirtualMachineRelocateSpec{
  1329  		Datastore:    &dsr,
  1330  		Pool:         &rpr,
  1331  		DiskMoveType: moveType,
  1332  		Disk: []types.VirtualMachineRelocateSpecDiskLocator{
  1333  			{
  1334  				Datastore: dsr,
  1335  				DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{
  1336  					DiskMode:        "persistent",
  1337  					ThinProvisioned: types.NewBool(isThin),
  1338  					EagerlyScrub:    types.NewBool(!isThin),
  1339  				},
  1340  				DiskId: key,
  1341  			},
  1342  		},
  1343  	}, nil
  1344  }
  1345  
  1346  // getDatastoreObject gets datastore object.
  1347  func getDatastoreObject(client *govmomi.Client, f *object.DatacenterFolders, name string) (types.ManagedObjectReference, error) {
  1348  	s := object.NewSearchIndex(client.Client)
  1349  	ref, err := s.FindChild(context.TODO(), f.DatastoreFolder, name)
  1350  	if err != nil {
  1351  		return types.ManagedObjectReference{}, err
  1352  	}
  1353  	if ref == nil {
  1354  		return types.ManagedObjectReference{}, fmt.Errorf("Datastore '%s' not found.", name)
  1355  	}
  1356  	log.Printf("[DEBUG] getDatastoreObject: reference: %#v", ref)
  1357  	return ref.Reference(), nil
  1358  }
  1359  
  1360  // buildStoragePlacementSpecCreate builds StoragePlacementSpec for create action.
  1361  func buildStoragePlacementSpecCreate(f *object.DatacenterFolders, rp *object.ResourcePool, storagePod object.StoragePod, configSpec types.VirtualMachineConfigSpec) types.StoragePlacementSpec {
  1362  	vmfr := f.VmFolder.Reference()
  1363  	rpr := rp.Reference()
  1364  	spr := storagePod.Reference()
  1365  
  1366  	sps := types.StoragePlacementSpec{
  1367  		Type:       "create",
  1368  		ConfigSpec: &configSpec,
  1369  		PodSelectionSpec: types.StorageDrsPodSelectionSpec{
  1370  			StoragePod: &spr,
  1371  		},
  1372  		Folder:       &vmfr,
  1373  		ResourcePool: &rpr,
  1374  	}
  1375  	log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)
  1376  	return sps
  1377  }
  1378  
  1379  // buildStoragePlacementSpecClone builds StoragePlacementSpec for clone action.
  1380  func buildStoragePlacementSpecClone(c *govmomi.Client, f *object.DatacenterFolders, vm *object.VirtualMachine, rp *object.ResourcePool, storagePod object.StoragePod) types.StoragePlacementSpec {
  1381  	vmr := vm.Reference()
  1382  	vmfr := f.VmFolder.Reference()
  1383  	rpr := rp.Reference()
  1384  	spr := storagePod.Reference()
  1385  
  1386  	var o mo.VirtualMachine
  1387  	err := vm.Properties(context.TODO(), vmr, []string{"datastore"}, &o)
  1388  	if err != nil {
  1389  		return types.StoragePlacementSpec{}
  1390  	}
  1391  	ds := object.NewDatastore(c.Client, o.Datastore[0])
  1392  	log.Printf("[DEBUG] findDatastore: datastore: %#v\n", ds)
  1393  
  1394  	devices, err := vm.Device(context.TODO())
  1395  	if err != nil {
  1396  		return types.StoragePlacementSpec{}
  1397  	}
  1398  
  1399  	var key int32
  1400  	for _, d := range devices.SelectByType((*types.VirtualDisk)(nil)) {
  1401  		key = int32(d.GetVirtualDevice().Key)
  1402  		log.Printf("[DEBUG] findDatastore: virtual devices: %#v\n", d.GetVirtualDevice())
  1403  	}
  1404  
  1405  	sps := types.StoragePlacementSpec{
  1406  		Type: "clone",
  1407  		Vm:   &vmr,
  1408  		PodSelectionSpec: types.StorageDrsPodSelectionSpec{
  1409  			StoragePod: &spr,
  1410  		},
  1411  		CloneSpec: &types.VirtualMachineCloneSpec{
  1412  			Location: types.VirtualMachineRelocateSpec{
  1413  				Disk: []types.VirtualMachineRelocateSpecDiskLocator{
  1414  					{
  1415  						Datastore:       ds.Reference(),
  1416  						DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{},
  1417  						DiskId:          key,
  1418  					},
  1419  				},
  1420  				Pool: &rpr,
  1421  			},
  1422  			PowerOn:  false,
  1423  			Template: false,
  1424  		},
  1425  		CloneName: "dummy",
  1426  		Folder:    &vmfr,
  1427  	}
  1428  	return sps
  1429  }
  1430  
  1431  // findDatastore finds Datastore object.
  1432  func findDatastore(c *govmomi.Client, sps types.StoragePlacementSpec) (*object.Datastore, error) {
  1433  	var datastore *object.Datastore
  1434  	log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)
  1435  
  1436  	srm := object.NewStorageResourceManager(c.Client)
  1437  	rds, err := srm.RecommendDatastores(context.TODO(), sps)
  1438  	if err != nil {
  1439  		return nil, err
  1440  	}
  1441  	log.Printf("[DEBUG] findDatastore: recommendDatastores: %#v\n", rds)
  1442  
  1443  	spa := rds.Recommendations[0].Action[0].(*types.StoragePlacementAction)
  1444  	datastore = object.NewDatastore(c.Client, spa.Destination)
  1445  	log.Printf("[DEBUG] findDatastore: datastore: %#v", datastore)
  1446  
  1447  	return datastore, nil
  1448  }
  1449  
  1450  // createCdroms is a helper function to attach virtual cdrom devices (and their attached disk images) to a virtual IDE controller.
  1451  func createCdroms(vm *object.VirtualMachine, cdroms []cdrom) error {
  1452  	log.Printf("[DEBUG] add cdroms: %v", cdroms)
  1453  	for _, cd := range cdroms {
  1454  		log.Printf("[DEBUG] add cdrom (datastore): %v", cd.datastore)
  1455  		log.Printf("[DEBUG] add cdrom (cd path): %v", cd.path)
  1456  		err := addCdrom(vm, cd.datastore, cd.path)
  1457  		if err != nil {
  1458  			return err
  1459  		}
  1460  	}
  1461  
  1462  	return nil
  1463  }
  1464  
  1465  func (vm *virtualMachine) setupVirtualMachine(c *govmomi.Client) error {
  1466  	dc, err := getDatacenter(c, vm.datacenter)
  1467  
  1468  	if err != nil {
  1469  		return err
  1470  	}
  1471  	finder := find.NewFinder(c.Client, true)
  1472  	finder = finder.SetDatacenter(dc)
  1473  
  1474  	var template *object.VirtualMachine
  1475  	var template_mo mo.VirtualMachine
  1476  	var vm_mo mo.VirtualMachine
  1477  	if vm.template != "" {
  1478  		template, err = finder.VirtualMachine(context.TODO(), vm.template)
  1479  		if err != nil {
  1480  			return err
  1481  		}
  1482  		log.Printf("[DEBUG] template: %#v", template)
  1483  
  1484  		err = template.Properties(context.TODO(), template.Reference(), []string{"parent", "config.template", "config.guestId", "resourcePool", "snapshot", "guest.toolsVersionStatus2", "config.guestFullName"}, &template_mo)
  1485  		if err != nil {
  1486  			return err
  1487  		}
  1488  	}
  1489  
  1490  	var resourcePool *object.ResourcePool
  1491  	if vm.resourcePool == "" {
  1492  		if vm.cluster == "" {
  1493  			resourcePool, err = finder.DefaultResourcePool(context.TODO())
  1494  			if err != nil {
  1495  				return err
  1496  			}
  1497  		} else {
  1498  			resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
  1499  			if err != nil {
  1500  				return err
  1501  			}
  1502  		}
  1503  	} else {
  1504  		resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
  1505  		if err != nil {
  1506  			return err
  1507  		}
  1508  	}
  1509  	log.Printf("[DEBUG] resource pool: %#v", resourcePool)
  1510  
  1511  	dcFolders, err := dc.Folders(context.TODO())
  1512  	if err != nil {
  1513  		return err
  1514  	}
  1515  	log.Printf("[DEBUG] folder: %#v", vm.folder)
  1516  
  1517  	folder := dcFolders.VmFolder
  1518  	if len(vm.folder) > 0 {
  1519  		si := object.NewSearchIndex(c.Client)
  1520  		folderRef, err := si.FindByInventoryPath(
  1521  			context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder))
  1522  		if err != nil {
  1523  			return fmt.Errorf("Error reading folder %s: %s", vm.folder, err)
  1524  		} else if folderRef == nil {
  1525  			return fmt.Errorf("Cannot find folder %s", vm.folder)
  1526  		} else {
  1527  			folder = folderRef.(*object.Folder)
  1528  		}
  1529  	}
  1530  
  1531  	// make config spec
  1532  	configSpec := types.VirtualMachineConfigSpec{
  1533  		Name:              vm.name,
  1534  		NumCPUs:           vm.vcpu,
  1535  		NumCoresPerSocket: 1,
  1536  		MemoryMB:          vm.memoryMb,
  1537  		MemoryAllocation: &types.ResourceAllocationInfo{
  1538  			Reservation: vm.memoryAllocation.reservation,
  1539  		},
  1540  		Flags: &types.VirtualMachineFlagInfo{
  1541  			DiskUuidEnabled: &vm.enableDiskUUID,
  1542  		},
  1543  	}
  1544  	if vm.template == "" {
  1545  		configSpec.GuestId = "otherLinux64Guest"
  1546  	}
  1547  	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
  1548  
  1549  	// make ExtraConfig
  1550  	log.Printf("[DEBUG] virtual machine Extra Config spec start")
  1551  	if len(vm.customConfigurations) > 0 {
  1552  		var ov []types.BaseOptionValue
  1553  		for k, v := range vm.customConfigurations {
  1554  			key := k
  1555  			value := v
  1556  			o := types.OptionValue{
  1557  				Key:   key,
  1558  				Value: &value,
  1559  			}
  1560  			log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k, v)
  1561  			ov = append(ov, &o)
  1562  		}
  1563  		configSpec.ExtraConfig = ov
  1564  		log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
  1565  	}
  1566  
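         	// Resolve the target datastore: fall back to the default datastore, and if
         	// the name refers to a StoragePod, ask Storage DRS for a placement
         	// recommendation instead.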
  1567  	var datastore *object.Datastore
  1568  	if vm.datastore == "" {
  1569  		datastore, err = finder.DefaultDatastore(context.TODO())
  1570  		if err != nil {
  1571  			return err
  1572  		}
  1573  	} else {
  1574  		datastore, err = finder.Datastore(context.TODO(), vm.datastore)
  1575  		if err != nil {
  1576  			// TODO: datastore cluster support in govmomi finder function
  1577  			d, err := getDatastoreObject(c, dcFolders, vm.datastore)
  1578  			if err != nil {
  1579  				return err
  1580  			}
  1581  
  1582  			if d.Type == "StoragePod" {
  1583  				sp := object.StoragePod{
  1584  					Folder: object.NewFolder(c.Client, d),
  1585  				}
  1586  
  1587  				var sps types.StoragePlacementSpec
  1588  				if vm.template != "" {
  1589  					sps = buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp)
  1590  				} else {
  1591  					sps = buildStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec)
  1592  				}
  1593  
  1594  				datastore, err = findDatastore(c, sps)
  1595  				if err != nil {
  1596  					return err
  1597  				}
  1598  			} else {
  1599  				datastore = object.NewDatastore(c.Client, d)
  1600  			}
  1601  		}
  1602  	}
  1603  
  1604  	log.Printf("[DEBUG] datastore: %#v", datastore)
  1605  
  1606  	// network
  1607  	networkDevices := []types.BaseVirtualDeviceConfigSpec{}
  1608  	networkConfigs := []types.CustomizationAdapterMapping{}
  1609  	for _, network := range vm.networkInterfaces {
  1610  		// network device
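        		// New VMs get an e1000 adapter; template clones get vmxnet3, which
        		// assumes the template already carries the VMware network drivers.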
  1611  		var networkDeviceType string
  1612  		if vm.template == "" {
  1613  			networkDeviceType = "e1000"
  1614  		} else {
  1615  			networkDeviceType = "vmxnet3"
  1616  		}
  1617  		nd, err := buildNetworkDevice(finder, network.label, networkDeviceType, network.macAddress)
  1618  		if err != nil {
  1619  			return err
  1620  		}
  1621  		log.Printf("[DEBUG] network device: %+v", nd.Device)
  1622  		networkDevices = append(networkDevices, nd)
  1623  
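        		// Guest customization settings are only built when cloning from a
        		// template; a freshly created VM has no guest to customize yet.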
  1624  		if vm.template != "" {
  1625  			var ipSetting types.CustomizationIPSettings
  1626  			if network.ipv4Address == "" {
  1627  				ipSetting.Ip = &types.CustomizationDhcpIpGenerator{}
  1628  			} else {
  1629  				if network.ipv4PrefixLength == 0 {
  1630  					return fmt.Errorf("Error: ipv4_prefix_length argument is required when ipv4_address is set")
  1631  				}
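        				// Convert the CIDR prefix length into the dotted-quad subnet
        				// mask expected by the customization spec.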
  1632  				m := net.CIDRMask(network.ipv4PrefixLength, 32)
  1633  				sm := net.IPv4(m[0], m[1], m[2], m[3])
  1634  				subnetMask := sm.String()
  1635  				log.Printf("[DEBUG] ipv4 gateway: %v\n", network.ipv4Gateway)
  1636  				log.Printf("[DEBUG] ipv4 address: %v\n", network.ipv4Address)
  1637  				log.Printf("[DEBUG] ipv4 prefix length: %v\n", network.ipv4PrefixLength)
  1638  				log.Printf("[DEBUG] ipv4 subnet mask: %v\n", subnetMask)
  1639  				ipSetting.Gateway = []string{
  1640  					network.ipv4Gateway,
  1641  				}
  1642  				ipSetting.Ip = &types.CustomizationFixedIp{
  1643  					IpAddress: network.ipv4Address,
  1644  				}
  1645  				ipSetting.SubnetMask = subnetMask
  1646  			}
  1647  
  1648  			ipv6Spec := &types.CustomizationIPSettingsIpV6AddressSpec{}
  1649  			if network.ipv6Address == "" {
  1650  				ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
  1651  					&types.CustomizationDhcpIpV6Generator{},
  1652  				}
  1653  			} else {
  1654  				log.Printf("[DEBUG] ipv6 gateway: %v\n", network.ipv6Gateway)
  1655  				log.Printf("[DEBUG] ipv6 address: %v\n", network.ipv6Address)
  1656  				log.Printf("[DEBUG] ipv6 prefix length: %v\n", network.ipv6PrefixLength)
  1657  
  1658  				ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
  1659  					&types.CustomizationFixedIpV6{
  1660  						IpAddress:  network.ipv6Address,
  1661  						SubnetMask: int32(network.ipv6PrefixLength),
  1662  					},
  1663  				}
  1664  				ipv6Spec.Gateway = []string{network.ipv6Gateway}
  1665  			}
  1666  			ipSetting.IpV6Spec = ipv6Spec
  1667  
  1668  			// network config
  1669  			config := types.CustomizationAdapterMapping{
  1670  				Adapter: ipSetting,
  1671  			}
  1672  			networkConfigs = append(networkConfigs, config)
  1673  		}
  1674  	}
  1675  	log.Printf("[DEBUG] network devices: %#v", networkDevices)
  1676  	log.Printf("[DEBUG] network configs: %#v", networkConfigs)
  1677  
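        	// Either create a brand-new VM from the config spec or clone the
        	// template, depending on whether a template was configured.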
  1678  	var task *object.Task
  1679  	if vm.template == "" {
  1680  		var mds mo.Datastore
  1681  		if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil {
  1682  			return err
  1683  		}
  1684  		log.Printf("[DEBUG] datastore: %#v", mds.Name)
  1685  		scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi")
  1686  		if err != nil {
  1687  			log.Printf("[ERROR] %s", err)
        			return err
  1688  		}
  1689  
  1690  		configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{
  1691  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
  1692  			Device:    scsi,
  1693  		})
  1694  
  1695  		configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)}
  1696  
  1697  		task, err = folder.CreateVM(context.TODO(), configSpec, resourcePool, nil)
  1698  		if err != nil {
  1699  			log.Printf("[ERROR] %s", err)
        			return err
  1700  		}
  1701  
  1702  		err = task.Wait(context.TODO())
  1703  		if err != nil {
  1704  			log.Printf("[ERROR] %s", err)
        			return err
  1705  		}
  1706  
  1707  	} else {
  1708  
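        		// Clone path: build a relocate spec that places the clone on the
        		// selected resource pool and datastore.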
  1709  		relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template, vm.linkedClone, vm.hardDisks[0].initType)
  1710  		if err != nil {
  1711  			return err
  1712  		}
  1713  
  1714  		log.Printf("[DEBUG] relocate spec: %v", relocateSpec)
  1715  
  1716  		// make vm clone spec
  1717  		cloneSpec := types.VirtualMachineCloneSpec{
  1718  			Location: relocateSpec,
  1719  			Template: false,
  1720  			Config:   &configSpec,
  1721  			PowerOn:  false,
  1722  		}
  1723  		if vm.linkedClone {
  1724  			if template_mo.Snapshot == nil {
  1725  				return fmt.Errorf("`linkedClone=true`, but image VM has no snapshots")
  1726  			}
  1727  			cloneSpec.Snapshot = template_mo.Snapshot.CurrentSnapshot
  1728  		}
  1729  		log.Printf("[DEBUG] clone spec: %v", cloneSpec)
  1730  
  1731  		task, err = template.Clone(context.TODO(), folder, vm.name, cloneSpec)
  1732  		if err != nil {
  1733  			return err
  1734  		}
  1735  	}
  1736  
  1737  	err = task.Wait(context.TODO())
  1738  	if err != nil {
  1739  		log.Printf("[ERROR] %s", err)
        		return err
  1740  	}
  1741  
  1742  	newVM, err := finder.VirtualMachine(context.TODO(), vm.Path())
  1743  	if err != nil {
  1744  		return err
  1745  	}
  1746  	log.Printf("[DEBUG] new vm: %v", newVM)
  1747  
  1748  	devices, err := newVM.Device(context.TODO())
  1749  	if err != nil {
  1750  		log.Printf("[DEBUG] Cannot get devices for the new virtual machine")
  1751  		return err
  1752  	}
  1753  
  1754  	for _, dvc := range devices {
  1755  		// Issue 3559/3560: Delete all ethernet devices to add the correct ones later
  1756  		if devices.Type(dvc) == "ethernet" {
  1757  			err := newVM.RemoveDevice(context.TODO(), false, dvc)
  1758  			if err != nil {
  1759  				return err
  1760  			}
  1761  		}
  1762  	}
  1763  	// Add Network devices
  1764  	for _, dvc := range networkDevices {
  1765  		err := newVM.AddDevice(
  1766  			context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device)
  1767  		if err != nil {
  1768  			return err
  1769  		}
  1770  	}
  1771  
  1772  	// Create the cdroms if needed.
  1773  	if err := createCdroms(newVM, vm.cdroms); err != nil {
  1774  		return err
  1775  	}
  1776  
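        	// Refresh the VM's summary and config so its working directory is
        	// known when building paths for newly named disks below.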
  1777  	err = newVM.Properties(context.TODO(), newVM.Reference(), []string{"summary", "config"}, &vm_mo)
        	if err != nil {
        		return err
        	}
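        	// A clone already has the template's first disk attached, so start
        	// adding extra disks from index 1 in that case.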
  1778  	firstDisk := 0
  1779  	if vm.template != "" {
  1780  		firstDisk++
  1781  	}
  1782  	for i := firstDisk; i < len(vm.hardDisks); i++ {
  1783  		log.Printf("[DEBUG] disk index: %v", i)
  1784  
  1785  		var diskPath string
  1786  		switch {
  1787  		case vm.hardDisks[i].vmdkPath != "":
  1788  			diskPath = vm.hardDisks[i].vmdkPath
  1789  		case vm.hardDisks[i].name != "":
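        			// SnapshotDirectory is expected to look like "[datastore] vm_dir";
        			// the part after the space is used as the VM's working path.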
  1790  			snapshotFullDir := vm_mo.Config.Files.SnapshotDirectory
  1791  			split := strings.Split(snapshotFullDir, " ")
  1792  			if len(split) != 2 {
  1793  				return fmt.Errorf("[ERROR] setupVirtualMachine - failed to split snapshot directory: %v", snapshotFullDir)
  1794  			}
  1795  			vmWorkingPath := split[1]
  1796  			diskPath = vmWorkingPath + vm.hardDisks[i].name
  1797  		default:
  1798  			return fmt.Errorf("[ERROR] setupVirtualMachine - Neither vmdk path nor vmdk name was given: %#v", vm.hardDisks[i])
  1799  		}
  1800  
  1801  		err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, diskPath, vm.hardDisks[i].controller)
  1802  		if err != nil {
  1803  			return err
  1804  		}
  1805  	}
  1806  
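        	// Apply guest customization: Windows templates get a Sysprep identity,
        	// anything else gets a LinuxPrep identity.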
  1807  	if vm.skipCustomization || vm.template == "" {
  1808  		log.Printf("[DEBUG] VM customization skipped")
  1809  	} else {
  1810  		var identity_options types.BaseCustomizationIdentitySettings
  1811  		if strings.HasPrefix(template_mo.Config.GuestId, "win") {
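        			// Windows customization takes a Microsoft time-zone index rather
        			// than an IANA name, so Etc/UTC is translated to "085" first.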
  1812  			var timeZone int
  1813  			if vm.timeZone == "Etc/UTC" {
  1814  				vm.timeZone = "085"
  1815  			}
  1816  			timeZone, err := strconv.Atoi(vm.timeZone)
  1817  			if err != nil {
  1818  				return fmt.Errorf("Error converting TimeZone: %s", err)
  1819  			}
  1820  
  1821  			guiUnattended := types.CustomizationGuiUnattended{
  1822  				AutoLogon:      false,
  1823  				AutoLogonCount: 1,
  1824  				TimeZone:       int32(timeZone),
  1825  			}
  1826  
  1827  			customIdentification := types.CustomizationIdentification{}
  1828  
  1829  			userData := types.CustomizationUserData{
  1830  				ComputerName: &types.CustomizationFixedName{
  1831  					Name: strings.Split(vm.name, ".")[0],
  1832  				},
  1833  				ProductId: vm.windowsOptionalConfig.productKey,
  1834  				FullName:  "terraform",
  1835  				OrgName:   "terraform",
  1836  			}
  1837  
  1838  			if vm.windowsOptionalConfig.domainUserPassword != "" && vm.windowsOptionalConfig.domainUser != "" && vm.windowsOptionalConfig.domain != "" {
  1839  				customIdentification.DomainAdminPassword = &types.CustomizationPassword{
  1840  					PlainText: true,
  1841  					Value:     vm.windowsOptionalConfig.domainUserPassword,
  1842  				}
  1843  				customIdentification.DomainAdmin = vm.windowsOptionalConfig.domainUser
  1844  				customIdentification.JoinDomain = vm.windowsOptionalConfig.domain
  1845  			}
  1846  
  1847  			if vm.windowsOptionalConfig.adminPassword != "" {
  1848  				guiUnattended.Password = &types.CustomizationPassword{
  1849  					PlainText: true,
  1850  					Value:     vm.windowsOptionalConfig.adminPassword,
  1851  				}
  1852  			}
  1853  
  1854  			identity_options = &types.CustomizationSysprep{
  1855  				GuiUnattended:  guiUnattended,
  1856  				Identification: customIdentification,
  1857  				UserData:       userData,
  1858  			}
  1859  		} else {
  1860  			identity_options = &types.CustomizationLinuxPrep{
  1861  				HostName: &types.CustomizationFixedName{
  1862  					Name: strings.Split(vm.name, ".")[0],
  1863  				},
  1864  				Domain:     vm.domain,
  1865  				TimeZone:   vm.timeZone,
  1866  				HwClockUTC: types.NewBool(true),
  1867  			}
  1868  		}
  1869  
  1870  		// create CustomizationSpec
  1871  		customSpec := types.CustomizationSpec{
  1872  			Identity: identity_options,
  1873  			GlobalIPSettings: types.CustomizationGlobalIPSettings{
  1874  				DnsSuffixList: vm.dnsSuffixes,
  1875  				DnsServerList: vm.dnsServers,
  1876  			},
  1877  			NicSettingMap: networkConfigs,
  1878  		}
  1879  		log.Printf("[DEBUG] custom spec: %v", customSpec)
  1880  
  1881  		log.Printf("[DEBUG] VM customization starting")
  1882  		taskb, err := newVM.Customize(context.TODO(), customSpec)
  1883  		if err != nil {
  1884  			return err
  1885  		}
  1886  		_, err = taskb.WaitForResult(context.TODO(), nil)
  1887  		if err != nil {
  1888  			return err
  1889  		}
  1890  		log.Printf("[DEBUG] VM customization finished")
  1891  	}
  1892  
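        	// Power on the machine when it was cloned from a template or has a
        	// bootable disk attached.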
  1893  	if vm.hasBootableVmdk || vm.template != "" {
  1894  		if _, err := newVM.PowerOn(context.TODO()); err != nil {
        			return err
        		}
  1895  	}
  1896  	return nil
  1897  }