github.com/serbaut/terraform@v0.6.12-0.20160607213102-ac2d195cc560/builtin/providers/vsphere/resource_vsphere_virtual_machine.go (about)

     1  package vsphere
     2  
     3  import (
     4  	"fmt"
     5  	"log"
     6  	"net"
     7  	"strconv"
     8  	"strings"
     9  
    10  	"github.com/hashicorp/terraform/helper/schema"
    11  	"github.com/vmware/govmomi"
    12  	"github.com/vmware/govmomi/find"
    13  	"github.com/vmware/govmomi/object"
    14  	"github.com/vmware/govmomi/property"
    15  	"github.com/vmware/govmomi/vim25/mo"
    16  	"github.com/vmware/govmomi/vim25/types"
    17  	"golang.org/x/net/context"
    18  )
    19  
// DefaultDNSSuffixes is the DNS search-suffix list applied to a virtual
// machine's customization when the "dns_suffixes" argument is not set.
var DefaultDNSSuffixes = []string{
	"vsphere.local",
}
    23  
// DefaultDNSServers is the DNS server list applied to a virtual machine's
// customization when the "dns_servers" argument is not set (Google public DNS).
var DefaultDNSServers = []string{
	"8.8.8.8",
	"8.8.4.4",
}
    28  
// networkInterface is the internal representation of one "network_interface"
// schema entry: the target network label plus optional static IPv4/IPv6
// addressing and MAC address for the guest NIC.
type networkInterface struct {
	deviceName       string
	label            string
	ipv4Address      string
	ipv4PrefixLength int
	ipv4Gateway      string
	ipv6Address      string
	ipv6PrefixLength int
	ipv6Gateway      string
	adapterType      string // TODO: Make "adapter_type" argument
	macAddress       string
}
    41  
// hardDisk is the internal representation of one "disk" schema entry,
// describing either a new virtual disk (name/size) or an existing vmdk
// to attach (vmdkPath), plus its provisioning type and controller.
type hardDisk struct {
	name       string
	size       int64
	iops       int64
	initType   string
	vmdkPath   string
	controller string
	bootable   bool
}
    51  
// windowsOptConfig holds the additional Windows-specific customization options
// vSphere can apply when cloning a Windows guest (product key, local admin
// password, and domain-join credentials).
type windowsOptConfig struct {
	productKey         string
	adminPassword      string
	domainUser         string
	domain             string
	domainUserPassword string
}
    60  
// cdrom identifies an ISO image to attach to the VM: the datastore holding
// the image and the path to it within that datastore.
type cdrom struct {
	datastore string
	path      string
}
    65  
// memoryAllocation carries the VM's memory resource allocation settings;
// currently only the reservation (in MB) is configurable.
type memoryAllocation struct {
	reservation int64
}
    69  
// virtualMachine is the fully-resolved description of a VM, assembled from the
// resource configuration in resourceVSphereVirtualMachineCreate and consumed
// when the machine is actually provisioned.
type virtualMachine struct {
	name                  string
	folder                string
	datacenter            string
	cluster               string
	resourcePool          string
	datastore             string
	vcpu                  int32
	memoryMb              int64
	memoryAllocation      memoryAllocation
	template              string
	networkInterfaces     []networkInterface
	hardDisks             []hardDisk
	cdroms                []cdrom
	domain                string
	timeZone              string
	dnsSuffixes           []string
	dnsServers            []string
	hasBootableVmdk       bool
	linkedClone           bool
	skipCustomization     bool
	windowsOptionalConfig windowsOptConfig
	customConfigurations  map[string](types.AnyType)
}
    94  
// Path returns the inventory path of the virtual machine: "folder/name" when
// a folder is configured, otherwise just the bare name.
func (v virtualMachine) Path() string {
	return vmPath(v.folder, v.name)
}
    98  
    99  func vmPath(folder string, name string) string {
   100  	var path string
   101  	if len(folder) > 0 {
   102  		path += folder + "/"
   103  	}
   104  	return path + name
   105  }
   106  
// resourceVSphereVirtualMachine defines the schema and CRUD entry points for
// the vsphere_virtual_machine resource. Most arguments are ForceNew because
// they cannot be altered on an existing VM; only vcpu, memory and disk are
// handled in-place by resourceVSphereVirtualMachineUpdate.
func resourceVSphereVirtualMachine() *schema.Resource {
	return &schema.Resource{
		Create: resourceVSphereVirtualMachineCreate,
		Read:   resourceVSphereVirtualMachineRead,
		Update: resourceVSphereVirtualMachineUpdate,
		Delete: resourceVSphereVirtualMachineDelete,

		SchemaVersion: 1,
		MigrateState:  resourceVSphereVirtualMachineMigrateState,

		Schema: map[string]*schema.Schema{
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"folder": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			// vcpu and memory are updatable in place (with a reboot).
			"vcpu": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
			},

			"memory": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
			},

			"memory_reservation": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				Default:  0,
				ForceNew: true,
			},

			"datacenter": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"cluster": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"resource_pool": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"linked_clone": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Default:  false,
				ForceNew: true,
			},
			// Deprecated top-level gateway; superseded by the per-interface
			// ipv4_gateway attribute below.
			"gateway": &schema.Schema{
				Type:       schema.TypeString,
				Optional:   true,
				ForceNew:   true,
				Deprecated: "Please use network_interface.ipv4_gateway",
			},

			"domain": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Default:  "vsphere.local",
			},

			"time_zone": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Default:  "Etc/UTC",
			},

			"dns_suffixes": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				ForceNew: true,
			},

			"dns_servers": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				ForceNew: true,
			},

			"skip_customization": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				ForceNew: true,
				Default:  false,
			},

			"custom_configuration_parameters": &schema.Schema{
				Type:     schema.TypeMap,
				Optional: true,
				ForceNew: true,
			},
			"windows_opt_config": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"product_key": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
							ForceNew: true,
						},

						"admin_password": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},

						"domain_user": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},

						"domain": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},

						"domain_user_password": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},
					},
				},
			},

			"network_interface": &schema.Schema{
				Type:     schema.TypeList,
				Required: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"label": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
							ForceNew: true,
						},

						// ip_address/subnet_mask are the deprecated IPv4
						// spellings; ipv4_address/ipv4_prefix_length replace
						// them.
						"ip_address": &schema.Schema{
							Type:       schema.TypeString,
							Optional:   true,
							Computed:   true,
							Deprecated: "Please use ipv4_address",
						},

						"subnet_mask": &schema.Schema{
							Type:       schema.TypeString,
							Optional:   true,
							Computed:   true,
							Deprecated: "Please use ipv4_prefix_length",
						},

						"ipv4_address": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
						},

						"ipv4_prefix_length": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
							Computed: true,
						},

						"ipv4_gateway": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
						},

						"ipv6_address": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
						},

						"ipv6_prefix_length": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
							Computed: true,
						},

						"ipv6_gateway": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
						},

						"adapter_type": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},

						"mac_address": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
						},
					},
				},
			},

			// disk is a set (not a list) so update can diff added/removed
			// disks irrespective of ordering.
			"disk": &schema.Schema{
				Type:     schema.TypeSet,
				Required: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"uuid": &schema.Schema{
							Type:     schema.TypeString,
							Computed: true,
						},

						"key": &schema.Schema{
							Type:     schema.TypeInt,
							Computed: true,
						},

						"template": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
						},

						"type": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Default:  "eager_zeroed",
							ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
								value := v.(string)
								if value != "thin" && value != "eager_zeroed" {
									errors = append(errors, fmt.Errorf(
										"only 'thin' and 'eager_zeroed' are supported values for 'type'"))
								}
								return
							},
						},

						"datastore": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
						},

						"size": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
						},

						"name": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
						},

						"iops": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
						},

						"vmdk": &schema.Schema{
							// TODO: Add ValidateFunc to confirm path exists
							Type:     schema.TypeString,
							Optional: true,
						},

						"bootable": &schema.Schema{
							Type:     schema.TypeBool,
							Optional: true,
						},

						"keep_on_remove": &schema.Schema{
							Type:     schema.TypeBool,
							Optional: true,
						},

						"controller_type": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Default:  "scsi",
							ForceNew: true,
							ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
								value := v.(string)
								if value != "scsi" && value != "ide" {
									errors = append(errors, fmt.Errorf(
										"only 'scsi' and 'ide' are supported values for 'controller_type'"))
								}
								return
							},
						},
					},
				},
			},

			"cdrom": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"datastore": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
							ForceNew: true,
						},

						"path": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
							ForceNew: true,
						},
					},
				},
			},
		},
	}
}
   445  
   446  func resourceVSphereVirtualMachineUpdate(d *schema.ResourceData, meta interface{}) error {
   447  	// flag if changes have to be applied
   448  	hasChanges := false
   449  	// flag if changes have to be done when powered off
   450  	rebootRequired := false
   451  
   452  	// make config spec
   453  	configSpec := types.VirtualMachineConfigSpec{}
   454  
   455  	if d.HasChange("vcpu") {
   456  		configSpec.NumCPUs = int32(d.Get("vcpu").(int))
   457  		hasChanges = true
   458  		rebootRequired = true
   459  	}
   460  
   461  	if d.HasChange("memory") {
   462  		configSpec.MemoryMB = int64(d.Get("memory").(int))
   463  		hasChanges = true
   464  		rebootRequired = true
   465  	}
   466  
   467  	client := meta.(*govmomi.Client)
   468  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
   469  	if err != nil {
   470  		return err
   471  	}
   472  	finder := find.NewFinder(client.Client, true)
   473  	finder = finder.SetDatacenter(dc)
   474  
   475  	vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string)))
   476  	if err != nil {
   477  		return err
   478  	}
   479  
   480  	if d.HasChange("disk") {
   481  		hasChanges = true
   482  		oldDisks, newDisks := d.GetChange("disk")
   483  		oldDiskSet := oldDisks.(*schema.Set)
   484  		newDiskSet := newDisks.(*schema.Set)
   485  
   486  		addedDisks := newDiskSet.Difference(oldDiskSet)
   487  		removedDisks := oldDiskSet.Difference(newDiskSet)
   488  
   489  		// Removed disks
   490  		for _, diskRaw := range removedDisks.List() {
   491  			if disk, ok := diskRaw.(map[string]interface{}); ok {
   492  				devices, err := vm.Device(context.TODO())
   493  				if err != nil {
   494  					return fmt.Errorf("[ERROR] Update Remove Disk - Could not get virtual device list: %v", err)
   495  				}
   496  				virtualDisk := devices.FindByKey(int32(disk["key"].(int)))
   497  
   498  				keep := false
   499  				if v, ok := d.GetOk("keep_on_remove"); ok {
   500  					keep = v.(bool)
   501  				}
   502  
   503  				err = vm.RemoveDevice(context.TODO(), keep, virtualDisk)
   504  				if err != nil {
   505  					return fmt.Errorf("[ERROR] Update Remove Disk - Error removing disk: %v", err)
   506  				}
   507  			}
   508  		}
   509  		// Added disks
   510  		for _, diskRaw := range addedDisks.List() {
   511  			if disk, ok := diskRaw.(map[string]interface{}); ok {
   512  
   513  				var datastore *object.Datastore
   514  				if disk["datastore"] == "" {
   515  					datastore, err = finder.DefaultDatastore(context.TODO())
   516  					if err != nil {
   517  						return fmt.Errorf("[ERROR] Update Remove Disk - Error finding datastore: %v", err)
   518  					}
   519  				} else {
   520  					datastore, err = finder.Datastore(context.TODO(), disk["datastore"].(string))
   521  					if err != nil {
   522  						log.Printf("[ERROR] Couldn't find datastore %v.  %s", disk["datastore"].(string), err)
   523  						return err
   524  					}
   525  				}
   526  
   527  				var size int64
   528  				if disk["size"] == 0 {
   529  					size = 0
   530  				} else {
   531  					size = int64(disk["size"].(int))
   532  				}
   533  				iops := int64(disk["iops"].(int))
   534  				controller_type := disk["controller"].(string)
   535  
   536  				var mo mo.VirtualMachine
   537  				vm.Properties(context.TODO(), vm.Reference(), []string{"summary", "config"}, &mo)
   538  
   539  				var diskPath string
   540  				switch {
   541  				case disk["vmdk"] != "":
   542  					diskPath = disk["vmdk"].(string)
   543  				case disk["name"] != "":
   544  					snapshotFullDir := mo.Config.Files.SnapshotDirectory
   545  					split := strings.Split(snapshotFullDir, " ")
   546  					if len(split) != 2 {
   547  						return fmt.Errorf("[ERROR] createVirtualMachine - failed to split snapshot directory: %v", snapshotFullDir)
   548  					}
   549  					vmWorkingPath := split[1]
   550  					diskPath = vmWorkingPath + disk["name"].(string)
   551  				default:
   552  					return fmt.Errorf("[ERROR] resourceVSphereVirtualMachineUpdate - Neither vmdk path nor vmdk name was given")
   553  				}
   554  
   555  				log.Printf("[INFO] Attaching disk: %v", diskPath)
   556  				err = addHardDisk(vm, size, iops, "thin", datastore, diskPath, controller_type)
   557  				if err != nil {
   558  					log.Printf("[ERROR] Add Hard Disk Failed: %v", err)
   559  					return err
   560  				}
   561  			}
   562  			if err != nil {
   563  				return err
   564  			}
   565  		}
   566  	}
   567  
   568  	// do nothing if there are no changes
   569  	if !hasChanges {
   570  		return nil
   571  	}
   572  
   573  	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
   574  
   575  	if rebootRequired {
   576  		log.Printf("[INFO] Shutting down virtual machine: %s", d.Id())
   577  
   578  		task, err := vm.PowerOff(context.TODO())
   579  		if err != nil {
   580  			return err
   581  		}
   582  
   583  		err = task.Wait(context.TODO())
   584  		if err != nil {
   585  			return err
   586  		}
   587  	}
   588  
   589  	log.Printf("[INFO] Reconfiguring virtual machine: %s", d.Id())
   590  
   591  	task, err := vm.Reconfigure(context.TODO(), configSpec)
   592  	if err != nil {
   593  		log.Printf("[ERROR] %s", err)
   594  	}
   595  
   596  	err = task.Wait(context.TODO())
   597  	if err != nil {
   598  		log.Printf("[ERROR] %s", err)
   599  	}
   600  
   601  	if rebootRequired {
   602  		task, err = vm.PowerOn(context.TODO())
   603  		if err != nil {
   604  			return err
   605  		}
   606  
   607  		err = task.Wait(context.TODO())
   608  		if err != nil {
   609  			log.Printf("[ERROR] %s", err)
   610  		}
   611  	}
   612  
   613  	ip, err := vm.WaitForIP(context.TODO())
   614  	if err != nil {
   615  		return err
   616  	}
   617  	log.Printf("[DEBUG] ip address: %v", ip)
   618  
   619  	return resourceVSphereVirtualMachineRead(d, meta)
   620  }
   621  
   622  func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error {
   623  	client := meta.(*govmomi.Client)
   624  
   625  	vm := virtualMachine{
   626  		name:     d.Get("name").(string),
   627  		vcpu:     int32(d.Get("vcpu").(int)),
   628  		memoryMb: int64(d.Get("memory").(int)),
   629  		memoryAllocation: memoryAllocation{
   630  			reservation: int64(d.Get("memory_reservation").(int)),
   631  		},
   632  	}
   633  
   634  	if v, ok := d.GetOk("folder"); ok {
   635  		vm.folder = v.(string)
   636  	}
   637  
   638  	if v, ok := d.GetOk("datacenter"); ok {
   639  		vm.datacenter = v.(string)
   640  	}
   641  
   642  	if v, ok := d.GetOk("cluster"); ok {
   643  		vm.cluster = v.(string)
   644  	}
   645  
   646  	if v, ok := d.GetOk("resource_pool"); ok {
   647  		vm.resourcePool = v.(string)
   648  	}
   649  
   650  	if v, ok := d.GetOk("domain"); ok {
   651  		vm.domain = v.(string)
   652  	}
   653  
   654  	if v, ok := d.GetOk("time_zone"); ok {
   655  		vm.timeZone = v.(string)
   656  	}
   657  
   658  	if v, ok := d.GetOk("linked_clone"); ok {
   659  		vm.linkedClone = v.(bool)
   660  	}
   661  
   662  	if v, ok := d.GetOk("skip_customization"); ok {
   663  		vm.skipCustomization = v.(bool)
   664  	}
   665  
   666  	if raw, ok := d.GetOk("dns_suffixes"); ok {
   667  		for _, v := range raw.([]interface{}) {
   668  			vm.dnsSuffixes = append(vm.dnsSuffixes, v.(string))
   669  		}
   670  	} else {
   671  		vm.dnsSuffixes = DefaultDNSSuffixes
   672  	}
   673  
   674  	if raw, ok := d.GetOk("dns_servers"); ok {
   675  		for _, v := range raw.([]interface{}) {
   676  			vm.dnsServers = append(vm.dnsServers, v.(string))
   677  		}
   678  	} else {
   679  		vm.dnsServers = DefaultDNSServers
   680  	}
   681  
   682  	if vL, ok := d.GetOk("custom_configuration_parameters"); ok {
   683  		if custom_configs, ok := vL.(map[string]interface{}); ok {
   684  			custom := make(map[string]types.AnyType)
   685  			for k, v := range custom_configs {
   686  				custom[k] = v
   687  			}
   688  			vm.customConfigurations = custom
   689  			log.Printf("[DEBUG] custom_configuration_parameters init: %v", vm.customConfigurations)
   690  		}
   691  	}
   692  
   693  	if vL, ok := d.GetOk("network_interface"); ok {
   694  		networks := make([]networkInterface, len(vL.([]interface{})))
   695  		for i, v := range vL.([]interface{}) {
   696  			network := v.(map[string]interface{})
   697  			networks[i].label = network["label"].(string)
   698  			if v, ok := network["ip_address"].(string); ok && v != "" {
   699  				networks[i].ipv4Address = v
   700  			}
   701  			if v, ok := d.GetOk("gateway"); ok {
   702  				networks[i].ipv4Gateway = v.(string)
   703  			}
   704  			if v, ok := network["subnet_mask"].(string); ok && v != "" {
   705  				ip := net.ParseIP(v).To4()
   706  				if ip != nil {
   707  					mask := net.IPv4Mask(ip[0], ip[1], ip[2], ip[3])
   708  					pl, _ := mask.Size()
   709  					networks[i].ipv4PrefixLength = pl
   710  				} else {
   711  					return fmt.Errorf("subnet_mask parameter is invalid.")
   712  				}
   713  			}
   714  			if v, ok := network["ipv4_address"].(string); ok && v != "" {
   715  				networks[i].ipv4Address = v
   716  			}
   717  			if v, ok := network["ipv4_prefix_length"].(int); ok && v != 0 {
   718  				networks[i].ipv4PrefixLength = v
   719  			}
   720  			if v, ok := network["ipv4_gateway"].(string); ok && v != "" {
   721  				networks[i].ipv4Gateway = v
   722  			}
   723  			if v, ok := network["ipv6_address"].(string); ok && v != "" {
   724  				networks[i].ipv6Address = v
   725  			}
   726  			if v, ok := network["ipv6_prefix_length"].(int); ok && v != 0 {
   727  				networks[i].ipv6PrefixLength = v
   728  			}
   729  			if v, ok := network["ipv6_gateway"].(string); ok && v != "" {
   730  				networks[i].ipv6Gateway = v
   731  			}
   732  			if v, ok := network["mac_address"].(string); ok && v != "" {
   733  				networks[i].macAddress = v
   734  			}
   735  		}
   736  		vm.networkInterfaces = networks
   737  		log.Printf("[DEBUG] network_interface init: %v", networks)
   738  	}
   739  
   740  	if vL, ok := d.GetOk("windows_opt_config"); ok {
   741  		var winOpt windowsOptConfig
   742  		custom_configs := (vL.([]interface{}))[0].(map[string]interface{})
   743  		if v, ok := custom_configs["admin_password"].(string); ok && v != "" {
   744  			winOpt.adminPassword = v
   745  		}
   746  		if v, ok := custom_configs["domain"].(string); ok && v != "" {
   747  			winOpt.domain = v
   748  		}
   749  		if v, ok := custom_configs["domain_user"].(string); ok && v != "" {
   750  			winOpt.domainUser = v
   751  		}
   752  		if v, ok := custom_configs["product_key"].(string); ok && v != "" {
   753  			winOpt.productKey = v
   754  		}
   755  		if v, ok := custom_configs["domain_user_password"].(string); ok && v != "" {
   756  			winOpt.domainUserPassword = v
   757  		}
   758  		vm.windowsOptionalConfig = winOpt
   759  		log.Printf("[DEBUG] windows config init: %v", winOpt)
   760  	}
   761  
   762  	if vL, ok := d.GetOk("disk"); ok {
   763  		if diskSet, ok := vL.(*schema.Set); ok {
   764  
   765  			disks := []hardDisk{}
   766  			hasBootableDisk := false
   767  			for _, value := range diskSet.List() {
   768  				disk := value.(map[string]interface{})
   769  				newDisk := hardDisk{}
   770  
   771  				if v, ok := disk["template"].(string); ok && v != "" {
   772  					if v, ok := disk["name"].(string); ok && v != "" {
   773  						return fmt.Errorf("Cannot specify name of a template")
   774  					}
   775  					vm.template = v
   776  					if hasBootableDisk {
   777  						return fmt.Errorf("[ERROR] Only one bootable disk or template may be given")
   778  					}
   779  					hasBootableDisk = true
   780  				}
   781  
   782  				if v, ok := disk["type"].(string); ok && v != "" {
   783  					newDisk.initType = v
   784  				}
   785  
   786  				if v, ok := disk["datastore"].(string); ok && v != "" {
   787  					vm.datastore = v
   788  				}
   789  
   790  				if v, ok := disk["size"].(int); ok && v != 0 {
   791  					if v, ok := disk["template"].(string); ok && v != "" {
   792  						return fmt.Errorf("Cannot specify size of a template")
   793  					}
   794  
   795  					if v, ok := disk["name"].(string); ok && v != "" {
   796  						newDisk.name = v
   797  					} else {
   798  						return fmt.Errorf("[ERROR] Disk name must be provided when creating a new disk")
   799  					}
   800  
   801  					newDisk.size = int64(v)
   802  				}
   803  
   804  				if v, ok := disk["iops"].(int); ok && v != 0 {
   805  					newDisk.iops = int64(v)
   806  				}
   807  
   808  				if v, ok := disk["controller_type"].(string); ok && v != "" {
   809  					newDisk.controller = v
   810  				}
   811  
   812  				if vVmdk, ok := disk["vmdk"].(string); ok && vVmdk != "" {
   813  					if v, ok := disk["template"].(string); ok && v != "" {
   814  						return fmt.Errorf("Cannot specify a vmdk for a template")
   815  					}
   816  					if v, ok := disk["size"].(string); ok && v != "" {
   817  						return fmt.Errorf("Cannot specify size of a vmdk")
   818  					}
   819  					if v, ok := disk["name"].(string); ok && v != "" {
   820  						return fmt.Errorf("Cannot specify name of a vmdk")
   821  					}
   822  					if vBootable, ok := disk["bootable"].(bool); ok {
   823  						hasBootableDisk = true
   824  						newDisk.bootable = vBootable
   825  						vm.hasBootableVmdk = vBootable
   826  					}
   827  					newDisk.vmdkPath = vVmdk
   828  				}
   829  				// Preserves order so bootable disk is first
   830  				if newDisk.bootable == true || disk["template"] != "" {
   831  					disks = append([]hardDisk{newDisk}, disks...)
   832  				} else {
   833  					disks = append(disks, newDisk)
   834  				}
   835  			}
   836  			vm.hardDisks = disks
   837  			log.Printf("[DEBUG] disk init: %v", disks)
   838  		}
   839  	}
   840  
   841  	if vL, ok := d.GetOk("cdrom"); ok {
   842  		cdroms := make([]cdrom, len(vL.([]interface{})))
   843  		for i, v := range vL.([]interface{}) {
   844  			c := v.(map[string]interface{})
   845  			if v, ok := c["datastore"].(string); ok && v != "" {
   846  				cdroms[i].datastore = v
   847  			} else {
   848  				return fmt.Errorf("Datastore argument must be specified when attaching a cdrom image.")
   849  			}
   850  			if v, ok := c["path"].(string); ok && v != "" {
   851  				cdroms[i].path = v
   852  			} else {
   853  				return fmt.Errorf("Path argument must be specified when attaching a cdrom image.")
   854  			}
   855  		}
   856  		vm.cdroms = cdroms
   857  		log.Printf("[DEBUG] cdrom init: %v", cdroms)
   858  	}
   859  
   860  	err := vm.setupVirtualMachine(client)
   861  	if err != nil {
   862  		return err
   863  	}
   864  
   865  	d.SetId(vm.Path())
   866  	log.Printf("[INFO] Created virtual machine: %s", d.Id())
   867  
   868  	return resourceVSphereVirtualMachineRead(d, meta)
   869  }
   870  
   871  func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{}) error {
   872  	log.Printf("[DEBUG] virtual machine resource data: %#v", d)
   873  	client := meta.(*govmomi.Client)
   874  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
   875  	if err != nil {
   876  		return err
   877  	}
   878  	finder := find.NewFinder(client.Client, true)
   879  	finder = finder.SetDatacenter(dc)
   880  
   881  	vm, err := finder.VirtualMachine(context.TODO(), d.Id())
   882  	if err != nil {
   883  		d.SetId("")
   884  		return nil
   885  	}
   886  
   887  	var mvm mo.VirtualMachine
   888  
   889  	// wait for interfaces to appear
   890  	_, err = vm.WaitForNetIP(context.TODO(), true)
   891  	if err != nil {
   892  		return err
   893  	}
   894  
   895  	collector := property.DefaultCollector(client.Client)
   896  	if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"guest", "summary", "datastore", "config"}, &mvm); err != nil {
   897  		return err
   898  	}
   899  
   900  	log.Printf("[DEBUG] Datacenter - %#v", dc)
   901  	log.Printf("[DEBUG] mvm.Summary.Config - %#v", mvm.Summary.Config)
   902  	log.Printf("[DEBUG] mvm.Summary.Config - %#v", mvm.Config)
   903  	log.Printf("[DEBUG] mvm.Guest.Net - %#v", mvm.Guest.Net)
   904  
   905  	disks := make([]map[string]interface{}, 0)
   906  	templateDisk := make(map[string]interface{}, 1)
   907  	for _, device := range mvm.Config.Hardware.Device {
   908  		if vd, ok := device.(*types.VirtualDisk); ok {
   909  
   910  			virtualDevice := vd.GetVirtualDevice()
   911  
   912  			backingInfo := virtualDevice.Backing
   913  			var diskFullPath string
   914  			var diskUuid string
   915  			if v, ok := backingInfo.(*types.VirtualDiskFlatVer2BackingInfo); ok {
   916  				diskFullPath = v.FileName
   917  				diskUuid = v.Uuid
   918  			} else if v, ok := backingInfo.(*types.VirtualDiskSparseVer2BackingInfo); ok {
   919  				diskFullPath = v.FileName
   920  				diskUuid = v.Uuid
   921  			}
   922  			log.Printf("[DEBUG] resourceVSphereVirtualMachineRead - Analyzing disk: %v", diskFullPath)
   923  
   924  			// Separate datastore and path
   925  			diskFullPathSplit := strings.Split(diskFullPath, " ")
   926  			if len(diskFullPathSplit) != 2 {
   927  				return fmt.Errorf("[ERROR] Failed trying to parse disk path: %v", diskFullPath)
   928  			}
   929  			diskPath := diskFullPathSplit[1]
   930  			// Isolate filename
   931  			diskNameSplit := strings.Split(diskPath, "/")
   932  			diskName := diskNameSplit[len(diskNameSplit)-1]
   933  			// Remove possible extension
   934  			diskName = strings.Split(diskName, ".")[0]
   935  
   936  			if prevDisks, ok := d.GetOk("disk"); ok {
   937  				if prevDiskSet, ok := prevDisks.(*schema.Set); ok {
   938  					for _, v := range prevDiskSet.List() {
   939  						prevDisk := v.(map[string]interface{})
   940  
   941  						// We're guaranteed only one template disk.  Passing value directly through since templates should be immutable
   942  						if prevDisk["template"] != "" {
   943  							if len(templateDisk) == 0 {
   944  								templateDisk = prevDisk
   945  								disks = append(disks, templateDisk)
   946  								break
   947  							}
   948  						}
   949  
   950  						// It is enforced that prevDisk["name"] should only be set in the case
   951  						// of creating a new disk for the user.
   952  						// size case:  name was set by user, compare parsed filename from mo.filename (without path or .vmdk extension) with name
   953  						// vmdk case:  compare prevDisk["vmdk"] and mo.Filename
   954  						if diskName == prevDisk["name"] || diskPath == prevDisk["vmdk"] {
   955  
   956  							prevDisk["key"] = virtualDevice.Key
   957  							prevDisk["uuid"] = diskUuid
   958  
   959  							disks = append(disks, prevDisk)
   960  							break
   961  						}
   962  					}
   963  				}
   964  			}
   965  			log.Printf("[DEBUG] disks: %#v", disks)
   966  		}
   967  	}
   968  	err = d.Set("disk", disks)
   969  	if err != nil {
   970  		return fmt.Errorf("Invalid disks to set: %#v", disks)
   971  	}
   972  
   973  	networkInterfaces := make([]map[string]interface{}, 0)
   974  	for _, v := range mvm.Guest.Net {
   975  		if v.DeviceConfigId >= 0 {
   976  			log.Printf("[DEBUG] v.Network - %#v", v.Network)
   977  			networkInterface := make(map[string]interface{})
   978  			networkInterface["label"] = v.Network
   979  			networkInterface["mac_address"] = v.MacAddress
   980  			for _, ip := range v.IpConfig.IpAddress {
   981  				p := net.ParseIP(ip.IpAddress)
   982  				if p.To4() != nil {
   983  					log.Printf("[DEBUG] p.String - %#v", p.String())
   984  					log.Printf("[DEBUG] ip.PrefixLength - %#v", ip.PrefixLength)
   985  					networkInterface["ipv4_address"] = p.String()
   986  					networkInterface["ipv4_prefix_length"] = ip.PrefixLength
   987  				} else if p.To16() != nil {
   988  					log.Printf("[DEBUG] p.String - %#v", p.String())
   989  					log.Printf("[DEBUG] ip.PrefixLength - %#v", ip.PrefixLength)
   990  					networkInterface["ipv6_address"] = p.String()
   991  					networkInterface["ipv6_prefix_length"] = ip.PrefixLength
   992  				}
   993  				log.Printf("[DEBUG] networkInterface: %#v", networkInterface)
   994  			}
   995  			log.Printf("[DEBUG] networkInterface: %#v", networkInterface)
   996  			networkInterfaces = append(networkInterfaces, networkInterface)
   997  		}
   998  	}
   999  	if mvm.Guest.IpStack != nil {
  1000  		for _, v := range mvm.Guest.IpStack {
  1001  			if v.IpRouteConfig != nil && v.IpRouteConfig.IpRoute != nil {
  1002  				for _, route := range v.IpRouteConfig.IpRoute {
  1003  					if route.Gateway.Device != "" {
  1004  						gatewaySetting := ""
  1005  						if route.Network == "::" {
  1006  							gatewaySetting = "ipv6_gateway"
  1007  						} else if route.Network == "0.0.0.0" {
  1008  							gatewaySetting = "ipv4_gateway"
  1009  						}
  1010  						if gatewaySetting != "" {
  1011  							deviceID, err := strconv.Atoi(route.Gateway.Device)
  1012  							if err != nil {
  1013  								log.Printf("[WARN] error at processing %s of device id %#v: %#v", gatewaySetting, route.Gateway.Device, err)
  1014  							} else {
  1015  								log.Printf("[DEBUG] %s of device id %d: %s", gatewaySetting, deviceID, route.Gateway.IpAddress)
  1016  								networkInterfaces[deviceID][gatewaySetting] = route.Gateway.IpAddress
  1017  							}
  1018  						}
  1019  					}
  1020  				}
  1021  			}
  1022  		}
  1023  	}
  1024  	log.Printf("[DEBUG] networkInterfaces: %#v", networkInterfaces)
  1025  	err = d.Set("network_interface", networkInterfaces)
  1026  	if err != nil {
  1027  		return fmt.Errorf("Invalid network interfaces to set: %#v", networkInterfaces)
  1028  	}
  1029  
  1030  	log.Printf("[DEBUG] ip address: %v", networkInterfaces[0]["ipv4_address"].(string))
  1031  	d.SetConnInfo(map[string]string{
  1032  		"type": "ssh",
  1033  		"host": networkInterfaces[0]["ipv4_address"].(string),
  1034  	})
  1035  
  1036  	var rootDatastore string
  1037  	for _, v := range mvm.Datastore {
  1038  		var md mo.Datastore
  1039  		if err := collector.RetrieveOne(context.TODO(), v, []string{"name", "parent"}, &md); err != nil {
  1040  			return err
  1041  		}
  1042  		if md.Parent.Type == "StoragePod" {
  1043  			var msp mo.StoragePod
  1044  			if err := collector.RetrieveOne(context.TODO(), *md.Parent, []string{"name"}, &msp); err != nil {
  1045  				return err
  1046  			}
  1047  			rootDatastore = msp.Name
  1048  			log.Printf("[DEBUG] %#v", msp.Name)
  1049  		} else {
  1050  			rootDatastore = md.Name
  1051  			log.Printf("[DEBUG] %#v", md.Name)
  1052  		}
  1053  		break
  1054  	}
  1055  
  1056  	d.Set("datacenter", dc)
  1057  	d.Set("memory", mvm.Summary.Config.MemorySizeMB)
  1058  	d.Set("memory_reservation", mvm.Summary.Config.MemoryReservation)
  1059  	d.Set("cpu", mvm.Summary.Config.NumCpu)
  1060  	d.Set("datastore", rootDatastore)
  1061  
  1062  	return nil
  1063  }
  1064  
  1065  func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error {
  1066  	client := meta.(*govmomi.Client)
  1067  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
  1068  	if err != nil {
  1069  		return err
  1070  	}
  1071  	finder := find.NewFinder(client.Client, true)
  1072  	finder = finder.SetDatacenter(dc)
  1073  
  1074  	vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string)))
  1075  	if err != nil {
  1076  		return err
  1077  	}
  1078  
  1079  	log.Printf("[INFO] Deleting virtual machine: %s", d.Id())
  1080  	state, err := vm.PowerState(context.TODO())
  1081  	if err != nil {
  1082  		return err
  1083  	}
  1084  
  1085  	if state == types.VirtualMachinePowerStatePoweredOn {
  1086  		task, err := vm.PowerOff(context.TODO())
  1087  		if err != nil {
  1088  			return err
  1089  		}
  1090  
  1091  		err = task.Wait(context.TODO())
  1092  		if err != nil {
  1093  			return err
  1094  		}
  1095  	}
  1096  
  1097  	task, err := vm.Destroy(context.TODO())
  1098  	if err != nil {
  1099  		return err
  1100  	}
  1101  
  1102  	err = task.Wait(context.TODO())
  1103  	if err != nil {
  1104  		return err
  1105  	}
  1106  
  1107  	d.SetId("")
  1108  	return nil
  1109  }
  1110  
  1111  // addHardDisk adds a new Hard Disk to the VirtualMachine.
  1112  func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string, datastore *object.Datastore, diskPath string, controller_type string) error {
  1113  	devices, err := vm.Device(context.TODO())
  1114  	if err != nil {
  1115  		return err
  1116  	}
  1117  	log.Printf("[DEBUG] vm devices: %#v\n", devices)
  1118  
  1119  	var controller types.BaseVirtualController
  1120  	controller, err = devices.FindDiskController(controller_type)
  1121  	if err != nil {
  1122  		log.Printf("[DEBUG] Couldn't find a %v controller.  Creating one..", controller_type)
  1123  
  1124  		var c types.BaseVirtualDevice
  1125  		switch controller_type {
  1126  		case "scsi":
  1127  			// Create scsi controller
  1128  			c, err = devices.CreateSCSIController("scsi")
  1129  			if err != nil {
  1130  				return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err)
  1131  			}
  1132  		case "ide":
  1133  			// Create ide controller
  1134  			c, err = devices.CreateIDEController()
  1135  			if err != nil {
  1136  				return fmt.Errorf("[ERROR] Failed creating IDE controller: %v", err)
  1137  			}
  1138  		default:
  1139  			return fmt.Errorf("[ERROR] Unsupported disk controller provided: %v", controller_type)
  1140  		}
  1141  
  1142  		vm.AddDevice(context.TODO(), c)
  1143  		controller, err = devices.FindDiskController(controller_type)
  1144  		if err != nil {
  1145  			return fmt.Errorf("[ERROR] Could not find the controller we just created")
  1146  		}
  1147  	}
  1148  
  1149  	log.Printf("[DEBUG] disk controller: %#v\n", controller)
  1150  
  1151  	// TODO Check if diskPath & datastore exist
  1152  	// If diskPath is not specified, pass empty string to CreateDisk()
  1153  	if diskPath == "" {
  1154  		return fmt.Errorf("[ERROR] addHardDisk - No path proided")
  1155  	} else {
  1156  		// TODO Check if diskPath & datastore exist
  1157  		diskPath = fmt.Sprintf("[%v] %v", datastore.Name(), diskPath)
  1158  	}
  1159  	log.Printf("[DEBUG] addHardDisk - diskPath: %v", diskPath)
  1160  	disk := devices.CreateDisk(controller, datastore.Reference(), diskPath)
  1161  
  1162  	existing := devices.SelectByBackingInfo(disk.Backing)
  1163  	log.Printf("[DEBUG] disk: %#v\n", disk)
  1164  
  1165  	if len(existing) == 0 {
  1166  		disk.CapacityInKB = int64(size * 1024 * 1024)
  1167  		if iops != 0 {
  1168  			disk.StorageIOAllocation = &types.StorageIOAllocationInfo{
  1169  				Limit: iops,
  1170  			}
  1171  		}
  1172  		backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
  1173  
  1174  		if diskType == "eager_zeroed" {
  1175  			// eager zeroed thick virtual disk
  1176  			backing.ThinProvisioned = types.NewBool(false)
  1177  			backing.EagerlyScrub = types.NewBool(true)
  1178  		} else if diskType == "thin" {
  1179  			// thin provisioned virtual disk
  1180  			backing.ThinProvisioned = types.NewBool(true)
  1181  		}
  1182  
  1183  		log.Printf("[DEBUG] addHardDisk: %#v\n", disk)
  1184  		log.Printf("[DEBUG] addHardDisk capacity: %#v\n", disk.CapacityInKB)
  1185  
  1186  		return vm.AddDevice(context.TODO(), disk)
  1187  	} else {
  1188  		log.Printf("[DEBUG] addHardDisk: Disk already present.\n")
  1189  
  1190  		return nil
  1191  	}
  1192  }
  1193  
  1194  // addCdrom adds a new virtual cdrom drive to the VirtualMachine and attaches an image (ISO) to it from a datastore path.
  1195  func addCdrom(vm *object.VirtualMachine, datastore, path string) error {
  1196  	devices, err := vm.Device(context.TODO())
  1197  	if err != nil {
  1198  		return err
  1199  	}
  1200  	log.Printf("[DEBUG] vm devices: %#v", devices)
  1201  
  1202  	var controller *types.VirtualIDEController
  1203  	controller, err = devices.FindIDEController("")
  1204  	if err != nil {
  1205  		log.Printf("[DEBUG] Couldn't find a ide controller.  Creating one..")
  1206  
  1207  		var c types.BaseVirtualDevice
  1208  		c, err := devices.CreateIDEController()
  1209  		if err != nil {
  1210  			return fmt.Errorf("[ERROR] Failed creating IDE controller: %v", err)
  1211  		}
  1212  
  1213  		if v, ok := c.(*types.VirtualIDEController); ok {
  1214  			controller = v
  1215  		} else {
  1216  			return fmt.Errorf("[ERROR] Controller type could not be asserted")
  1217  		}
  1218  		vm.AddDevice(context.TODO(), c)
  1219  		controller, err = devices.FindIDEController("")
  1220  		if err != nil {
  1221  			return fmt.Errorf("[ERROR] Could not find the controller we just created")
  1222  		}
  1223  	}
  1224  	log.Printf("[DEBUG] ide controller: %#v", controller)
  1225  
  1226  	c, err := devices.CreateCdrom(controller)
  1227  	if err != nil {
  1228  		return err
  1229  	}
  1230  
  1231  	c = devices.InsertIso(c, fmt.Sprintf("[%s] %s", datastore, path))
  1232  	log.Printf("[DEBUG] addCdrom: %#v", c)
  1233  
  1234  	return vm.AddDevice(context.TODO(), c)
  1235  }
  1236  
  1237  // buildNetworkDevice builds VirtualDeviceConfigSpec for Network Device.
  1238  func buildNetworkDevice(f *find.Finder, label, adapterType string, macAddress string) (*types.VirtualDeviceConfigSpec, error) {
  1239  	network, err := f.Network(context.TODO(), "*"+label)
  1240  	if err != nil {
  1241  		return nil, err
  1242  	}
  1243  
  1244  	backing, err := network.EthernetCardBackingInfo(context.TODO())
  1245  	if err != nil {
  1246  		return nil, err
  1247  	}
  1248  
  1249  	var address_type string
  1250  	if macAddress == "" {
  1251  		address_type = string(types.VirtualEthernetCardMacTypeGenerated)
  1252  	} else {
  1253  		address_type = string(types.VirtualEthernetCardMacTypeManual)
  1254  	}
  1255  
  1256  	if adapterType == "vmxnet3" {
  1257  		return &types.VirtualDeviceConfigSpec{
  1258  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
  1259  			Device: &types.VirtualVmxnet3{
  1260  				VirtualVmxnet: types.VirtualVmxnet{
  1261  					VirtualEthernetCard: types.VirtualEthernetCard{
  1262  						VirtualDevice: types.VirtualDevice{
  1263  							Key:     -1,
  1264  							Backing: backing,
  1265  						},
  1266  						AddressType: address_type,
  1267  						MacAddress:  macAddress,
  1268  					},
  1269  				},
  1270  			},
  1271  		}, nil
  1272  	} else if adapterType == "e1000" {
  1273  		return &types.VirtualDeviceConfigSpec{
  1274  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
  1275  			Device: &types.VirtualE1000{
  1276  				VirtualEthernetCard: types.VirtualEthernetCard{
  1277  					VirtualDevice: types.VirtualDevice{
  1278  						Key:     -1,
  1279  						Backing: backing,
  1280  					},
  1281  					AddressType: address_type,
  1282  					MacAddress:  macAddress,
  1283  				},
  1284  			},
  1285  		}, nil
  1286  	} else {
  1287  		return nil, fmt.Errorf("Invalid network adapter type.")
  1288  	}
  1289  }
  1290  
  1291  // buildVMRelocateSpec builds VirtualMachineRelocateSpec to set a place for a new VirtualMachine.
  1292  func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine, linkedClone bool, initType string) (types.VirtualMachineRelocateSpec, error) {
  1293  	var key int32
  1294  	var moveType string
  1295  	if linkedClone {
  1296  		moveType = "createNewChildDiskBacking"
  1297  	} else {
  1298  		moveType = "moveAllDiskBackingsAndDisallowSharing"
  1299  	}
  1300  	log.Printf("[DEBUG] relocate type: [%s]", moveType)
  1301  
  1302  	devices, err := vm.Device(context.TODO())
  1303  	if err != nil {
  1304  		return types.VirtualMachineRelocateSpec{}, err
  1305  	}
  1306  	for _, d := range devices {
  1307  		if devices.Type(d) == "disk" {
  1308  			key = int32(d.GetVirtualDevice().Key)
  1309  		}
  1310  	}
  1311  
  1312  	isThin := initType == "thin"
  1313  	rpr := rp.Reference()
  1314  	dsr := ds.Reference()
  1315  	return types.VirtualMachineRelocateSpec{
  1316  		Datastore:    &dsr,
  1317  		Pool:         &rpr,
  1318  		DiskMoveType: moveType,
  1319  		Disk: []types.VirtualMachineRelocateSpecDiskLocator{
  1320  			{
  1321  				Datastore: dsr,
  1322  				DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{
  1323  					DiskMode:        "persistent",
  1324  					ThinProvisioned: types.NewBool(isThin),
  1325  					EagerlyScrub:    types.NewBool(!isThin),
  1326  				},
  1327  				DiskId: key,
  1328  			},
  1329  		},
  1330  	}, nil
  1331  }
  1332  
  1333  // getDatastoreObject gets datastore object.
  1334  func getDatastoreObject(client *govmomi.Client, f *object.DatacenterFolders, name string) (types.ManagedObjectReference, error) {
  1335  	s := object.NewSearchIndex(client.Client)
  1336  	ref, err := s.FindChild(context.TODO(), f.DatastoreFolder, name)
  1337  	if err != nil {
  1338  		return types.ManagedObjectReference{}, err
  1339  	}
  1340  	if ref == nil {
  1341  		return types.ManagedObjectReference{}, fmt.Errorf("Datastore '%s' not found.", name)
  1342  	}
  1343  	log.Printf("[DEBUG] getDatastoreObject: reference: %#v", ref)
  1344  	return ref.Reference(), nil
  1345  }
  1346  
  1347  // buildStoragePlacementSpecCreate builds StoragePlacementSpec for create action.
  1348  func buildStoragePlacementSpecCreate(f *object.DatacenterFolders, rp *object.ResourcePool, storagePod object.StoragePod, configSpec types.VirtualMachineConfigSpec) types.StoragePlacementSpec {
  1349  	vmfr := f.VmFolder.Reference()
  1350  	rpr := rp.Reference()
  1351  	spr := storagePod.Reference()
  1352  
  1353  	sps := types.StoragePlacementSpec{
  1354  		Type:       "create",
  1355  		ConfigSpec: &configSpec,
  1356  		PodSelectionSpec: types.StorageDrsPodSelectionSpec{
  1357  			StoragePod: &spr,
  1358  		},
  1359  		Folder:       &vmfr,
  1360  		ResourcePool: &rpr,
  1361  	}
  1362  	log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)
  1363  	return sps
  1364  }
  1365  
  1366  // buildStoragePlacementSpecClone builds StoragePlacementSpec for clone action.
  1367  func buildStoragePlacementSpecClone(c *govmomi.Client, f *object.DatacenterFolders, vm *object.VirtualMachine, rp *object.ResourcePool, storagePod object.StoragePod) types.StoragePlacementSpec {
  1368  	vmr := vm.Reference()
  1369  	vmfr := f.VmFolder.Reference()
  1370  	rpr := rp.Reference()
  1371  	spr := storagePod.Reference()
  1372  
  1373  	var o mo.VirtualMachine
  1374  	err := vm.Properties(context.TODO(), vmr, []string{"datastore"}, &o)
  1375  	if err != nil {
  1376  		return types.StoragePlacementSpec{}
  1377  	}
  1378  	ds := object.NewDatastore(c.Client, o.Datastore[0])
  1379  	log.Printf("[DEBUG] findDatastore: datastore: %#v\n", ds)
  1380  
  1381  	devices, err := vm.Device(context.TODO())
  1382  	if err != nil {
  1383  		return types.StoragePlacementSpec{}
  1384  	}
  1385  
  1386  	var key int32
  1387  	for _, d := range devices.SelectByType((*types.VirtualDisk)(nil)) {
  1388  		key = int32(d.GetVirtualDevice().Key)
  1389  		log.Printf("[DEBUG] findDatastore: virtual devices: %#v\n", d.GetVirtualDevice())
  1390  	}
  1391  
  1392  	sps := types.StoragePlacementSpec{
  1393  		Type: "clone",
  1394  		Vm:   &vmr,
  1395  		PodSelectionSpec: types.StorageDrsPodSelectionSpec{
  1396  			StoragePod: &spr,
  1397  		},
  1398  		CloneSpec: &types.VirtualMachineCloneSpec{
  1399  			Location: types.VirtualMachineRelocateSpec{
  1400  				Disk: []types.VirtualMachineRelocateSpecDiskLocator{
  1401  					{
  1402  						Datastore:       ds.Reference(),
  1403  						DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{},
  1404  						DiskId:          key,
  1405  					},
  1406  				},
  1407  				Pool: &rpr,
  1408  			},
  1409  			PowerOn:  false,
  1410  			Template: false,
  1411  		},
  1412  		CloneName: "dummy",
  1413  		Folder:    &vmfr,
  1414  	}
  1415  	return sps
  1416  }
  1417  
  1418  // findDatastore finds Datastore object.
  1419  func findDatastore(c *govmomi.Client, sps types.StoragePlacementSpec) (*object.Datastore, error) {
  1420  	var datastore *object.Datastore
  1421  	log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)
  1422  
  1423  	srm := object.NewStorageResourceManager(c.Client)
  1424  	rds, err := srm.RecommendDatastores(context.TODO(), sps)
  1425  	if err != nil {
  1426  		return nil, err
  1427  	}
  1428  	log.Printf("[DEBUG] findDatastore: recommendDatastores: %#v\n", rds)
  1429  
  1430  	spa := rds.Recommendations[0].Action[0].(*types.StoragePlacementAction)
  1431  	datastore = object.NewDatastore(c.Client, spa.Destination)
  1432  	log.Printf("[DEBUG] findDatastore: datastore: %#v", datastore)
  1433  
  1434  	return datastore, nil
  1435  }
  1436  
  1437  // createCdroms is a helper function to attach virtual cdrom devices (and their attached disk images) to a virtual IDE controller.
  1438  func createCdroms(vm *object.VirtualMachine, cdroms []cdrom) error {
  1439  	log.Printf("[DEBUG] add cdroms: %v", cdroms)
  1440  	for _, cd := range cdroms {
  1441  		log.Printf("[DEBUG] add cdrom (datastore): %v", cd.datastore)
  1442  		log.Printf("[DEBUG] add cdrom (cd path): %v", cd.path)
  1443  		err := addCdrom(vm, cd.datastore, cd.path)
  1444  		if err != nil {
  1445  			return err
  1446  		}
  1447  	}
  1448  
  1449  	return nil
  1450  }
  1451  
  1452  func (vm *virtualMachine) setupVirtualMachine(c *govmomi.Client) error {
  1453  	dc, err := getDatacenter(c, vm.datacenter)
  1454  
  1455  	if err != nil {
  1456  		return err
  1457  	}
  1458  	finder := find.NewFinder(c.Client, true)
  1459  	finder = finder.SetDatacenter(dc)
  1460  
  1461  	var template *object.VirtualMachine
  1462  	var template_mo mo.VirtualMachine
  1463  	var vm_mo mo.VirtualMachine
  1464  	if vm.template != "" {
  1465  		template, err = finder.VirtualMachine(context.TODO(), vm.template)
  1466  		if err != nil {
  1467  			return err
  1468  		}
  1469  		log.Printf("[DEBUG] template: %#v", template)
  1470  
  1471  		err = template.Properties(context.TODO(), template.Reference(), []string{"parent", "config.template", "config.guestId", "resourcePool", "snapshot", "guest.toolsVersionStatus2", "config.guestFullName"}, &template_mo)
  1472  		if err != nil {
  1473  			return err
  1474  		}
  1475  	}
  1476  
  1477  	var resourcePool *object.ResourcePool
  1478  	if vm.resourcePool == "" {
  1479  		if vm.cluster == "" {
  1480  			resourcePool, err = finder.DefaultResourcePool(context.TODO())
  1481  			if err != nil {
  1482  				return err
  1483  			}
  1484  		} else {
  1485  			resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
  1486  			if err != nil {
  1487  				return err
  1488  			}
  1489  		}
  1490  	} else {
  1491  		resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
  1492  		if err != nil {
  1493  			return err
  1494  		}
  1495  	}
  1496  	log.Printf("[DEBUG] resource pool: %#v", resourcePool)
  1497  
  1498  	dcFolders, err := dc.Folders(context.TODO())
  1499  	if err != nil {
  1500  		return err
  1501  	}
  1502  	log.Printf("[DEBUG] folder: %#v", vm.folder)
  1503  
  1504  	folder := dcFolders.VmFolder
  1505  	if len(vm.folder) > 0 {
  1506  		si := object.NewSearchIndex(c.Client)
  1507  		folderRef, err := si.FindByInventoryPath(
  1508  			context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder))
  1509  		if err != nil {
  1510  			return fmt.Errorf("Error reading folder %s: %s", vm.folder, err)
  1511  		} else if folderRef == nil {
  1512  			return fmt.Errorf("Cannot find folder %s", vm.folder)
  1513  		} else {
  1514  			folder = folderRef.(*object.Folder)
  1515  		}
  1516  	}
  1517  
  1518  	// make config spec
  1519  	configSpec := types.VirtualMachineConfigSpec{
  1520  		Name:              vm.name,
  1521  		NumCPUs:           vm.vcpu,
  1522  		NumCoresPerSocket: 1,
  1523  		MemoryMB:          vm.memoryMb,
  1524  		MemoryAllocation: &types.ResourceAllocationInfo{
  1525  			Reservation: vm.memoryAllocation.reservation,
  1526  		},
  1527  	}
  1528  	if vm.template == "" {
  1529  		configSpec.GuestId = "otherLinux64Guest"
  1530  	}
  1531  	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
  1532  
  1533  	// make ExtraConfig
  1534  	log.Printf("[DEBUG] virtual machine Extra Config spec start")
  1535  	if len(vm.customConfigurations) > 0 {
  1536  		var ov []types.BaseOptionValue
  1537  		for k, v := range vm.customConfigurations {
  1538  			key := k
  1539  			value := v
  1540  			o := types.OptionValue{
  1541  				Key:   key,
  1542  				Value: &value,
  1543  			}
  1544  			log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k, v)
  1545  			ov = append(ov, &o)
  1546  		}
  1547  		configSpec.ExtraConfig = ov
  1548  		log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
  1549  	}
  1550  
  1551  	var datastore *object.Datastore
  1552  	if vm.datastore == "" {
  1553  		datastore, err = finder.DefaultDatastore(context.TODO())
  1554  		if err != nil {
  1555  			return err
  1556  		}
  1557  	} else {
  1558  		datastore, err = finder.Datastore(context.TODO(), vm.datastore)
  1559  		if err != nil {
  1560  			// TODO: datastore cluster support in govmomi finder function
  1561  			d, err := getDatastoreObject(c, dcFolders, vm.datastore)
  1562  			if err != nil {
  1563  				return err
  1564  			}
  1565  
  1566  			if d.Type == "StoragePod" {
  1567  				sp := object.StoragePod{
  1568  					Folder: object.NewFolder(c.Client, d),
  1569  				}
  1570  
  1571  				var sps types.StoragePlacementSpec
  1572  				if vm.template != "" {
  1573  					sps = buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp)
  1574  				} else {
  1575  					sps = buildStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec)
  1576  				}
  1577  
  1578  				datastore, err = findDatastore(c, sps)
  1579  				if err != nil {
  1580  					return err
  1581  				}
  1582  			} else {
  1583  				datastore = object.NewDatastore(c.Client, d)
  1584  			}
  1585  		}
  1586  	}
  1587  
  1588  	log.Printf("[DEBUG] datastore: %#v", datastore)
  1589  
  1590  	// network
  1591  	networkDevices := []types.BaseVirtualDeviceConfigSpec{}
  1592  	networkConfigs := []types.CustomizationAdapterMapping{}
  1593  	for _, network := range vm.networkInterfaces {
  1594  		// network device
  1595  		var networkDeviceType string
  1596  		if vm.template == "" {
  1597  			networkDeviceType = "e1000"
  1598  		} else {
  1599  			networkDeviceType = "vmxnet3"
  1600  		}
  1601  		nd, err := buildNetworkDevice(finder, network.label, networkDeviceType, network.macAddress)
  1602  		if err != nil {
  1603  			return err
  1604  		}
  1605  		log.Printf("[DEBUG] network device: %+v", nd.Device)
  1606  		networkDevices = append(networkDevices, nd)
  1607  
  1608  		if vm.template != "" {
  1609  			var ipSetting types.CustomizationIPSettings
  1610  			if network.ipv4Address == "" {
  1611  				ipSetting.Ip = &types.CustomizationDhcpIpGenerator{}
  1612  			} else {
  1613  				if network.ipv4PrefixLength == 0 {
  1614  					return fmt.Errorf("Error: ipv4_prefix_length argument is empty.")
  1615  				}
  1616  				m := net.CIDRMask(network.ipv4PrefixLength, 32)
  1617  				sm := net.IPv4(m[0], m[1], m[2], m[3])
  1618  				subnetMask := sm.String()
  1619  				log.Printf("[DEBUG] ipv4 gateway: %v\n", network.ipv4Gateway)
  1620  				log.Printf("[DEBUG] ipv4 address: %v\n", network.ipv4Address)
  1621  				log.Printf("[DEBUG] ipv4 prefix length: %v\n", network.ipv4PrefixLength)
  1622  				log.Printf("[DEBUG] ipv4 subnet mask: %v\n", subnetMask)
  1623  				ipSetting.Gateway = []string{
  1624  					network.ipv4Gateway,
  1625  				}
  1626  				ipSetting.Ip = &types.CustomizationFixedIp{
  1627  					IpAddress: network.ipv4Address,
  1628  				}
  1629  				ipSetting.SubnetMask = subnetMask
  1630  			}
  1631  
  1632  			ipv6Spec := &types.CustomizationIPSettingsIpV6AddressSpec{}
  1633  			if network.ipv6Address == "" {
  1634  				ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
  1635  					&types.CustomizationDhcpIpV6Generator{},
  1636  				}
  1637  			} else {
  1638  				log.Printf("[DEBUG] ipv6 gateway: %v\n", network.ipv6Gateway)
  1639  				log.Printf("[DEBUG] ipv6 address: %v\n", network.ipv6Address)
  1640  				log.Printf("[DEBUG] ipv6 prefix length: %v\n", network.ipv6PrefixLength)
  1641  
  1642  				ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
  1643  					&types.CustomizationFixedIpV6{
  1644  						IpAddress:  network.ipv6Address,
  1645  						SubnetMask: int32(network.ipv6PrefixLength),
  1646  					},
  1647  				}
  1648  				ipv6Spec.Gateway = []string{network.ipv6Gateway}
  1649  			}
  1650  			ipSetting.IpV6Spec = ipv6Spec
  1651  
  1652  			// network config
  1653  			config := types.CustomizationAdapterMapping{
  1654  				Adapter: ipSetting,
  1655  			}
  1656  			networkConfigs = append(networkConfigs, config)
  1657  		}
  1658  	}
  1659  	log.Printf("[DEBUG] network devices: %#v", networkDevices)
  1660  	log.Printf("[DEBUG] network configs: %#v", networkConfigs)
  1661  
  1662  	var task *object.Task
  1663  	if vm.template == "" {
  1664  		var mds mo.Datastore
  1665  		if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil {
  1666  			return err
  1667  		}
  1668  		log.Printf("[DEBUG] datastore: %#v", mds.Name)
  1669  		scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi")
  1670  		if err != nil {
  1671  			log.Printf("[ERROR] %s", err)
  1672  		}
  1673  
  1674  		configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{
  1675  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
  1676  			Device:    scsi,
  1677  		})
  1678  
  1679  		configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)}
  1680  
  1681  		task, err = folder.CreateVM(context.TODO(), configSpec, resourcePool, nil)
  1682  		if err != nil {
  1683  			log.Printf("[ERROR] %s", err)
  1684  		}
  1685  
  1686  		err = task.Wait(context.TODO())
  1687  		if err != nil {
  1688  			log.Printf("[ERROR] %s", err)
  1689  		}
  1690  
  1691  	} else {
  1692  
  1693  		relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template, vm.linkedClone, vm.hardDisks[0].initType)
  1694  		if err != nil {
  1695  			return err
  1696  		}
  1697  
  1698  		log.Printf("[DEBUG] relocate spec: %v", relocateSpec)
  1699  
  1700  		// make vm clone spec
  1701  		cloneSpec := types.VirtualMachineCloneSpec{
  1702  			Location: relocateSpec,
  1703  			Template: false,
  1704  			Config:   &configSpec,
  1705  			PowerOn:  false,
  1706  		}
  1707  		if vm.linkedClone {
  1708  			if template_mo.Snapshot == nil {
  1709  				return fmt.Errorf("`linkedClone=true`, but image VM has no snapshots")
  1710  			}
  1711  			cloneSpec.Snapshot = template_mo.Snapshot.CurrentSnapshot
  1712  		}
  1713  		log.Printf("[DEBUG] clone spec: %v", cloneSpec)
  1714  
  1715  		task, err = template.Clone(context.TODO(), folder, vm.name, cloneSpec)
  1716  		if err != nil {
  1717  			return err
  1718  		}
  1719  	}
  1720  
  1721  	err = task.Wait(context.TODO())
  1722  	if err != nil {
  1723  		log.Printf("[ERROR] %s", err)
  1724  	}
  1725  
  1726  	newVM, err := finder.VirtualMachine(context.TODO(), vm.Path())
  1727  	if err != nil {
  1728  		return err
  1729  	}
  1730  	log.Printf("[DEBUG] new vm: %v", newVM)
  1731  
  1732  	devices, err := newVM.Device(context.TODO())
  1733  	if err != nil {
  1734  		log.Printf("[DEBUG] Template devices can't be found")
  1735  		return err
  1736  	}
  1737  
	// NOTE(review): this is the tail of a larger function whose head is outside
	// this chunk; newVM, devices, networkDevices, datastore, vm, vm_mo,
	// template_mo and networkConfigs are all established earlier in it.
  1738  	for _, dvc := range devices {
  1739  		// Issue 3559/3560: Delete all ethernet devices to add the correct ones later
  1740  		if devices.Type(dvc) == "ethernet" {
  1741  			err := newVM.RemoveDevice(context.TODO(), false, dvc)
  1742  			if err != nil {
  1743  				return err
  1744  			}
  1745  		}
  1746  	}
  1747  	// Add Network devices
	// Re-attach the NICs built from the resource configuration so the final
	// device set matches the plan rather than whatever the template carried.
  1748  	for _, dvc := range networkDevices {
  1749  		err := newVM.AddDevice(
  1750  			context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device)
  1751  		if err != nil {
  1752  			return err
  1753  		}
  1754  	}
  1755  
  1756  	// Create the cdroms if needed.
  1757  	if err := createCdroms(newVM, vm.cdroms); err != nil {
  1758  		return err
  1759  	}
  1760  
	// Refresh the summary/config properties so vm_mo describes the newly
	// created VM (its file layout is needed below to place extra disks).
	// NOTE(review): the error return of Properties is discarded here — a failed
	// fetch would leave vm_mo stale and only surface later as a confusing
	// nil/empty-field failure; confirm whether this should be checked.
  1761  	newVM.Properties(context.TODO(), newVM.Reference(), []string{"summary", "config"}, &vm_mo)
	// When cloning from a template, hard disk 0 is the template's own system
	// disk and already exists — skip it and only add the additional disks.
  1762  	firstDisk := 0
  1763  	if vm.template != "" {
  1764  		firstDisk++
  1765  	}
  1766  	for i := firstDisk; i < len(vm.hardDisks); i++ {
  1767  		log.Printf("[DEBUG] disk index: %v", i)
  1768  
		// Resolve where the VMDK lives: an explicit vmdk path wins; otherwise
		// a named disk is created inside the VM's working directory.
  1769  		var diskPath string
  1770  		switch {
  1771  		case vm.hardDisks[i].vmdkPath != "":
  1772  			diskPath = vm.hardDisks[i].vmdkPath
  1773  		case vm.hardDisks[i].name != "":
			// SnapshotDirectory is expected to look like "[datastore] vm-dir/"
			// (standard vSphere datastore-path form — hence exactly one space),
			// so the second token is the VM's working path on the datastore.
  1774  			snapshotFullDir := vm_mo.Config.Files.SnapshotDirectory
  1775  			split := strings.Split(snapshotFullDir, " ")
  1776  			if len(split) != 2 {
  1777  				return fmt.Errorf("[ERROR] setupVirtualMachine - failed to split snapshot directory: %v", snapshotFullDir)
  1778  			}
  1779  			vmWorkingPath := split[1]
  1780  			diskPath = vmWorkingPath + vm.hardDisks[i].name
  1781  		default:
  1782  			return fmt.Errorf("[ERROR] setupVirtualMachine - Neither vmdk path nor vmdk name was given: %#v", vm.hardDisks[i])
  1783  		}
  1784  
  1785  		err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, diskPath, vm.hardDisks[i].controller)
  1786  		if err != nil {
  1787  			return err
  1788  		}
  1789  	}
  1790  
	// Guest customization only applies to clones; skip it for VMs built from
	// scratch (no template) or when the user explicitly opted out.
  1791  	if vm.skipCustomization || vm.template == "" {
  1792  		log.Printf("[DEBUG] VM customization skipped")
  1793  	} else {
  1794  		var identity_options types.BaseCustomizationIdentitySettings
		// Windows guests (GuestId prefixed "win") need a Sysprep identity;
		// every other guest gets LinuxPrep.
  1795  		if strings.HasPrefix(template_mo.Config.GuestId, "win") {
  1796  			var timeZone int
			// Sysprep wants a numeric Microsoft time-zone index, not an IANA
			// name; "085" is presumably the index for GMT/UTC — TODO confirm
			// against the Microsoft time-zone index table.
  1797  			if vm.timeZone == "Etc/UTC" {
  1798  				vm.timeZone = "085"
  1799  			}
  1800  			timeZone, err := strconv.Atoi(vm.timeZone)
  1801  			if err != nil {
  1802  				return fmt.Errorf("Error converting TimeZone: %s", err)
  1803  			}
  1804  
  1805  			guiUnattended := types.CustomizationGuiUnattended{
  1806  				AutoLogon:      false,
  1807  				AutoLogonCount: 1,
  1808  				TimeZone:       int32(timeZone),
  1809  			}
  1810  
  1811  			customIdentification := types.CustomizationIdentification{}
  1812  
			// Only the first dot-separated label of the VM name is used as the
			// computer name (Windows computer names cannot contain dots).
  1813  			userData := types.CustomizationUserData{
  1814  				ComputerName: &types.CustomizationFixedName{
  1815  					Name: strings.Split(vm.name, ".")[0],
  1816  				},
  1817  				ProductId: vm.windowsOptionalConfig.productKey,
  1818  				FullName:  "terraform",
  1819  				OrgName:   "terraform",
  1820  			}
  1821  
			// Domain join is configured only when user, password AND domain are
			// all supplied; a partial set is silently ignored.
  1822  			if vm.windowsOptionalConfig.domainUserPassword != "" && vm.windowsOptionalConfig.domainUser != "" && vm.windowsOptionalConfig.domain != "" {
  1823  				customIdentification.DomainAdminPassword = &types.CustomizationPassword{
  1824  					PlainText: true,
  1825  					Value:     vm.windowsOptionalConfig.domainUserPassword,
  1826  				}
  1827  				customIdentification.DomainAdmin = vm.windowsOptionalConfig.domainUser
  1828  				customIdentification.JoinDomain = vm.windowsOptionalConfig.domain
  1829  			}
  1830  
  1831  			if vm.windowsOptionalConfig.adminPassword != "" {
  1832  				guiUnattended.Password = &types.CustomizationPassword{
  1833  					PlainText: true,
  1834  					Value:     vm.windowsOptionalConfig.adminPassword,
  1835  				}
  1836  			}
  1837  
  1838  			identity_options = &types.CustomizationSysprep{
  1839  				GuiUnattended:  guiUnattended,
  1840  				Identification: customIdentification,
  1841  				UserData:       userData,
  1842  			}
  1843  		} else {
  1844  			identity_options = &types.CustomizationLinuxPrep{
  1845  				HostName: &types.CustomizationFixedName{
  1846  					Name: strings.Split(vm.name, ".")[0],
  1847  				},
  1848  				Domain:     vm.domain,
  1849  				TimeZone:   vm.timeZone,
  1850  				HwClockUTC: types.NewBool(true),
  1851  			}
  1852  		}
  1853  
  1854  		// create CustomizationSpec
  1855  		customSpec := types.CustomizationSpec{
  1856  			Identity: identity_options,
  1857  			GlobalIPSettings: types.CustomizationGlobalIPSettings{
  1858  				DnsSuffixList: vm.dnsSuffixes,
  1859  				DnsServerList: vm.dnsServers,
  1860  			},
  1861  			NicSettingMap: networkConfigs,
  1862  		}
  1863  		log.Printf("[DEBUG] custom spec: %v", customSpec)
  1864  
  1865  		log.Printf("[DEBUG] VM customization starting")
		// Customization runs as a vSphere task; block on its result so a
		// customization failure is reported here instead of after apply.
  1866  		taskb, err := newVM.Customize(context.TODO(), customSpec)
  1867  		if err != nil {
  1868  			return err
  1869  		}
  1870  		_, err = taskb.WaitForResult(context.TODO(), nil)
  1871  		if err != nil {
  1872  			return err
  1873  		}
  1874  		log.Printf("[DEBUG] VM customization finished")
  1875  	}
  1876  
	// Power on clones, and scratch-built VMs that were given a bootable vmdk.
	// NOTE(review): PowerOn returns a task and an error, both discarded here —
	// the function reports success even if power-on fails and never waits for
	// the power-on task to finish; confirm whether that is intentional.
  1877  	if vm.hasBootableVmdk || vm.template != "" {
  1878  		newVM.PowerOn(context.TODO())
  1879  	}
  1880  	return nil
  1881  }