github.com/keshavdv/terraform@v0.7.0-rc2.0.20160711232630-d69256dcb425/builtin/providers/vsphere/resource_vsphere_virtual_machine.go

     1  package vsphere
     2  
     3  import (
     4  	"fmt"
     5  	"log"
     6  	"net"
     7  	"strconv"
     8  	"strings"
     9  
    10  	"github.com/hashicorp/terraform/helper/schema"
    11  	"github.com/vmware/govmomi"
    12  	"github.com/vmware/govmomi/find"
    13  	"github.com/vmware/govmomi/object"
    14  	"github.com/vmware/govmomi/property"
    15  	"github.com/vmware/govmomi/vim25/mo"
    16  	"github.com/vmware/govmomi/vim25/types"
    17  	"golang.org/x/net/context"
    18  )
    19  
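        // DefaultDNSSuffixes is the DNS suffix list used when the dns_suffixes argument is not set.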
    20  var DefaultDNSSuffixes = []string{
    21  	"vsphere.local",
    22  }
    23  
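        // DefaultDNSServers is the DNS server list used when the dns_servers argument is not set.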
    24  var DefaultDNSServers = []string{
    25  	"8.8.8.8",
    26  	"8.8.4.4",
    27  }
    28  
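        // networkInterface mirrors a network_interface block from the resource configuration.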
    29  type networkInterface struct {
    30  	deviceName       string
    31  	label            string
    32  	ipv4Address      string
    33  	ipv4PrefixLength int
    34  	ipv4Gateway      string
    35  	ipv6Address      string
    36  	ipv6PrefixLength int
    37  	ipv6Gateway      string
    38  	adapterType      string // TODO: Make "adapter_type" argument
    39  	macAddress       string
    40  }
    41  
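        // hardDisk describes a virtual disk to create or attach (size, iops, init type, vmdk path, controller and bootable flag).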
    42  type hardDisk struct {
    43  	name       string
    44  	size       int64
    45  	iops       int64
    46  	initType   string
    47  	vmdkPath   string
    48  	controller string
    49  	bootable   bool
    50  }
    51  
    52  // windowsOptConfig describes the additional options vSphere can use when cloning Windows machines.
    53  type windowsOptConfig struct {
    54  	productKey         string
    55  	adminPassword      string
    56  	domainUser         string
    57  	domain             string
    58  	domainUserPassword string
    59  }
    60  
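        // cdrom describes an ISO image on a datastore to attach as a virtual CD-ROM drive.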
    61  type cdrom struct {
    62  	datastore string
    63  	path      string
    64  }
    65  
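        // memoryAllocation holds the memory resource allocation settings for the virtual machine.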
    66  type memoryAllocation struct {
    67  	reservation int64
    68  }
    69  
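        // virtualMachine gathers all arguments needed to create or clone a virtual machine.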
    70  type virtualMachine struct {
    71  	name                  string
    72  	folder                string
    73  	datacenter            string
    74  	cluster               string
    75  	resourcePool          string
    76  	datastore             string
    77  	vcpu                  int32
    78  	memoryMb              int64
    79  	memoryAllocation      memoryAllocation
    80  	template              string
    81  	networkInterfaces     []networkInterface
    82  	hardDisks             []hardDisk
    83  	cdroms                []cdrom
    84  	domain                string
    85  	timeZone              string
    86  	dnsSuffixes           []string
    87  	dnsServers            []string
    88  	hasBootableVmdk       bool
    89  	linkedClone           bool
    90  	skipCustomization     bool
    91  	enableDiskUUID        bool
    92  	windowsOptionalConfig windowsOptConfig
    93  	customConfigurations  map[string](types.AnyType)
    94  }
    95  
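        // Path returns the inventory path of the virtual machine, i.e. the folder (if any) followed by the name.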
    96  func (v virtualMachine) Path() string {
    97  	return vmPath(v.folder, v.name)
    98  }
    99  
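        // vmPath joins an optional folder and a virtual machine name into an inventory path.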
   100  func vmPath(folder string, name string) string {
   101  	var path string
   102  	if len(folder) > 0 {
   103  		path += folder + "/"
   104  	}
   105  	return path + name
   106  }
   107  
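        // resourceVSphereVirtualMachine returns the schema and CRUD functions for the virtual machine resource.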
   108  func resourceVSphereVirtualMachine() *schema.Resource {
   109  	return &schema.Resource{
   110  		Create: resourceVSphereVirtualMachineCreate,
   111  		Read:   resourceVSphereVirtualMachineRead,
   112  		Update: resourceVSphereVirtualMachineUpdate,
   113  		Delete: resourceVSphereVirtualMachineDelete,
   114  
   115  		SchemaVersion: 1,
   116  		MigrateState:  resourceVSphereVirtualMachineMigrateState,
   117  
   118  		Schema: map[string]*schema.Schema{
   119  			"name": &schema.Schema{
   120  				Type:     schema.TypeString,
   121  				Required: true,
   122  				ForceNew: true,
   123  			},
   124  
   125  			"folder": &schema.Schema{
   126  				Type:     schema.TypeString,
   127  				Optional: true,
   128  				ForceNew: true,
   129  			},
   130  
   131  			"vcpu": &schema.Schema{
   132  				Type:     schema.TypeInt,
   133  				Required: true,
   134  			},
   135  
   136  			"memory": &schema.Schema{
   137  				Type:     schema.TypeInt,
   138  				Required: true,
   139  			},
   140  
   141  			"memory_reservation": &schema.Schema{
   142  				Type:     schema.TypeInt,
   143  				Optional: true,
   144  				Default:  0,
   145  				ForceNew: true,
   146  			},
   147  
   148  			"datacenter": &schema.Schema{
   149  				Type:     schema.TypeString,
   150  				Optional: true,
   151  				ForceNew: true,
   152  			},
   153  
   154  			"cluster": &schema.Schema{
   155  				Type:     schema.TypeString,
   156  				Optional: true,
   157  				ForceNew: true,
   158  			},
   159  
   160  			"resource_pool": &schema.Schema{
   161  				Type:     schema.TypeString,
   162  				Optional: true,
   163  				ForceNew: true,
   164  			},
   165  
   166  			"linked_clone": &schema.Schema{
   167  				Type:     schema.TypeBool,
   168  				Optional: true,
   169  				Default:  false,
   170  				ForceNew: true,
   171  			},
   172  			"gateway": &schema.Schema{
   173  				Type:       schema.TypeString,
   174  				Optional:   true,
   175  				ForceNew:   true,
   176  				Deprecated: "Please use network_interface.ipv4_gateway",
   177  			},
   178  
   179  			"domain": &schema.Schema{
   180  				Type:     schema.TypeString,
   181  				Optional: true,
   182  				ForceNew: true,
   183  				Default:  "vsphere.local",
   184  			},
   185  
   186  			"time_zone": &schema.Schema{
   187  				Type:     schema.TypeString,
   188  				Optional: true,
   189  				ForceNew: true,
   190  				Default:  "Etc/UTC",
   191  			},
   192  
   193  			"dns_suffixes": &schema.Schema{
   194  				Type:     schema.TypeList,
   195  				Optional: true,
   196  				Elem:     &schema.Schema{Type: schema.TypeString},
   197  				ForceNew: true,
   198  			},
   199  
   200  			"dns_servers": &schema.Schema{
   201  				Type:     schema.TypeList,
   202  				Optional: true,
   203  				Elem:     &schema.Schema{Type: schema.TypeString},
   204  				ForceNew: true,
   205  			},
   206  
   207  			"skip_customization": &schema.Schema{
   208  				Type:     schema.TypeBool,
   209  				Optional: true,
   210  				ForceNew: true,
   211  				Default:  false,
   212  			},
   213  
   214  			"enable_disk_uuid": &schema.Schema{
   215  				Type:     schema.TypeBool,
   216  				Optional: true,
   217  				ForceNew: true,
   218  				Default:  false,
   219  			},
   220  
   221  			"uuid": &schema.Schema{
   222  				Type:     schema.TypeString,
   223  				Computed: true,
   224  			},
   225  
   226  			"custom_configuration_parameters": &schema.Schema{
   227  				Type:     schema.TypeMap,
   228  				Optional: true,
   229  				ForceNew: true,
   230  			},
   231  
   232  			"windows_opt_config": &schema.Schema{
   233  				Type:     schema.TypeList,
   234  				Optional: true,
   235  				ForceNew: true,
   236  				Elem: &schema.Resource{
   237  					Schema: map[string]*schema.Schema{
   238  						"product_key": &schema.Schema{
   239  							Type:     schema.TypeString,
   240  							Optional: true,
   241  							ForceNew: true,
   242  						},
   243  
   244  						"admin_password": &schema.Schema{
   245  							Type:     schema.TypeString,
   246  							Optional: true,
   247  							ForceNew: true,
   248  						},
   249  
   250  						"domain_user": &schema.Schema{
   251  							Type:     schema.TypeString,
   252  							Optional: true,
   253  							ForceNew: true,
   254  						},
   255  
   256  						"domain": &schema.Schema{
   257  							Type:     schema.TypeString,
   258  							Optional: true,
   259  							ForceNew: true,
   260  						},
   261  
   262  						"domain_user_password": &schema.Schema{
   263  							Type:     schema.TypeString,
   264  							Optional: true,
   265  							ForceNew: true,
   266  						},
   267  					},
   268  				},
   269  			},
   270  
   271  			"network_interface": &schema.Schema{
   272  				Type:     schema.TypeList,
   273  				Required: true,
   274  				ForceNew: true,
   275  				Elem: &schema.Resource{
   276  					Schema: map[string]*schema.Schema{
   277  						"label": &schema.Schema{
   278  							Type:     schema.TypeString,
   279  							Required: true,
   280  							ForceNew: true,
   281  						},
   282  
   283  						"ip_address": &schema.Schema{
   284  							Type:       schema.TypeString,
   285  							Optional:   true,
   286  							Computed:   true,
   287  							Deprecated: "Please use ipv4_address",
   288  						},
   289  
   290  						"subnet_mask": &schema.Schema{
   291  							Type:       schema.TypeString,
   292  							Optional:   true,
   293  							Computed:   true,
   294  							Deprecated: "Please use ipv4_prefix_length",
   295  						},
   296  
   297  						"ipv4_address": &schema.Schema{
   298  							Type:     schema.TypeString,
   299  							Optional: true,
   300  							Computed: true,
   301  						},
   302  
   303  						"ipv4_prefix_length": &schema.Schema{
   304  							Type:     schema.TypeInt,
   305  							Optional: true,
   306  							Computed: true,
   307  						},
   308  
   309  						"ipv4_gateway": &schema.Schema{
   310  							Type:     schema.TypeString,
   311  							Optional: true,
   312  							Computed: true,
   313  						},
   314  
   315  						"ipv6_address": &schema.Schema{
   316  							Type:     schema.TypeString,
   317  							Optional: true,
   318  							Computed: true,
   319  						},
   320  
   321  						"ipv6_prefix_length": &schema.Schema{
   322  							Type:     schema.TypeInt,
   323  							Optional: true,
   324  							Computed: true,
   325  						},
   326  
   327  						"ipv6_gateway": &schema.Schema{
   328  							Type:     schema.TypeString,
   329  							Optional: true,
   330  							Computed: true,
   331  						},
   332  
   333  						"adapter_type": &schema.Schema{
   334  							Type:     schema.TypeString,
   335  							Optional: true,
   336  							ForceNew: true,
   337  						},
   338  
   339  						"mac_address": &schema.Schema{
   340  							Type:     schema.TypeString,
   341  							Optional: true,
   342  							Computed: true,
   343  						},
   344  					},
   345  				},
   346  			},
   347  
   348  			"disk": &schema.Schema{
   349  				Type:     schema.TypeSet,
   350  				Required: true,
   351  				Elem: &schema.Resource{
   352  					Schema: map[string]*schema.Schema{
   353  						"uuid": &schema.Schema{
   354  							Type:     schema.TypeString,
   355  							Computed: true,
   356  						},
   357  
   358  						"key": &schema.Schema{
   359  							Type:     schema.TypeInt,
   360  							Computed: true,
   361  						},
   362  
   363  						"template": &schema.Schema{
   364  							Type:     schema.TypeString,
   365  							Optional: true,
   366  						},
   367  
   368  						"type": &schema.Schema{
   369  							Type:     schema.TypeString,
   370  							Optional: true,
   371  							Default:  "eager_zeroed",
   372  							ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
   373  								value := v.(string)
   374  								if value != "thin" && value != "eager_zeroed" {
   375  									errors = append(errors, fmt.Errorf(
   376  										"only 'thin' and 'eager_zeroed' are supported values for 'type'"))
   377  								}
   378  								return
   379  							},
   380  						},
   381  
   382  						"datastore": &schema.Schema{
   383  							Type:     schema.TypeString,
   384  							Optional: true,
   385  						},
   386  
   387  						"size": &schema.Schema{
   388  							Type:     schema.TypeInt,
   389  							Optional: true,
   390  						},
   391  
   392  						"name": &schema.Schema{
   393  							Type:     schema.TypeString,
   394  							Optional: true,
   395  						},
   396  
   397  						"iops": &schema.Schema{
   398  							Type:     schema.TypeInt,
   399  							Optional: true,
   400  						},
   401  
   402  						"vmdk": &schema.Schema{
   403  							// TODO: Add ValidateFunc to confirm path exists
   404  							Type:     schema.TypeString,
   405  							Optional: true,
   406  						},
   407  
   408  						"bootable": &schema.Schema{
   409  							Type:     schema.TypeBool,
   410  							Optional: true,
   411  						},
   412  
   413  						"keep_on_remove": &schema.Schema{
   414  							Type:     schema.TypeBool,
   415  							Optional: true,
   416  						},
   417  
   418  						"controller_type": &schema.Schema{
   419  							Type:     schema.TypeString,
   420  							Optional: true,
   421  							Default:  "scsi",
   422  							ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
   423  								value := v.(string)
   424  								if value != "scsi" && value != "ide" {
   425  									errors = append(errors, fmt.Errorf(
   426  										"only 'scsi' and 'ide' are supported values for 'controller_type'"))
   427  								}
   428  								return
   429  							},
   430  						},
   431  					},
   432  				},
   433  			},
   434  
   435  			"cdrom": &schema.Schema{
   436  				Type:     schema.TypeList,
   437  				Optional: true,
   438  				ForceNew: true,
   439  				Elem: &schema.Resource{
   440  					Schema: map[string]*schema.Schema{
   441  						"datastore": &schema.Schema{
   442  							Type:     schema.TypeString,
   443  							Required: true,
   444  							ForceNew: true,
   445  						},
   446  
   447  						"path": &schema.Schema{
   448  							Type:     schema.TypeString,
   449  							Required: true,
   450  							ForceNew: true,
   451  						},
   452  					},
   453  				},
   454  			},
   455  		},
   456  	}
   457  }
   458  
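        // resourceVSphereVirtualMachineUpdate applies vcpu, memory and disk changes to an existing virtual machine,
        // powering it off and back on when the change cannot be applied while it is running.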
   459  func resourceVSphereVirtualMachineUpdate(d *schema.ResourceData, meta interface{}) error {
   460  	// flag if changes have to be applied
   461  	hasChanges := false
   462  	// flag if the changes require the VM to be powered off first
   463  	rebootRequired := false
   464  
   465  	// make config spec
   466  	configSpec := types.VirtualMachineConfigSpec{}
   467  
   468  	if d.HasChange("vcpu") {
   469  		configSpec.NumCPUs = int32(d.Get("vcpu").(int))
   470  		hasChanges = true
   471  		rebootRequired = true
   472  	}
   473  
   474  	if d.HasChange("memory") {
   475  		configSpec.MemoryMB = int64(d.Get("memory").(int))
   476  		hasChanges = true
   477  		rebootRequired = true
   478  	}
   479  
   480  	client := meta.(*govmomi.Client)
   481  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
   482  	if err != nil {
   483  		return err
   484  	}
   485  	finder := find.NewFinder(client.Client, true)
   486  	finder = finder.SetDatacenter(dc)
   487  
   488  	vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string)))
   489  	if err != nil {
   490  		return err
   491  	}
   492  
   493  	if d.HasChange("disk") {
   494  		hasChanges = true
   495  		oldDisks, newDisks := d.GetChange("disk")
   496  		oldDiskSet := oldDisks.(*schema.Set)
   497  		newDiskSet := newDisks.(*schema.Set)
   498  
   499  		addedDisks := newDiskSet.Difference(oldDiskSet)
   500  		removedDisks := oldDiskSet.Difference(newDiskSet)
   501  
   502  		// Removed disks
   503  		for _, diskRaw := range removedDisks.List() {
   504  			if disk, ok := diskRaw.(map[string]interface{}); ok {
   505  				devices, err := vm.Device(context.TODO())
   506  				if err != nil {
   507  					return fmt.Errorf("[ERROR] Update Remove Disk - Could not get virtual device list: %v", err)
   508  				}
   509  				virtualDisk := devices.FindByKey(int32(disk["key"].(int)))
   510  
   511  				keep := false
   512  				if v, ok := disk["keep_on_remove"].(bool); ok {
   513  					keep = v
   514  				}
   515  
   516  				err = vm.RemoveDevice(context.TODO(), keep, virtualDisk)
   517  				if err != nil {
   518  					return fmt.Errorf("[ERROR] Update Remove Disk - Error removing disk: %v", err)
   519  				}
   520  			}
   521  		}
   522  		// Added disks
   523  		for _, diskRaw := range addedDisks.List() {
   524  			if disk, ok := diskRaw.(map[string]interface{}); ok {
   525  
   526  				var datastore *object.Datastore
   527  				if disk["datastore"] == "" {
   528  					datastore, err = finder.DefaultDatastore(context.TODO())
   529  					if err != nil {
   530  						return fmt.Errorf("[ERROR] Update Add Disk - Error finding datastore: %v", err)
   531  					}
   532  				} else {
   533  					datastore, err = finder.Datastore(context.TODO(), disk["datastore"].(string))
   534  					if err != nil {
   535  						log.Printf("[ERROR] Couldn't find datastore %v.  %s", disk["datastore"].(string), err)
   536  						return err
   537  					}
   538  				}
   539  
   540  				var size int64
   541  				if disk["size"] == 0 {
   542  					size = 0
   543  				} else {
   544  					size = int64(disk["size"].(int))
   545  				}
   546  				iops := int64(disk["iops"].(int))
   547  				controller_type := disk["controller_type"].(string)
   548  
   549  				var mo mo.VirtualMachine
   550  				vm.Properties(context.TODO(), vm.Reference(), []string{"summary", "config"}, &mo)
   551  
   552  				var diskPath string
   553  				switch {
   554  				case disk["vmdk"] != "":
   555  					diskPath = disk["vmdk"].(string)
   556  				case disk["name"] != "":
   557  					snapshotFullDir := mo.Config.Files.SnapshotDirectory
   558  					split := strings.Split(snapshotFullDir, " ")
   559  					if len(split) != 2 {
   560  						return fmt.Errorf("[ERROR] resourceVSphereVirtualMachineUpdate - failed to split snapshot directory: %v", snapshotFullDir)
   561  					}
   562  					vmWorkingPath := split[1]
   563  					diskPath = vmWorkingPath + disk["name"].(string)
   564  				default:
   565  					return fmt.Errorf("[ERROR] resourceVSphereVirtualMachineUpdate - Neither vmdk path nor vmdk name was given")
   566  				}
   567  
   568  				log.Printf("[INFO] Attaching disk: %v", diskPath)
   569  				err = addHardDisk(vm, size, iops, "thin", datastore, diskPath, controller_type)
   570  				if err != nil {
   571  					log.Printf("[ERROR] Add Hard Disk Failed: %v", err)
   572  					return err
   573  				}
   574  			}
   575  			if err != nil {
   576  				return err
   577  			}
   578  		}
   579  	}
   580  
   581  	// do nothing if there are no changes
   582  	if !hasChanges {
   583  		return nil
   584  	}
   585  
   586  	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
   587  
   588  	if rebootRequired {
   589  		log.Printf("[INFO] Shutting down virtual machine: %s", d.Id())
   590  
   591  		task, err := vm.PowerOff(context.TODO())
   592  		if err != nil {
   593  			return err
   594  		}
   595  
   596  		err = task.Wait(context.TODO())
   597  		if err != nil {
   598  			return err
   599  		}
   600  	}
   601  
   602  	log.Printf("[INFO] Reconfiguring virtual machine: %s", d.Id())
   603  
   604  	task, err := vm.Reconfigure(context.TODO(), configSpec)
   605  	if err != nil {
   606  		log.Printf("[ERROR] %s", err)
        		return err
   607  	}
   608  
   609  	err = task.Wait(context.TODO())
   610  	if err != nil {
   611  		log.Printf("[ERROR] %s", err)
   612  	}
   613  
   614  	if rebootRequired {
   615  		task, err = vm.PowerOn(context.TODO())
   616  		if err != nil {
   617  			return err
   618  		}
   619  
   620  		err = task.Wait(context.TODO())
   621  		if err != nil {
   622  			log.Printf("[ERROR] %s", err)
   623  		}
   624  	}
   625  
   626  	ip, err := vm.WaitForIP(context.TODO())
   627  	if err != nil {
   628  		return err
   629  	}
   630  	log.Printf("[DEBUG] ip address: %v", ip)
   631  
   632  	return resourceVSphereVirtualMachineRead(d, meta)
   633  }
   634  
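        // resourceVSphereVirtualMachineCreate builds a virtualMachine from the resource arguments and creates it in vSphere.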
   635  func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error {
   636  	client := meta.(*govmomi.Client)
   637  
   638  	vm := virtualMachine{
   639  		name:     d.Get("name").(string),
   640  		vcpu:     int32(d.Get("vcpu").(int)),
   641  		memoryMb: int64(d.Get("memory").(int)),
   642  		memoryAllocation: memoryAllocation{
   643  			reservation: int64(d.Get("memory_reservation").(int)),
   644  		},
   645  	}
   646  
   647  	if v, ok := d.GetOk("folder"); ok {
   648  		vm.folder = v.(string)
   649  	}
   650  
   651  	if v, ok := d.GetOk("datacenter"); ok {
   652  		vm.datacenter = v.(string)
   653  	}
   654  
   655  	if v, ok := d.GetOk("cluster"); ok {
   656  		vm.cluster = v.(string)
   657  	}
   658  
   659  	if v, ok := d.GetOk("resource_pool"); ok {
   660  		vm.resourcePool = v.(string)
   661  	}
   662  
   663  	if v, ok := d.GetOk("domain"); ok {
   664  		vm.domain = v.(string)
   665  	}
   666  
   667  	if v, ok := d.GetOk("time_zone"); ok {
   668  		vm.timeZone = v.(string)
   669  	}
   670  
   671  	if v, ok := d.GetOk("linked_clone"); ok {
   672  		vm.linkedClone = v.(bool)
   673  	}
   674  
   675  	if v, ok := d.GetOk("skip_customization"); ok {
   676  		vm.skipCustomization = v.(bool)
   677  	}
   678  
   679  	if v, ok := d.GetOk("enable_disk_uuid"); ok {
   680  		vm.enableDiskUUID = v.(bool)
   681  	}
   682  
   683  	if raw, ok := d.GetOk("dns_suffixes"); ok {
   684  		for _, v := range raw.([]interface{}) {
   685  			vm.dnsSuffixes = append(vm.dnsSuffixes, v.(string))
   686  		}
   687  	} else {
   688  		vm.dnsSuffixes = DefaultDNSSuffixes
   689  	}
   690  
   691  	if raw, ok := d.GetOk("dns_servers"); ok {
   692  		for _, v := range raw.([]interface{}) {
   693  			vm.dnsServers = append(vm.dnsServers, v.(string))
   694  		}
   695  	} else {
   696  		vm.dnsServers = DefaultDNSServers
   697  	}
   698  
   699  	if vL, ok := d.GetOk("custom_configuration_parameters"); ok {
   700  		if custom_configs, ok := vL.(map[string]interface{}); ok {
   701  			custom := make(map[string]types.AnyType)
   702  			for k, v := range custom_configs {
   703  				custom[k] = v
   704  			}
   705  			vm.customConfigurations = custom
   706  			log.Printf("[DEBUG] custom_configuration_parameters init: %v", vm.customConfigurations)
   707  		}
   708  	}
   709  
   710  	if vL, ok := d.GetOk("network_interface"); ok {
   711  		networks := make([]networkInterface, len(vL.([]interface{})))
   712  		for i, v := range vL.([]interface{}) {
   713  			network := v.(map[string]interface{})
   714  			networks[i].label = network["label"].(string)
   715  			if v, ok := network["ip_address"].(string); ok && v != "" {
   716  				networks[i].ipv4Address = v
   717  			}
   718  			if v, ok := d.GetOk("gateway"); ok {
   719  				networks[i].ipv4Gateway = v.(string)
   720  			}
   721  			if v, ok := network["subnet_mask"].(string); ok && v != "" {
   722  				ip := net.ParseIP(v).To4()
   723  				if ip != nil {
   724  					mask := net.IPv4Mask(ip[0], ip[1], ip[2], ip[3])
   725  					pl, _ := mask.Size()
   726  					networks[i].ipv4PrefixLength = pl
   727  				} else {
   728  					return fmt.Errorf("subnet_mask parameter is invalid.")
   729  				}
   730  			}
   731  			if v, ok := network["ipv4_address"].(string); ok && v != "" {
   732  				networks[i].ipv4Address = v
   733  			}
   734  			if v, ok := network["ipv4_prefix_length"].(int); ok && v != 0 {
   735  				networks[i].ipv4PrefixLength = v
   736  			}
   737  			if v, ok := network["ipv4_gateway"].(string); ok && v != "" {
   738  				networks[i].ipv4Gateway = v
   739  			}
   740  			if v, ok := network["ipv6_address"].(string); ok && v != "" {
   741  				networks[i].ipv6Address = v
   742  			}
   743  			if v, ok := network["ipv6_prefix_length"].(int); ok && v != 0 {
   744  				networks[i].ipv6PrefixLength = v
   745  			}
   746  			if v, ok := network["ipv6_gateway"].(string); ok && v != "" {
   747  				networks[i].ipv6Gateway = v
   748  			}
   749  			if v, ok := network["mac_address"].(string); ok && v != "" {
   750  				networks[i].macAddress = v
   751  			}
   752  		}
   753  		vm.networkInterfaces = networks
   754  		log.Printf("[DEBUG] network_interface init: %v", networks)
   755  	}
   756  
   757  	if vL, ok := d.GetOk("windows_opt_config"); ok {
   758  		var winOpt windowsOptConfig
   759  		custom_configs := (vL.([]interface{}))[0].(map[string]interface{})
   760  		if v, ok := custom_configs["admin_password"].(string); ok && v != "" {
   761  			winOpt.adminPassword = v
   762  		}
   763  		if v, ok := custom_configs["domain"].(string); ok && v != "" {
   764  			winOpt.domain = v
   765  		}
   766  		if v, ok := custom_configs["domain_user"].(string); ok && v != "" {
   767  			winOpt.domainUser = v
   768  		}
   769  		if v, ok := custom_configs["product_key"].(string); ok && v != "" {
   770  			winOpt.productKey = v
   771  		}
   772  		if v, ok := custom_configs["domain_user_password"].(string); ok && v != "" {
   773  			winOpt.domainUserPassword = v
   774  		}
   775  		vm.windowsOptionalConfig = winOpt
   776  		log.Printf("[DEBUG] windows config init: %v", winOpt)
   777  	}
   778  
   779  	if vL, ok := d.GetOk("disk"); ok {
   780  		if diskSet, ok := vL.(*schema.Set); ok {
   781  
   782  			disks := []hardDisk{}
   783  			hasBootableDisk := false
   784  			for _, value := range diskSet.List() {
   785  				disk := value.(map[string]interface{})
   786  				newDisk := hardDisk{}
   787  
   788  				if v, ok := disk["template"].(string); ok && v != "" {
   789  					if v, ok := disk["name"].(string); ok && v != "" {
   790  						return fmt.Errorf("Cannot specify name of a template")
   791  					}
   792  					vm.template = v
   793  					if hasBootableDisk {
   794  						return fmt.Errorf("[ERROR] Only one bootable disk or template may be given")
   795  					}
   796  					hasBootableDisk = true
   797  				}
   798  
   799  				if v, ok := disk["type"].(string); ok && v != "" {
   800  					newDisk.initType = v
   801  				}
   802  
   803  				if v, ok := disk["datastore"].(string); ok && v != "" {
   804  					vm.datastore = v
   805  				}
   806  
   807  				if v, ok := disk["size"].(int); ok && v != 0 {
   808  					if v, ok := disk["template"].(string); ok && v != "" {
   809  						return fmt.Errorf("Cannot specify size of a template")
   810  					}
   811  
   812  					if v, ok := disk["name"].(string); ok && v != "" {
   813  						newDisk.name = v
   814  					} else {
   815  						return fmt.Errorf("[ERROR] Disk name must be provided when creating a new disk")
   816  					}
   817  
   818  					newDisk.size = int64(v)
   819  				}
   820  
   821  				if v, ok := disk["iops"].(int); ok && v != 0 {
   822  					newDisk.iops = int64(v)
   823  				}
   824  
   825  				if v, ok := disk["controller_type"].(string); ok && v != "" {
   826  					newDisk.controller = v
   827  				}
   828  
   829  				if vVmdk, ok := disk["vmdk"].(string); ok && vVmdk != "" {
   830  					if v, ok := disk["template"].(string); ok && v != "" {
   831  						return fmt.Errorf("Cannot specify a vmdk for a template")
   832  					}
   833  					if v, ok := disk["size"].(int); ok && v != 0 {
   834  						return fmt.Errorf("Cannot specify size of a vmdk")
   835  					}
   836  					if v, ok := disk["name"].(string); ok && v != "" {
   837  						return fmt.Errorf("Cannot specify name of a vmdk")
   838  					}
   839  					if vBootable, ok := disk["bootable"].(bool); ok {
   840  						hasBootableDisk = true
   841  						newDisk.bootable = vBootable
   842  						vm.hasBootableVmdk = vBootable
   843  					}
   844  					newDisk.vmdkPath = vVmdk
   845  				}
   846  				// Preserves order so bootable disk is first
   847  				if newDisk.bootable || disk["template"] != "" {
   848  					disks = append([]hardDisk{newDisk}, disks...)
   849  				} else {
   850  					disks = append(disks, newDisk)
   851  				}
   852  			}
   853  			vm.hardDisks = disks
   854  			log.Printf("[DEBUG] disk init: %v", disks)
   855  		}
   856  	}
   857  
   858  	if vL, ok := d.GetOk("cdrom"); ok {
   859  		cdroms := make([]cdrom, len(vL.([]interface{})))
   860  		for i, v := range vL.([]interface{}) {
   861  			c := v.(map[string]interface{})
   862  			if v, ok := c["datastore"].(string); ok && v != "" {
   863  				cdroms[i].datastore = v
   864  			} else {
   865  				return fmt.Errorf("Datastore argument must be specified when attaching a cdrom image.")
   866  			}
   867  			if v, ok := c["path"].(string); ok && v != "" {
   868  				cdroms[i].path = v
   869  			} else {
   870  				return fmt.Errorf("Path argument must be specified when attaching a cdrom image.")
   871  			}
   872  		}
   873  		vm.cdroms = cdroms
   874  		log.Printf("[DEBUG] cdrom init: %v", cdroms)
   875  	}
   876  
   877  	err := vm.setupVirtualMachine(client)
   878  	if err != nil {
   879  		return err
   880  	}
   881  
   882  	d.SetId(vm.Path())
   883  	log.Printf("[INFO] Created virtual machine: %s", d.Id())
   884  
   885  	return resourceVSphereVirtualMachineRead(d, meta)
   886  }
   887  
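        // resourceVSphereVirtualMachineRead refreshes disk, network interface, datastore, memory and CPU state from vSphere.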
   888  func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{}) error {
   889  	log.Printf("[DEBUG] virtual machine resource data: %#v", d)
   890  	client := meta.(*govmomi.Client)
   891  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
   892  	if err != nil {
   893  		return err
   894  	}
   895  	finder := find.NewFinder(client.Client, true)
   896  	finder = finder.SetDatacenter(dc)
   897  
   898  	vm, err := finder.VirtualMachine(context.TODO(), d.Id())
   899  	if err != nil {
   900  		d.SetId("")
   901  		return nil
   902  	}
   903  
   904  	var mvm mo.VirtualMachine
   905  
   906  	// wait for interfaces to appear
   907  	_, err = vm.WaitForNetIP(context.TODO(), true)
   908  	if err != nil {
   909  		return err
   910  	}
   911  
   912  	collector := property.DefaultCollector(client.Client)
   913  	if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"guest", "summary", "datastore", "config"}, &mvm); err != nil {
   914  		return err
   915  	}
   916  
   917  	log.Printf("[DEBUG] Datacenter - %#v", dc)
   918  	log.Printf("[DEBUG] mvm.Summary.Config - %#v", mvm.Summary.Config)
   919  	log.Printf("[DEBUG] mvm.Config - %#v", mvm.Config)
   920  	log.Printf("[DEBUG] mvm.Guest.Net - %#v", mvm.Guest.Net)
   921  
   922  	disks := make([]map[string]interface{}, 0)
   923  	templateDisk := make(map[string]interface{}, 1)
   924  	for _, device := range mvm.Config.Hardware.Device {
   925  		if vd, ok := device.(*types.VirtualDisk); ok {
   926  
   927  			virtualDevice := vd.GetVirtualDevice()
   928  
   929  			backingInfo := virtualDevice.Backing
   930  			var diskFullPath string
   931  			var diskUuid string
   932  			if v, ok := backingInfo.(*types.VirtualDiskFlatVer2BackingInfo); ok {
   933  				diskFullPath = v.FileName
   934  				diskUuid = v.Uuid
   935  			} else if v, ok := backingInfo.(*types.VirtualDiskSparseVer2BackingInfo); ok {
   936  				diskFullPath = v.FileName
   937  				diskUuid = v.Uuid
   938  			}
   939  			log.Printf("[DEBUG] resourceVSphereVirtualMachineRead - Analyzing disk: %v", diskFullPath)
   940  
   941  			// Separate datastore and path
   942  			diskFullPathSplit := strings.Split(diskFullPath, " ")
   943  			if len(diskFullPathSplit) != 2 {
   944  				return fmt.Errorf("[ERROR] Failed trying to parse disk path: %v", diskFullPath)
   945  			}
   946  			diskPath := diskFullPathSplit[1]
   947  			// Isolate filename
   948  			diskNameSplit := strings.Split(diskPath, "/")
   949  			diskName := diskNameSplit[len(diskNameSplit)-1]
   950  			// Remove possible extension
   951  			diskName = strings.Split(diskName, ".")[0]
   952  
   953  			if prevDisks, ok := d.GetOk("disk"); ok {
   954  				if prevDiskSet, ok := prevDisks.(*schema.Set); ok {
   955  					for _, v := range prevDiskSet.List() {
   956  						prevDisk := v.(map[string]interface{})
   957  
   958  						// We're guaranteed only one template disk.  Passing value directly through since templates should be immutable
   959  						if prevDisk["template"] != "" {
   960  							if len(templateDisk) == 0 {
   961  								templateDisk = prevDisk
   962  								disks = append(disks, templateDisk)
   963  								break
   964  							}
   965  						}
   966  
   967  						// It is enforced that prevDisk["name"] should only be set in the case
   968  						// of creating a new disk for the user.
   969  						// size case:  name was set by user, compare parsed filename from mo.filename (without path or .vmdk extension) with name
   970  						// vmdk case:  compare prevDisk["vmdk"] and mo.Filename
   971  						if diskName == prevDisk["name"] || diskPath == prevDisk["vmdk"] {
   972  
   973  							prevDisk["key"] = virtualDevice.Key
   974  							prevDisk["uuid"] = diskUuid
   975  
   976  							disks = append(disks, prevDisk)
   977  							break
   978  						}
   979  					}
   980  				}
   981  			}
   982  			log.Printf("[DEBUG] disks: %#v", disks)
   983  		}
   984  	}
   985  	err = d.Set("disk", disks)
   986  	if err != nil {
   987  		return fmt.Errorf("Invalid disks to set: %#v", disks)
   988  	}
   989  
   990  	networkInterfaces := make([]map[string]interface{}, 0)
   991  	for _, v := range mvm.Guest.Net {
   992  		if v.DeviceConfigId >= 0 {
   993  			log.Printf("[DEBUG] v.Network - %#v", v.Network)
   994  			networkInterface := make(map[string]interface{})
   995  			networkInterface["label"] = v.Network
   996  			networkInterface["mac_address"] = v.MacAddress
   997  			for _, ip := range v.IpConfig.IpAddress {
   998  				p := net.ParseIP(ip.IpAddress)
   999  				if p.To4() != nil {
  1000  					log.Printf("[DEBUG] p.String - %#v", p.String())
  1001  					log.Printf("[DEBUG] ip.PrefixLength - %#v", ip.PrefixLength)
  1002  					networkInterface["ipv4_address"] = p.String()
  1003  					networkInterface["ipv4_prefix_length"] = ip.PrefixLength
  1004  				} else if p.To16() != nil {
  1005  					log.Printf("[DEBUG] p.String - %#v", p.String())
  1006  					log.Printf("[DEBUG] ip.PrefixLength - %#v", ip.PrefixLength)
  1007  					networkInterface["ipv6_address"] = p.String()
  1008  					networkInterface["ipv6_prefix_length"] = ip.PrefixLength
  1009  				}
  1010  				log.Printf("[DEBUG] networkInterface: %#v", networkInterface)
  1011  			}
  1012  			log.Printf("[DEBUG] networkInterface: %#v", networkInterface)
  1013  			networkInterfaces = append(networkInterfaces, networkInterface)
  1014  		}
  1015  	}
  1016  	if mvm.Guest.IpStack != nil {
  1017  		for _, v := range mvm.Guest.IpStack {
  1018  			if v.IpRouteConfig != nil && v.IpRouteConfig.IpRoute != nil {
  1019  				for _, route := range v.IpRouteConfig.IpRoute {
  1020  					if route.Gateway.Device != "" {
  1021  						gatewaySetting := ""
  1022  						if route.Network == "::" {
  1023  							gatewaySetting = "ipv6_gateway"
  1024  						} else if route.Network == "0.0.0.0" {
  1025  							gatewaySetting = "ipv4_gateway"
  1026  						}
  1027  						if gatewaySetting != "" {
  1028  							deviceID, err := strconv.Atoi(route.Gateway.Device)
  1029  							if err != nil {
  1030  								log.Printf("[WARN] error at processing %s of device id %#v: %#v", gatewaySetting, route.Gateway.Device, err)
  1031  							} else {
  1032  								log.Printf("[DEBUG] %s of device id %d: %s", gatewaySetting, deviceID, route.Gateway.IpAddress)
  1033  								networkInterfaces[deviceID][gatewaySetting] = route.Gateway.IpAddress
  1034  							}
  1035  						}
  1036  					}
  1037  				}
  1038  			}
  1039  		}
  1040  	}
  1041  	log.Printf("[DEBUG] networkInterfaces: %#v", networkInterfaces)
  1042  	err = d.Set("network_interface", networkInterfaces)
  1043  	if err != nil {
  1044  		return fmt.Errorf("Invalid network interfaces to set: %#v", networkInterfaces)
  1045  	}
  1046  
  1047  	log.Printf("[DEBUG] ip address: %v", networkInterfaces[0]["ipv4_address"].(string))
  1048  	d.SetConnInfo(map[string]string{
  1049  		"type": "ssh",
  1050  		"host": networkInterfaces[0]["ipv4_address"].(string),
  1051  	})
  1052  
  1053  	var rootDatastore string
  1054  	for _, v := range mvm.Datastore {
  1055  		var md mo.Datastore
  1056  		if err := collector.RetrieveOne(context.TODO(), v, []string{"name", "parent"}, &md); err != nil {
  1057  			return err
  1058  		}
  1059  		if md.Parent.Type == "StoragePod" {
  1060  			var msp mo.StoragePod
  1061  			if err := collector.RetrieveOne(context.TODO(), *md.Parent, []string{"name"}, &msp); err != nil {
  1062  				return err
  1063  			}
  1064  			rootDatastore = msp.Name
  1065  			log.Printf("[DEBUG] %#v", msp.Name)
  1066  		} else {
  1067  			rootDatastore = md.Name
  1068  			log.Printf("[DEBUG] %#v", md.Name)
  1069  		}
  1070  		break
  1071  	}
  1072  
  1073  	d.Set("datacenter", dc)
  1074  	d.Set("memory", mvm.Summary.Config.MemorySizeMB)
  1075  	d.Set("memory_reservation", mvm.Summary.Config.MemoryReservation)
  1076  	d.Set("cpu", mvm.Summary.Config.NumCpu)
  1077  	d.Set("datastore", rootDatastore)
  1078  	d.Set("uuid", mvm.Summary.Config.Uuid)
  1079  
  1080  	return nil
  1081  }
  1082  
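        // resourceVSphereVirtualMachineDelete powers the virtual machine off, detaches any disks marked keep_on_remove, and destroys it.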
  1083  func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error {
  1084  	client := meta.(*govmomi.Client)
  1085  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
  1086  	if err != nil {
  1087  		return err
  1088  	}
  1089  	finder := find.NewFinder(client.Client, true)
  1090  	finder = finder.SetDatacenter(dc)
  1091  
  1092  	vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string)))
  1093  	if err != nil {
  1094  		return err
  1095  	}
  1096  	devices, err := vm.Device(context.TODO())
  1097  	if err != nil {
  1098  		log.Printf("[DEBUG] resourceVSphereVirtualMachineDelete - Failed to get device list: %v", err)
  1099  		return err
  1100  	}
  1101  
  1102  	log.Printf("[INFO] Deleting virtual machine: %s", d.Id())
  1103  	state, err := vm.PowerState(context.TODO())
  1104  	if err != nil {
  1105  		return err
  1106  	}
  1107  
  1108  	if state == types.VirtualMachinePowerStatePoweredOn {
  1109  		task, err := vm.PowerOff(context.TODO())
  1110  		if err != nil {
  1111  			return err
  1112  		}
  1113  
  1114  		err = task.Wait(context.TODO())
  1115  		if err != nil {
  1116  			return err
  1117  		}
  1118  	}
  1119  
  1120  	// Safely eject any disks the user marked as keep_on_remove
  1121  	if vL, ok := d.GetOk("disk"); ok {
  1122  		if diskSet, ok := vL.(*schema.Set); ok {
  1123  
  1124  			for _, value := range diskSet.List() {
  1125  				disk := value.(map[string]interface{})
  1126  
  1127  				if v, ok := disk["keep_on_remove"].(bool); ok && v {
  1128  					log.Printf("[DEBUG] not destroying %v", disk["name"])
  1129  					virtualDisk := devices.FindByKey(int32(disk["key"].(int)))
  1130  					err = vm.RemoveDevice(context.TODO(), true, virtualDisk)
  1131  					if err != nil {
  1132  						log.Printf("[ERROR] Delete - Error removing disk: %v", err)
  1133  						return err
  1134  					}
  1135  				}
  1136  			}
  1137  		}
  1138  	}
  1139  
  1140  	task, err := vm.Destroy(context.TODO())
  1141  	if err != nil {
  1142  		return err
  1143  	}
  1144  
  1145  	err = task.Wait(context.TODO())
  1146  	if err != nil {
  1147  		return err
  1148  	}
  1149  
  1150  	d.SetId("")
  1151  	return nil
  1152  }
  1153  
  1154  // addHardDisk adds a new Hard Disk to the VirtualMachine.
  1155  func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string, datastore *object.Datastore, diskPath string, controller_type string) error {
  1156  	devices, err := vm.Device(context.TODO())
  1157  	if err != nil {
  1158  		return err
  1159  	}
  1160  	log.Printf("[DEBUG] vm devices: %#v\n", devices)
  1161  
  1162  	var controller types.BaseVirtualController
  1163  	controller, err = devices.FindDiskController(controller_type)
  1164  	if err != nil {
  1165  		log.Printf("[DEBUG] Couldn't find a %v controller. Creating one...", controller_type)
  1166  
  1167  		var c types.BaseVirtualDevice
  1168  		switch controller_type {
  1169  		case "scsi":
  1170  			// Create scsi controller
  1171  			c, err = devices.CreateSCSIController("scsi")
  1172  			if err != nil {
  1173  				return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err)
  1174  			}
  1175  		case "ide":
  1176  			// Create ide controller
  1177  			c, err = devices.CreateIDEController()
  1178  			if err != nil {
  1179  				return fmt.Errorf("[ERROR] Failed creating IDE controller: %v", err)
  1180  			}
  1181  		default:
  1182  			return fmt.Errorf("[ERROR] Unsupported disk controller provided: %v", controller_type)
  1183  		}
  1184  
  1185  		vm.AddDevice(context.TODO(), c)
  1186  		// Update our devices list
  1187  		devices, err := vm.Device(context.TODO())
  1188  		if err != nil {
  1189  			return err
  1190  		}
  1191  		controller, err = devices.FindDiskController(controller_type)
  1192  		if err != nil {
  1193  			log.Printf("[ERROR] Could not find the new %v controller: %v", controller_type, err)
  1194  			return err
  1195  		}
  1196  	}
  1197  
  1198  	log.Printf("[DEBUG] disk controller: %#v\n", controller)
  1199  
  1200  	// TODO Check if diskPath & datastore exist
  1201  	// A disk path is required; it is qualified with the datastore name below.
  1202  	if diskPath == "" {
  1203  		return fmt.Errorf("[ERROR] addHardDisk - No path provided")
  1204  	} else {
  1205  		// TODO Check if diskPath & datastore exist
  1206  		diskPath = fmt.Sprintf("[%v] %v", datastore.Name(), diskPath)
  1207  	}
  1208  	log.Printf("[DEBUG] addHardDisk - diskPath: %v", diskPath)
  1209  	disk := devices.CreateDisk(controller, datastore.Reference(), diskPath)
  1210  
  1211  	existing := devices.SelectByBackingInfo(disk.Backing)
  1212  	log.Printf("[DEBUG] disk: %#v\n", disk)
  1213  
  1214  	if len(existing) == 0 {
  1215  		disk.CapacityInKB = int64(size * 1024 * 1024)
  1216  		if iops != 0 {
  1217  			disk.StorageIOAllocation = &types.StorageIOAllocationInfo{
  1218  				Limit: iops,
  1219  			}
  1220  		}
  1221  		backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
  1222  
  1223  		if diskType == "eager_zeroed" {
  1224  			// eager zeroed thick virtual disk
  1225  			backing.ThinProvisioned = types.NewBool(false)
  1226  			backing.EagerlyScrub = types.NewBool(true)
  1227  		} else if diskType == "thin" {
  1228  			// thin provisioned virtual disk
  1229  			backing.ThinProvisioned = types.NewBool(true)
  1230  		}
  1231  
  1232  		log.Printf("[DEBUG] addHardDisk: %#v\n", disk)
  1233  		log.Printf("[DEBUG] addHardDisk capacity: %#v\n", disk.CapacityInKB)
  1234  
  1235  		return vm.AddDevice(context.TODO(), disk)
  1236  	} else {
  1237  		log.Printf("[DEBUG] addHardDisk: Disk already present.\n")
  1238  
  1239  		return nil
  1240  	}
  1241  }
  1242  
  1243  // addCdrom adds a new virtual cdrom drive to the VirtualMachine and attaches an image (ISO) to it from a datastore path.
  1244  func addCdrom(vm *object.VirtualMachine, datastore, path string) error {
  1245  	devices, err := vm.Device(context.TODO())
  1246  	if err != nil {
  1247  		return err
  1248  	}
  1249  	log.Printf("[DEBUG] vm devices: %#v", devices)
  1250  
  1251  	var controller *types.VirtualIDEController
  1252  	controller, err = devices.FindIDEController("")
  1253  	if err != nil {
  1254  		log.Printf("[DEBUG] Couldn't find an IDE controller. Creating one...")
  1255  
  1256  		var c types.BaseVirtualDevice
  1257  		c, err := devices.CreateIDEController()
  1258  		if err != nil {
  1259  			return fmt.Errorf("[ERROR] Failed creating IDE controller: %v", err)
  1260  		}
  1261  
  1262  		if v, ok := c.(*types.VirtualIDEController); ok {
  1263  			controller = v
  1264  		} else {
  1265  			return fmt.Errorf("[ERROR] Controller type could not be asserted")
  1266  		}
  1267  		vm.AddDevice(context.TODO(), c)
  1268  		// Update our devices list
  1269  		devices, err := vm.Device(context.TODO())
  1270  		if err != nil {
  1271  			return err
  1272  		}
  1273  		controller, err = devices.FindIDEController("")
  1274  		if err != nil {
  1275  			log.Printf("[ERROR] Could not find the new disk IDE controller: %v", err)
  1276  			return err
  1277  		}
  1278  	}
  1279  	log.Printf("[DEBUG] ide controller: %#v", controller)
  1280  
  1281  	c, err := devices.CreateCdrom(controller)
  1282  	if err != nil {
  1283  		return err
  1284  	}
  1285  
  1286  	c = devices.InsertIso(c, fmt.Sprintf("[%s] %s", datastore, path))
  1287  	log.Printf("[DEBUG] addCdrom: %#v", c)
  1288  
  1289  	return vm.AddDevice(context.TODO(), c)
  1290  }
  1291  
  1292  // buildNetworkDevice builds VirtualDeviceConfigSpec for Network Device.
  1293  func buildNetworkDevice(f *find.Finder, label, adapterType string, macAddress string) (*types.VirtualDeviceConfigSpec, error) {
  1294  	network, err := f.Network(context.TODO(), "*"+label)
  1295  	if err != nil {
  1296  		return nil, err
  1297  	}
  1298  
  1299  	backing, err := network.EthernetCardBackingInfo(context.TODO())
  1300  	if err != nil {
  1301  		return nil, err
  1302  	}
  1303  
  1304  	var address_type string
  1305  	if macAddress == "" {
  1306  		address_type = string(types.VirtualEthernetCardMacTypeGenerated)
  1307  	} else {
  1308  		address_type = string(types.VirtualEthernetCardMacTypeManual)
  1309  	}
  1310  
  1311  	if adapterType == "vmxnet3" {
  1312  		return &types.VirtualDeviceConfigSpec{
  1313  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
  1314  			Device: &types.VirtualVmxnet3{
  1315  				VirtualVmxnet: types.VirtualVmxnet{
  1316  					VirtualEthernetCard: types.VirtualEthernetCard{
  1317  						VirtualDevice: types.VirtualDevice{
  1318  							Key:     -1,
  1319  							Backing: backing,
  1320  						},
  1321  						AddressType: address_type,
  1322  						MacAddress:  macAddress,
  1323  					},
  1324  				},
  1325  			},
  1326  		}, nil
  1327  	} else if adapterType == "e1000" {
  1328  		return &types.VirtualDeviceConfigSpec{
  1329  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
  1330  			Device: &types.VirtualE1000{
  1331  				VirtualEthernetCard: types.VirtualEthernetCard{
  1332  					VirtualDevice: types.VirtualDevice{
  1333  						Key:     -1,
  1334  						Backing: backing,
  1335  					},
  1336  					AddressType: address_type,
  1337  					MacAddress:  macAddress,
  1338  				},
  1339  			},
  1340  		}, nil
  1341  	} else {
  1342  		return nil, fmt.Errorf("Invalid network adapter type.")
  1343  	}
  1344  }
  1345  
  1346  // buildVMRelocateSpec builds VirtualMachineRelocateSpec to set a place for a new VirtualMachine.
  1347  func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine, linkedClone bool, initType string) (types.VirtualMachineRelocateSpec, error) {
  1348  	var key int32
  1349  	var moveType string
  1350  	if linkedClone {
  1351  		moveType = "createNewChildDiskBacking"
  1352  	} else {
  1353  		moveType = "moveAllDiskBackingsAndDisallowSharing"
  1354  	}
  1355  	log.Printf("[DEBUG] relocate type: [%s]", moveType)
  1356  
  1357  	devices, err := vm.Device(context.TODO())
  1358  	if err != nil {
  1359  		return types.VirtualMachineRelocateSpec{}, err
  1360  	}
  1361  	for _, d := range devices {
  1362  		if devices.Type(d) == "disk" {
  1363  			key = int32(d.GetVirtualDevice().Key)
  1364  		}
  1365  	}
  1366  
  1367  	isThin := initType == "thin"
  1368  	rpr := rp.Reference()
  1369  	dsr := ds.Reference()
  1370  	return types.VirtualMachineRelocateSpec{
  1371  		Datastore:    &dsr,
  1372  		Pool:         &rpr,
  1373  		DiskMoveType: moveType,
  1374  		Disk: []types.VirtualMachineRelocateSpecDiskLocator{
  1375  			{
  1376  				Datastore: dsr,
  1377  				DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{
  1378  					DiskMode:        "persistent",
  1379  					ThinProvisioned: types.NewBool(isThin),
  1380  					EagerlyScrub:    types.NewBool(!isThin),
  1381  				},
  1382  				DiskId: key,
  1383  			},
  1384  		},
  1385  	}, nil
  1386  }
  1387  
  1388  // getDatastoreObject gets datastore object.
  1389  func getDatastoreObject(client *govmomi.Client, f *object.DatacenterFolders, name string) (types.ManagedObjectReference, error) {
  1390  	s := object.NewSearchIndex(client.Client)
  1391  	ref, err := s.FindChild(context.TODO(), f.DatastoreFolder, name)
  1392  	if err != nil {
  1393  		return types.ManagedObjectReference{}, err
  1394  	}
  1395  	if ref == nil {
  1396  		return types.ManagedObjectReference{}, fmt.Errorf("Datastore '%s' not found.", name)
  1397  	}
  1398  	log.Printf("[DEBUG] getDatastoreObject: reference: %#v", ref)
  1399  	return ref.Reference(), nil
  1400  }
  1401  
  1402  // buildStoragePlacementSpecCreate builds StoragePlacementSpec for create action.
  1403  func buildStoragePlacementSpecCreate(f *object.DatacenterFolders, rp *object.ResourcePool, storagePod object.StoragePod, configSpec types.VirtualMachineConfigSpec) types.StoragePlacementSpec {
  1404  	vmfr := f.VmFolder.Reference()
  1405  	rpr := rp.Reference()
  1406  	spr := storagePod.Reference()
  1407  
  1408  	sps := types.StoragePlacementSpec{
  1409  		Type:       "create",
  1410  		ConfigSpec: &configSpec,
  1411  		PodSelectionSpec: types.StorageDrsPodSelectionSpec{
  1412  			StoragePod: &spr,
  1413  		},
  1414  		Folder:       &vmfr,
  1415  		ResourcePool: &rpr,
  1416  	}
  1417  	log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)
  1418  	return sps
  1419  }
  1420  
  1421  // buildStoragePlacementSpecClone builds StoragePlacementSpec for clone action.
  1422  func buildStoragePlacementSpecClone(c *govmomi.Client, f *object.DatacenterFolders, vm *object.VirtualMachine, rp *object.ResourcePool, storagePod object.StoragePod) types.StoragePlacementSpec {
  1423  	vmr := vm.Reference()
  1424  	vmfr := f.VmFolder.Reference()
  1425  	rpr := rp.Reference()
  1426  	spr := storagePod.Reference()
  1427  
  1428  	var o mo.VirtualMachine
  1429  	err := vm.Properties(context.TODO(), vmr, []string{"datastore"}, &o)
  1430  	if err != nil {
  1431  		return types.StoragePlacementSpec{}
  1432  	}
  1433  	ds := object.NewDatastore(c.Client, o.Datastore[0])
  1434  	log.Printf("[DEBUG] findDatastore: datastore: %#v\n", ds)
  1435  
  1436  	devices, err := vm.Device(context.TODO())
  1437  	if err != nil {
  1438  		return types.StoragePlacementSpec{}
  1439  	}
  1440  
  1441  	var key int32
  1442  	for _, d := range devices.SelectByType((*types.VirtualDisk)(nil)) {
  1443  		key = int32(d.GetVirtualDevice().Key)
  1444  		log.Printf("[DEBUG] findDatastore: virtual devices: %#v\n", d.GetVirtualDevice())
  1445  	}
  1446  
  1447  	sps := types.StoragePlacementSpec{
  1448  		Type: "clone",
  1449  		Vm:   &vmr,
  1450  		PodSelectionSpec: types.StorageDrsPodSelectionSpec{
  1451  			StoragePod: &spr,
  1452  		},
  1453  		CloneSpec: &types.VirtualMachineCloneSpec{
  1454  			Location: types.VirtualMachineRelocateSpec{
  1455  				Disk: []types.VirtualMachineRelocateSpecDiskLocator{
  1456  					{
  1457  						Datastore:       ds.Reference(),
  1458  						DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{},
  1459  						DiskId:          key,
  1460  					},
  1461  				},
  1462  				Pool: &rpr,
  1463  			},
  1464  			PowerOn:  false,
  1465  			Template: false,
  1466  		},
  1467  		CloneName: "dummy",
  1468  		Folder:    &vmfr,
  1469  	}
  1470  	return sps
  1471  }
  1472  
  1473  // findDatastore finds Datastore object.
  1474  func findDatastore(c *govmomi.Client, sps types.StoragePlacementSpec) (*object.Datastore, error) {
  1475  	var datastore *object.Datastore
  1476  	log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)
  1477  
  1478  	srm := object.NewStorageResourceManager(c.Client)
  1479  	rds, err := srm.RecommendDatastores(context.TODO(), sps)
  1480  	if err != nil {
  1481  		return nil, err
  1482  	}
  1483  	log.Printf("[DEBUG] findDatastore: recommendDatastores: %#v\n", rds)
  1484  
  1485  	spa := rds.Recommendations[0].Action[0].(*types.StoragePlacementAction)
  1486  	datastore = object.NewDatastore(c.Client, spa.Destination)
  1487  	log.Printf("[DEBUG] findDatastore: datastore: %#v", datastore)
  1488  
  1489  	return datastore, nil
  1490  }
  1491  
  1492  // createCdroms is a helper function to attach virtual cdrom devices (and their attached disk images) to a virtual IDE controller.
  1493  func createCdroms(vm *object.VirtualMachine, cdroms []cdrom) error {
  1494  	log.Printf("[DEBUG] add cdroms: %v", cdroms)
  1495  	for _, cd := range cdroms {
  1496  		log.Printf("[DEBUG] add cdrom (datastore): %v", cd.datastore)
  1497  		log.Printf("[DEBUG] add cdrom (cd path): %v", cd.path)
  1498  		err := addCdrom(vm, cd.datastore, cd.path)
  1499  		if err != nil {
  1500  			return err
  1501  		}
  1502  	}
  1503  
  1504  	return nil
  1505  }
  1506  
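        // setupVirtualMachine creates the virtual machine in vSphere, either from scratch or by cloning vm.template,
        // resolving the resource pool, folder, datastore and network devices described by the struct.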
  1507  func (vm *virtualMachine) setupVirtualMachine(c *govmomi.Client) error {
  1508  	dc, err := getDatacenter(c, vm.datacenter)
  1509  
  1510  	if err != nil {
  1511  		return err
  1512  	}
  1513  	finder := find.NewFinder(c.Client, true)
  1514  	finder = finder.SetDatacenter(dc)
  1515  
  1516  	var template *object.VirtualMachine
  1517  	var template_mo mo.VirtualMachine
  1518  	var vm_mo mo.VirtualMachine
  1519  	if vm.template != "" {
  1520  		template, err = finder.VirtualMachine(context.TODO(), vm.template)
  1521  		if err != nil {
  1522  			return err
  1523  		}
  1524  		log.Printf("[DEBUG] template: %#v", template)
  1525  
  1526  		err = template.Properties(context.TODO(), template.Reference(), []string{"parent", "config.template", "config.guestId", "resourcePool", "snapshot", "guest.toolsVersionStatus2", "config.guestFullName"}, &template_mo)
  1527  		if err != nil {
  1528  			return err
  1529  		}
  1530  	}
  1531  
  1532  	var resourcePool *object.ResourcePool
  1533  	if vm.resourcePool == "" {
  1534  		if vm.cluster == "" {
  1535  			resourcePool, err = finder.DefaultResourcePool(context.TODO())
  1536  			if err != nil {
  1537  				return err
  1538  			}
  1539  		} else {
  1540  			resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
  1541  			if err != nil {
  1542  				return err
  1543  			}
  1544  		}
  1545  	} else {
  1546  		resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
  1547  		if err != nil {
  1548  			return err
  1549  		}
  1550  	}
  1551  	log.Printf("[DEBUG] resource pool: %#v", resourcePool)
  1552  
  1553  	dcFolders, err := dc.Folders(context.TODO())
  1554  	if err != nil {
  1555  		return err
  1556  	}
  1557  	log.Printf("[DEBUG] folder: %#v", vm.folder)
  1558  
  1559  	folder := dcFolders.VmFolder
  1560  	if len(vm.folder) > 0 {
  1561  		si := object.NewSearchIndex(c.Client)
  1562  		folderRef, err := si.FindByInventoryPath(
  1563  			context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder))
  1564  		if err != nil {
  1565  			return fmt.Errorf("Error reading folder %s: %s", vm.folder, err)
  1566  		} else if folderRef == nil {
  1567  			return fmt.Errorf("Cannot find folder %s", vm.folder)
  1568  		} else {
  1569  			folder = folderRef.(*object.Folder)
  1570  		}
  1571  	}
  1572  
  1573  	// make config spec
  1574  	configSpec := types.VirtualMachineConfigSpec{
  1575  		Name:              vm.name,
  1576  		NumCPUs:           vm.vcpu,
  1577  		NumCoresPerSocket: 1,
  1578  		MemoryMB:          vm.memoryMb,
  1579  		MemoryAllocation: &types.ResourceAllocationInfo{
  1580  			Reservation: vm.memoryAllocation.reservation,
  1581  		},
  1582  		Flags: &types.VirtualMachineFlagInfo{
  1583  			DiskUuidEnabled: &vm.enableDiskUUID,
  1584  		},
  1585  	}
  1586  	if vm.template == "" {
  1587  		configSpec.GuestId = "otherLinux64Guest"
  1588  	}
  1589  	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
  1590  
  1591  	// make ExtraConfig
  1592  	log.Printf("[DEBUG] virtual machine Extra Config spec start")
  1593  	if len(vm.customConfigurations) > 0 {
  1594  		var ov []types.BaseOptionValue
  1595  		for k, v := range vm.customConfigurations {
  1596  			key := k
  1597  			value := v
  1598  			o := types.OptionValue{
  1599  				Key:   key,
  1600  				Value: &value,
  1601  			}
  1602  			log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k, v)
  1603  			ov = append(ov, &o)
  1604  		}
  1605  		configSpec.ExtraConfig = ov
  1606  		log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
  1607  	}
  1608  
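        	// select the datastore: the datacenter default, a named datastore, or a storage pod member chosen via a storage placement spec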
  1609  	var datastore *object.Datastore
  1610  	if vm.datastore == "" {
  1611  		datastore, err = finder.DefaultDatastore(context.TODO())
  1612  		if err != nil {
  1613  			return err
  1614  		}
  1615  	} else {
  1616  		datastore, err = finder.Datastore(context.TODO(), vm.datastore)
  1617  		if err != nil {
  1618  			// TODO: datastore cluster support in govmomi finder function
  1619  			d, err := getDatastoreObject(c, dcFolders, vm.datastore)
  1620  			if err != nil {
  1621  				return err
  1622  			}
  1623  
  1624  			if d.Type == "StoragePod" {
  1625  				sp := object.StoragePod{
  1626  					Folder: object.NewFolder(c.Client, d),
  1627  				}
  1628  
  1629  				var sps types.StoragePlacementSpec
  1630  				if vm.template != "" {
  1631  					sps = buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp)
  1632  				} else {
  1633  					sps = buildStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec)
  1634  				}
  1635  
  1636  				datastore, err = findDatastore(c, sps)
  1637  				if err != nil {
  1638  					return err
  1639  				}
  1640  			} else {
  1641  				datastore = object.NewDatastore(c.Client, d)
  1642  			}
  1643  		}
  1644  	}
  1645  
  1646  	log.Printf("[DEBUG] datastore: %#v", datastore)
  1647  
  1648  	// network
  1649  	networkDevices := []types.BaseVirtualDeviceConfigSpec{}
  1650  	networkConfigs := []types.CustomizationAdapterMapping{}
  1651  	for _, network := range vm.networkInterfaces {
  1652  		// network device
  1653  		var networkDeviceType string
  1654  		if vm.template == "" {
  1655  			networkDeviceType = "e1000"
  1656  		} else {
  1657  			networkDeviceType = "vmxnet3"
  1658  		}
  1659  		nd, err := buildNetworkDevice(finder, network.label, networkDeviceType, network.macAddress)
  1660  		if err != nil {
  1661  			return err
  1662  		}
  1663  		log.Printf("[DEBUG] network device: %+v", nd.Device)
  1664  		networkDevices = append(networkDevices, nd)
  1665  
  1666  		if vm.template != "" {
  1667  			var ipSetting types.CustomizationIPSettings
  1668  			if network.ipv4Address == "" {
  1669  				ipSetting.Ip = &types.CustomizationDhcpIpGenerator{}
  1670  			} else {
  1671  				if network.ipv4PrefixLength == 0 {
  1672  					return fmt.Errorf("ipv4_prefix_length argument is required when ipv4_address is set")
  1673  				}
  1674  				m := net.CIDRMask(network.ipv4PrefixLength, 32)
  1675  				sm := net.IPv4(m[0], m[1], m[2], m[3])
  1676  				subnetMask := sm.String()
  1677  				log.Printf("[DEBUG] ipv4 gateway: %v\n", network.ipv4Gateway)
  1678  				log.Printf("[DEBUG] ipv4 address: %v\n", network.ipv4Address)
  1679  				log.Printf("[DEBUG] ipv4 prefix length: %v\n", network.ipv4PrefixLength)
  1680  				log.Printf("[DEBUG] ipv4 subnet mask: %v\n", subnetMask)
  1681  				ipSetting.Gateway = []string{
  1682  					network.ipv4Gateway,
  1683  				}
  1684  				ipSetting.Ip = &types.CustomizationFixedIp{
  1685  					IpAddress: network.ipv4Address,
  1686  				}
  1687  				ipSetting.SubnetMask = subnetMask
  1688  			}
  1689  
  1690  			ipv6Spec := &types.CustomizationIPSettingsIpV6AddressSpec{}
  1691  			if network.ipv6Address == "" {
  1692  				ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
  1693  					&types.CustomizationDhcpIpV6Generator{},
  1694  				}
  1695  			} else {
  1696  				log.Printf("[DEBUG] ipv6 gateway: %v\n", network.ipv6Gateway)
  1697  				log.Printf("[DEBUG] ipv6 address: %v\n", network.ipv6Address)
  1698  				log.Printf("[DEBUG] ipv6 prefix length: %v\n", network.ipv6PrefixLength)
  1699  
  1700  				ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
  1701  					&types.CustomizationFixedIpV6{
  1702  						IpAddress:  network.ipv6Address,
  1703  						SubnetMask: int32(network.ipv6PrefixLength),
  1704  					},
  1705  				}
  1706  				ipv6Spec.Gateway = []string{network.ipv6Gateway}
  1707  			}
  1708  			ipSetting.IpV6Spec = ipv6Spec
  1709  
  1710  			// network config
  1711  			config := types.CustomizationAdapterMapping{
  1712  				Adapter: ipSetting,
  1713  			}
  1714  			networkConfigs = append(networkConfigs, config)
  1715  		}
  1716  	}
  1717  	log.Printf("[DEBUG] network devices: %#v", networkDevices)
  1718  	log.Printf("[DEBUG] network configs: %#v", networkConfigs)
  1719  
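        	// with no template the VM is created from scratch; otherwise the template is cloned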
  1720  	var task *object.Task
  1721  	if vm.template == "" {
  1722  		var mds mo.Datastore
  1723  		if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil {
  1724  			return err
  1725  		}
  1726  		log.Printf("[DEBUG] datastore: %#v", mds.Name)
  1727  		scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi")
  1728  		if err != nil {
  1729  			return err
  1730  		}
  1731  
  1732  		configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{
  1733  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
  1734  			Device:    scsi,
  1735  		})
  1736  
  1737  		configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)}
  1738  
  1739  		task, err = folder.CreateVM(context.TODO(), configSpec, resourcePool, nil)
  1740  		if err != nil {
  1741  			return err
  1742  		}
  1743  
  1744  		err = task.Wait(context.TODO())
  1745  		if err != nil {
  1746  			return err
  1747  		}
  1748  
  1749  	} else {
  1750  
  1751  		relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template, vm.linkedClone, vm.hardDisks[0].initType)
  1752  		if err != nil {
  1753  			return err
  1754  		}
  1755  
  1756  		log.Printf("[DEBUG] relocate spec: %v", relocateSpec)
  1757  
  1758  		// make vm clone spec
  1759  		cloneSpec := types.VirtualMachineCloneSpec{
  1760  			Location: relocateSpec,
  1761  			Template: false,
  1762  			Config:   &configSpec,
  1763  			PowerOn:  false,
  1764  		}
  1765  		if vm.linkedClone {
  1766  			if template_mo.Snapshot == nil {
  1767  				return fmt.Errorf("`linkedClone=true`, but the source template has no snapshots to clone from")
  1768  			}
  1769  			cloneSpec.Snapshot = template_mo.Snapshot.CurrentSnapshot
  1770  		}
  1771  		log.Printf("[DEBUG] clone spec: %v", cloneSpec)
  1772  
  1773  		task, err = template.Clone(context.TODO(), folder, vm.name, cloneSpec)
  1774  		if err != nil {
  1775  			return err
  1776  		}
  1777  	}
  1778  
  1779  	err = task.Wait(context.TODO())
  1780  	if err != nil {
  1781  		return err
  1782  	}
  1783  
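        	// look up the new VM so its devices, disks, and customization can be applied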
  1784  	newVM, err := finder.VirtualMachine(context.TODO(), vm.Path())
  1785  	if err != nil {
  1786  		return err
  1787  	}
  1788  	log.Printf("[DEBUG] new vm: %v", newVM)
  1789  
  1790  	devices, err := newVM.Device(context.TODO())
  1791  	if err != nil {
  1792  		log.Printf("[DEBUG] Template devices can't be found")
  1793  		return err
  1794  	}
  1795  
  1796  	for _, dvc := range devices {
  1797  		// Issue 3559/3560: Delete all ethernet devices to add the correct ones later
  1798  		if devices.Type(dvc) == "ethernet" {
  1799  			err := newVM.RemoveDevice(context.TODO(), false, dvc)
  1800  			if err != nil {
  1801  				return err
  1802  			}
  1803  		}
  1804  	}
  1805  	// Add Network devices
  1806  	for _, dvc := range networkDevices {
  1807  		err := newVM.AddDevice(
  1808  			context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device)
  1809  		if err != nil {
  1810  			return err
  1811  		}
  1812  	}
  1813  
  1814  	// Create the cdroms if needed.
  1815  	if err := createCdroms(newVM, vm.cdroms); err != nil {
  1816  		return err
  1817  	}
  1818  
        	// read back summary and config; the snapshot directory is needed below to place named disks
  1819  	err = newVM.Properties(context.TODO(), newVM.Reference(), []string{"summary", "config"}, &vm_mo)
        	if err != nil {
        		return err
        	}
  1820  	firstDisk := 0
  1821  	if vm.template != "" {
  1822  		firstDisk++
  1823  	}
  1824  	for i := firstDisk; i < len(vm.hardDisks); i++ {
  1825  		log.Printf("[DEBUG] disk index: %v", i)
  1826  
  1827  		var diskPath string
  1828  		switch {
  1829  		case vm.hardDisks[i].vmdkPath != "":
  1830  			diskPath = vm.hardDisks[i].vmdkPath
  1831  		case vm.hardDisks[i].name != "":
  1832  			snapshotFullDir := vm_mo.Config.Files.SnapshotDirectory
  1833  			split := strings.Split(snapshotFullDir, " ")
  1834  			if len(split) != 2 {
  1835  				return fmt.Errorf("[ERROR] setupVirtualMachine - failed to split snapshot directory: %v", snapshotFullDir)
  1836  			}
  1837  			vmWorkingPath := split[1]
  1838  			diskPath = vmWorkingPath + vm.hardDisks[i].name
  1839  		default:
  1840  			return fmt.Errorf("[ERROR] setupVirtualMachine - Neither vmdk path nor vmdk name was given: %#v", vm.hardDisks[i])
  1841  		}
  1842  
  1843  		err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, diskPath, vm.hardDisks[i].controller)
  1844  		if err != nil {
  1845  			return err
  1846  		}
  1847  	}
  1848  
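        	// guest OS customization: Sysprep settings for Windows templates, LinuxPrep otherwise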
  1849  	if vm.skipCustomization || vm.template == "" {
  1850  		log.Printf("[DEBUG] VM customization skipped")
  1851  	} else {
  1852  		var identity_options types.BaseCustomizationIdentitySettings
  1853  		if strings.HasPrefix(template_mo.Config.GuestId, "win") {
  1854  			var timeZone int
  1855  			if vm.timeZone == "Etc/UTC" {
  1856  				vm.timeZone = "085"
  1857  			}
  1858  			timeZone, err := strconv.Atoi(vm.timeZone)
  1859  			if err != nil {
  1860  				return fmt.Errorf("Error converting TimeZone: %s", err)
  1861  			}
  1862  
  1863  			guiUnattended := types.CustomizationGuiUnattended{
  1864  				AutoLogon:      false,
  1865  				AutoLogonCount: 1,
  1866  				TimeZone:       int32(timeZone),
  1867  			}
  1868  
  1869  			customIdentification := types.CustomizationIdentification{}
  1870  
  1871  			userData := types.CustomizationUserData{
  1872  				ComputerName: &types.CustomizationFixedName{
  1873  					Name: strings.Split(vm.name, ".")[0],
  1874  				},
  1875  				ProductId: vm.windowsOptionalConfig.productKey,
  1876  				FullName:  "terraform",
  1877  				OrgName:   "terraform",
  1878  			}
  1879  
  1880  			if vm.windowsOptionalConfig.domainUserPassword != "" && vm.windowsOptionalConfig.domainUser != "" && vm.windowsOptionalConfig.domain != "" {
  1881  				customIdentification.DomainAdminPassword = &types.CustomizationPassword{
  1882  					PlainText: true,
  1883  					Value:     vm.windowsOptionalConfig.domainUserPassword,
  1884  				}
  1885  				customIdentification.DomainAdmin = vm.windowsOptionalConfig.domainUser
  1886  				customIdentification.JoinDomain = vm.windowsOptionalConfig.domain
  1887  			}
  1888  
  1889  			if vm.windowsOptionalConfig.adminPassword != "" {
  1890  				guiUnattended.Password = &types.CustomizationPassword{
  1891  					PlainText: true,
  1892  					Value:     vm.windowsOptionalConfig.adminPassword,
  1893  				}
  1894  			}
  1895  
  1896  			identity_options = &types.CustomizationSysprep{
  1897  				GuiUnattended:  guiUnattended,
  1898  				Identification: customIdentification,
  1899  				UserData:       userData,
  1900  			}
  1901  		} else {
  1902  			identity_options = &types.CustomizationLinuxPrep{
  1903  				HostName: &types.CustomizationFixedName{
  1904  					Name: strings.Split(vm.name, ".")[0],
  1905  				},
  1906  				Domain:     vm.domain,
  1907  				TimeZone:   vm.timeZone,
  1908  				HwClockUTC: types.NewBool(true),
  1909  			}
  1910  		}
  1911  
  1912  		// create CustomizationSpec
  1913  		customSpec := types.CustomizationSpec{
  1914  			Identity: identity_options,
  1915  			GlobalIPSettings: types.CustomizationGlobalIPSettings{
  1916  				DnsSuffixList: vm.dnsSuffixes,
  1917  				DnsServerList: vm.dnsServers,
  1918  			},
  1919  			NicSettingMap: networkConfigs,
  1920  		}
  1921  		log.Printf("[DEBUG] custom spec: %v", customSpec)
  1922  
  1923  		log.Printf("[DEBUG] VM customization starting")
  1924  		taskb, err := newVM.Customize(context.TODO(), customSpec)
  1925  		if err != nil {
  1926  			return err
  1927  		}
  1928  		_, err = taskb.WaitForResult(context.TODO(), nil)
  1929  		if err != nil {
  1930  			return err
  1931  		}
  1932  		log.Printf("[DEBUG] VM customization finished")
  1933  	}
  1934  
  1935  	if vm.hasBootableVmdk || vm.template != "" {
  1936  		if _, err := newVM.PowerOn(context.TODO()); err != nil {
        			return err
        		}
  1937  	}
  1938  	return nil
  1939  }