github.com/gabrielperezs/terraform@v0.7.0-rc2.0.20160715084931-f7da2612946f/builtin/providers/vsphere/resource_vsphere_virtual_machine.go (about)

     1  package vsphere
     2  
     3  import (
     4  	"fmt"
     5  	"log"
     6  	"net"
     7  	"strconv"
     8  	"strings"
     9  
    10  	"github.com/hashicorp/terraform/helper/schema"
    11  	"github.com/vmware/govmomi"
    12  	"github.com/vmware/govmomi/find"
    13  	"github.com/vmware/govmomi/object"
    14  	"github.com/vmware/govmomi/property"
    15  	"github.com/vmware/govmomi/vim25/mo"
    16  	"github.com/vmware/govmomi/vim25/types"
    17  	"golang.org/x/net/context"
    18  )
    19  
// DefaultDNSSuffixes is used for guest customization when the
// configuration supplies no "dns_suffixes".
var DefaultDNSSuffixes = []string{
	"vsphere.local",
}

// DefaultDNSServers is used for guest customization when the
// configuration supplies no "dns_servers" (Google public DNS).
var DefaultDNSServers = []string{
	"8.8.8.8",
	"8.8.4.4",
}

// DiskControllerTypes lists the values accepted by the disk
// "controller_type" schema attribute.
var DiskControllerTypes = []string{
	"scsi",
	"scsi-lsi-parallel",
	"scsi-buslogic",
	"scsi-paravirtual",
	"scsi-lsi-sas",
	"ide",
}
    37  
// networkInterface holds the parsed "network_interface" configuration
// for a single NIC, as collected in resourceVSphereVirtualMachineCreate.
type networkInterface struct {
	deviceName       string
	label            string // port group / network label the NIC attaches to
	ipv4Address      string
	ipv4PrefixLength int
	ipv4Gateway      string
	ipv6Address      string
	ipv6PrefixLength int
	ipv6Gateway      string
	adapterType      string // TODO: Make "adapter_type" argument
	macAddress       string
}
    50  
// hardDisk holds the parsed "disk" configuration for one virtual disk.
type hardDisk struct {
	name       string
	size       int64 // size for a newly created disk; 0 when attaching an existing vmdk
	iops       int64
	initType   string // provisioning type: "thin" or "eager_zeroed"
	vmdkPath   string // datastore path of an existing vmdk to attach
	controller string // one of DiskControllerTypes
	bootable   bool
}
    60  
// windowsOptConfig holds the optional "windows_opt_config" settings that
// vSphere uses when customizing a clone of a Windows guest: product key,
// local administrator password and domain-join credentials.
type windowsOptConfig struct {
	productKey         string
	adminPassword      string
	domainUser         string
	domain             string
	domainUserPassword string
}
    69  
// cdrom describes a CD-ROM image to attach: the datastore name and the
// path of the ISO within that datastore.
type cdrom struct {
	datastore string
	path      string
}

// memoryAllocation carries the memory resource allocation settings
// applied to the virtual machine.
type memoryAllocation struct {
	reservation int64 // guaranteed memory reservation (from "memory_reservation")
}
    78  
// virtualMachine is the in-memory representation of the resource
// configuration, assembled by resourceVSphereVirtualMachineCreate and
// consumed by setupVirtualMachine.
type virtualMachine struct {
	name                  string
	folder                string
	datacenter            string
	cluster               string
	resourcePool          string
	datastore             string
	vcpu                  int32
	memoryMb              int64
	memoryAllocation      memoryAllocation
	template              string // when set, the VM is cloned from this template
	networkInterfaces     []networkInterface
	hardDisks             []hardDisk
	cdroms                []cdrom
	domain                string
	timeZone              string
	dnsSuffixes           []string
	dnsServers            []string
	hasBootableVmdk       bool
	linkedClone           bool
	skipCustomization     bool
	enableDiskUUID        bool
	windowsOptionalConfig windowsOptConfig
	customConfigurations  map[string](types.AnyType)
}
   104  
   105  func (v virtualMachine) Path() string {
   106  	return vmPath(v.folder, v.name)
   107  }
   108  
   109  func vmPath(folder string, name string) string {
   110  	var path string
   111  	if len(folder) > 0 {
   112  		path += folder + "/"
   113  	}
   114  	return path + name
   115  }
   116  
// resourceVSphereVirtualMachine defines the vsphere_virtual_machine
// resource: its CRUD handlers, the state-migration hook and the full
// attribute schema.
func resourceVSphereVirtualMachine() *schema.Resource {
	return &schema.Resource{
		Create: resourceVSphereVirtualMachineCreate,
		Read:   resourceVSphereVirtualMachineRead,
		Update: resourceVSphereVirtualMachineUpdate,
		Delete: resourceVSphereVirtualMachineDelete,

		SchemaVersion: 1,
		MigrateState:  resourceVSphereVirtualMachineMigrateState,

		Schema: map[string]*schema.Schema{
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"folder": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			// vcpu and memory are updatable in place (see
			// resourceVSphereVirtualMachineUpdate); both require a reboot.
			"vcpu": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
			},

			"memory": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
			},

			"memory_reservation": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				Default:  0,
				ForceNew: true,
			},

			"datacenter": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"cluster": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"resource_pool": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"linked_clone": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Default:  false,
				ForceNew: true,
			},
			// Deprecated top-level gateway; superseded by the per-interface
			// ipv4_gateway attribute below.
			"gateway": &schema.Schema{
				Type:       schema.TypeString,
				Optional:   true,
				ForceNew:   true,
				Deprecated: "Please use network_interface.ipv4_gateway",
			},

			"domain": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Default:  "vsphere.local",
			},

			"time_zone": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Default:  "Etc/UTC",
			},

			// DNS settings fall back to DefaultDNSSuffixes/DefaultDNSServers
			// when unset (see resourceVSphereVirtualMachineCreate).
			"dns_suffixes": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				ForceNew: true,
			},

			"dns_servers": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				ForceNew: true,
			},

			"skip_customization": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				ForceNew: true,
				Default:  false,
			},

			"enable_disk_uuid": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				ForceNew: true,
				Default:  false,
			},

			"uuid": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"custom_configuration_parameters": &schema.Schema{
				Type:     schema.TypeMap,
				Optional: true,
				ForceNew: true,
			},

			// Optional Windows guest-customization settings; only the first
			// list element is read by the Create handler.
			"windows_opt_config": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"product_key": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},

						"admin_password": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},

						"domain_user": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},

						"domain": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},

						"domain_user_password": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},
					},
				},
			},

			"network_interface": &schema.Schema{
				Type:     schema.TypeList,
				Required: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"label": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
							ForceNew: true,
						},

						// ip_address/subnet_mask are deprecated in favour of
						// ipv4_address/ipv4_prefix_length.
						"ip_address": &schema.Schema{
							Type:       schema.TypeString,
							Optional:   true,
							Computed:   true,
							Deprecated: "Please use ipv4_address",
						},

						"subnet_mask": &schema.Schema{
							Type:       schema.TypeString,
							Optional:   true,
							Computed:   true,
							Deprecated: "Please use ipv4_prefix_length",
						},

						"ipv4_address": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
						},

						"ipv4_prefix_length": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
							Computed: true,
						},

						"ipv4_gateway": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
						},

						"ipv6_address": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
						},

						"ipv6_prefix_length": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
							Computed: true,
						},

						"ipv6_gateway": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
						},

						"adapter_type": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},

						"mac_address": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
						},
					},
				},
			},

			// disk is a set, so updates are computed via set difference in
			// resourceVSphereVirtualMachineUpdate.
			"disk": &schema.Schema{
				Type:     schema.TypeSet,
				Required: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"uuid": &schema.Schema{
							Type:     schema.TypeString,
							Computed: true,
						},

						"key": &schema.Schema{
							Type:     schema.TypeInt,
							Computed: true,
						},

						"template": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
						},

						"type": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Default:  "eager_zeroed",
							ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
								value := v.(string)
								if value != "thin" && value != "eager_zeroed" {
									errors = append(errors, fmt.Errorf(
										"only 'thin' and 'eager_zeroed' are supported values for 'type'"))
								}
								return
							},
						},

						"datastore": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
						},

						"size": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
						},

						"name": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
						},

						"iops": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
						},

						"vmdk": &schema.Schema{
							// TODO: Add ValidateFunc to confirm path exists
							Type:     schema.TypeString,
							Optional: true,
						},

						"bootable": &schema.Schema{
							Type:     schema.TypeBool,
							Optional: true,
						},

						"keep_on_remove": &schema.Schema{
							Type:     schema.TypeBool,
							Optional: true,
						},

						"controller_type": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Default:  "scsi",
							ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
								value := v.(string)
								found := false
								for _, t := range DiskControllerTypes {
									if t == value {
										found = true
									}
								}
								if !found {
									errors = append(errors, fmt.Errorf(
										"Supported values for 'controller_type' are %v", strings.Join(DiskControllerTypes, ", ")))
								}
								return
							},
						},
					},
				},
			},

			"cdrom": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"datastore": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
							ForceNew: true,
						},

						"path": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
							ForceNew: true,
						},
					},
				},
			},
		},
	}
}
   473  
   474  func resourceVSphereVirtualMachineUpdate(d *schema.ResourceData, meta interface{}) error {
   475  	// flag if changes have to be applied
   476  	hasChanges := false
   477  	// flag if changes have to be done when powered off
   478  	rebootRequired := false
   479  
   480  	// make config spec
   481  	configSpec := types.VirtualMachineConfigSpec{}
   482  
   483  	if d.HasChange("vcpu") {
   484  		configSpec.NumCPUs = int32(d.Get("vcpu").(int))
   485  		hasChanges = true
   486  		rebootRequired = true
   487  	}
   488  
   489  	if d.HasChange("memory") {
   490  		configSpec.MemoryMB = int64(d.Get("memory").(int))
   491  		hasChanges = true
   492  		rebootRequired = true
   493  	}
   494  
   495  	client := meta.(*govmomi.Client)
   496  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
   497  	if err != nil {
   498  		return err
   499  	}
   500  	finder := find.NewFinder(client.Client, true)
   501  	finder = finder.SetDatacenter(dc)
   502  
   503  	vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string)))
   504  	if err != nil {
   505  		return err
   506  	}
   507  
   508  	if d.HasChange("disk") {
   509  		hasChanges = true
   510  		oldDisks, newDisks := d.GetChange("disk")
   511  		oldDiskSet := oldDisks.(*schema.Set)
   512  		newDiskSet := newDisks.(*schema.Set)
   513  
   514  		addedDisks := newDiskSet.Difference(oldDiskSet)
   515  		removedDisks := oldDiskSet.Difference(newDiskSet)
   516  
   517  		// Removed disks
   518  		for _, diskRaw := range removedDisks.List() {
   519  			if disk, ok := diskRaw.(map[string]interface{}); ok {
   520  				devices, err := vm.Device(context.TODO())
   521  				if err != nil {
   522  					return fmt.Errorf("[ERROR] Update Remove Disk - Could not get virtual device list: %v", err)
   523  				}
   524  				virtualDisk := devices.FindByKey(int32(disk["key"].(int)))
   525  
   526  				keep := false
   527  				if v, ok := disk["keep_on_remove"].(bool); ok {
   528  					keep = v
   529  				}
   530  
   531  				err = vm.RemoveDevice(context.TODO(), keep, virtualDisk)
   532  				if err != nil {
   533  					return fmt.Errorf("[ERROR] Update Remove Disk - Error removing disk: %v", err)
   534  				}
   535  			}
   536  		}
   537  		// Added disks
   538  		for _, diskRaw := range addedDisks.List() {
   539  			if disk, ok := diskRaw.(map[string]interface{}); ok {
   540  
   541  				var datastore *object.Datastore
   542  				if disk["datastore"] == "" {
   543  					datastore, err = finder.DefaultDatastore(context.TODO())
   544  					if err != nil {
   545  						return fmt.Errorf("[ERROR] Update Remove Disk - Error finding datastore: %v", err)
   546  					}
   547  				} else {
   548  					datastore, err = finder.Datastore(context.TODO(), disk["datastore"].(string))
   549  					if err != nil {
   550  						log.Printf("[ERROR] Couldn't find datastore %v.  %s", disk["datastore"].(string), err)
   551  						return err
   552  					}
   553  				}
   554  
   555  				var size int64
   556  				if disk["size"] == 0 {
   557  					size = 0
   558  				} else {
   559  					size = int64(disk["size"].(int))
   560  				}
   561  				iops := int64(disk["iops"].(int))
   562  				controller_type := disk["controller_type"].(string)
   563  
   564  				var mo mo.VirtualMachine
   565  				vm.Properties(context.TODO(), vm.Reference(), []string{"summary", "config"}, &mo)
   566  
   567  				var diskPath string
   568  				switch {
   569  				case disk["vmdk"] != "":
   570  					diskPath = disk["vmdk"].(string)
   571  				case disk["name"] != "":
   572  					snapshotFullDir := mo.Config.Files.SnapshotDirectory
   573  					split := strings.Split(snapshotFullDir, " ")
   574  					if len(split) != 2 {
   575  						return fmt.Errorf("[ERROR] createVirtualMachine - failed to split snapshot directory: %v", snapshotFullDir)
   576  					}
   577  					vmWorkingPath := split[1]
   578  					diskPath = vmWorkingPath + disk["name"].(string)
   579  				default:
   580  					return fmt.Errorf("[ERROR] resourceVSphereVirtualMachineUpdate - Neither vmdk path nor vmdk name was given")
   581  				}
   582  
   583  				log.Printf("[INFO] Attaching disk: %v", diskPath)
   584  				err = addHardDisk(vm, size, iops, "thin", datastore, diskPath, controller_type)
   585  				if err != nil {
   586  					log.Printf("[ERROR] Add Hard Disk Failed: %v", err)
   587  					return err
   588  				}
   589  			}
   590  			if err != nil {
   591  				return err
   592  			}
   593  		}
   594  	}
   595  
   596  	// do nothing if there are no changes
   597  	if !hasChanges {
   598  		return nil
   599  	}
   600  
   601  	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
   602  
   603  	if rebootRequired {
   604  		log.Printf("[INFO] Shutting down virtual machine: %s", d.Id())
   605  
   606  		task, err := vm.PowerOff(context.TODO())
   607  		if err != nil {
   608  			return err
   609  		}
   610  
   611  		err = task.Wait(context.TODO())
   612  		if err != nil {
   613  			return err
   614  		}
   615  	}
   616  
   617  	log.Printf("[INFO] Reconfiguring virtual machine: %s", d.Id())
   618  
   619  	task, err := vm.Reconfigure(context.TODO(), configSpec)
   620  	if err != nil {
   621  		log.Printf("[ERROR] %s", err)
   622  	}
   623  
   624  	err = task.Wait(context.TODO())
   625  	if err != nil {
   626  		log.Printf("[ERROR] %s", err)
   627  	}
   628  
   629  	if rebootRequired {
   630  		task, err = vm.PowerOn(context.TODO())
   631  		if err != nil {
   632  			return err
   633  		}
   634  
   635  		err = task.Wait(context.TODO())
   636  		if err != nil {
   637  			log.Printf("[ERROR] %s", err)
   638  		}
   639  	}
   640  
   641  	return resourceVSphereVirtualMachineRead(d, meta)
   642  }
   643  
   644  func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error {
   645  	client := meta.(*govmomi.Client)
   646  
   647  	vm := virtualMachine{
   648  		name:     d.Get("name").(string),
   649  		vcpu:     int32(d.Get("vcpu").(int)),
   650  		memoryMb: int64(d.Get("memory").(int)),
   651  		memoryAllocation: memoryAllocation{
   652  			reservation: int64(d.Get("memory_reservation").(int)),
   653  		},
   654  	}
   655  
   656  	if v, ok := d.GetOk("folder"); ok {
   657  		vm.folder = v.(string)
   658  	}
   659  
   660  	if v, ok := d.GetOk("datacenter"); ok {
   661  		vm.datacenter = v.(string)
   662  	}
   663  
   664  	if v, ok := d.GetOk("cluster"); ok {
   665  		vm.cluster = v.(string)
   666  	}
   667  
   668  	if v, ok := d.GetOk("resource_pool"); ok {
   669  		vm.resourcePool = v.(string)
   670  	}
   671  
   672  	if v, ok := d.GetOk("domain"); ok {
   673  		vm.domain = v.(string)
   674  	}
   675  
   676  	if v, ok := d.GetOk("time_zone"); ok {
   677  		vm.timeZone = v.(string)
   678  	}
   679  
   680  	if v, ok := d.GetOk("linked_clone"); ok {
   681  		vm.linkedClone = v.(bool)
   682  	}
   683  
   684  	if v, ok := d.GetOk("skip_customization"); ok {
   685  		vm.skipCustomization = v.(bool)
   686  	}
   687  
   688  	if v, ok := d.GetOk("enable_disk_uuid"); ok {
   689  		vm.enableDiskUUID = v.(bool)
   690  	}
   691  
   692  	if raw, ok := d.GetOk("dns_suffixes"); ok {
   693  		for _, v := range raw.([]interface{}) {
   694  			vm.dnsSuffixes = append(vm.dnsSuffixes, v.(string))
   695  		}
   696  	} else {
   697  		vm.dnsSuffixes = DefaultDNSSuffixes
   698  	}
   699  
   700  	if raw, ok := d.GetOk("dns_servers"); ok {
   701  		for _, v := range raw.([]interface{}) {
   702  			vm.dnsServers = append(vm.dnsServers, v.(string))
   703  		}
   704  	} else {
   705  		vm.dnsServers = DefaultDNSServers
   706  	}
   707  
   708  	if vL, ok := d.GetOk("custom_configuration_parameters"); ok {
   709  		if custom_configs, ok := vL.(map[string]interface{}); ok {
   710  			custom := make(map[string]types.AnyType)
   711  			for k, v := range custom_configs {
   712  				custom[k] = v
   713  			}
   714  			vm.customConfigurations = custom
   715  			log.Printf("[DEBUG] custom_configuration_parameters init: %v", vm.customConfigurations)
   716  		}
   717  	}
   718  
   719  	if vL, ok := d.GetOk("network_interface"); ok {
   720  		networks := make([]networkInterface, len(vL.([]interface{})))
   721  		for i, v := range vL.([]interface{}) {
   722  			network := v.(map[string]interface{})
   723  			networks[i].label = network["label"].(string)
   724  			if v, ok := network["ip_address"].(string); ok && v != "" {
   725  				networks[i].ipv4Address = v
   726  			}
   727  			if v, ok := d.GetOk("gateway"); ok {
   728  				networks[i].ipv4Gateway = v.(string)
   729  			}
   730  			if v, ok := network["subnet_mask"].(string); ok && v != "" {
   731  				ip := net.ParseIP(v).To4()
   732  				if ip != nil {
   733  					mask := net.IPv4Mask(ip[0], ip[1], ip[2], ip[3])
   734  					pl, _ := mask.Size()
   735  					networks[i].ipv4PrefixLength = pl
   736  				} else {
   737  					return fmt.Errorf("subnet_mask parameter is invalid.")
   738  				}
   739  			}
   740  			if v, ok := network["ipv4_address"].(string); ok && v != "" {
   741  				networks[i].ipv4Address = v
   742  			}
   743  			if v, ok := network["ipv4_prefix_length"].(int); ok && v != 0 {
   744  				networks[i].ipv4PrefixLength = v
   745  			}
   746  			if v, ok := network["ipv4_gateway"].(string); ok && v != "" {
   747  				networks[i].ipv4Gateway = v
   748  			}
   749  			if v, ok := network["ipv6_address"].(string); ok && v != "" {
   750  				networks[i].ipv6Address = v
   751  			}
   752  			if v, ok := network["ipv6_prefix_length"].(int); ok && v != 0 {
   753  				networks[i].ipv6PrefixLength = v
   754  			}
   755  			if v, ok := network["ipv6_gateway"].(string); ok && v != "" {
   756  				networks[i].ipv6Gateway = v
   757  			}
   758  			if v, ok := network["mac_address"].(string); ok && v != "" {
   759  				networks[i].macAddress = v
   760  			}
   761  		}
   762  		vm.networkInterfaces = networks
   763  		log.Printf("[DEBUG] network_interface init: %v", networks)
   764  	}
   765  
   766  	if vL, ok := d.GetOk("windows_opt_config"); ok {
   767  		var winOpt windowsOptConfig
   768  		custom_configs := (vL.([]interface{}))[0].(map[string]interface{})
   769  		if v, ok := custom_configs["admin_password"].(string); ok && v != "" {
   770  			winOpt.adminPassword = v
   771  		}
   772  		if v, ok := custom_configs["domain"].(string); ok && v != "" {
   773  			winOpt.domain = v
   774  		}
   775  		if v, ok := custom_configs["domain_user"].(string); ok && v != "" {
   776  			winOpt.domainUser = v
   777  		}
   778  		if v, ok := custom_configs["product_key"].(string); ok && v != "" {
   779  			winOpt.productKey = v
   780  		}
   781  		if v, ok := custom_configs["domain_user_password"].(string); ok && v != "" {
   782  			winOpt.domainUserPassword = v
   783  		}
   784  		vm.windowsOptionalConfig = winOpt
   785  		log.Printf("[DEBUG] windows config init: %v", winOpt)
   786  	}
   787  
   788  	if vL, ok := d.GetOk("disk"); ok {
   789  		if diskSet, ok := vL.(*schema.Set); ok {
   790  
   791  			disks := []hardDisk{}
   792  			hasBootableDisk := false
   793  			for _, value := range diskSet.List() {
   794  				disk := value.(map[string]interface{})
   795  				newDisk := hardDisk{}
   796  
   797  				if v, ok := disk["template"].(string); ok && v != "" {
   798  					if v, ok := disk["name"].(string); ok && v != "" {
   799  						return fmt.Errorf("Cannot specify name of a template")
   800  					}
   801  					vm.template = v
   802  					if hasBootableDisk {
   803  						return fmt.Errorf("[ERROR] Only one bootable disk or template may be given")
   804  					}
   805  					hasBootableDisk = true
   806  				}
   807  
   808  				if v, ok := disk["type"].(string); ok && v != "" {
   809  					newDisk.initType = v
   810  				}
   811  
   812  				if v, ok := disk["datastore"].(string); ok && v != "" {
   813  					vm.datastore = v
   814  				}
   815  
   816  				if v, ok := disk["size"].(int); ok && v != 0 {
   817  					if v, ok := disk["template"].(string); ok && v != "" {
   818  						return fmt.Errorf("Cannot specify size of a template")
   819  					}
   820  
   821  					if v, ok := disk["name"].(string); ok && v != "" {
   822  						newDisk.name = v
   823  					} else {
   824  						return fmt.Errorf("[ERROR] Disk name must be provided when creating a new disk")
   825  					}
   826  
   827  					newDisk.size = int64(v)
   828  				}
   829  
   830  				if v, ok := disk["iops"].(int); ok && v != 0 {
   831  					newDisk.iops = int64(v)
   832  				}
   833  
   834  				if v, ok := disk["controller_type"].(string); ok && v != "" {
   835  					newDisk.controller = v
   836  				}
   837  
   838  				if vVmdk, ok := disk["vmdk"].(string); ok && vVmdk != "" {
   839  					if v, ok := disk["template"].(string); ok && v != "" {
   840  						return fmt.Errorf("Cannot specify a vmdk for a template")
   841  					}
   842  					if v, ok := disk["size"].(string); ok && v != "" {
   843  						return fmt.Errorf("Cannot specify size of a vmdk")
   844  					}
   845  					if v, ok := disk["name"].(string); ok && v != "" {
   846  						return fmt.Errorf("Cannot specify name of a vmdk")
   847  					}
   848  					if vBootable, ok := disk["bootable"].(bool); ok {
   849  						hasBootableDisk = true
   850  						newDisk.bootable = vBootable
   851  						vm.hasBootableVmdk = vBootable
   852  					}
   853  					newDisk.vmdkPath = vVmdk
   854  				}
   855  				// Preserves order so bootable disk is first
   856  				if newDisk.bootable == true || disk["template"] != "" {
   857  					disks = append([]hardDisk{newDisk}, disks...)
   858  				} else {
   859  					disks = append(disks, newDisk)
   860  				}
   861  			}
   862  			vm.hardDisks = disks
   863  			log.Printf("[DEBUG] disk init: %v", disks)
   864  		}
   865  	}
   866  
   867  	if vL, ok := d.GetOk("cdrom"); ok {
   868  		cdroms := make([]cdrom, len(vL.([]interface{})))
   869  		for i, v := range vL.([]interface{}) {
   870  			c := v.(map[string]interface{})
   871  			if v, ok := c["datastore"].(string); ok && v != "" {
   872  				cdroms[i].datastore = v
   873  			} else {
   874  				return fmt.Errorf("Datastore argument must be specified when attaching a cdrom image.")
   875  			}
   876  			if v, ok := c["path"].(string); ok && v != "" {
   877  				cdroms[i].path = v
   878  			} else {
   879  				return fmt.Errorf("Path argument must be specified when attaching a cdrom image.")
   880  			}
   881  		}
   882  		vm.cdroms = cdroms
   883  		log.Printf("[DEBUG] cdrom init: %v", cdroms)
   884  	}
   885  
   886  	err := vm.setupVirtualMachine(client)
   887  	if err != nil {
   888  		return err
   889  	}
   890  
   891  	d.SetId(vm.Path())
   892  	log.Printf("[INFO] Created virtual machine: %s", d.Id())
   893  
   894  	return resourceVSphereVirtualMachineRead(d, meta)
   895  }
   896  
   897  func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{}) error {
   898  	log.Printf("[DEBUG] virtual machine resource data: %#v", d)
   899  	client := meta.(*govmomi.Client)
   900  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
   901  	if err != nil {
   902  		return err
   903  	}
   904  	finder := find.NewFinder(client.Client, true)
   905  	finder = finder.SetDatacenter(dc)
   906  
   907  	vm, err := finder.VirtualMachine(context.TODO(), d.Id())
   908  	if err != nil {
   909  		d.SetId("")
   910  		return nil
   911  	}
   912  
   913  	state, err := vm.PowerState(context.TODO())
   914  	if err != nil {
   915  		return err
   916  	}
   917  
   918  	if state == types.VirtualMachinePowerStatePoweredOn {
   919  		// wait for interfaces to appear
   920  		_, err = vm.WaitForNetIP(context.TODO(), true)
   921  		if err != nil {
   922  			return err
   923  		}
   924  	}
   925  
   926  	var mvm mo.VirtualMachine
   927  	collector := property.DefaultCollector(client.Client)
   928  	if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"guest", "summary", "datastore", "config"}, &mvm); err != nil {
   929  		return err
   930  	}
   931  
   932  	log.Printf("[DEBUG] Datacenter - %#v", dc)
   933  	log.Printf("[DEBUG] mvm.Summary.Config - %#v", mvm.Summary.Config)
   934  	log.Printf("[DEBUG] mvm.Summary.Config - %#v", mvm.Config)
   935  	log.Printf("[DEBUG] mvm.Guest.Net - %#v", mvm.Guest.Net)
   936  
   937  	disks := make([]map[string]interface{}, 0)
   938  	templateDisk := make(map[string]interface{}, 1)
   939  	for _, device := range mvm.Config.Hardware.Device {
   940  		if vd, ok := device.(*types.VirtualDisk); ok {
   941  
   942  			virtualDevice := vd.GetVirtualDevice()
   943  
   944  			backingInfo := virtualDevice.Backing
   945  			var diskFullPath string
   946  			var diskUuid string
   947  			if v, ok := backingInfo.(*types.VirtualDiskFlatVer2BackingInfo); ok {
   948  				diskFullPath = v.FileName
   949  				diskUuid = v.Uuid
   950  			} else if v, ok := backingInfo.(*types.VirtualDiskSparseVer2BackingInfo); ok {
   951  				diskFullPath = v.FileName
   952  				diskUuid = v.Uuid
   953  			}
   954  			log.Printf("[DEBUG] resourceVSphereVirtualMachineRead - Analyzing disk: %v", diskFullPath)
   955  
   956  			// Separate datastore and path
   957  			diskFullPathSplit := strings.Split(diskFullPath, " ")
   958  			if len(diskFullPathSplit) != 2 {
   959  				return fmt.Errorf("[ERROR] Failed trying to parse disk path: %v", diskFullPath)
   960  			}
   961  			diskPath := diskFullPathSplit[1]
   962  			// Isolate filename
   963  			diskNameSplit := strings.Split(diskPath, "/")
   964  			diskName := diskNameSplit[len(diskNameSplit)-1]
   965  			// Remove possible extension
   966  			diskName = strings.Split(diskName, ".")[0]
   967  
   968  			if prevDisks, ok := d.GetOk("disk"); ok {
   969  				if prevDiskSet, ok := prevDisks.(*schema.Set); ok {
   970  					for _, v := range prevDiskSet.List() {
   971  						prevDisk := v.(map[string]interface{})
   972  
   973  						// We're guaranteed only one template disk.  Passing value directly through since templates should be immutable
   974  						if prevDisk["template"] != "" {
   975  							if len(templateDisk) == 0 {
   976  								templateDisk = prevDisk
   977  								disks = append(disks, templateDisk)
   978  								break
   979  							}
   980  						}
   981  
   982  						// It is enforced that prevDisk["name"] should only be set in the case
   983  						// of creating a new disk for the user.
   984  						// size case:  name was set by user, compare parsed filename from mo.filename (without path or .vmdk extension) with name
   985  						// vmdk case:  compare prevDisk["vmdk"] and mo.Filename
   986  						if diskName == prevDisk["name"] || diskPath == prevDisk["vmdk"] {
   987  
   988  							prevDisk["key"] = virtualDevice.Key
   989  							prevDisk["uuid"] = diskUuid
   990  
   991  							disks = append(disks, prevDisk)
   992  							break
   993  						}
   994  					}
   995  				}
   996  			}
   997  			log.Printf("[DEBUG] disks: %#v", disks)
   998  		}
   999  	}
  1000  	err = d.Set("disk", disks)
  1001  	if err != nil {
  1002  		return fmt.Errorf("Invalid disks to set: %#v", disks)
  1003  	}
  1004  
  1005  	networkInterfaces := make([]map[string]interface{}, 0)
  1006  	for _, v := range mvm.Guest.Net {
  1007  		if v.DeviceConfigId >= 0 {
  1008  			log.Printf("[DEBUG] v.Network - %#v", v.Network)
  1009  			networkInterface := make(map[string]interface{})
  1010  			networkInterface["label"] = v.Network
  1011  			networkInterface["mac_address"] = v.MacAddress
  1012  			for _, ip := range v.IpConfig.IpAddress {
  1013  				p := net.ParseIP(ip.IpAddress)
  1014  				if p.To4() != nil {
  1015  					log.Printf("[DEBUG] p.String - %#v", p.String())
  1016  					log.Printf("[DEBUG] ip.PrefixLength - %#v", ip.PrefixLength)
  1017  					networkInterface["ipv4_address"] = p.String()
  1018  					networkInterface["ipv4_prefix_length"] = ip.PrefixLength
  1019  				} else if p.To16() != nil {
  1020  					log.Printf("[DEBUG] p.String - %#v", p.String())
  1021  					log.Printf("[DEBUG] ip.PrefixLength - %#v", ip.PrefixLength)
  1022  					networkInterface["ipv6_address"] = p.String()
  1023  					networkInterface["ipv6_prefix_length"] = ip.PrefixLength
  1024  				}
  1025  				log.Printf("[DEBUG] networkInterface: %#v", networkInterface)
  1026  			}
  1027  			log.Printf("[DEBUG] networkInterface: %#v", networkInterface)
  1028  			networkInterfaces = append(networkInterfaces, networkInterface)
  1029  		}
  1030  	}
  1031  	if mvm.Guest.IpStack != nil {
  1032  		for _, v := range mvm.Guest.IpStack {
  1033  			if v.IpRouteConfig != nil && v.IpRouteConfig.IpRoute != nil {
  1034  				for _, route := range v.IpRouteConfig.IpRoute {
  1035  					if route.Gateway.Device != "" {
  1036  						gatewaySetting := ""
  1037  						if route.Network == "::" {
  1038  							gatewaySetting = "ipv6_gateway"
  1039  						} else if route.Network == "0.0.0.0" {
  1040  							gatewaySetting = "ipv4_gateway"
  1041  						}
  1042  						if gatewaySetting != "" {
  1043  							deviceID, err := strconv.Atoi(route.Gateway.Device)
  1044  							if err != nil {
  1045  								log.Printf("[WARN] error at processing %s of device id %#v: %#v", gatewaySetting, route.Gateway.Device, err)
  1046  							} else {
  1047  								log.Printf("[DEBUG] %s of device id %d: %s", gatewaySetting, deviceID, route.Gateway.IpAddress)
  1048  								networkInterfaces[deviceID][gatewaySetting] = route.Gateway.IpAddress
  1049  							}
  1050  						}
  1051  					}
  1052  				}
  1053  			}
  1054  		}
  1055  	}
  1056  	log.Printf("[DEBUG] networkInterfaces: %#v", networkInterfaces)
  1057  	err = d.Set("network_interface", networkInterfaces)
  1058  	if err != nil {
  1059  		return fmt.Errorf("Invalid network interfaces to set: %#v", networkInterfaces)
  1060  	}
  1061  
  1062  	if len(networkInterfaces) > 0 {
  1063  		if _, ok := networkInterfaces[0]["ipv4_address"]; ok {
  1064  			log.Printf("[DEBUG] ip address: %v", networkInterfaces[0]["ipv4_address"].(string))
  1065  			d.SetConnInfo(map[string]string{
  1066  				"type": "ssh",
  1067  				"host": networkInterfaces[0]["ipv4_address"].(string),
  1068  			})
  1069  		}
  1070  	}
  1071  
  1072  	var rootDatastore string
  1073  	for _, v := range mvm.Datastore {
  1074  		var md mo.Datastore
  1075  		if err := collector.RetrieveOne(context.TODO(), v, []string{"name", "parent"}, &md); err != nil {
  1076  			return err
  1077  		}
  1078  		if md.Parent.Type == "StoragePod" {
  1079  			var msp mo.StoragePod
  1080  			if err := collector.RetrieveOne(context.TODO(), *md.Parent, []string{"name"}, &msp); err != nil {
  1081  				return err
  1082  			}
  1083  			rootDatastore = msp.Name
  1084  			log.Printf("[DEBUG] %#v", msp.Name)
  1085  		} else {
  1086  			rootDatastore = md.Name
  1087  			log.Printf("[DEBUG] %#v", md.Name)
  1088  		}
  1089  		break
  1090  	}
  1091  
  1092  	d.Set("datacenter", dc)
  1093  	d.Set("memory", mvm.Summary.Config.MemorySizeMB)
  1094  	d.Set("memory_reservation", mvm.Summary.Config.MemoryReservation)
  1095  	d.Set("cpu", mvm.Summary.Config.NumCpu)
  1096  	d.Set("datastore", rootDatastore)
  1097  	d.Set("uuid", mvm.Summary.Config.Uuid)
  1098  
  1099  	return nil
  1100  }
  1101  
  1102  func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error {
  1103  	client := meta.(*govmomi.Client)
  1104  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
  1105  	if err != nil {
  1106  		return err
  1107  	}
  1108  	finder := find.NewFinder(client.Client, true)
  1109  	finder = finder.SetDatacenter(dc)
  1110  
  1111  	vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string)))
  1112  	if err != nil {
  1113  		return err
  1114  	}
  1115  	devices, err := vm.Device(context.TODO())
  1116  	if err != nil {
  1117  		log.Printf("[DEBUG] resourceVSphereVirtualMachineDelete - Failed to get device list: %v", err)
  1118  		return err
  1119  	}
  1120  
  1121  	log.Printf("[INFO] Deleting virtual machine: %s", d.Id())
  1122  	state, err := vm.PowerState(context.TODO())
  1123  	if err != nil {
  1124  		return err
  1125  	}
  1126  
  1127  	if state == types.VirtualMachinePowerStatePoweredOn {
  1128  		task, err := vm.PowerOff(context.TODO())
  1129  		if err != nil {
  1130  			return err
  1131  		}
  1132  
  1133  		err = task.Wait(context.TODO())
  1134  		if err != nil {
  1135  			return err
  1136  		}
  1137  	}
  1138  
  1139  	// Safely eject any disks the user marked as keep_on_remove
  1140  	if vL, ok := d.GetOk("disk"); ok {
  1141  		if diskSet, ok := vL.(*schema.Set); ok {
  1142  
  1143  			for _, value := range diskSet.List() {
  1144  				disk := value.(map[string]interface{})
  1145  
  1146  				if v, ok := disk["keep_on_remove"].(bool); ok && v == true {
  1147  					log.Printf("[DEBUG] not destroying %v", disk["name"])
  1148  					virtualDisk := devices.FindByKey(int32(disk["key"].(int)))
  1149  					err = vm.RemoveDevice(context.TODO(), true, virtualDisk)
  1150  					if err != nil {
  1151  						log.Printf("[ERROR] Update Remove Disk - Error removing disk: %v", err)
  1152  						return err
  1153  					}
  1154  				}
  1155  			}
  1156  		}
  1157  	}
  1158  
  1159  	task, err := vm.Destroy(context.TODO())
  1160  	if err != nil {
  1161  		return err
  1162  	}
  1163  
  1164  	err = task.Wait(context.TODO())
  1165  	if err != nil {
  1166  		return err
  1167  	}
  1168  
  1169  	d.SetId("")
  1170  	return nil
  1171  }
  1172  
  1173  // addHardDisk adds a new Hard Disk to the VirtualMachine.
  1174  func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string, datastore *object.Datastore, diskPath string, controller_type string) error {
  1175  	devices, err := vm.Device(context.TODO())
  1176  	if err != nil {
  1177  		return err
  1178  	}
  1179  	log.Printf("[DEBUG] vm devices: %#v\n", devices)
  1180  
  1181  	var controller types.BaseVirtualController
  1182  	switch controller_type {
  1183  	case "scsi":
  1184  		controller, err = devices.FindDiskController(controller_type)
  1185  	case "scsi-lsi-parallel":
  1186  		controller = devices.PickController(&types.VirtualLsiLogicController{})
  1187  	case "scsi-buslogic":
  1188  		controller = devices.PickController(&types.VirtualBusLogicController{})
  1189  	case "scsi-paravirtual":
  1190  		controller = devices.PickController(&types.ParaVirtualSCSIController{})
  1191  	case "scsi-lsi-sas":
  1192  		controller = devices.PickController(&types.VirtualLsiLogicSASController{})
  1193  	case "ide":
  1194  		controller, err = devices.FindDiskController(controller_type)
  1195  	default:
  1196  		return fmt.Errorf("[ERROR] Unsupported disk controller provided: %v", controller_type)
  1197  	}
  1198  
  1199  	if err != nil || controller == nil {
  1200  		log.Printf("[DEBUG] Couldn't find a %v controller.  Creating one..", controller_type)
  1201  
  1202  		var c types.BaseVirtualDevice
  1203  		switch controller_type {
  1204  		case "scsi":
  1205  			// Create scsi controller
  1206  			c, err = devices.CreateSCSIController("scsi")
  1207  			if err != nil {
  1208  				return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err)
  1209  			}
  1210  		case "scsi-lsi-parallel":
  1211  			// Create scsi controller
  1212  			c, err = devices.CreateSCSIController("lsilogic")
  1213  			if err != nil {
  1214  				return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err)
  1215  			}
  1216  		case "scsi-buslogic":
  1217  			// Create scsi controller
  1218  			c, err = devices.CreateSCSIController("buslogic")
  1219  			if err != nil {
  1220  				return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err)
  1221  			}
  1222  		case "scsi-paravirtual":
  1223  			// Create scsi controller
  1224  			c, err = devices.CreateSCSIController("pvscsi")
  1225  			if err != nil {
  1226  				return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err)
  1227  			}
  1228  		case "scsi-lsi-sas":
  1229  			// Create scsi controller
  1230  			c, err = devices.CreateSCSIController("lsilogic-sas")
  1231  			if err != nil {
  1232  				return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err)
  1233  			}
  1234  		case "ide":
  1235  			// Create ide controller
  1236  			c, err = devices.CreateIDEController()
  1237  			if err != nil {
  1238  				return fmt.Errorf("[ERROR] Failed creating IDE controller: %v", err)
  1239  			}
  1240  		default:
  1241  			return fmt.Errorf("[ERROR] Unsupported disk controller provided: %v", controller_type)
  1242  		}
  1243  
  1244  		vm.AddDevice(context.TODO(), c)
  1245  		// Update our devices list
  1246  		devices, err := vm.Device(context.TODO())
  1247  		if err != nil {
  1248  			return err
  1249  		}
  1250  		controller = devices.PickController(c.(types.BaseVirtualController))
  1251  		if controller == nil {
  1252  			log.Printf("[ERROR] Could not find the new %v controller", controller_type)
  1253  			return fmt.Errorf("Could not find the new %v controller", controller_type)
  1254  		}
  1255  	}
  1256  
  1257  	log.Printf("[DEBUG] disk controller: %#v\n", controller)
  1258  
  1259  	// TODO Check if diskPath & datastore exist
  1260  	// If diskPath is not specified, pass empty string to CreateDisk()
  1261  	if diskPath == "" {
  1262  		return fmt.Errorf("[ERROR] addHardDisk - No path proided")
  1263  	} else {
  1264  		// TODO Check if diskPath & datastore exist
  1265  		diskPath = fmt.Sprintf("[%v] %v", datastore.Name(), diskPath)
  1266  	}
  1267  	log.Printf("[DEBUG] addHardDisk - diskPath: %v", diskPath)
  1268  	disk := devices.CreateDisk(controller, datastore.Reference(), diskPath)
  1269  
  1270  	existing := devices.SelectByBackingInfo(disk.Backing)
  1271  	log.Printf("[DEBUG] disk: %#v\n", disk)
  1272  
  1273  	if len(existing) == 0 {
  1274  		disk.CapacityInKB = int64(size * 1024 * 1024)
  1275  		if iops != 0 {
  1276  			disk.StorageIOAllocation = &types.StorageIOAllocationInfo{
  1277  				Limit: iops,
  1278  			}
  1279  		}
  1280  		backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
  1281  
  1282  		if diskType == "eager_zeroed" {
  1283  			// eager zeroed thick virtual disk
  1284  			backing.ThinProvisioned = types.NewBool(false)
  1285  			backing.EagerlyScrub = types.NewBool(true)
  1286  		} else if diskType == "thin" {
  1287  			// thin provisioned virtual disk
  1288  			backing.ThinProvisioned = types.NewBool(true)
  1289  		}
  1290  
  1291  		log.Printf("[DEBUG] addHardDisk: %#v\n", disk)
  1292  		log.Printf("[DEBUG] addHardDisk capacity: %#v\n", disk.CapacityInKB)
  1293  
  1294  		return vm.AddDevice(context.TODO(), disk)
  1295  	} else {
  1296  		log.Printf("[DEBUG] addHardDisk: Disk already present.\n")
  1297  
  1298  		return nil
  1299  	}
  1300  }
  1301  
  1302  // addCdrom adds a new virtual cdrom drive to the VirtualMachine and attaches an image (ISO) to it from a datastore path.
  1303  func addCdrom(vm *object.VirtualMachine, datastore, path string) error {
  1304  	devices, err := vm.Device(context.TODO())
  1305  	if err != nil {
  1306  		return err
  1307  	}
  1308  	log.Printf("[DEBUG] vm devices: %#v", devices)
  1309  
  1310  	var controller *types.VirtualIDEController
  1311  	controller, err = devices.FindIDEController("")
  1312  	if err != nil {
  1313  		log.Printf("[DEBUG] Couldn't find a ide controller.  Creating one..")
  1314  
  1315  		var c types.BaseVirtualDevice
  1316  		c, err := devices.CreateIDEController()
  1317  		if err != nil {
  1318  			return fmt.Errorf("[ERROR] Failed creating IDE controller: %v", err)
  1319  		}
  1320  
  1321  		if v, ok := c.(*types.VirtualIDEController); ok {
  1322  			controller = v
  1323  		} else {
  1324  			return fmt.Errorf("[ERROR] Controller type could not be asserted")
  1325  		}
  1326  		vm.AddDevice(context.TODO(), c)
  1327  		// Update our devices list
  1328  		devices, err := vm.Device(context.TODO())
  1329  		if err != nil {
  1330  			return err
  1331  		}
  1332  		controller, err = devices.FindIDEController("")
  1333  		if err != nil {
  1334  			log.Printf("[ERROR] Could not find the new disk IDE controller: %v", err)
  1335  			return err
  1336  		}
  1337  	}
  1338  	log.Printf("[DEBUG] ide controller: %#v", controller)
  1339  
  1340  	c, err := devices.CreateCdrom(controller)
  1341  	if err != nil {
  1342  		return err
  1343  	}
  1344  
  1345  	c = devices.InsertIso(c, fmt.Sprintf("[%s] %s", datastore, path))
  1346  	log.Printf("[DEBUG] addCdrom: %#v", c)
  1347  
  1348  	return vm.AddDevice(context.TODO(), c)
  1349  }
  1350  
  1351  // buildNetworkDevice builds VirtualDeviceConfigSpec for Network Device.
  1352  func buildNetworkDevice(f *find.Finder, label, adapterType string, macAddress string) (*types.VirtualDeviceConfigSpec, error) {
  1353  	network, err := f.Network(context.TODO(), "*"+label)
  1354  	if err != nil {
  1355  		return nil, err
  1356  	}
  1357  
  1358  	backing, err := network.EthernetCardBackingInfo(context.TODO())
  1359  	if err != nil {
  1360  		return nil, err
  1361  	}
  1362  
  1363  	var address_type string
  1364  	if macAddress == "" {
  1365  		address_type = string(types.VirtualEthernetCardMacTypeGenerated)
  1366  	} else {
  1367  		address_type = string(types.VirtualEthernetCardMacTypeManual)
  1368  	}
  1369  
  1370  	if adapterType == "vmxnet3" {
  1371  		return &types.VirtualDeviceConfigSpec{
  1372  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
  1373  			Device: &types.VirtualVmxnet3{
  1374  				VirtualVmxnet: types.VirtualVmxnet{
  1375  					VirtualEthernetCard: types.VirtualEthernetCard{
  1376  						VirtualDevice: types.VirtualDevice{
  1377  							Key:     -1,
  1378  							Backing: backing,
  1379  						},
  1380  						AddressType: address_type,
  1381  						MacAddress:  macAddress,
  1382  					},
  1383  				},
  1384  			},
  1385  		}, nil
  1386  	} else if adapterType == "e1000" {
  1387  		return &types.VirtualDeviceConfigSpec{
  1388  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
  1389  			Device: &types.VirtualE1000{
  1390  				VirtualEthernetCard: types.VirtualEthernetCard{
  1391  					VirtualDevice: types.VirtualDevice{
  1392  						Key:     -1,
  1393  						Backing: backing,
  1394  					},
  1395  					AddressType: address_type,
  1396  					MacAddress:  macAddress,
  1397  				},
  1398  			},
  1399  		}, nil
  1400  	} else {
  1401  		return nil, fmt.Errorf("Invalid network adapter type.")
  1402  	}
  1403  }
  1404  
  1405  // buildVMRelocateSpec builds VirtualMachineRelocateSpec to set a place for a new VirtualMachine.
  1406  func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine, linkedClone bool, initType string) (types.VirtualMachineRelocateSpec, error) {
  1407  	var key int32
  1408  	var moveType string
  1409  	if linkedClone {
  1410  		moveType = "createNewChildDiskBacking"
  1411  	} else {
  1412  		moveType = "moveAllDiskBackingsAndDisallowSharing"
  1413  	}
  1414  	log.Printf("[DEBUG] relocate type: [%s]", moveType)
  1415  
  1416  	devices, err := vm.Device(context.TODO())
  1417  	if err != nil {
  1418  		return types.VirtualMachineRelocateSpec{}, err
  1419  	}
  1420  	for _, d := range devices {
  1421  		if devices.Type(d) == "disk" {
  1422  			key = int32(d.GetVirtualDevice().Key)
  1423  		}
  1424  	}
  1425  
  1426  	isThin := initType == "thin"
  1427  	rpr := rp.Reference()
  1428  	dsr := ds.Reference()
  1429  	return types.VirtualMachineRelocateSpec{
  1430  		Datastore:    &dsr,
  1431  		Pool:         &rpr,
  1432  		DiskMoveType: moveType,
  1433  		Disk: []types.VirtualMachineRelocateSpecDiskLocator{
  1434  			{
  1435  				Datastore: dsr,
  1436  				DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{
  1437  					DiskMode:        "persistent",
  1438  					ThinProvisioned: types.NewBool(isThin),
  1439  					EagerlyScrub:    types.NewBool(!isThin),
  1440  				},
  1441  				DiskId: key,
  1442  			},
  1443  		},
  1444  	}, nil
  1445  }
  1446  
  1447  // getDatastoreObject gets datastore object.
  1448  func getDatastoreObject(client *govmomi.Client, f *object.DatacenterFolders, name string) (types.ManagedObjectReference, error) {
  1449  	s := object.NewSearchIndex(client.Client)
  1450  	ref, err := s.FindChild(context.TODO(), f.DatastoreFolder, name)
  1451  	if err != nil {
  1452  		return types.ManagedObjectReference{}, err
  1453  	}
  1454  	if ref == nil {
  1455  		return types.ManagedObjectReference{}, fmt.Errorf("Datastore '%s' not found.", name)
  1456  	}
  1457  	log.Printf("[DEBUG] getDatastoreObject: reference: %#v", ref)
  1458  	return ref.Reference(), nil
  1459  }
  1460  
  1461  // buildStoragePlacementSpecCreate builds StoragePlacementSpec for create action.
  1462  func buildStoragePlacementSpecCreate(f *object.DatacenterFolders, rp *object.ResourcePool, storagePod object.StoragePod, configSpec types.VirtualMachineConfigSpec) types.StoragePlacementSpec {
  1463  	vmfr := f.VmFolder.Reference()
  1464  	rpr := rp.Reference()
  1465  	spr := storagePod.Reference()
  1466  
  1467  	sps := types.StoragePlacementSpec{
  1468  		Type:       "create",
  1469  		ConfigSpec: &configSpec,
  1470  		PodSelectionSpec: types.StorageDrsPodSelectionSpec{
  1471  			StoragePod: &spr,
  1472  		},
  1473  		Folder:       &vmfr,
  1474  		ResourcePool: &rpr,
  1475  	}
  1476  	log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)
  1477  	return sps
  1478  }
  1479  
  1480  // buildStoragePlacementSpecClone builds StoragePlacementSpec for clone action.
  1481  func buildStoragePlacementSpecClone(c *govmomi.Client, f *object.DatacenterFolders, vm *object.VirtualMachine, rp *object.ResourcePool, storagePod object.StoragePod) types.StoragePlacementSpec {
  1482  	vmr := vm.Reference()
  1483  	vmfr := f.VmFolder.Reference()
  1484  	rpr := rp.Reference()
  1485  	spr := storagePod.Reference()
  1486  
  1487  	var o mo.VirtualMachine
  1488  	err := vm.Properties(context.TODO(), vmr, []string{"datastore"}, &o)
  1489  	if err != nil {
  1490  		return types.StoragePlacementSpec{}
  1491  	}
  1492  	ds := object.NewDatastore(c.Client, o.Datastore[0])
  1493  	log.Printf("[DEBUG] findDatastore: datastore: %#v\n", ds)
  1494  
  1495  	devices, err := vm.Device(context.TODO())
  1496  	if err != nil {
  1497  		return types.StoragePlacementSpec{}
  1498  	}
  1499  
  1500  	var key int32
  1501  	for _, d := range devices.SelectByType((*types.VirtualDisk)(nil)) {
  1502  		key = int32(d.GetVirtualDevice().Key)
  1503  		log.Printf("[DEBUG] findDatastore: virtual devices: %#v\n", d.GetVirtualDevice())
  1504  	}
  1505  
  1506  	sps := types.StoragePlacementSpec{
  1507  		Type: "clone",
  1508  		Vm:   &vmr,
  1509  		PodSelectionSpec: types.StorageDrsPodSelectionSpec{
  1510  			StoragePod: &spr,
  1511  		},
  1512  		CloneSpec: &types.VirtualMachineCloneSpec{
  1513  			Location: types.VirtualMachineRelocateSpec{
  1514  				Disk: []types.VirtualMachineRelocateSpecDiskLocator{
  1515  					{
  1516  						Datastore:       ds.Reference(),
  1517  						DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{},
  1518  						DiskId:          key,
  1519  					},
  1520  				},
  1521  				Pool: &rpr,
  1522  			},
  1523  			PowerOn:  false,
  1524  			Template: false,
  1525  		},
  1526  		CloneName: "dummy",
  1527  		Folder:    &vmfr,
  1528  	}
  1529  	return sps
  1530  }
  1531  
  1532  // findDatastore finds Datastore object.
  1533  func findDatastore(c *govmomi.Client, sps types.StoragePlacementSpec) (*object.Datastore, error) {
  1534  	var datastore *object.Datastore
  1535  	log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)
  1536  
  1537  	srm := object.NewStorageResourceManager(c.Client)
  1538  	rds, err := srm.RecommendDatastores(context.TODO(), sps)
  1539  	if err != nil {
  1540  		return nil, err
  1541  	}
  1542  	log.Printf("[DEBUG] findDatastore: recommendDatastores: %#v\n", rds)
  1543  
  1544  	spa := rds.Recommendations[0].Action[0].(*types.StoragePlacementAction)
  1545  	datastore = object.NewDatastore(c.Client, spa.Destination)
  1546  	log.Printf("[DEBUG] findDatastore: datastore: %#v", datastore)
  1547  
  1548  	return datastore, nil
  1549  }
  1550  
  1551  // createCdroms is a helper function to attach virtual cdrom devices (and their attached disk images) to a virtual IDE controller.
  1552  func createCdroms(vm *object.VirtualMachine, cdroms []cdrom) error {
  1553  	log.Printf("[DEBUG] add cdroms: %v", cdroms)
  1554  	for _, cd := range cdroms {
  1555  		log.Printf("[DEBUG] add cdrom (datastore): %v", cd.datastore)
  1556  		log.Printf("[DEBUG] add cdrom (cd path): %v", cd.path)
  1557  		err := addCdrom(vm, cd.datastore, cd.path)
  1558  		if err != nil {
  1559  			return err
  1560  		}
  1561  	}
  1562  
  1563  	return nil
  1564  }
  1565  
  1566  func (vm *virtualMachine) setupVirtualMachine(c *govmomi.Client) error {
  1567  	dc, err := getDatacenter(c, vm.datacenter)
  1568  
  1569  	if err != nil {
  1570  		return err
  1571  	}
  1572  	finder := find.NewFinder(c.Client, true)
  1573  	finder = finder.SetDatacenter(dc)
  1574  
  1575  	var template *object.VirtualMachine
  1576  	var template_mo mo.VirtualMachine
  1577  	var vm_mo mo.VirtualMachine
  1578  	if vm.template != "" {
  1579  		template, err = finder.VirtualMachine(context.TODO(), vm.template)
  1580  		if err != nil {
  1581  			return err
  1582  		}
  1583  		log.Printf("[DEBUG] template: %#v", template)
  1584  
  1585  		err = template.Properties(context.TODO(), template.Reference(), []string{"parent", "config.template", "config.guestId", "resourcePool", "snapshot", "guest.toolsVersionStatus2", "config.guestFullName"}, &template_mo)
  1586  		if err != nil {
  1587  			return err
  1588  		}
  1589  	}
  1590  
  1591  	var resourcePool *object.ResourcePool
  1592  	if vm.resourcePool == "" {
  1593  		if vm.cluster == "" {
  1594  			resourcePool, err = finder.DefaultResourcePool(context.TODO())
  1595  			if err != nil {
  1596  				return err
  1597  			}
  1598  		} else {
  1599  			resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
  1600  			if err != nil {
  1601  				return err
  1602  			}
  1603  		}
  1604  	} else {
  1605  		resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
  1606  		if err != nil {
  1607  			return err
  1608  		}
  1609  	}
  1610  	log.Printf("[DEBUG] resource pool: %#v", resourcePool)
  1611  
  1612  	dcFolders, err := dc.Folders(context.TODO())
  1613  	if err != nil {
  1614  		return err
  1615  	}
  1616  	log.Printf("[DEBUG] folder: %#v", vm.folder)
  1617  
  1618  	folder := dcFolders.VmFolder
  1619  	if len(vm.folder) > 0 {
  1620  		si := object.NewSearchIndex(c.Client)
  1621  		folderRef, err := si.FindByInventoryPath(
  1622  			context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder))
  1623  		if err != nil {
  1624  			return fmt.Errorf("Error reading folder %s: %s", vm.folder, err)
  1625  		} else if folderRef == nil {
  1626  			return fmt.Errorf("Cannot find folder %s", vm.folder)
  1627  		} else {
  1628  			folder = folderRef.(*object.Folder)
  1629  		}
  1630  	}
  1631  
  1632  	// make config spec
  1633  	configSpec := types.VirtualMachineConfigSpec{
  1634  		Name:              vm.name,
  1635  		NumCPUs:           vm.vcpu,
  1636  		NumCoresPerSocket: 1,
  1637  		MemoryMB:          vm.memoryMb,
  1638  		MemoryAllocation: &types.ResourceAllocationInfo{
  1639  			Reservation: vm.memoryAllocation.reservation,
  1640  		},
  1641  		Flags: &types.VirtualMachineFlagInfo{
  1642  			DiskUuidEnabled: &vm.enableDiskUUID,
  1643  		},
  1644  	}
  1645  	if vm.template == "" {
  1646  		configSpec.GuestId = "otherLinux64Guest"
  1647  	}
  1648  	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
  1649  
  1650  	// make ExtraConfig
  1651  	log.Printf("[DEBUG] virtual machine Extra Config spec start")
  1652  	if len(vm.customConfigurations) > 0 {
  1653  		var ov []types.BaseOptionValue
  1654  		for k, v := range vm.customConfigurations {
  1655  			key := k
  1656  			value := v
  1657  			o := types.OptionValue{
  1658  				Key:   key,
  1659  				Value: &value,
  1660  			}
  1661  			log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k, v)
  1662  			ov = append(ov, &o)
  1663  		}
  1664  		configSpec.ExtraConfig = ov
  1665  		log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
  1666  	}
  1667  
  1668  	var datastore *object.Datastore
  1669  	if vm.datastore == "" {
  1670  		datastore, err = finder.DefaultDatastore(context.TODO())
  1671  		if err != nil {
  1672  			return err
  1673  		}
  1674  	} else {
  1675  		datastore, err = finder.Datastore(context.TODO(), vm.datastore)
  1676  		if err != nil {
  1677  			// TODO: datastore cluster support in govmomi finder function
  1678  			d, err := getDatastoreObject(c, dcFolders, vm.datastore)
  1679  			if err != nil {
  1680  				return err
  1681  			}
  1682  
  1683  			if d.Type == "StoragePod" {
  1684  				sp := object.StoragePod{
  1685  					Folder: object.NewFolder(c.Client, d),
  1686  				}
  1687  
  1688  				var sps types.StoragePlacementSpec
  1689  				if vm.template != "" {
  1690  					sps = buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp)
  1691  				} else {
  1692  					sps = buildStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec)
  1693  				}
  1694  
  1695  				datastore, err = findDatastore(c, sps)
  1696  				if err != nil {
  1697  					return err
  1698  				}
  1699  			} else {
  1700  				datastore = object.NewDatastore(c.Client, d)
  1701  			}
  1702  		}
  1703  	}
  1704  
  1705  	log.Printf("[DEBUG] datastore: %#v", datastore)
  1706  
  1707  	// network
  1708  	networkDevices := []types.BaseVirtualDeviceConfigSpec{}
  1709  	networkConfigs := []types.CustomizationAdapterMapping{}
  1710  	for _, network := range vm.networkInterfaces {
  1711  		// network device
  1712  		var networkDeviceType string
  1713  		if vm.template == "" {
  1714  			networkDeviceType = "e1000"
  1715  		} else {
  1716  			networkDeviceType = "vmxnet3"
  1717  		}
  1718  		nd, err := buildNetworkDevice(finder, network.label, networkDeviceType, network.macAddress)
  1719  		if err != nil {
  1720  			return err
  1721  		}
  1722  		log.Printf("[DEBUG] network device: %+v", nd.Device)
  1723  		networkDevices = append(networkDevices, nd)
  1724  
  1725  		if vm.template != "" {
  1726  			var ipSetting types.CustomizationIPSettings
  1727  			if network.ipv4Address == "" {
  1728  				ipSetting.Ip = &types.CustomizationDhcpIpGenerator{}
  1729  			} else {
  1730  				if network.ipv4PrefixLength == 0 {
  1731  					return fmt.Errorf("Error: ipv4_prefix_length argument is empty.")
  1732  				}
  1733  				m := net.CIDRMask(network.ipv4PrefixLength, 32)
  1734  				sm := net.IPv4(m[0], m[1], m[2], m[3])
  1735  				subnetMask := sm.String()
  1736  				log.Printf("[DEBUG] ipv4 gateway: %v\n", network.ipv4Gateway)
  1737  				log.Printf("[DEBUG] ipv4 address: %v\n", network.ipv4Address)
  1738  				log.Printf("[DEBUG] ipv4 prefix length: %v\n", network.ipv4PrefixLength)
  1739  				log.Printf("[DEBUG] ipv4 subnet mask: %v\n", subnetMask)
  1740  				ipSetting.Gateway = []string{
  1741  					network.ipv4Gateway,
  1742  				}
  1743  				ipSetting.Ip = &types.CustomizationFixedIp{
  1744  					IpAddress: network.ipv4Address,
  1745  				}
  1746  				ipSetting.SubnetMask = subnetMask
  1747  			}
  1748  
        			// IPv6 customization: fall back to DHCPv6 when no static
        			// address was configured; otherwise use the fixed address
        			// with its prefix length and gateway.
  1749  			ipv6Spec := &types.CustomizationIPSettingsIpV6AddressSpec{}
  1750  			if network.ipv6Address == "" {
  1751  				ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
  1752  					&types.CustomizationDhcpIpV6Generator{},
  1753  				}
  1754  			} else {
  1755  				log.Printf("[DEBUG] ipv6 gateway: %v\n", network.ipv6Gateway)
  1756  				log.Printf("[DEBUG] ipv6 address: %v\n", network.ipv6Address)
  1757  				log.Printf("[DEBUG] ipv6 prefix length: %v\n", network.ipv6PrefixLength)
  1758  
  1759  				ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
  1760  					&types.CustomizationFixedIpV6{
  1761  						IpAddress:  network.ipv6Address,
  1762  						SubnetMask: int32(network.ipv6PrefixLength),
  1763  					},
  1764  				}
  1765  				ipv6Spec.Gateway = []string{network.ipv6Gateway}
  1766  			}
  1767  			ipSetting.IpV6Spec = ipv6Spec
  1768  
  1769  			// network config
        			// One adapter mapping per configured interface; consumed
        			// later by the guest CustomizationSpec (NicSettingMap).
  1770  			config := types.CustomizationAdapterMapping{
  1771  				Adapter: ipSetting,
  1772  			}
  1773  			networkConfigs = append(networkConfigs, config)
  1774  		}
  1775  	}
  1776  	log.Printf("[DEBUG] network devices: %#v", networkDevices)
  1777  	log.Printf("[DEBUG] network configs: %#v", networkConfigs)
  1778  
        	// Two creation paths: an empty template means build a brand-new VM
        	// from configSpec; otherwise clone the named template.
  1779  	var task *object.Task
  1780  	if vm.template == "" {
  1781  		var mds mo.Datastore
  1782  		if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil {
  1783  			return err
  1784  		}
  1785  		log.Printf("[DEBUG] datastore: %#v", mds.Name)
        		// NOTE(review): `scsi, err :=` shadows the outer err, and the
        		// errors from CreateSCSIController / CreateVM / task.Wait below
        		// are only logged, never returned — a failed create falls
        		// through to the code after this block. Confirm this is
        		// intentional best-effort behavior.
  1786  		scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi")
  1787  		if err != nil {
  1788  			log.Printf("[ERROR] %s", err)
  1789  		}
  1790  
  1791  		configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{
  1792  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
  1793  			Device:    scsi,
  1794  		})
  1795  
        		// Place the VM's files at the root of the datastore ("[name]").
  1796  		configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)}
  1797  
  1798  		task, err = folder.CreateVM(context.TODO(), configSpec, resourcePool, nil)
  1799  		if err != nil {
  1800  			log.Printf("[ERROR] %s", err)
  1801  		}
  1802  
  1803  		err = task.Wait(context.TODO())
  1804  		if err != nil {
  1805  			log.Printf("[ERROR] %s", err)
  1806  		}
  1807  
  1808  	} else {
  1809  
        		// Clone path: relocate spec controls target pool/datastore and
        		// disk provisioning (taken from the first configured disk).
  1810  		relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template, vm.linkedClone, vm.hardDisks[0].initType)
  1811  		if err != nil {
  1812  			return err
  1813  		}
  1814  
  1815  		log.Printf("[DEBUG] relocate spec: %v", relocateSpec)
  1816  
  1817  		// make vm clone spec
  1818  		cloneSpec := types.VirtualMachineCloneSpec{
  1819  			Location: relocateSpec,
  1820  			Template: false,
  1821  			Config:   &configSpec,
  1822  			PowerOn:  false,
  1823  		}
        		// A linked clone must be based on an existing snapshot of the
        		// source template; use its current snapshot.
  1824  		if vm.linkedClone {
  1825  			if template_mo.Snapshot == nil {
  1826  				return fmt.Errorf("`linkedClone=true`, but image VM has no snapshots")
  1827  			}
  1828  			cloneSpec.Snapshot = template_mo.Snapshot.CurrentSnapshot
  1829  		}
  1830  		log.Printf("[DEBUG] clone spec: %v", cloneSpec)
  1831  
  1832  		task, err = template.Clone(context.TODO(), folder, vm.name, cloneSpec)
  1833  		if err != nil {
  1834  			return err
  1835  		}
  1836  	}
  1837  
        	// Wait for the create/clone task to finish.
        	// NOTE(review): on the create path the task was already waited on
        	// above, and a wait error here is logged but not returned — the
        	// code proceeds to look up the VM regardless. Confirm intended.
  1838  	err = task.Wait(context.TODO())
  1839  	if err != nil {
  1840  		log.Printf("[ERROR] %s", err)
  1841  	}
  1842  
        	// Re-resolve the freshly created/cloned VM by its inventory path so
        	// subsequent device operations act on the new object.
  1843  	newVM, err := finder.VirtualMachine(context.TODO(), vm.Path())
  1844  	if err != nil {
  1845  		return err
  1846  	}
  1847  	log.Printf("[DEBUG] new vm: %v", newVM)
  1848  
  1849  	devices, err := newVM.Device(context.TODO())
  1850  	if err != nil {
  1851  		log.Printf("[DEBUG] Template devices can't be found")
  1852  		return err
  1853  	}
  1854  
  1855  	for _, dvc := range devices {
  1856  		// Issue 3559/3560: Delete all ethernet devices to add the correct ones later
  1857  		if devices.Type(dvc) == "ethernet" {
  1858  			err := newVM.RemoveDevice(context.TODO(), false, dvc)
  1859  			if err != nil {
  1860  				return err
  1861  			}
  1862  		}
  1863  	}
  1864  	// Add Network devices
        	// (the ones built earlier from the resource's network_interface
        	// configuration, replacing the template's removed NICs)
  1865  	for _, dvc := range networkDevices {
  1866  		err := newVM.AddDevice(
  1867  			context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device)
  1868  		if err != nil {
  1869  			return err
  1870  		}
  1871  	}
  1872  
  1873  	// Create the cdroms if needed.
  1874  	if err := createCdroms(newVM, vm.cdroms); err != nil {
  1875  		return err
  1876  	}
  1877  
        	// Refresh summary/config into vm_mo for disk-path derivation below.
        	// NOTE(review): the error return of Properties is ignored here; a
        	// failure would leave vm_mo stale/zero and panic on Config below —
        	// confirm whether it should be checked.
  1878  	newVM.Properties(context.TODO(), newVM.Reference(), []string{"summary", "config"}, &vm_mo)
        	// When cloned from a template the first configured disk is the
        	// template's own boot disk, already present — skip creating it.
  1879  	firstDisk := 0
  1880  	if vm.template != "" {
  1881  		firstDisk++
  1882  	}
  1883  	for i := firstDisk; i < len(vm.hardDisks); i++ {
  1884  		log.Printf("[DEBUG] disk index: %v", i)
  1885  
        		// Resolve where the VMDK lives: an explicit vmdk path wins;
        		// otherwise build a path from the VM's working directory,
        		// extracted from the "[datastore] path" form of the snapshot
        		// directory (split on the single space after the bracket).
  1886  		var diskPath string
  1887  		switch {
  1888  		case vm.hardDisks[i].vmdkPath != "":
  1889  			diskPath = vm.hardDisks[i].vmdkPath
  1890  		case vm.hardDisks[i].name != "":
  1891  			snapshotFullDir := vm_mo.Config.Files.SnapshotDirectory
  1892  			split := strings.Split(snapshotFullDir, " ")
  1893  			if len(split) != 2 {
  1894  				return fmt.Errorf("[ERROR] setupVirtualMachine - failed to split snapshot directory: %v", snapshotFullDir)
  1895  			}
  1896  			vmWorkingPath := split[1]
  1897  			diskPath = vmWorkingPath + vm.hardDisks[i].name
  1898  		default:
  1899  			return fmt.Errorf("[ERROR] setupVirtualMachine - Neither vmdk path nor vmdk name was given: %#v", vm.hardDisks[i])
  1900  		}
  1901  
  1902  		err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, diskPath, vm.hardDisks[i].controller)
  1903  		if err != nil {
  1904  			return err
  1905  		}
  1906  	}
  1907  
        	// Guest OS customization only applies to clones; skip for scratch
        	// VMs (no guest yet) or when explicitly disabled.
  1908  	if vm.skipCustomization || vm.template == "" {
  1909  		log.Printf("[DEBUG] VM customization skipped")
  1910  	} else {
  1911  		var identity_options types.BaseCustomizationIdentitySettings
        		// Windows guests (guest id prefixed "win") get a sysprep-based
        		// customization; everything else gets LinuxPrep.
  1912  		if strings.HasPrefix(template_mo.Config.GuestId, "win") {
  1913  			var timeZone int
        			// Map the IANA-style default to the Windows numeric time
        			// zone index ("085" — presumably GMT; confirm against the
        			// Microsoft time zone index table).
  1914  			if vm.timeZone == "Etc/UTC" {
  1915  				vm.timeZone = "085"
  1916  			}
  1917  			timeZone, err := strconv.Atoi(vm.timeZone)
  1918  			if err != nil {
  1919  				return fmt.Errorf("Error converting TimeZone: %s", err)
  1920  			}
  1921  
  1922  			guiUnattended := types.CustomizationGuiUnattended{
  1923  				AutoLogon:      false,
  1924  				AutoLogonCount: 1,
  1925  				TimeZone:       int32(timeZone),
  1926  			}
  1927  
  1928  			customIdentification := types.CustomizationIdentification{}
  1929  
        			// Hostname is the short name (text before the first dot).
  1930  			userData := types.CustomizationUserData{
  1931  				ComputerName: &types.CustomizationFixedName{
  1932  					Name: strings.Split(vm.name, ".")[0],
  1933  				},
  1934  				ProductId: vm.windowsOptionalConfig.productKey,
  1935  				FullName:  "terraform",
  1936  				OrgName:   "terraform",
  1937  			}
  1938  
        			// Domain join only when user, password AND domain are all set.
  1939  			if vm.windowsOptionalConfig.domainUserPassword != "" && vm.windowsOptionalConfig.domainUser != "" && vm.windowsOptionalConfig.domain != "" {
  1940  				customIdentification.DomainAdminPassword = &types.CustomizationPassword{
  1941  					PlainText: true,
  1942  					Value:     vm.windowsOptionalConfig.domainUserPassword,
  1943  				}
  1944  				customIdentification.DomainAdmin = vm.windowsOptionalConfig.domainUser
  1945  				customIdentification.JoinDomain = vm.windowsOptionalConfig.domain
  1946  			}
  1947  
  1948  			if vm.windowsOptionalConfig.adminPassword != "" {
  1949  				guiUnattended.Password = &types.CustomizationPassword{
  1950  					PlainText: true,
  1951  					Value:     vm.windowsOptionalConfig.adminPassword,
  1952  				}
  1953  			}
  1954  
  1955  			identity_options = &types.CustomizationSysprep{
  1956  				GuiUnattended:  guiUnattended,
  1957  				Identification: customIdentification,
  1958  				UserData:       userData,
  1959  			}
  1960  		} else {
  1961  			identity_options = &types.CustomizationLinuxPrep{
  1962  				HostName: &types.CustomizationFixedName{
  1963  					Name: strings.Split(vm.name, ".")[0],
  1964  				},
  1965  				Domain:     vm.domain,
  1966  				TimeZone:   vm.timeZone,
  1967  				HwClockUTC: types.NewBool(true),
  1968  			}
  1969  		}
  1970  
  1971  		// create CustomizationSpec
        		// and run it against the new VM, blocking until the
        		// customization task completes.
  1972  		customSpec := types.CustomizationSpec{
  1973  			Identity: identity_options,
  1974  			GlobalIPSettings: types.CustomizationGlobalIPSettings{
  1975  				DnsSuffixList: vm.dnsSuffixes,
  1976  				DnsServerList: vm.dnsServers,
  1977  			},
  1978  			NicSettingMap: networkConfigs,
  1979  		}
  1980  		log.Printf("[DEBUG] custom spec: %v", customSpec)
  1981  
  1982  		log.Printf("[DEBUG] VM customization starting")
  1983  		taskb, err := newVM.Customize(context.TODO(), customSpec)
  1984  		if err != nil {
  1985  			return err
  1986  		}
  1987  		_, err = taskb.WaitForResult(context.TODO(), nil)
  1988  		if err != nil {
  1989  			return err
  1990  		}
  1991  		log.Printf("[DEBUG] VM customization finished")
  1992  	}
  1993  
        	// Power on when the VM has something to boot from (a bootable vmdk
        	// or a template-derived disk), then wait for the poweredOn state.
        	// NOTE(review): the task/error returned by PowerOn is discarded; a
        	// power-on failure surfaces only as a WaitForPowerState timeout —
        	// confirm whether the task should be awaited/checked.
  1994  	if vm.hasBootableVmdk || vm.template != "" {
  1995  		newVM.PowerOn(context.TODO())
  1996  		err = newVM.WaitForPowerState(context.TODO(), types.VirtualMachinePowerStatePoweredOn)
  1997  		if err != nil {
  1998  			return err
  1999  		}
  2000  	}
  2001  	return nil
  2002  }