github.com/mohanarpit/terraform@v0.6.16-0.20160909104007-291f29853544/builtin/providers/vsphere/resource_vsphere_virtual_machine.go

     1  package vsphere
     2  
     3  import (
     4  	"fmt"
     5  	"log"
     6  	"net"
     7  	"strconv"
     8  	"strings"
     9  
    10  	"github.com/hashicorp/terraform/helper/schema"
    11  	"github.com/vmware/govmomi"
    12  	"github.com/vmware/govmomi/find"
    13  	"github.com/vmware/govmomi/object"
    14  	"github.com/vmware/govmomi/property"
    15  	"github.com/vmware/govmomi/vim25/mo"
    16  	"github.com/vmware/govmomi/vim25/types"
    17  	"golang.org/x/net/context"
    18  )
    19  
    20  var DefaultDNSSuffixes = []string{
    21  	"vsphere.local",
    22  }
    23  
    24  var DefaultDNSServers = []string{
    25  	"8.8.8.8",
    26  	"8.8.4.4",
    27  }
    28  
    29  var DiskControllerTypes = []string{
    30  	"scsi",
    31  	"scsi-lsi-parallel",
    32  	"scsi-buslogic",
    33  	"scsi-paravirtual",
    34  	"scsi-lsi-sas",
    35  	"ide",
    36  }
    37  
    38  type networkInterface struct {
    39  	deviceName       string
    40  	label            string
    41  	ipv4Address      string
    42  	ipv4PrefixLength int
    43  	ipv4Gateway      string
    44  	ipv6Address      string
    45  	ipv6PrefixLength int
    46  	ipv6Gateway      string
    47  	adapterType      string // TODO: Make "adapter_type" argument
    48  	macAddress       string
    49  }
    50  
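        // hardDisk describes a single virtual disk attached to the VM. Note that
        // size is interpreted as gigabytes; addHardDisk below converts it to KB
        // (size * 1024 * 1024) when creating the device.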
    51  type hardDisk struct {
    52  	name       string
    53  	size       int64
    54  	iops       int64
    55  	initType   string
    56  	vmdkPath   string
    57  	controller string
    58  	bootable   bool
    59  }
    60  
    61  // windowsOptConfig holds additional options vSphere can apply when cloning Windows machines.
    62  type windowsOptConfig struct {
    63  	productKey         string
    64  	adminPassword      string
    65  	domainUser         string
    66  	domain             string
    67  	domainUserPassword string
    68  }
    69  
    70  type cdrom struct {
    71  	datastore string
    72  	path      string
    73  }
    74  
    75  type memoryAllocation struct {
    76  	reservation int64
    77  }
    78  
    79  type virtualMachine struct {
    80  	name                  string
    81  	folder                string
    82  	datacenter            string
    83  	cluster               string
    84  	resourcePool          string
    85  	datastore             string
    86  	vcpu                  int32
    87  	memoryMb              int64
    88  	memoryAllocation      memoryAllocation
    89  	template              string
    90  	networkInterfaces     []networkInterface
    91  	hardDisks             []hardDisk
    92  	cdroms                []cdrom
    93  	domain                string
    94  	timeZone              string
    95  	dnsSuffixes           []string
    96  	dnsServers            []string
    97  	hasBootableVmdk       bool
    98  	linkedClone           bool
    99  	skipCustomization     bool
   100  	enableDiskUUID        bool
   101  	windowsOptionalConfig windowsOptConfig
   102  	customConfigurations  map[string](types.AnyType)
   103  }
   104  
   105  func (v virtualMachine) Path() string {
   106  	return vmPath(v.folder, v.name)
   107  }
   108  
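        // vmPath builds the inventory path used by the finder from an optional
        // folder and the VM name, e.g. vmPath("folder1", "vm-1") == "folder1/vm-1".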
   109  func vmPath(folder string, name string) string {
   110  	var path string
   111  	if len(folder) > 0 {
   112  		path += folder + "/"
   113  	}
   114  	return path + name
   115  }
   116  
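        // resourceVSphereVirtualMachine defines the schema and CRUD functions for the
        // virtual machine resource (registered by the provider, typically as
        // "vsphere_virtual_machine"; the registration itself is not part of this file).
        //
        // Illustrative HCL usage, sketched from the required schema fields below
        // (all values are placeholders):
        //
        //	resource "vsphere_virtual_machine" "example" {
        //	  name   = "terraform-vm"
        //	  vcpu   = 2
        //	  memory = 4096
        //
        //	  network_interface {
        //	    label = "VM Network"
        //	  }
        //
        //	  disk {
        //	    template = "centos-7-template"
        //	  }
        //	}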
   117  func resourceVSphereVirtualMachine() *schema.Resource {
   118  	return &schema.Resource{
   119  		Create: resourceVSphereVirtualMachineCreate,
   120  		Read:   resourceVSphereVirtualMachineRead,
   121  		Update: resourceVSphereVirtualMachineUpdate,
   122  		Delete: resourceVSphereVirtualMachineDelete,
   123  
   124  		SchemaVersion: 1,
   125  		MigrateState:  resourceVSphereVirtualMachineMigrateState,
   126  
   127  		Schema: map[string]*schema.Schema{
   128  			"name": &schema.Schema{
   129  				Type:     schema.TypeString,
   130  				Required: true,
   131  				ForceNew: true,
   132  			},
   133  
   134  			"folder": &schema.Schema{
   135  				Type:     schema.TypeString,
   136  				Optional: true,
   137  				ForceNew: true,
   138  			},
   139  
   140  			"vcpu": &schema.Schema{
   141  				Type:     schema.TypeInt,
   142  				Required: true,
   143  			},
   144  
   145  			"memory": &schema.Schema{
   146  				Type:     schema.TypeInt,
   147  				Required: true,
   148  			},
   149  
   150  			"memory_reservation": &schema.Schema{
   151  				Type:     schema.TypeInt,
   152  				Optional: true,
   153  				Default:  0,
   154  				ForceNew: true,
   155  			},
   156  
   157  			"datacenter": &schema.Schema{
   158  				Type:     schema.TypeString,
   159  				Optional: true,
   160  				ForceNew: true,
   161  			},
   162  
   163  			"cluster": &schema.Schema{
   164  				Type:     schema.TypeString,
   165  				Optional: true,
   166  				ForceNew: true,
   167  			},
   168  
   169  			"resource_pool": &schema.Schema{
   170  				Type:     schema.TypeString,
   171  				Optional: true,
   172  				ForceNew: true,
   173  			},
   174  
   175  			"linked_clone": &schema.Schema{
   176  				Type:     schema.TypeBool,
   177  				Optional: true,
   178  				Default:  false,
   179  				ForceNew: true,
   180  			},
   181  			"gateway": &schema.Schema{
   182  				Type:       schema.TypeString,
   183  				Optional:   true,
   184  				ForceNew:   true,
   185  				Deprecated: "Please use network_interface.ipv4_gateway",
   186  			},
   187  
   188  			"domain": &schema.Schema{
   189  				Type:     schema.TypeString,
   190  				Optional: true,
   191  				ForceNew: true,
   192  				Default:  "vsphere.local",
   193  			},
   194  
   195  			"time_zone": &schema.Schema{
   196  				Type:     schema.TypeString,
   197  				Optional: true,
   198  				ForceNew: true,
   199  				Default:  "Etc/UTC",
   200  			},
   201  
   202  			"dns_suffixes": &schema.Schema{
   203  				Type:     schema.TypeList,
   204  				Optional: true,
   205  				Elem:     &schema.Schema{Type: schema.TypeString},
   206  				ForceNew: true,
   207  			},
   208  
   209  			"dns_servers": &schema.Schema{
   210  				Type:     schema.TypeList,
   211  				Optional: true,
   212  				Elem:     &schema.Schema{Type: schema.TypeString},
   213  				ForceNew: true,
   214  			},
   215  
   216  			"skip_customization": &schema.Schema{
   217  				Type:     schema.TypeBool,
   218  				Optional: true,
   219  				ForceNew: true,
   220  				Default:  false,
   221  			},
   222  
   223  			"enable_disk_uuid": &schema.Schema{
   224  				Type:     schema.TypeBool,
   225  				Optional: true,
   226  				ForceNew: true,
   227  				Default:  false,
   228  			},
   229  
   230  			"uuid": &schema.Schema{
   231  				Type:     schema.TypeString,
   232  				Computed: true,
   233  			},
   234  
   235  			"custom_configuration_parameters": &schema.Schema{
   236  				Type:     schema.TypeMap,
   237  				Optional: true,
   238  				ForceNew: true,
   239  			},
   240  
   241  			"windows_opt_config": &schema.Schema{
   242  				Type:     schema.TypeList,
   243  				Optional: true,
   244  				ForceNew: true,
   245  				Elem: &schema.Resource{
   246  					Schema: map[string]*schema.Schema{
   247  						"product_key": &schema.Schema{
   248  							Type:     schema.TypeString,
   249  							Optional: true,
   250  							ForceNew: true,
   251  						},
   252  
   253  						"admin_password": &schema.Schema{
   254  							Type:     schema.TypeString,
   255  							Optional: true,
   256  							ForceNew: true,
   257  						},
   258  
   259  						"domain_user": &schema.Schema{
   260  							Type:     schema.TypeString,
   261  							Optional: true,
   262  							ForceNew: true,
   263  						},
   264  
   265  						"domain": &schema.Schema{
   266  							Type:     schema.TypeString,
   267  							Optional: true,
   268  							ForceNew: true,
   269  						},
   270  
   271  						"domain_user_password": &schema.Schema{
   272  							Type:     schema.TypeString,
   273  							Optional: true,
   274  							ForceNew: true,
   275  						},
   276  					},
   277  				},
   278  			},
   279  
   280  			"network_interface": &schema.Schema{
   281  				Type:     schema.TypeList,
   282  				Required: true,
   283  				ForceNew: true,
   284  				Elem: &schema.Resource{
   285  					Schema: map[string]*schema.Schema{
   286  						"label": &schema.Schema{
   287  							Type:     schema.TypeString,
   288  							Required: true,
   289  							ForceNew: true,
   290  						},
   291  
   292  						"ip_address": &schema.Schema{
   293  							Type:       schema.TypeString,
   294  							Optional:   true,
   295  							Computed:   true,
   296  							Deprecated: "Please use ipv4_address",
   297  						},
   298  
   299  						"subnet_mask": &schema.Schema{
   300  							Type:       schema.TypeString,
   301  							Optional:   true,
   302  							Computed:   true,
   303  							Deprecated: "Please use ipv4_prefix_length",
   304  						},
   305  
   306  						"ipv4_address": &schema.Schema{
   307  							Type:     schema.TypeString,
   308  							Optional: true,
   309  							Computed: true,
   310  						},
   311  
   312  						"ipv4_prefix_length": &schema.Schema{
   313  							Type:     schema.TypeInt,
   314  							Optional: true,
   315  							Computed: true,
   316  						},
   317  
   318  						"ipv4_gateway": &schema.Schema{
   319  							Type:     schema.TypeString,
   320  							Optional: true,
   321  							Computed: true,
   322  						},
   323  
   324  						"ipv6_address": &schema.Schema{
   325  							Type:     schema.TypeString,
   326  							Optional: true,
   327  							Computed: true,
   328  						},
   329  
   330  						"ipv6_prefix_length": &schema.Schema{
   331  							Type:     schema.TypeInt,
   332  							Optional: true,
   333  							Computed: true,
   334  						},
   335  
   336  						"ipv6_gateway": &schema.Schema{
   337  							Type:     schema.TypeString,
   338  							Optional: true,
   339  							Computed: true,
   340  						},
   341  
   342  						"adapter_type": &schema.Schema{
   343  							Type:     schema.TypeString,
   344  							Optional: true,
   345  							ForceNew: true,
   346  						},
   347  
   348  						"mac_address": &schema.Schema{
   349  							Type:     schema.TypeString,
   350  							Optional: true,
   351  							Computed: true,
   352  						},
   353  					},
   354  				},
   355  			},
   356  
   357  			"disk": &schema.Schema{
   358  				Type:     schema.TypeSet,
   359  				Required: true,
   360  				Elem: &schema.Resource{
   361  					Schema: map[string]*schema.Schema{
   362  						"uuid": &schema.Schema{
   363  							Type:     schema.TypeString,
   364  							Computed: true,
   365  						},
   366  
   367  						"key": &schema.Schema{
   368  							Type:     schema.TypeInt,
   369  							Computed: true,
   370  						},
   371  
   372  						"template": &schema.Schema{
   373  							Type:     schema.TypeString,
   374  							Optional: true,
   375  						},
   376  
   377  						"type": &schema.Schema{
   378  							Type:     schema.TypeString,
   379  							Optional: true,
   380  							Default:  "eager_zeroed",
   381  							ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
   382  								value := v.(string)
   383  								if value != "thin" && value != "eager_zeroed" && value != "lazy" {
   384  									errors = append(errors, fmt.Errorf(
   385  										"only 'thin', 'eager_zeroed', and 'lazy' are supported values for 'type'"))
   386  								}
   387  								return
   388  							},
   389  						},
   390  
   391  						"datastore": &schema.Schema{
   392  							Type:     schema.TypeString,
   393  							Optional: true,
   394  						},
   395  
   396  						"size": &schema.Schema{
   397  							Type:     schema.TypeInt,
   398  							Optional: true,
   399  						},
   400  
   401  						"name": &schema.Schema{
   402  							Type:     schema.TypeString,
   403  							Optional: true,
   404  						},
   405  
   406  						"iops": &schema.Schema{
   407  							Type:     schema.TypeInt,
   408  							Optional: true,
   409  						},
   410  
   411  						"vmdk": &schema.Schema{
   412  							// TODO: Add ValidateFunc to confirm path exists
   413  							Type:     schema.TypeString,
   414  							Optional: true,
   415  						},
   416  
   417  						"bootable": &schema.Schema{
   418  							Type:     schema.TypeBool,
   419  							Optional: true,
   420  						},
   421  
   422  						"keep_on_remove": &schema.Schema{
   423  							Type:     schema.TypeBool,
   424  							Optional: true,
   425  						},
   426  
   427  						"controller_type": &schema.Schema{
   428  							Type:     schema.TypeString,
   429  							Optional: true,
   430  							Default:  "scsi",
   431  							ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
   432  								value := v.(string)
   433  								found := false
   434  								for _, t := range DiskControllerTypes {
   435  									if t == value {
   436  										found = true
   437  									}
   438  								}
   439  								if !found {
   440  									errors = append(errors, fmt.Errorf(
   441  										"Supported values for 'controller_type' are %v", strings.Join(DiskControllerTypes, ", ")))
   442  								}
   443  								return
   444  							},
   445  						},
   446  					},
   447  				},
   448  			},
   449  
   450  			"cdrom": &schema.Schema{
   451  				Type:     schema.TypeList,
   452  				Optional: true,
   453  				ForceNew: true,
   454  				Elem: &schema.Resource{
   455  					Schema: map[string]*schema.Schema{
   456  						"datastore": &schema.Schema{
   457  							Type:     schema.TypeString,
   458  							Required: true,
   459  							ForceNew: true,
   460  						},
   461  
   462  						"path": &schema.Schema{
   463  							Type:     schema.TypeString,
   464  							Required: true,
   465  							ForceNew: true,
   466  						},
   467  					},
   468  				},
   469  			},
   470  		},
   471  	}
   472  }
   473  
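        // resourceVSphereVirtualMachineUpdate applies in-place changes to vcpu, memory
        // and the disk set. CPU/memory changes require the VM to be powered off, so the
        // machine is stopped, reconfigured and powered back on when needed.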
   474  func resourceVSphereVirtualMachineUpdate(d *schema.ResourceData, meta interface{}) error {
   475  	// flag if changes have to be applied
   476  	hasChanges := false
   477  	// flag if changes have to be done when powered off
   478  	rebootRequired := false
   479  
   480  	// make config spec
   481  	configSpec := types.VirtualMachineConfigSpec{}
   482  
   483  	if d.HasChange("vcpu") {
   484  		configSpec.NumCPUs = int32(d.Get("vcpu").(int))
   485  		hasChanges = true
   486  		rebootRequired = true
   487  	}
   488  
   489  	if d.HasChange("memory") {
   490  		configSpec.MemoryMB = int64(d.Get("memory").(int))
   491  		hasChanges = true
   492  		rebootRequired = true
   493  	}
   494  
   495  	client := meta.(*govmomi.Client)
   496  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
   497  	if err != nil {
   498  		return err
   499  	}
   500  	finder := find.NewFinder(client.Client, true)
   501  	finder = finder.SetDatacenter(dc)
   502  
   503  	vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string)))
   504  	if err != nil {
   505  		return err
   506  	}
   507  
   508  	if d.HasChange("disk") {
   509  		hasChanges = true
   510  		oldDisks, newDisks := d.GetChange("disk")
   511  		oldDiskSet := oldDisks.(*schema.Set)
   512  		newDiskSet := newDisks.(*schema.Set)
   513  
   514  		addedDisks := newDiskSet.Difference(oldDiskSet)
   515  		removedDisks := oldDiskSet.Difference(newDiskSet)
   516  
   517  		// Removed disks
   518  		for _, diskRaw := range removedDisks.List() {
   519  			if disk, ok := diskRaw.(map[string]interface{}); ok {
   520  				devices, err := vm.Device(context.TODO())
   521  				if err != nil {
   522  					return fmt.Errorf("[ERROR] Update Remove Disk - Could not get virtual device list: %v", err)
   523  				}
   524  				virtualDisk := devices.FindByKey(int32(disk["key"].(int)))
   525  
   526  				keep := false
   527  				if v, ok := disk["keep_on_remove"].(bool); ok {
   528  					keep = v
   529  				}
   530  
   531  				err = vm.RemoveDevice(context.TODO(), keep, virtualDisk)
   532  				if err != nil {
   533  					return fmt.Errorf("[ERROR] Update Remove Disk - Error removing disk: %v", err)
   534  				}
   535  			}
   536  		}
   537  		// Added disks
   538  		for _, diskRaw := range addedDisks.List() {
   539  			if disk, ok := diskRaw.(map[string]interface{}); ok {
   540  
   541  				var datastore *object.Datastore
   542  				if disk["datastore"] == "" {
   543  					datastore, err = finder.DefaultDatastore(context.TODO())
   544  					if err != nil {
   545  						return fmt.Errorf("[ERROR] Update Add Disk - Error finding datastore: %v", err)
   546  					}
   547  				} else {
   548  					datastore, err = finder.Datastore(context.TODO(), disk["datastore"].(string))
   549  					if err != nil {
   550  						log.Printf("[ERROR] Couldn't find datastore %v.  %s", disk["datastore"].(string), err)
   551  						return err
   552  					}
   553  				}
   554  
   555  				var size int64
   556  				if disk["size"] == 0 {
   557  					size = 0
   558  				} else {
   559  					size = int64(disk["size"].(int))
   560  				}
   561  				iops := int64(disk["iops"].(int))
   562  				controller_type := disk["controller_type"].(string)
   563  
   564  				var vmProps mo.VirtualMachine
   565  				if err := vm.Properties(context.TODO(), vm.Reference(), []string{"summary", "config"}, &vmProps); err != nil {
        					return fmt.Errorf("[ERROR] Update Add Disk - Could not get VM properties: %v", err)
        				}
   566  
   567  				var diskPath string
   568  				switch {
   569  				case disk["vmdk"] != "":
   570  					diskPath = disk["vmdk"].(string)
   571  				case disk["name"] != "":
   572  					snapshotFullDir := vmProps.Config.Files.SnapshotDirectory
   573  					split := strings.Split(snapshotFullDir, " ")
   574  					if len(split) != 2 {
   575  						return fmt.Errorf("[ERROR] resourceVSphereVirtualMachineUpdate - failed to split snapshot directory: %v", snapshotFullDir)
   576  					}
   577  					vmWorkingPath := split[1]
   578  					diskPath = vmWorkingPath + disk["name"].(string)
   579  				default:
   580  					return fmt.Errorf("[ERROR] resourceVSphereVirtualMachineUpdate - Neither vmdk path nor vmdk name was given")
   581  				}
   582  
   583  				var initType string
   584  				if disk["type"] != "" {
   585  					initType = disk["type"].(string)
   586  				} else {
   587  					initType = "thin"
   588  				}
   589  
   590  				log.Printf("[INFO] Attaching disk: %v", diskPath)
   591  				err = addHardDisk(vm, size, iops, initType, datastore, diskPath, controller_type)
   592  				if err != nil {
   593  					log.Printf("[ERROR] Add Hard Disk Failed: %v", err)
   594  					return err
   595  				}
   596  			}
   597  			if err != nil {
   598  				return err
   599  			}
   600  		}
   601  	}
   602  
   603  	// do nothing if there are no changes
   604  	if !hasChanges {
   605  		return nil
   606  	}
   607  
   608  	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
   609  
   610  	if rebootRequired {
   611  		log.Printf("[INFO] Shutting down virtual machine: %s", d.Id())
   612  
   613  		task, err := vm.PowerOff(context.TODO())
   614  		if err != nil {
   615  			return err
   616  		}
   617  
   618  		err = task.Wait(context.TODO())
   619  		if err != nil {
   620  			return err
   621  		}
   622  	}
   623  
   624  	log.Printf("[INFO] Reconfiguring virtual machine: %s", d.Id())
   625  
   626  	task, err := vm.Reconfigure(context.TODO(), configSpec)
   627  	if err != nil {
   628  		log.Printf("[ERROR] %s", err)
        		return err
   629  	}
   630  
   631  	err = task.Wait(context.TODO())
   632  	if err != nil {
   633  		log.Printf("[ERROR] %s", err)
        		return err
   634  	}
   635  
   636  	if rebootRequired {
   637  		task, err = vm.PowerOn(context.TODO())
   638  		if err != nil {
   639  			return err
   640  		}
   641  
   642  		err = task.Wait(context.TODO())
   643  		if err != nil {
   644  			log.Printf("[ERROR] %s", err)
   645  		}
   646  	}
   647  
   648  	return resourceVSphereVirtualMachineRead(d, meta)
   649  }
   650  
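        // resourceVSphereVirtualMachineCreate collects the resource arguments into a
        // virtualMachine struct and delegates the actual provisioning to
        // setupVirtualMachine, using the resulting inventory path as the resource ID.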
   651  func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error {
   652  	client := meta.(*govmomi.Client)
   653  
   654  	vm := virtualMachine{
   655  		name:     d.Get("name").(string),
   656  		vcpu:     int32(d.Get("vcpu").(int)),
   657  		memoryMb: int64(d.Get("memory").(int)),
   658  		memoryAllocation: memoryAllocation{
   659  			reservation: int64(d.Get("memory_reservation").(int)),
   660  		},
   661  	}
   662  
   663  	if v, ok := d.GetOk("folder"); ok {
   664  		vm.folder = v.(string)
   665  	}
   666  
   667  	if v, ok := d.GetOk("datacenter"); ok {
   668  		vm.datacenter = v.(string)
   669  	}
   670  
   671  	if v, ok := d.GetOk("cluster"); ok {
   672  		vm.cluster = v.(string)
   673  	}
   674  
   675  	if v, ok := d.GetOk("resource_pool"); ok {
   676  		vm.resourcePool = v.(string)
   677  	}
   678  
   679  	if v, ok := d.GetOk("domain"); ok {
   680  		vm.domain = v.(string)
   681  	}
   682  
   683  	if v, ok := d.GetOk("time_zone"); ok {
   684  		vm.timeZone = v.(string)
   685  	}
   686  
   687  	if v, ok := d.GetOk("linked_clone"); ok {
   688  		vm.linkedClone = v.(bool)
   689  	}
   690  
   691  	if v, ok := d.GetOk("skip_customization"); ok {
   692  		vm.skipCustomization = v.(bool)
   693  	}
   694  
   695  	if v, ok := d.GetOk("enable_disk_uuid"); ok {
   696  		vm.enableDiskUUID = v.(bool)
   697  	}
   698  
   699  	if raw, ok := d.GetOk("dns_suffixes"); ok {
   700  		for _, v := range raw.([]interface{}) {
   701  			vm.dnsSuffixes = append(vm.dnsSuffixes, v.(string))
   702  		}
   703  	} else {
   704  		vm.dnsSuffixes = DefaultDNSSuffixes
   705  	}
   706  
   707  	if raw, ok := d.GetOk("dns_servers"); ok {
   708  		for _, v := range raw.([]interface{}) {
   709  			vm.dnsServers = append(vm.dnsServers, v.(string))
   710  		}
   711  	} else {
   712  		vm.dnsServers = DefaultDNSServers
   713  	}
   714  
   715  	if vL, ok := d.GetOk("custom_configuration_parameters"); ok {
   716  		if custom_configs, ok := vL.(map[string]interface{}); ok {
   717  			custom := make(map[string]types.AnyType)
   718  			for k, v := range custom_configs {
   719  				custom[k] = v
   720  			}
   721  			vm.customConfigurations = custom
   722  			log.Printf("[DEBUG] custom_configuration_parameters init: %v", vm.customConfigurations)
   723  		}
   724  	}
   725  
   726  	if vL, ok := d.GetOk("network_interface"); ok {
   727  		networks := make([]networkInterface, len(vL.([]interface{})))
   728  		for i, v := range vL.([]interface{}) {
   729  			network := v.(map[string]interface{})
   730  			networks[i].label = network["label"].(string)
   731  			if v, ok := network["ip_address"].(string); ok && v != "" {
   732  				networks[i].ipv4Address = v
   733  			}
   734  			if v, ok := d.GetOk("gateway"); ok {
   735  				networks[i].ipv4Gateway = v.(string)
   736  			}
   737  			if v, ok := network["subnet_mask"].(string); ok && v != "" {
   738  				ip := net.ParseIP(v).To4()
   739  				if ip != nil {
   740  					mask := net.IPv4Mask(ip[0], ip[1], ip[2], ip[3])
   741  					pl, _ := mask.Size()
   742  					networks[i].ipv4PrefixLength = pl
   743  				} else {
   744  					return fmt.Errorf("subnet_mask parameter is invalid.")
   745  				}
   746  			}
   747  			if v, ok := network["ipv4_address"].(string); ok && v != "" {
   748  				networks[i].ipv4Address = v
   749  			}
   750  			if v, ok := network["ipv4_prefix_length"].(int); ok && v != 0 {
   751  				networks[i].ipv4PrefixLength = v
   752  			}
   753  			if v, ok := network["ipv4_gateway"].(string); ok && v != "" {
   754  				networks[i].ipv4Gateway = v
   755  			}
   756  			if v, ok := network["ipv6_address"].(string); ok && v != "" {
   757  				networks[i].ipv6Address = v
   758  			}
   759  			if v, ok := network["ipv6_prefix_length"].(int); ok && v != 0 {
   760  				networks[i].ipv6PrefixLength = v
   761  			}
   762  			if v, ok := network["ipv6_gateway"].(string); ok && v != "" {
   763  				networks[i].ipv6Gateway = v
   764  			}
   765  			if v, ok := network["mac_address"].(string); ok && v != "" {
   766  				networks[i].macAddress = v
   767  			}
   768  		}
   769  		vm.networkInterfaces = networks
   770  		log.Printf("[DEBUG] network_interface init: %v", networks)
   771  	}
   772  
   773  	if vL, ok := d.GetOk("windows_opt_config"); ok {
   774  		var winOpt windowsOptConfig
   775  		custom_configs := (vL.([]interface{}))[0].(map[string]interface{})
   776  		if v, ok := custom_configs["admin_password"].(string); ok && v != "" {
   777  			winOpt.adminPassword = v
   778  		}
   779  		if v, ok := custom_configs["domain"].(string); ok && v != "" {
   780  			winOpt.domain = v
   781  		}
   782  		if v, ok := custom_configs["domain_user"].(string); ok && v != "" {
   783  			winOpt.domainUser = v
   784  		}
   785  		if v, ok := custom_configs["product_key"].(string); ok && v != "" {
   786  			winOpt.productKey = v
   787  		}
   788  		if v, ok := custom_configs["domain_user_password"].(string); ok && v != "" {
   789  			winOpt.domainUserPassword = v
   790  		}
   791  		vm.windowsOptionalConfig = winOpt
   792  		log.Printf("[DEBUG] windows config init: %v", winOpt)
   793  	}
   794  
   795  	if vL, ok := d.GetOk("disk"); ok {
   796  		if diskSet, ok := vL.(*schema.Set); ok {
   797  
   798  			disks := []hardDisk{}
   799  			for _, value := range diskSet.List() {
   800  				disk := value.(map[string]interface{})
   801  				newDisk := hardDisk{}
   802  
   803  				if v, ok := disk["template"].(string); ok && v != "" {
   804  					if v, ok := disk["name"].(string); ok && v != "" {
   805  						return fmt.Errorf("Cannot specify name of a template")
   806  					}
   807  					vm.template = v
   808  					if vm.hasBootableVmdk {
   809  						return fmt.Errorf("[ERROR] Only one bootable disk or template may be given")
   810  					}
   811  					vm.hasBootableVmdk = true
   812  				}
   813  
   814  				if v, ok := disk["type"].(string); ok && v != "" {
   815  					newDisk.initType = v
   816  				}
   817  
   818  				if v, ok := disk["datastore"].(string); ok && v != "" {
   819  					vm.datastore = v
   820  				}
   821  
   822  				if v, ok := disk["size"].(int); ok && v != 0 {
   823  					if v, ok := disk["template"].(string); ok && v != "" {
   824  						return fmt.Errorf("Cannot specify size of a template")
   825  					}
   826  
   827  					if v, ok := disk["name"].(string); ok && v != "" {
   828  						newDisk.name = v
   829  					} else {
   830  						return fmt.Errorf("[ERROR] Disk name must be provided when creating a new disk")
   831  					}
   832  
   833  					newDisk.size = int64(v)
   834  				}
   835  
   836  				if v, ok := disk["iops"].(int); ok && v != 0 {
   837  					newDisk.iops = int64(v)
   838  				}
   839  
   840  				if v, ok := disk["controller_type"].(string); ok && v != "" {
   841  					newDisk.controller = v
   842  				}
   843  
   844  				if vVmdk, ok := disk["vmdk"].(string); ok && vVmdk != "" {
   845  					if v, ok := disk["template"].(string); ok && v != "" {
   846  						return fmt.Errorf("Cannot specify a vmdk for a template")
   847  					}
   848  					if v, ok := disk["size"].(int); ok && v != 0 {
   849  						return fmt.Errorf("Cannot specify size of a vmdk")
   850  					}
   851  					if v, ok := disk["name"].(string); ok && v != "" {
   852  						return fmt.Errorf("Cannot specify name of a vmdk")
   853  					}
   854  					if vBootable, ok := disk["bootable"].(bool); ok {
   855  						if vBootable && vm.hasBootableVmdk {
   856  							return fmt.Errorf("[ERROR] Only one bootable disk or template may be given")
   857  						}
   858  						newDisk.bootable = vBootable
   859  						vm.hasBootableVmdk = vm.hasBootableVmdk || vBootable
   860  					}
   861  					newDisk.vmdkPath = vVmdk
   862  				}
   863  				// Preserves order so bootable disk is first
   864  				if newDisk.bootable || disk["template"] != "" {
   865  					disks = append([]hardDisk{newDisk}, disks...)
   866  				} else {
   867  					disks = append(disks, newDisk)
   868  				}
   869  			}
   870  			vm.hardDisks = disks
   871  			log.Printf("[DEBUG] disk init: %v", disks)
   872  		}
   873  	}
   874  
   875  	if vL, ok := d.GetOk("cdrom"); ok {
   876  		cdroms := make([]cdrom, len(vL.([]interface{})))
   877  		for i, v := range vL.([]interface{}) {
   878  			c := v.(map[string]interface{})
   879  			if v, ok := c["datastore"].(string); ok && v != "" {
   880  				cdroms[i].datastore = v
   881  			} else {
   882  				return fmt.Errorf("Datastore argument must be specified when attaching a cdrom image.")
   883  			}
   884  			if v, ok := c["path"].(string); ok && v != "" {
   885  				cdroms[i].path = v
   886  			} else {
   887  				return fmt.Errorf("Path argument must be specified when attaching a cdrom image.")
   888  			}
   889  		}
   890  		vm.cdroms = cdroms
   891  		log.Printf("[DEBUG] cdrom init: %v", cdroms)
   892  	}
   893  
   894  	err := vm.setupVirtualMachine(client)
   895  	if err != nil {
   896  		return err
   897  	}
   898  
   899  	d.SetId(vm.Path())
   900  	log.Printf("[INFO] Created virtual machine: %s", d.Id())
   901  
   902  	return resourceVSphereVirtualMachineRead(d, meta)
   903  }
   904  
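        // resourceVSphereVirtualMachineRead refreshes state from vSphere: it waits for
        // guest networking when the VM is powered on, then synchronizes the disk set,
        // network interfaces, gateways, datastore and summary attributes.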
   905  func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{}) error {
   906  	log.Printf("[DEBUG] virtual machine resource data: %#v", d)
   907  	client := meta.(*govmomi.Client)
   908  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
   909  	if err != nil {
   910  		return err
   911  	}
   912  	finder := find.NewFinder(client.Client, true)
   913  	finder = finder.SetDatacenter(dc)
   914  
   915  	vm, err := finder.VirtualMachine(context.TODO(), d.Id())
   916  	if err != nil {
   917  		d.SetId("")
   918  		return nil
   919  	}
   920  
   921  	state, err := vm.PowerState(context.TODO())
   922  	if err != nil {
   923  		return err
   924  	}
   925  
   926  	if state == types.VirtualMachinePowerStatePoweredOn {
   927  		// wait for interfaces to appear
   928  		log.Printf("[DEBUG] Waiting for interfaces to appear")
   929  
   930  		_, err = vm.WaitForNetIP(context.TODO(), false)
   931  		if err != nil {
   932  			return err
   933  		}
   934  
   935  		log.Printf("[DEBUG] Successfully waited for interfaces to appear")
   936  	}
   937  
   938  	var mvm mo.VirtualMachine
   939  	collector := property.DefaultCollector(client.Client)
   940  	if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"guest", "summary", "datastore", "config"}, &mvm); err != nil {
   941  		return err
   942  	}
   943  
   944  	log.Printf("[DEBUG] Datacenter - %#v", dc)
   945  	log.Printf("[DEBUG] mvm.Summary.Config - %#v", mvm.Summary.Config)
   946  	log.Printf("[DEBUG] mvm.Config - %#v", mvm.Config)
   947  	log.Printf("[DEBUG] mvm.Guest.Net - %#v", mvm.Guest.Net)
   948  
   949  	disks := make([]map[string]interface{}, 0)
   950  	templateDisk := make(map[string]interface{}, 1)
   951  	for _, device := range mvm.Config.Hardware.Device {
   952  		if vd, ok := device.(*types.VirtualDisk); ok {
   953  
   954  			virtualDevice := vd.GetVirtualDevice()
   955  
   956  			backingInfo := virtualDevice.Backing
   957  			var diskFullPath string
   958  			var diskUuid string
   959  			if v, ok := backingInfo.(*types.VirtualDiskFlatVer2BackingInfo); ok {
   960  				diskFullPath = v.FileName
   961  				diskUuid = v.Uuid
   962  			} else if v, ok := backingInfo.(*types.VirtualDiskSparseVer2BackingInfo); ok {
   963  				diskFullPath = v.FileName
   964  				diskUuid = v.Uuid
   965  			}
   966  			log.Printf("[DEBUG] resourceVSphereVirtualMachineRead - Analyzing disk: %v", diskFullPath)
   967  
   968  			// Separate datastore and path
   969  			diskFullPathSplit := strings.Split(diskFullPath, " ")
   970  			if len(diskFullPathSplit) != 2 {
   971  				return fmt.Errorf("[ERROR] Failed trying to parse disk path: %v", diskFullPath)
   972  			}
   973  			diskPath := diskFullPathSplit[1]
   974  			// Isolate filename
   975  			diskNameSplit := strings.Split(diskPath, "/")
   976  			diskName := diskNameSplit[len(diskNameSplit)-1]
   977  			// Remove possible extension
   978  			diskName = strings.Split(diskName, ".")[0]
   979  
   980  			if prevDisks, ok := d.GetOk("disk"); ok {
   981  				if prevDiskSet, ok := prevDisks.(*schema.Set); ok {
   982  					for _, v := range prevDiskSet.List() {
   983  						prevDisk := v.(map[string]interface{})
   984  
   985  						// We're guaranteed only one template disk.  Passing value directly through since templates should be immutable
   986  						if prevDisk["template"] != "" {
   987  							if len(templateDisk) == 0 {
   988  								templateDisk = prevDisk
   989  								disks = append(disks, templateDisk)
   990  								break
   991  							}
   992  						}
   993  
   994  						// It is enforced that prevDisk["name"] should only be set in the case
   995  						// of creating a new disk for the user.
   996  						// size case:  name was set by user, compare parsed filename from mo.filename (without path or .vmdk extension) with name
   997  						// vmdk case:  compare prevDisk["vmdk"] and mo.Filename
   998  						if diskName == prevDisk["name"] || diskPath == prevDisk["vmdk"] {
   999  
  1000  							prevDisk["key"] = virtualDevice.Key
  1001  							prevDisk["uuid"] = diskUuid
  1002  
  1003  							disks = append(disks, prevDisk)
  1004  							break
  1005  						}
  1006  					}
  1007  				}
  1008  			}
  1009  			log.Printf("[DEBUG] disks: %#v", disks)
  1010  		}
  1011  	}
  1012  	err = d.Set("disk", disks)
  1013  	if err != nil {
  1014  		return fmt.Errorf("Invalid disks to set: %#v", disks)
  1015  	}
  1016  
  1017  	networkInterfaces := make([]map[string]interface{}, 0)
  1018  	for _, v := range mvm.Guest.Net {
  1019  		if v.DeviceConfigId >= 0 {
  1020  			log.Printf("[DEBUG] v.Network - %#v", v.Network)
  1021  			networkInterface := make(map[string]interface{})
  1022  			networkInterface["label"] = v.Network
  1023  			networkInterface["mac_address"] = v.MacAddress
  1024  			for _, ip := range v.IpConfig.IpAddress {
  1025  				p := net.ParseIP(ip.IpAddress)
  1026  				if p.To4() != nil {
  1027  					log.Printf("[DEBUG] p.String - %#v", p.String())
  1028  					log.Printf("[DEBUG] ip.PrefixLength - %#v", ip.PrefixLength)
  1029  					networkInterface["ipv4_address"] = p.String()
  1030  					networkInterface["ipv4_prefix_length"] = ip.PrefixLength
  1031  				} else if p.To16() != nil {
  1032  					log.Printf("[DEBUG] p.String - %#v", p.String())
  1033  					log.Printf("[DEBUG] ip.PrefixLength - %#v", ip.PrefixLength)
  1034  					networkInterface["ipv6_address"] = p.String()
  1035  					networkInterface["ipv6_prefix_length"] = ip.PrefixLength
  1036  				}
  1037  				log.Printf("[DEBUG] networkInterface: %#v", networkInterface)
  1038  			}
  1039  			log.Printf("[DEBUG] networkInterface: %#v", networkInterface)
  1040  			networkInterfaces = append(networkInterfaces, networkInterface)
  1041  		}
  1042  	}
  1043  	if mvm.Guest.IpStack != nil {
  1044  		for _, v := range mvm.Guest.IpStack {
  1045  			if v.IpRouteConfig != nil && v.IpRouteConfig.IpRoute != nil {
  1046  				for _, route := range v.IpRouteConfig.IpRoute {
  1047  					if route.Gateway.Device != "" {
  1048  						gatewaySetting := ""
  1049  						if route.Network == "::" {
  1050  							gatewaySetting = "ipv6_gateway"
  1051  						} else if route.Network == "0.0.0.0" {
  1052  							gatewaySetting = "ipv4_gateway"
  1053  						}
  1054  						if gatewaySetting != "" {
  1055  							deviceID, err := strconv.Atoi(route.Gateway.Device)
  1056  							if err != nil {
  1057  								log.Printf("[WARN] error at processing %s of device id %#v: %#v", gatewaySetting, route.Gateway.Device, err)
  1058  							} else {
  1059  								log.Printf("[DEBUG] %s of device id %d: %s", gatewaySetting, deviceID, route.Gateway.IpAddress)
  1060  								networkInterfaces[deviceID][gatewaySetting] = route.Gateway.IpAddress
  1061  							}
  1062  						}
  1063  					}
  1064  				}
  1065  			}
  1066  		}
  1067  	}
  1068  	log.Printf("[DEBUG] networkInterfaces: %#v", networkInterfaces)
  1069  	err = d.Set("network_interface", networkInterfaces)
  1070  	if err != nil {
  1071  		return fmt.Errorf("Invalid network interfaces to set: %#v", networkInterfaces)
  1072  	}
  1073  
  1074  	if len(networkInterfaces) > 0 {
  1075  		if _, ok := networkInterfaces[0]["ipv4_address"]; ok {
  1076  			log.Printf("[DEBUG] ip address: %v", networkInterfaces[0]["ipv4_address"].(string))
  1077  			d.SetConnInfo(map[string]string{
  1078  				"type": "ssh",
  1079  				"host": networkInterfaces[0]["ipv4_address"].(string),
  1080  			})
  1081  		}
  1082  	}
  1083  
  1084  	var rootDatastore string
  1085  	for _, v := range mvm.Datastore {
  1086  		var md mo.Datastore
  1087  		if err := collector.RetrieveOne(context.TODO(), v, []string{"name", "parent"}, &md); err != nil {
  1088  			return err
  1089  		}
  1090  		if md.Parent.Type == "StoragePod" {
  1091  			var msp mo.StoragePod
  1092  			if err := collector.RetrieveOne(context.TODO(), *md.Parent, []string{"name"}, &msp); err != nil {
  1093  				return err
  1094  			}
  1095  			rootDatastore = msp.Name
  1096  			log.Printf("[DEBUG] %#v", msp.Name)
  1097  		} else {
  1098  			rootDatastore = md.Name
  1099  			log.Printf("[DEBUG] %#v", md.Name)
  1100  		}
  1101  		break
  1102  	}
  1103  
  1104  	d.Set("datacenter", dc)
  1105  	d.Set("memory", mvm.Summary.Config.MemorySizeMB)
  1106  	d.Set("memory_reservation", mvm.Summary.Config.MemoryReservation)
  1107  	d.Set("vcpu", mvm.Summary.Config.NumCpu)
  1108  	d.Set("datastore", rootDatastore)
  1109  	d.Set("uuid", mvm.Summary.Config.Uuid)
  1110  
  1111  	return nil
  1112  }
  1113  
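        // resourceVSphereVirtualMachineDelete powers the VM off if necessary, detaches
        // (without destroying) any disks marked keep_on_remove, and then destroys the
        // virtual machine.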
  1114  func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error {
  1115  	client := meta.(*govmomi.Client)
  1116  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
  1117  	if err != nil {
  1118  		return err
  1119  	}
  1120  	finder := find.NewFinder(client.Client, true)
  1121  	finder = finder.SetDatacenter(dc)
  1122  
  1123  	vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string)))
  1124  	if err != nil {
  1125  		return err
  1126  	}
  1127  	devices, err := vm.Device(context.TODO())
  1128  	if err != nil {
  1129  		log.Printf("[DEBUG] resourceVSphereVirtualMachineDelete - Failed to get device list: %v", err)
  1130  		return err
  1131  	}
  1132  
  1133  	log.Printf("[INFO] Deleting virtual machine: %s", d.Id())
  1134  	state, err := vm.PowerState(context.TODO())
  1135  	if err != nil {
  1136  		return err
  1137  	}
  1138  
  1139  	if state == types.VirtualMachinePowerStatePoweredOn {
  1140  		task, err := vm.PowerOff(context.TODO())
  1141  		if err != nil {
  1142  			return err
  1143  		}
  1144  
  1145  		err = task.Wait(context.TODO())
  1146  		if err != nil {
  1147  			return err
  1148  		}
  1149  	}
  1150  
  1151  	// Safely eject any disks the user marked as keep_on_remove
  1152  	if vL, ok := d.GetOk("disk"); ok {
  1153  		if diskSet, ok := vL.(*schema.Set); ok {
  1154  
  1155  			for _, value := range diskSet.List() {
  1156  				disk := value.(map[string]interface{})
  1157  
  1158  				if v, ok := disk["keep_on_remove"].(bool); ok && v {
  1159  					log.Printf("[DEBUG] not destroying %v", disk["name"])
  1160  					virtualDisk := devices.FindByKey(int32(disk["key"].(int)))
  1161  					err = vm.RemoveDevice(context.TODO(), true, virtualDisk)
  1162  					if err != nil {
  1163  						log.Printf("[ERROR] Update Remove Disk - Error removing disk: %v", err)
  1164  						return err
  1165  					}
  1166  				}
  1167  			}
  1168  		}
  1169  	}
  1170  
  1171  	task, err := vm.Destroy(context.TODO())
  1172  	if err != nil {
  1173  		return err
  1174  	}
  1175  
  1176  	err = task.Wait(context.TODO())
  1177  	if err != nil {
  1178  		return err
  1179  	}
  1180  
  1181  	d.SetId("")
  1182  	return nil
  1183  }
  1184  
  1185  // addHardDisk adds a new Hard Disk to the VirtualMachine.
  1186  func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string, datastore *object.Datastore, diskPath string, controller_type string) error {
  1187  	devices, err := vm.Device(context.TODO())
  1188  	if err != nil {
  1189  		return err
  1190  	}
  1191  	log.Printf("[DEBUG] vm devices: %#v\n", devices)
  1192  
  1193  	var controller types.BaseVirtualController
  1194  	switch controller_type {
  1195  	case "scsi":
  1196  		controller, err = devices.FindDiskController(controller_type)
  1197  	case "scsi-lsi-parallel":
  1198  		controller = devices.PickController(&types.VirtualLsiLogicController{})
  1199  	case "scsi-buslogic":
  1200  		controller = devices.PickController(&types.VirtualBusLogicController{})
  1201  	case "scsi-paravirtual":
  1202  		controller = devices.PickController(&types.ParaVirtualSCSIController{})
  1203  	case "scsi-lsi-sas":
  1204  		controller = devices.PickController(&types.VirtualLsiLogicSASController{})
  1205  	case "ide":
  1206  		controller, err = devices.FindDiskController(controller_type)
  1207  	default:
  1208  		return fmt.Errorf("[ERROR] Unsupported disk controller provided: %v", controller_type)
  1209  	}
  1210  
  1211  	if err != nil || controller == nil {
  1212  		// Check whether the maximum number of SCSI controllers is already in use
  1213  		diskControllers := getSCSIControllers(devices)
  1214  		if len(diskControllers) >= 4 {
  1215  			return fmt.Errorf("[ERROR] Maximum number of SCSI controllers created")
  1216  		}
  1217  
  1218  		log.Printf("[DEBUG] Couldn't find a %v controller. Creating one...", controller_type)
  1219  
  1220  		var c types.BaseVirtualDevice
  1221  		switch controller_type {
  1222  		case "scsi":
  1223  			// Create scsi controller
  1224  			c, err = devices.CreateSCSIController("scsi")
  1225  			if err != nil {
  1226  				return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err)
  1227  			}
  1228  		case "scsi-lsi-parallel":
  1229  			// Create scsi controller
  1230  			c, err = devices.CreateSCSIController("lsilogic")
  1231  			if err != nil {
  1232  				return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err)
  1233  			}
  1234  		case "scsi-buslogic":
  1235  			// Create scsi controller
  1236  			c, err = devices.CreateSCSIController("buslogic")
  1237  			if err != nil {
  1238  				return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err)
  1239  			}
  1240  		case "scsi-paravirtual":
  1241  			// Create scsi controller
  1242  			c, err = devices.CreateSCSIController("pvscsi")
  1243  			if err != nil {
  1244  				return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err)
  1245  			}
  1246  		case "scsi-lsi-sas":
  1247  			// Create scsi controller
  1248  			c, err = devices.CreateSCSIController("lsilogic-sas")
  1249  			if err != nil {
  1250  				return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err)
  1251  			}
  1252  		case "ide":
  1253  			// Create ide controller
  1254  			c, err = devices.CreateIDEController()
  1255  			if err != nil {
  1256  				return fmt.Errorf("[ERROR] Failed creating IDE controller: %v", err)
  1257  			}
  1258  		default:
  1259  			return fmt.Errorf("[ERROR] Unsupported disk controller provided: %v", controller_type)
  1260  		}
  1261  
  1262  		if err := vm.AddDevice(context.TODO(), c); err != nil {
        			return fmt.Errorf("[ERROR] Failed adding %v controller: %v", controller_type, err)
        		}
  1263  		// Update our devices list
  1264  		devices, err := vm.Device(context.TODO())
  1265  		if err != nil {
  1266  			return err
  1267  		}
  1268  		controller = devices.PickController(c.(types.BaseVirtualController))
  1269  		if controller == nil {
  1270  			log.Printf("[ERROR] Could not find the new %v controller", controller_type)
  1271  			return fmt.Errorf("Could not find the new %v controller", controller_type)
  1272  		}
  1273  	}
  1274  
  1275  	log.Printf("[DEBUG] disk controller: %#v\n", controller)
  1276  
  1277  	// TODO Check if diskPath & datastore exist
  1278  	// A disk path is required; resolve it to a full datastore path.
  1279  	if diskPath == "" {
  1280  		return fmt.Errorf("[ERROR] addHardDisk - No path provided")
  1281  	}
  1282  	diskPath = datastore.Path(diskPath)
  1284  	log.Printf("[DEBUG] addHardDisk - diskPath: %v", diskPath)
  1285  	disk := devices.CreateDisk(controller, datastore.Reference(), diskPath)
  1286  
  1287  	if strings.Contains(controller_type, "scsi") {
  1288  		unitNumber, err := getNextUnitNumber(devices, controller)
  1289  		if err != nil {
  1290  			return err
  1291  		}
  1292  		*disk.UnitNumber = unitNumber
  1293  	}
  1294  
  1295  	existing := devices.SelectByBackingInfo(disk.Backing)
  1296  	log.Printf("[DEBUG] disk: %#v\n", disk)
  1297  
  1298  	if len(existing) == 0 {
  1299  		disk.CapacityInKB = int64(size * 1024 * 1024)
  1300  		if iops != 0 {
  1301  			disk.StorageIOAllocation = &types.StorageIOAllocationInfo{
  1302  				Limit: iops,
  1303  			}
  1304  		}
  1305  		backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
  1306  
  1307  		if diskType == "eager_zeroed" {
  1308  			// eager zeroed thick virtual disk
  1309  			backing.ThinProvisioned = types.NewBool(false)
  1310  			backing.EagerlyScrub = types.NewBool(true)
  1311  		} else if diskType == "lazy" {
  1312  			// lazy zeroed thick virtual disk
  1313  			backing.ThinProvisioned = types.NewBool(false)
  1314  			backing.EagerlyScrub = types.NewBool(false)
  1315  		} else if diskType == "thin" {
  1316  			// thin provisioned virtual disk
  1317  			backing.ThinProvisioned = types.NewBool(true)
  1318  		}
  1319  
  1320  		log.Printf("[DEBUG] addHardDisk: %#v\n", disk)
  1321  		log.Printf("[DEBUG] addHardDisk capacity: %#v\n", disk.CapacityInKB)
  1322  
  1323  		return vm.AddDevice(context.TODO(), disk)
  1324  	} else {
  1325  		log.Printf("[DEBUG] addHardDisk: Disk already present.\n")
  1326  
  1327  		return nil
  1328  	}
  1329  }
  1330  
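        // getSCSIControllers returns the SCSI controllers (of any supported flavor)
        // currently present in the device list.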
  1331  func getSCSIControllers(vmDevices object.VirtualDeviceList) []*types.VirtualController {
  1332  	// get virtual scsi controllers of all supported types
  1333  	var scsiControllers []*types.VirtualController
  1334  	for _, device := range vmDevices {
  1335  		devType := vmDevices.Type(device)
  1336  		switch devType {
  1337  		case "scsi", "lsilogic", "buslogic", "pvscsi", "lsilogic-sas":
  1338  			if c, ok := device.(types.BaseVirtualController); ok {
  1339  				scsiControllers = append(scsiControllers, c.GetVirtualController())
  1340  			}
  1341  		}
  1342  	}
  1343  	return scsiControllers
  1344  }
  1345  
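        // getNextUnitNumber returns the first free unit number on the given controller.
        // Unit 7 is reserved for the SCSI controller itself, and a SCSI controller
        // supports at most 16 units, hence the fixed-size table below.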
  1346  func getNextUnitNumber(devices object.VirtualDeviceList, c types.BaseVirtualController) (int32, error) {
  1347  	key := c.GetVirtualController().Key
  1348  
  1349  	var unitNumbers [16]bool
  1350  	unitNumbers[7] = true
  1351  
  1352  	for _, device := range devices {
  1353  		d := device.GetVirtualDevice()
  1354  
  1355  		if d.ControllerKey == key {
  1356  			if d.UnitNumber != nil {
  1357  				unitNumbers[*d.UnitNumber] = true
  1358  			}
  1359  		}
  1360  	}
  1361  	for i, taken := range unitNumbers {
  1362  		if !taken {
  1363  			return int32(i), nil
  1364  		}
  1365  	}
  1366  	return -1, fmt.Errorf("[ERROR] getNextUnitNumber - controller is full")
  1367  }
  1368  
  1369  // addCdrom adds a new virtual cdrom drive to the VirtualMachine and attaches an image (ISO) to it from a datastore path.
  1370  func addCdrom(client *govmomi.Client, vm *object.VirtualMachine, datacenter *object.Datacenter, datastore, path string) error {
  1371  	devices, err := vm.Device(context.TODO())
  1372  	if err != nil {
  1373  		return err
  1374  	}
  1375  	log.Printf("[DEBUG] vm devices: %#v", devices)
  1376  
  1377  	var controller *types.VirtualIDEController
  1378  	controller, err = devices.FindIDEController("")
  1379  	if err != nil {
  1380  		log.Printf("[DEBUG] Couldn't find an IDE controller. Creating one...")
  1381  
  1382  		var c types.BaseVirtualDevice
  1383  		c, err := devices.CreateIDEController()
  1384  		if err != nil {
  1385  			return fmt.Errorf("[ERROR] Failed creating IDE controller: %v", err)
  1386  		}
  1387  
  1388  		if v, ok := c.(*types.VirtualIDEController); ok {
  1389  			controller = v
  1390  		} else {
  1391  			return fmt.Errorf("[ERROR] Controller type could not be asserted")
  1392  		}
  1393  		if err := vm.AddDevice(context.TODO(), c); err != nil {
        			return fmt.Errorf("[ERROR] Failed adding IDE controller: %v", err)
        		}
  1394  		// Update our devices list
  1395  		devices, err := vm.Device(context.TODO())
  1396  		if err != nil {
  1397  			return err
  1398  		}
  1399  		controller, err = devices.FindIDEController("")
  1400  		if err != nil {
  1401  			log.Printf("[ERROR] Could not find the new disk IDE controller: %v", err)
  1402  			return err
  1403  		}
  1404  	}
  1405  	log.Printf("[DEBUG] ide controller: %#v", controller)
  1406  
  1407  	c, err := devices.CreateCdrom(controller)
  1408  	if err != nil {
  1409  		return err
  1410  	}
  1411  
  1412  	finder := find.NewFinder(client.Client, true)
  1413  	finder = finder.SetDatacenter(datacenter)
  1414  	ds, err := getDatastore(finder, datastore)
  1415  	if err != nil {
  1416  		return err
  1417  	}
  1418  
  1419  	c = devices.InsertIso(c, ds.Path(path))
  1420  	log.Printf("[DEBUG] addCdrom: %#v", c)
  1421  
  1422  	return vm.AddDevice(context.TODO(), c)
  1423  }
  1424  
  1425  // buildNetworkDevice builds VirtualDeviceConfigSpec for Network Device.
  1426  func buildNetworkDevice(f *find.Finder, label, adapterType string, macAddress string) (*types.VirtualDeviceConfigSpec, error) {
  1427  	network, err := f.Network(context.TODO(), "*"+label)
  1428  	if err != nil {
  1429  		return nil, err
  1430  	}
  1431  
  1432  	backing, err := network.EthernetCardBackingInfo(context.TODO())
  1433  	if err != nil {
  1434  		return nil, err
  1435  	}
  1436  
  1437  	var address_type string
  1438  	if macAddress == "" {
  1439  		address_type = string(types.VirtualEthernetCardMacTypeGenerated)
  1440  	} else {
  1441  		address_type = string(types.VirtualEthernetCardMacTypeManual)
  1442  	}
  1443  
  1444  	if adapterType == "vmxnet3" {
  1445  		return &types.VirtualDeviceConfigSpec{
  1446  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
  1447  			Device: &types.VirtualVmxnet3{
  1448  				VirtualVmxnet: types.VirtualVmxnet{
  1449  					VirtualEthernetCard: types.VirtualEthernetCard{
  1450  						VirtualDevice: types.VirtualDevice{
  1451  							Key:     -1,
  1452  							Backing: backing,
  1453  						},
  1454  						AddressType: address_type,
  1455  						MacAddress:  macAddress,
  1456  					},
  1457  				},
  1458  			},
  1459  		}, nil
  1460  	} else if adapterType == "e1000" {
  1461  		return &types.VirtualDeviceConfigSpec{
  1462  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
  1463  			Device: &types.VirtualE1000{
  1464  				VirtualEthernetCard: types.VirtualEthernetCard{
  1465  					VirtualDevice: types.VirtualDevice{
  1466  						Key:     -1,
  1467  						Backing: backing,
  1468  					},
  1469  					AddressType: address_type,
  1470  					MacAddress:  macAddress,
  1471  				},
  1472  			},
  1473  		}, nil
  1474  	} else {
  1475  		return nil, fmt.Errorf("Invalid network adapter type %q; supported types are 'vmxnet3' and 'e1000'", adapterType)
  1476  	}
  1477  }
  1478  
  1479  // buildVMRelocateSpec builds VirtualMachineRelocateSpec to set a place for a new VirtualMachine.
  1480  func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine, linkedClone bool, initType string) (types.VirtualMachineRelocateSpec, error) {
  1481  	var key int32
  1482  	var moveType string
  1483  	if linkedClone {
  1484  		moveType = "createNewChildDiskBacking"
  1485  	} else {
  1486  		moveType = "moveAllDiskBackingsAndDisallowSharing"
  1487  	}
  1488  	log.Printf("[DEBUG] relocate type: [%s]", moveType)
  1489  
  1490  	devices, err := vm.Device(context.TODO())
  1491  	if err != nil {
  1492  		return types.VirtualMachineRelocateSpec{}, err
  1493  	}
  1494  	for _, d := range devices {
  1495  		if devices.Type(d) == "disk" {
  1496  			key = int32(d.GetVirtualDevice().Key)
  1497  		}
  1498  	}
  1499  
  1500  	isThin := initType == "thin"
  1501  	eagerScrub := initType == "eager_zeroed"
  1502  	rpr := rp.Reference()
  1503  	dsr := ds.Reference()
  1504  	return types.VirtualMachineRelocateSpec{
  1505  		Datastore:    &dsr,
  1506  		Pool:         &rpr,
  1507  		DiskMoveType: moveType,
  1508  		Disk: []types.VirtualMachineRelocateSpecDiskLocator{
  1509  			{
  1510  				Datastore: dsr,
  1511  				DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{
  1512  					DiskMode:        "persistent",
  1513  					ThinProvisioned: types.NewBool(isThin),
  1514  					EagerlyScrub:    types.NewBool(eagerScrub),
  1515  				},
  1516  				DiskId: key,
  1517  			},
  1518  		},
  1519  	}, nil
  1520  }
  1521  
  1522  // getDatastoreObject gets datastore object.
  1523  func getDatastoreObject(client *govmomi.Client, f *object.DatacenterFolders, name string) (types.ManagedObjectReference, error) {
  1524  	s := object.NewSearchIndex(client.Client)
  1525  	ref, err := s.FindChild(context.TODO(), f.DatastoreFolder, name)
  1526  	if err != nil {
  1527  		return types.ManagedObjectReference{}, err
  1528  	}
  1529  	if ref == nil {
  1530  		return types.ManagedObjectReference{}, fmt.Errorf("Datastore '%s' not found.", name)
  1531  	}
  1532  	log.Printf("[DEBUG] getDatastoreObject: reference: %#v", ref)
  1533  	return ref.Reference(), nil
  1534  }
  1535  
  1536  // buildStoragePlacementSpecCreate builds StoragePlacementSpec for create action.
  1537  func buildStoragePlacementSpecCreate(f *object.DatacenterFolders, rp *object.ResourcePool, storagePod object.StoragePod, configSpec types.VirtualMachineConfigSpec) types.StoragePlacementSpec {
  1538  	vmfr := f.VmFolder.Reference()
  1539  	rpr := rp.Reference()
  1540  	spr := storagePod.Reference()
  1541  
  1542  	sps := types.StoragePlacementSpec{
  1543  		Type:       "create",
  1544  		ConfigSpec: &configSpec,
  1545  		PodSelectionSpec: types.StorageDrsPodSelectionSpec{
  1546  			StoragePod: &spr,
  1547  		},
  1548  		Folder:       &vmfr,
  1549  		ResourcePool: &rpr,
  1550  	}
  1551  	log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)
  1552  	return sps
  1553  }
  1554  
  1555  // buildStoragePlacementSpecClone builds StoragePlacementSpec for clone action.
  1556  func buildStoragePlacementSpecClone(c *govmomi.Client, f *object.DatacenterFolders, vm *object.VirtualMachine, rp *object.ResourcePool, storagePod object.StoragePod) types.StoragePlacementSpec {
  1557  	vmr := vm.Reference()
  1558  	vmfr := f.VmFolder.Reference()
  1559  	rpr := rp.Reference()
  1560  	spr := storagePod.Reference()
  1561  
  1562  	var o mo.VirtualMachine
  1563  	err := vm.Properties(context.TODO(), vmr, []string{"datastore"}, &o)
  1564  	if err != nil {
  1565  		return types.StoragePlacementSpec{}
  1566  	}
  1567  	ds := object.NewDatastore(c.Client, o.Datastore[0])
  1568  	log.Printf("[DEBUG] findDatastore: datastore: %#v\n", ds)
  1569  
  1570  	devices, err := vm.Device(context.TODO())
  1571  	if err != nil {
  1572  		return types.StoragePlacementSpec{}
  1573  	}
  1574  
  1575  	var key int32
  1576  	for _, d := range devices.SelectByType((*types.VirtualDisk)(nil)) {
  1577  		key = int32(d.GetVirtualDevice().Key)
  1578  		log.Printf("[DEBUG] findDatastore: virtual devices: %#v\n", d.GetVirtualDevice())
  1579  	}
  1580  
  1581  	sps := types.StoragePlacementSpec{
  1582  		Type: "clone",
  1583  		Vm:   &vmr,
  1584  		PodSelectionSpec: types.StorageDrsPodSelectionSpec{
  1585  			StoragePod: &spr,
  1586  		},
  1587  		CloneSpec: &types.VirtualMachineCloneSpec{
  1588  			Location: types.VirtualMachineRelocateSpec{
  1589  				Disk: []types.VirtualMachineRelocateSpecDiskLocator{
  1590  					{
  1591  						Datastore:       ds.Reference(),
  1592  						DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{},
  1593  						DiskId:          key,
  1594  					},
  1595  				},
  1596  				Pool: &rpr,
  1597  			},
  1598  			PowerOn:  false,
  1599  			Template: false,
  1600  		},
  1601  		CloneName: "dummy",
  1602  		Folder:    &vmfr,
  1603  	}
  1604  	return sps
  1605  }
  1606  
// findDatastore asks Storage DRS for recommendations matching the given StoragePlacementSpec and returns the first recommended datastore.
  1608  func findDatastore(c *govmomi.Client, sps types.StoragePlacementSpec) (*object.Datastore, error) {
  1609  	var datastore *object.Datastore
  1610  	log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)
  1611  
  1612  	srm := object.NewStorageResourceManager(c.Client)
  1613  	rds, err := srm.RecommendDatastores(context.TODO(), sps)
  1614  	if err != nil {
  1615  		return nil, err
  1616  	}
  1617  	log.Printf("[DEBUG] findDatastore: recommendDatastores: %#v\n", rds)
  1618  
	if len(rds.Recommendations) == 0 {
		return nil, fmt.Errorf("No datastore recommendations were returned for the given StoragePlacementSpec")
	}
	spa := rds.Recommendations[0].Action[0].(*types.StoragePlacementAction)
  1620  	datastore = object.NewDatastore(c.Client, spa.Destination)
  1621  	log.Printf("[DEBUG] findDatastore: datastore: %#v", datastore)
  1622  
  1623  	return datastore, nil
  1624  }
  1625  
  1626  // createCdroms is a helper function to attach virtual cdrom devices (and their attached disk images) to a virtual IDE controller.
  1627  func createCdroms(client *govmomi.Client, vm *object.VirtualMachine, datacenter *object.Datacenter, cdroms []cdrom) error {
  1628  	log.Printf("[DEBUG] add cdroms: %v", cdroms)
  1629  	for _, cd := range cdroms {
  1630  		log.Printf("[DEBUG] add cdrom (datastore): %v", cd.datastore)
  1631  		log.Printf("[DEBUG] add cdrom (cd path): %v", cd.path)
  1632  		err := addCdrom(client, vm, datacenter, cd.datastore, cd.path)
  1633  		if err != nil {
  1634  			return err
  1635  		}
  1636  	}
  1637  
  1638  	return nil
  1639  }
  1640  
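// setupVirtualMachine creates the virtual machine, either from scratch or by
// cloning the configured template, attaches its network devices, cdroms and
// extra hard disks, optionally applies guest customization, and powers it on
// when it is bootable.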
  1641  func (vm *virtualMachine) setupVirtualMachine(c *govmomi.Client) error {
  1642  	dc, err := getDatacenter(c, vm.datacenter)
  1643  
  1644  	if err != nil {
  1645  		return err
  1646  	}
  1647  	finder := find.NewFinder(c.Client, true)
  1648  	finder = finder.SetDatacenter(dc)
  1649  
  1650  	var template *object.VirtualMachine
  1651  	var template_mo mo.VirtualMachine
  1652  	var vm_mo mo.VirtualMachine
  1653  	if vm.template != "" {
  1654  		template, err = finder.VirtualMachine(context.TODO(), vm.template)
  1655  		if err != nil {
  1656  			return err
  1657  		}
  1658  		log.Printf("[DEBUG] template: %#v", template)
  1659  
  1660  		err = template.Properties(context.TODO(), template.Reference(), []string{"parent", "config.template", "config.guestId", "resourcePool", "snapshot", "guest.toolsVersionStatus2", "config.guestFullName"}, &template_mo)
  1661  		if err != nil {
  1662  			return err
  1663  		}
  1664  	}
  1665  
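	// Resolve the resource pool: an explicitly configured pool wins, otherwise the
	// named cluster's root "Resources" pool is used, falling back to the finder's
	// default pool when no cluster is set either.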
  1666  	var resourcePool *object.ResourcePool
  1667  	if vm.resourcePool == "" {
  1668  		if vm.cluster == "" {
  1669  			resourcePool, err = finder.DefaultResourcePool(context.TODO())
  1670  			if err != nil {
  1671  				return err
  1672  			}
  1673  		} else {
  1674  			resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
  1675  			if err != nil {
  1676  				return err
  1677  			}
  1678  		}
  1679  	} else {
  1680  		resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
  1681  		if err != nil {
  1682  			return err
  1683  		}
  1684  	}
  1685  	log.Printf("[DEBUG] resource pool: %#v", resourcePool)
  1686  
  1687  	dcFolders, err := dc.Folders(context.TODO())
  1688  	if err != nil {
  1689  		return err
  1690  	}
  1691  	log.Printf("[DEBUG] folder: %#v", vm.folder)
  1692  
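	// Default to the datacenter's root VM folder; when a folder is configured,
	// resolve it by inventory path and fail if it cannot be found.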
  1693  	folder := dcFolders.VmFolder
  1694  	if len(vm.folder) > 0 {
  1695  		si := object.NewSearchIndex(c.Client)
  1696  		folderRef, err := si.FindByInventoryPath(
  1697  			context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder))
  1698  		if err != nil {
  1699  			return fmt.Errorf("Error reading folder %s: %s", vm.folder, err)
  1700  		} else if folderRef == nil {
  1701  			return fmt.Errorf("Cannot find folder %s", vm.folder)
  1702  		} else {
  1703  			folder = folderRef.(*object.Folder)
  1704  		}
  1705  	}
  1706  
	// build the base config spec (CPU, cores per socket, memory, memory reservation and the disk UUID flag)
  1708  	configSpec := types.VirtualMachineConfigSpec{
  1709  		Name:              vm.name,
  1710  		NumCPUs:           vm.vcpu,
  1711  		NumCoresPerSocket: 1,
  1712  		MemoryMB:          vm.memoryMb,
  1713  		MemoryAllocation: &types.ResourceAllocationInfo{
  1714  			Reservation: vm.memoryAllocation.reservation,
  1715  		},
  1716  		Flags: &types.VirtualMachineFlagInfo{
  1717  			DiskUuidEnabled: &vm.enableDiskUUID,
  1718  		},
  1719  	}
  1720  	if vm.template == "" {
  1721  		configSpec.GuestId = "otherLinux64Guest"
  1722  	}
  1723  	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
  1724  
	// map the user-supplied custom configuration values into ExtraConfig option values
  1726  	log.Printf("[DEBUG] virtual machine Extra Config spec start")
  1727  	if len(vm.customConfigurations) > 0 {
  1728  		var ov []types.BaseOptionValue
  1729  		for k, v := range vm.customConfigurations {
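			// Copy the range variables so each OptionValue points at its own
			// value instead of the shared loop variable.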
  1730  			key := k
  1731  			value := v
  1732  			o := types.OptionValue{
  1733  				Key:   key,
  1734  				Value: &value,
  1735  			}
  1736  			log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k, v)
  1737  			ov = append(ov, &o)
  1738  		}
  1739  		configSpec.ExtraConfig = ov
  1740  		log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
  1741  	}
  1742  
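	// Resolve the target datastore. When the configured name cannot be resolved
	// directly, search the datastore folder instead; if that turns up a StoragePod,
	// ask Storage DRS for a placement recommendation within the pod.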
  1743  	var datastore *object.Datastore
  1744  	if vm.datastore == "" {
  1745  		datastore, err = finder.DefaultDatastore(context.TODO())
  1746  		if err != nil {
  1747  			return err
  1748  		}
  1749  	} else {
  1750  		datastore, err = finder.Datastore(context.TODO(), vm.datastore)
  1751  		if err != nil {
  1752  			// TODO: datastore cluster support in govmomi finder function
  1753  			d, err := getDatastoreObject(c, dcFolders, vm.datastore)
  1754  			if err != nil {
  1755  				return err
  1756  			}
  1757  
  1758  			if d.Type == "StoragePod" {
  1759  				sp := object.StoragePod{
  1760  					Folder: object.NewFolder(c.Client, d),
  1761  				}
  1762  
  1763  				var sps types.StoragePlacementSpec
  1764  				if vm.template != "" {
  1765  					sps = buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp)
  1766  				} else {
  1767  					sps = buildStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec)
  1768  				}
  1769  
  1770  				datastore, err = findDatastore(c, sps)
  1771  				if err != nil {
  1772  					return err
  1773  				}
  1774  			} else {
  1775  				datastore = object.NewDatastore(c.Client, d)
  1776  			}
  1777  		}
  1778  	}
  1779  
  1780  	log.Printf("[DEBUG] datastore: %#v", datastore)
  1781  
  1782  	// network
  1783  	networkDevices := []types.BaseVirtualDeviceConfigSpec{}
  1784  	networkConfigs := []types.CustomizationAdapterMapping{}
  1785  	for _, network := range vm.networkInterfaces {
		// network device: VMs created from scratch get an e1000 adapter, clones from a template get vmxnet3
  1787  		var networkDeviceType string
  1788  		if vm.template == "" {
  1789  			networkDeviceType = "e1000"
  1790  		} else {
  1791  			networkDeviceType = "vmxnet3"
  1792  		}
  1793  		nd, err := buildNetworkDevice(finder, network.label, networkDeviceType, network.macAddress)
  1794  		if err != nil {
  1795  			return err
  1796  		}
  1797  		log.Printf("[DEBUG] network device: %+v", nd.Device)
  1798  		networkDevices = append(networkDevices, nd)
  1799  
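		// Guest customization only applies to clones, so per-NIC IP settings
		// (static or DHCP, IPv4 and IPv6) are only collected when a template is used.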
  1800  		if vm.template != "" {
  1801  			var ipSetting types.CustomizationIPSettings
  1802  			if network.ipv4Address == "" {
  1803  				ipSetting.Ip = &types.CustomizationDhcpIpGenerator{}
  1804  			} else {
  1805  				if network.ipv4PrefixLength == 0 {
					return fmt.Errorf("Error: ipv4_prefix_length argument must be set when an ipv4_address is provided.")
  1807  				}
  1808  				m := net.CIDRMask(network.ipv4PrefixLength, 32)
  1809  				sm := net.IPv4(m[0], m[1], m[2], m[3])
  1810  				subnetMask := sm.String()
  1811  				log.Printf("[DEBUG] ipv4 gateway: %v\n", network.ipv4Gateway)
  1812  				log.Printf("[DEBUG] ipv4 address: %v\n", network.ipv4Address)
  1813  				log.Printf("[DEBUG] ipv4 prefix length: %v\n", network.ipv4PrefixLength)
  1814  				log.Printf("[DEBUG] ipv4 subnet mask: %v\n", subnetMask)
  1815  				ipSetting.Gateway = []string{
  1816  					network.ipv4Gateway,
  1817  				}
  1818  				ipSetting.Ip = &types.CustomizationFixedIp{
  1819  					IpAddress: network.ipv4Address,
  1820  				}
  1821  				ipSetting.SubnetMask = subnetMask
  1822  			}
  1823  
  1824  			ipv6Spec := &types.CustomizationIPSettingsIpV6AddressSpec{}
  1825  			if network.ipv6Address == "" {
  1826  				ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
  1827  					&types.CustomizationDhcpIpV6Generator{},
  1828  				}
  1829  			} else {
  1830  				log.Printf("[DEBUG] ipv6 gateway: %v\n", network.ipv6Gateway)
  1831  				log.Printf("[DEBUG] ipv6 address: %v\n", network.ipv6Address)
  1832  				log.Printf("[DEBUG] ipv6 prefix length: %v\n", network.ipv6PrefixLength)
  1833  
  1834  				ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
  1835  					&types.CustomizationFixedIpV6{
  1836  						IpAddress:  network.ipv6Address,
  1837  						SubnetMask: int32(network.ipv6PrefixLength),
  1838  					},
  1839  				}
  1840  				ipv6Spec.Gateway = []string{network.ipv6Gateway}
  1841  			}
  1842  			ipSetting.IpV6Spec = ipv6Spec
  1843  
  1844  			// network config
  1845  			config := types.CustomizationAdapterMapping{
  1846  				Adapter: ipSetting,
  1847  			}
  1848  			networkConfigs = append(networkConfigs, config)
  1849  		}
  1850  	}
  1851  	log.Printf("[DEBUG] network devices: %#v", networkDevices)
  1852  	log.Printf("[DEBUG] network configs: %#v", networkConfigs)
  1853  
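	// Without a template the VM is created from the config spec (plus a default
	// SCSI controller); with a template it is cloned instead, optionally as a
	// linked clone off the template's current snapshot.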
  1854  	var task *object.Task
  1855  	if vm.template == "" {
  1856  		var mds mo.Datastore
  1857  		if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil {
  1858  			return err
  1859  		}
  1860  		log.Printf("[DEBUG] datastore: %#v", mds.Name)
		scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi")
		if err != nil {
			return err
		}
  1865  
  1866  		configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{
  1867  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
  1868  			Device:    scsi,
  1869  		})
  1870  
  1871  		configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)}
  1872  
		task, err = folder.CreateVM(context.TODO(), configSpec, resourcePool, nil)
		if err != nil {
			return err
		}

		err = task.Wait(context.TODO())
		if err != nil {
			return err
		}
  1882  
  1883  	} else {
  1884  
  1885  		relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template, vm.linkedClone, vm.hardDisks[0].initType)
  1886  		if err != nil {
  1887  			return err
  1888  		}
  1889  
  1890  		log.Printf("[DEBUG] relocate spec: %v", relocateSpec)
  1891  
  1892  		// make vm clone spec
  1893  		cloneSpec := types.VirtualMachineCloneSpec{
  1894  			Location: relocateSpec,
  1895  			Template: false,
  1896  			Config:   &configSpec,
  1897  			PowerOn:  false,
  1898  		}
  1899  		if vm.linkedClone {
  1900  			if template_mo.Snapshot == nil {
  1901  				return fmt.Errorf("`linkedClone=true`, but image VM has no snapshots")
  1902  			}
  1903  			cloneSpec.Snapshot = template_mo.Snapshot.CurrentSnapshot
  1904  		}
  1905  		log.Printf("[DEBUG] clone spec: %v", cloneSpec)
  1906  
  1907  		task, err = template.Clone(context.TODO(), folder, vm.name, cloneSpec)
  1908  		if err != nil {
  1909  			return err
  1910  		}
  1911  	}
  1912  
	err = task.Wait(context.TODO())
	if err != nil {
		return err
	}
  1917  
  1918  	newVM, err := finder.VirtualMachine(context.TODO(), vm.Path())
  1919  	if err != nil {
  1920  		return err
  1921  	}
  1922  	log.Printf("[DEBUG] new vm: %v", newVM)
  1923  
  1924  	devices, err := newVM.Device(context.TODO())
  1925  	if err != nil {
		log.Printf("[DEBUG] Couldn't retrieve the new virtual machine's devices: %s", err)
  1927  		return err
  1928  	}
  1929  
  1930  	for _, dvc := range devices {
  1931  		// Issue 3559/3560: Delete all ethernet devices to add the correct ones later
  1932  		if devices.Type(dvc) == "ethernet" {
  1933  			err := newVM.RemoveDevice(context.TODO(), false, dvc)
  1934  			if err != nil {
  1935  				return err
  1936  			}
  1937  		}
  1938  	}
  1939  	// Add Network devices
  1940  	for _, dvc := range networkDevices {
  1941  		err := newVM.AddDevice(
  1942  			context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device)
  1943  		if err != nil {
  1944  			return err
  1945  		}
  1946  	}
  1947  
  1948  	// Create the cdroms if needed.
  1949  	if err := createCdroms(c, newVM, dc, vm.cdroms); err != nil {
  1950  		return err
  1951  	}
  1952  
	err = newVM.Properties(context.TODO(), newVM.Reference(), []string{"summary", "config"}, &vm_mo)
	if err != nil {
		return err
	}
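	// When cloning from a template, skip the first configured disk: it is assumed
	// to describe the primary disk the clone already inherited from the template.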
  1954  	firstDisk := 0
  1955  	if vm.template != "" {
  1956  		firstDisk++
  1957  	}
  1958  	for i := firstDisk; i < len(vm.hardDisks); i++ {
  1959  		log.Printf("[DEBUG] disk index: %v", i)
  1960  
  1961  		var diskPath string
  1962  		switch {
  1963  		case vm.hardDisks[i].vmdkPath != "":
  1964  			diskPath = vm.hardDisks[i].vmdkPath
  1965  		case vm.hardDisks[i].name != "":
  1966  			snapshotFullDir := vm_mo.Config.Files.SnapshotDirectory
  1967  			split := strings.Split(snapshotFullDir, " ")
  1968  			if len(split) != 2 {
  1969  				return fmt.Errorf("[ERROR] setupVirtualMachine - failed to split snapshot directory: %v", snapshotFullDir)
  1970  			}
  1971  			vmWorkingPath := split[1]
  1972  			diskPath = vmWorkingPath + vm.hardDisks[i].name
  1973  		default:
  1974  			return fmt.Errorf("[ERROR] setupVirtualMachine - Neither vmdk path nor vmdk name was given: %#v", vm.hardDisks[i])
  1975  		}
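		// Adding the disk is attempted twice; setup only aborts if the retry fails as well.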
		err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, diskPath, vm.hardDisks[i].controller)
		if err != nil {
			log.Printf("[WARN] addHardDisk failed, retrying once: %s", err)
			err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, diskPath, vm.hardDisks[i].controller)
			if err != nil {
				return err
			}
		}
  1984  	}
  1985  
  1986  	if vm.skipCustomization || vm.template == "" {
  1987  		log.Printf("[DEBUG] VM customization skipped")
  1988  	} else {
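		// Build the guest customization identity: templates whose guest ID starts
		// with "win" get a Sysprep spec, everything else gets a LinuxPrep spec
		// keyed off the host-name portion of the VM name.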
  1989  		var identity_options types.BaseCustomizationIdentitySettings
  1990  		if strings.HasPrefix(template_mo.Config.GuestId, "win") {
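			// CustomizationGuiUnattended takes a numeric Microsoft (sysprep)
			// time-zone index rather than an IANA name, so the provider's
			// "Etc/UTC" default is mapped to the index used here first.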
  1991  			var timeZone int
  1992  			if vm.timeZone == "Etc/UTC" {
  1993  				vm.timeZone = "085"
  1994  			}
  1995  			timeZone, err := strconv.Atoi(vm.timeZone)
  1996  			if err != nil {
  1997  				return fmt.Errorf("Error converting TimeZone: %s", err)
  1998  			}
  1999  
  2000  			guiUnattended := types.CustomizationGuiUnattended{
  2001  				AutoLogon:      false,
  2002  				AutoLogonCount: 1,
  2003  				TimeZone:       int32(timeZone),
  2004  			}
  2005  
  2006  			customIdentification := types.CustomizationIdentification{}
  2007  
  2008  			userData := types.CustomizationUserData{
  2009  				ComputerName: &types.CustomizationFixedName{
  2010  					Name: strings.Split(vm.name, ".")[0],
  2011  				},
  2012  				ProductId: vm.windowsOptionalConfig.productKey,
  2013  				FullName:  "terraform",
  2014  				OrgName:   "terraform",
  2015  			}
  2016  
  2017  			if vm.windowsOptionalConfig.domainUserPassword != "" && vm.windowsOptionalConfig.domainUser != "" && vm.windowsOptionalConfig.domain != "" {
  2018  				customIdentification.DomainAdminPassword = &types.CustomizationPassword{
  2019  					PlainText: true,
  2020  					Value:     vm.windowsOptionalConfig.domainUserPassword,
  2021  				}
  2022  				customIdentification.DomainAdmin = vm.windowsOptionalConfig.domainUser
  2023  				customIdentification.JoinDomain = vm.windowsOptionalConfig.domain
  2024  			}
  2025  
  2026  			if vm.windowsOptionalConfig.adminPassword != "" {
  2027  				guiUnattended.Password = &types.CustomizationPassword{
  2028  					PlainText: true,
  2029  					Value:     vm.windowsOptionalConfig.adminPassword,
  2030  				}
  2031  			}
  2032  
  2033  			identity_options = &types.CustomizationSysprep{
  2034  				GuiUnattended:  guiUnattended,
  2035  				Identification: customIdentification,
  2036  				UserData:       userData,
  2037  			}
  2038  		} else {
  2039  			identity_options = &types.CustomizationLinuxPrep{
  2040  				HostName: &types.CustomizationFixedName{
  2041  					Name: strings.Split(vm.name, ".")[0],
  2042  				},
  2043  				Domain:     vm.domain,
  2044  				TimeZone:   vm.timeZone,
  2045  				HwClockUTC: types.NewBool(true),
  2046  			}
  2047  		}
  2048  
  2049  		// create CustomizationSpec
  2050  		customSpec := types.CustomizationSpec{
  2051  			Identity: identity_options,
  2052  			GlobalIPSettings: types.CustomizationGlobalIPSettings{
  2053  				DnsSuffixList: vm.dnsSuffixes,
  2054  				DnsServerList: vm.dnsServers,
  2055  			},
  2056  			NicSettingMap: networkConfigs,
  2057  		}
  2058  		log.Printf("[DEBUG] custom spec: %v", customSpec)
  2059  
  2060  		log.Printf("[DEBUG] VM customization starting")
  2061  		taskb, err := newVM.Customize(context.TODO(), customSpec)
  2062  		if err != nil {
  2063  			return err
  2064  		}
  2065  		_, err = taskb.WaitForResult(context.TODO(), nil)
  2066  		if err != nil {
  2067  			return err
  2068  		}
  2069  		log.Printf("[DEBUG] VM customization finished")
  2070  	}
  2071  
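	// Power the machine on when it was cloned from a template or has at least
	// one bootable vmdk attached, and wait until it reports poweredOn.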
  2072  	if vm.hasBootableVmdk || vm.template != "" {
		if _, err := newVM.PowerOn(context.TODO()); err != nil {
			return err
		}
  2074  		err = newVM.WaitForPowerState(context.TODO(), types.VirtualMachinePowerStatePoweredOn)
  2075  		if err != nil {
  2076  			return err
  2077  		}
  2078  	}
  2079  	return nil
  2080  }