github.com/bfallik/terraform@v0.7.1-0.20160814101525-d3a4714efbf5/builtin/providers/vsphere/resource_vsphere_virtual_machine.go

     1  package vsphere
     2  
     3  import (
     4  	"fmt"
     5  	"log"
     6  	"net"
     7  	"strconv"
     8  	"strings"
     9  
    10  	"github.com/hashicorp/terraform/helper/schema"
    11  	"github.com/vmware/govmomi"
    12  	"github.com/vmware/govmomi/find"
    13  	"github.com/vmware/govmomi/object"
    14  	"github.com/vmware/govmomi/property"
    15  	"github.com/vmware/govmomi/vim25/mo"
    16  	"github.com/vmware/govmomi/vim25/types"
    17  	"golang.org/x/net/context"
    18  )
    19  
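         // DefaultDNSSuffixes and DefaultDNSServers are fallbacks applied when the
         // dns_suffixes and dns_servers arguments are not set in the configuration.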
    20  var DefaultDNSSuffixes = []string{
    21  	"vsphere.local",
    22  }
    23  
    24  var DefaultDNSServers = []string{
    25  	"8.8.8.8",
    26  	"8.8.4.4",
    27  }
    28  
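         // DiskControllerTypes lists the accepted values for a disk's controller_type.
         // In addHardDisk below, "scsi-lsi-parallel", "scsi-buslogic", "scsi-paravirtual",
         // and "scsi-lsi-sas" map to the govmomi controller types "lsilogic", "buslogic",
         // "pvscsi", and "lsilogic-sas" respectively.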
    29  var DiskControllerTypes = []string{
    30  	"scsi",
    31  	"scsi-lsi-parallel",
    32  	"scsi-buslogic",
    33  	"scsi-paravirtual",
    34  	"scsi-lsi-sas",
    35  	"ide",
    36  }
    37  
    38  type networkInterface struct {
    39  	deviceName       string
    40  	label            string
    41  	ipv4Address      string
    42  	ipv4PrefixLength int
    43  	ipv4Gateway      string
    44  	ipv6Address      string
    45  	ipv6PrefixLength int
    46  	ipv6Gateway      string
    47  	adapterType      string // TODO: Make "adapter_type" argument
    48  	macAddress       string
    49  }
    50  
    51  type hardDisk struct {
    52  	name       string
    53  	size       int64
    54  	iops       int64
    55  	initType   string
    56  	vmdkPath   string
    57  	controller string
    58  	bootable   bool
    59  }
    60  
     61  // windowsOptConfig holds additional options vSphere can use when cloning Windows machines.
    62  type windowsOptConfig struct {
    63  	productKey         string
    64  	adminPassword      string
    65  	domainUser         string
    66  	domain             string
    67  	domainUserPassword string
    68  }
    69  
    70  type cdrom struct {
    71  	datastore string
    72  	path      string
    73  }
    74  
    75  type memoryAllocation struct {
    76  	reservation int64
    77  }
    78  
    79  type virtualMachine struct {
    80  	name                  string
    81  	folder                string
    82  	datacenter            string
    83  	cluster               string
    84  	resourcePool          string
    85  	datastore             string
    86  	vcpu                  int32
    87  	memoryMb              int64
    88  	memoryAllocation      memoryAllocation
    89  	template              string
    90  	networkInterfaces     []networkInterface
    91  	hardDisks             []hardDisk
    92  	cdroms                []cdrom
    93  	domain                string
    94  	timeZone              string
    95  	dnsSuffixes           []string
    96  	dnsServers            []string
    97  	hasBootableVmdk       bool
    98  	linkedClone           bool
    99  	skipCustomization     bool
   100  	enableDiskUUID        bool
   101  	windowsOptionalConfig windowsOptConfig
   102  	customConfigurations  map[string](types.AnyType)
   103  }
   104  
   105  func (v virtualMachine) Path() string {
   106  	return vmPath(v.folder, v.name)
   107  }
   108  
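         // vmPath joins an optional folder and the virtual machine name into the
         // inventory path used as the resource ID; for example (illustrative values),
         // vmPath("folder1", "vm-01") returns "folder1/vm-01" and vmPath("", "vm-01")
         // returns "vm-01".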
   109  func vmPath(folder string, name string) string {
   110  	var path string
   111  	if len(folder) > 0 {
   112  		path += folder + "/"
   113  	}
   114  	return path + name
   115  }
   116  
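         // resourceVSphereVirtualMachine defines the vsphere_virtual_machine resource
         // schema. The top-level "gateway" argument and the per-interface "ip_address"
         // and "subnet_mask" arguments are deprecated in favor of
         // "network_interface.ipv4_gateway", "ipv4_address", and "ipv4_prefix_length".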
   117  func resourceVSphereVirtualMachine() *schema.Resource {
   118  	return &schema.Resource{
   119  		Create: resourceVSphereVirtualMachineCreate,
   120  		Read:   resourceVSphereVirtualMachineRead,
   121  		Update: resourceVSphereVirtualMachineUpdate,
   122  		Delete: resourceVSphereVirtualMachineDelete,
   123  
   124  		SchemaVersion: 1,
   125  		MigrateState:  resourceVSphereVirtualMachineMigrateState,
   126  
   127  		Schema: map[string]*schema.Schema{
   128  			"name": &schema.Schema{
   129  				Type:     schema.TypeString,
   130  				Required: true,
   131  				ForceNew: true,
   132  			},
   133  
   134  			"folder": &schema.Schema{
   135  				Type:     schema.TypeString,
   136  				Optional: true,
   137  				ForceNew: true,
   138  			},
   139  
   140  			"vcpu": &schema.Schema{
   141  				Type:     schema.TypeInt,
   142  				Required: true,
   143  			},
   144  
   145  			"memory": &schema.Schema{
   146  				Type:     schema.TypeInt,
   147  				Required: true,
   148  			},
   149  
   150  			"memory_reservation": &schema.Schema{
   151  				Type:     schema.TypeInt,
   152  				Optional: true,
   153  				Default:  0,
   154  				ForceNew: true,
   155  			},
   156  
   157  			"datacenter": &schema.Schema{
   158  				Type:     schema.TypeString,
   159  				Optional: true,
   160  				ForceNew: true,
   161  			},
   162  
   163  			"cluster": &schema.Schema{
   164  				Type:     schema.TypeString,
   165  				Optional: true,
   166  				ForceNew: true,
   167  			},
   168  
   169  			"resource_pool": &schema.Schema{
   170  				Type:     schema.TypeString,
   171  				Optional: true,
   172  				ForceNew: true,
   173  			},
   174  
   175  			"linked_clone": &schema.Schema{
   176  				Type:     schema.TypeBool,
   177  				Optional: true,
   178  				Default:  false,
   179  				ForceNew: true,
   180  			},
   181  			"gateway": &schema.Schema{
   182  				Type:       schema.TypeString,
   183  				Optional:   true,
   184  				ForceNew:   true,
   185  				Deprecated: "Please use network_interface.ipv4_gateway",
   186  			},
   187  
   188  			"domain": &schema.Schema{
   189  				Type:     schema.TypeString,
   190  				Optional: true,
   191  				ForceNew: true,
   192  				Default:  "vsphere.local",
   193  			},
   194  
   195  			"time_zone": &schema.Schema{
   196  				Type:     schema.TypeString,
   197  				Optional: true,
   198  				ForceNew: true,
   199  				Default:  "Etc/UTC",
   200  			},
   201  
   202  			"dns_suffixes": &schema.Schema{
   203  				Type:     schema.TypeList,
   204  				Optional: true,
   205  				Elem:     &schema.Schema{Type: schema.TypeString},
   206  				ForceNew: true,
   207  			},
   208  
   209  			"dns_servers": &schema.Schema{
   210  				Type:     schema.TypeList,
   211  				Optional: true,
   212  				Elem:     &schema.Schema{Type: schema.TypeString},
   213  				ForceNew: true,
   214  			},
   215  
   216  			"skip_customization": &schema.Schema{
   217  				Type:     schema.TypeBool,
   218  				Optional: true,
   219  				ForceNew: true,
   220  				Default:  false,
   221  			},
   222  
   223  			"enable_disk_uuid": &schema.Schema{
   224  				Type:     schema.TypeBool,
   225  				Optional: true,
   226  				ForceNew: true,
   227  				Default:  false,
   228  			},
   229  
   230  			"uuid": &schema.Schema{
   231  				Type:     schema.TypeString,
   232  				Computed: true,
   233  			},
   234  
   235  			"custom_configuration_parameters": &schema.Schema{
   236  				Type:     schema.TypeMap,
   237  				Optional: true,
   238  				ForceNew: true,
   239  			},
   240  
   241  			"windows_opt_config": &schema.Schema{
   242  				Type:     schema.TypeList,
   243  				Optional: true,
   244  				ForceNew: true,
   245  				Elem: &schema.Resource{
   246  					Schema: map[string]*schema.Schema{
   247  						"product_key": &schema.Schema{
   248  							Type:     schema.TypeString,
   249  							Optional: true,
   250  							ForceNew: true,
   251  						},
   252  
   253  						"admin_password": &schema.Schema{
   254  							Type:     schema.TypeString,
   255  							Optional: true,
   256  							ForceNew: true,
   257  						},
   258  
   259  						"domain_user": &schema.Schema{
   260  							Type:     schema.TypeString,
   261  							Optional: true,
   262  							ForceNew: true,
   263  						},
   264  
   265  						"domain": &schema.Schema{
   266  							Type:     schema.TypeString,
   267  							Optional: true,
   268  							ForceNew: true,
   269  						},
   270  
   271  						"domain_user_password": &schema.Schema{
   272  							Type:     schema.TypeString,
   273  							Optional: true,
   274  							ForceNew: true,
   275  						},
   276  					},
   277  				},
   278  			},
   279  
   280  			"network_interface": &schema.Schema{
   281  				Type:     schema.TypeList,
   282  				Required: true,
   283  				ForceNew: true,
   284  				Elem: &schema.Resource{
   285  					Schema: map[string]*schema.Schema{
   286  						"label": &schema.Schema{
   287  							Type:     schema.TypeString,
   288  							Required: true,
   289  							ForceNew: true,
   290  						},
   291  
   292  						"ip_address": &schema.Schema{
   293  							Type:       schema.TypeString,
   294  							Optional:   true,
   295  							Computed:   true,
   296  							Deprecated: "Please use ipv4_address",
   297  						},
   298  
   299  						"subnet_mask": &schema.Schema{
   300  							Type:       schema.TypeString,
   301  							Optional:   true,
   302  							Computed:   true,
   303  							Deprecated: "Please use ipv4_prefix_length",
   304  						},
   305  
   306  						"ipv4_address": &schema.Schema{
   307  							Type:     schema.TypeString,
   308  							Optional: true,
   309  							Computed: true,
   310  						},
   311  
   312  						"ipv4_prefix_length": &schema.Schema{
   313  							Type:     schema.TypeInt,
   314  							Optional: true,
   315  							Computed: true,
   316  						},
   317  
   318  						"ipv4_gateway": &schema.Schema{
   319  							Type:     schema.TypeString,
   320  							Optional: true,
   321  							Computed: true,
   322  						},
   323  
   324  						"ipv6_address": &schema.Schema{
   325  							Type:     schema.TypeString,
   326  							Optional: true,
   327  							Computed: true,
   328  						},
   329  
   330  						"ipv6_prefix_length": &schema.Schema{
   331  							Type:     schema.TypeInt,
   332  							Optional: true,
   333  							Computed: true,
   334  						},
   335  
   336  						"ipv6_gateway": &schema.Schema{
   337  							Type:     schema.TypeString,
   338  							Optional: true,
   339  							Computed: true,
   340  						},
   341  
   342  						"adapter_type": &schema.Schema{
   343  							Type:     schema.TypeString,
   344  							Optional: true,
   345  							ForceNew: true,
   346  						},
   347  
   348  						"mac_address": &schema.Schema{
   349  							Type:     schema.TypeString,
   350  							Optional: true,
   351  							Computed: true,
   352  						},
   353  					},
   354  				},
   355  			},
   356  
   357  			"disk": &schema.Schema{
   358  				Type:     schema.TypeSet,
   359  				Required: true,
   360  				Elem: &schema.Resource{
   361  					Schema: map[string]*schema.Schema{
   362  						"uuid": &schema.Schema{
   363  							Type:     schema.TypeString,
   364  							Computed: true,
   365  						},
   366  
   367  						"key": &schema.Schema{
   368  							Type:     schema.TypeInt,
   369  							Computed: true,
   370  						},
   371  
   372  						"template": &schema.Schema{
   373  							Type:     schema.TypeString,
   374  							Optional: true,
   375  						},
   376  
   377  						"type": &schema.Schema{
   378  							Type:     schema.TypeString,
   379  							Optional: true,
   380  							Default:  "eager_zeroed",
   381  							ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
   382  								value := v.(string)
   383  								if value != "thin" && value != "eager_zeroed" && value != "lazy" {
   384  									errors = append(errors, fmt.Errorf(
   385  										"only 'thin', 'eager_zeroed', and 'lazy' are supported values for 'type'"))
   386  								}
   387  								return
   388  							},
   389  						},
   390  
   391  						"datastore": &schema.Schema{
   392  							Type:     schema.TypeString,
   393  							Optional: true,
   394  						},
   395  
   396  						"size": &schema.Schema{
   397  							Type:     schema.TypeInt,
   398  							Optional: true,
   399  						},
   400  
   401  						"name": &schema.Schema{
   402  							Type:     schema.TypeString,
   403  							Optional: true,
   404  						},
   405  
   406  						"iops": &schema.Schema{
   407  							Type:     schema.TypeInt,
   408  							Optional: true,
   409  						},
   410  
   411  						"vmdk": &schema.Schema{
   412  							// TODO: Add ValidateFunc to confirm path exists
   413  							Type:     schema.TypeString,
   414  							Optional: true,
   415  						},
   416  
   417  						"bootable": &schema.Schema{
   418  							Type:     schema.TypeBool,
   419  							Optional: true,
   420  						},
   421  
   422  						"keep_on_remove": &schema.Schema{
   423  							Type:     schema.TypeBool,
   424  							Optional: true,
   425  						},
   426  
   427  						"controller_type": &schema.Schema{
   428  							Type:     schema.TypeString,
   429  							Optional: true,
   430  							Default:  "scsi",
   431  							ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
   432  								value := v.(string)
   433  								found := false
   434  								for _, t := range DiskControllerTypes {
   435  									if t == value {
   436  										found = true
   437  									}
   438  								}
   439  								if !found {
   440  									errors = append(errors, fmt.Errorf(
   441  										"Supported values for 'controller_type' are %v", strings.Join(DiskControllerTypes, ", ")))
   442  								}
   443  								return
   444  							},
   445  						},
   446  					},
   447  				},
   448  			},
   449  
   450  			"cdrom": &schema.Schema{
   451  				Type:     schema.TypeList,
   452  				Optional: true,
   453  				ForceNew: true,
   454  				Elem: &schema.Resource{
   455  					Schema: map[string]*schema.Schema{
   456  						"datastore": &schema.Schema{
   457  							Type:     schema.TypeString,
   458  							Required: true,
   459  							ForceNew: true,
   460  						},
   461  
   462  						"path": &schema.Schema{
   463  							Type:     schema.TypeString,
   464  							Required: true,
   465  							ForceNew: true,
   466  						},
   467  					},
   468  				},
   469  			},
   470  		},
   471  	}
   472  }
   473  
   474  func resourceVSphereVirtualMachineUpdate(d *schema.ResourceData, meta interface{}) error {
   475  	// flag if changes have to be applied
   476  	hasChanges := false
   477  	// flag if changes have to be done when powered off
   478  	rebootRequired := false
   479  
   480  	// make config spec
   481  	configSpec := types.VirtualMachineConfigSpec{}
   482  
   483  	if d.HasChange("vcpu") {
   484  		configSpec.NumCPUs = int32(d.Get("vcpu").(int))
   485  		hasChanges = true
   486  		rebootRequired = true
   487  	}
   488  
   489  	if d.HasChange("memory") {
   490  		configSpec.MemoryMB = int64(d.Get("memory").(int))
   491  		hasChanges = true
   492  		rebootRequired = true
   493  	}
   494  
   495  	client := meta.(*govmomi.Client)
   496  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
   497  	if err != nil {
   498  		return err
   499  	}
   500  	finder := find.NewFinder(client.Client, true)
   501  	finder = finder.SetDatacenter(dc)
   502  
   503  	vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string)))
   504  	if err != nil {
   505  		return err
   506  	}
   507  
   508  	if d.HasChange("disk") {
   509  		hasChanges = true
   510  		oldDisks, newDisks := d.GetChange("disk")
   511  		oldDiskSet := oldDisks.(*schema.Set)
   512  		newDiskSet := newDisks.(*schema.Set)
   513  
   514  		addedDisks := newDiskSet.Difference(oldDiskSet)
   515  		removedDisks := oldDiskSet.Difference(newDiskSet)
   516  
   517  		// Removed disks
   518  		for _, diskRaw := range removedDisks.List() {
   519  			if disk, ok := diskRaw.(map[string]interface{}); ok {
   520  				devices, err := vm.Device(context.TODO())
   521  				if err != nil {
   522  					return fmt.Errorf("[ERROR] Update Remove Disk - Could not get virtual device list: %v", err)
   523  				}
   524  				virtualDisk := devices.FindByKey(int32(disk["key"].(int)))
   525  
   526  				keep := false
   527  				if v, ok := disk["keep_on_remove"].(bool); ok {
   528  					keep = v
   529  				}
   530  
   531  				err = vm.RemoveDevice(context.TODO(), keep, virtualDisk)
   532  				if err != nil {
   533  					return fmt.Errorf("[ERROR] Update Remove Disk - Error removing disk: %v", err)
   534  				}
   535  			}
   536  		}
   537  		// Added disks
   538  		for _, diskRaw := range addedDisks.List() {
   539  			if disk, ok := diskRaw.(map[string]interface{}); ok {
   540  
   541  				var datastore *object.Datastore
   542  				if disk["datastore"] == "" {
   543  					datastore, err = finder.DefaultDatastore(context.TODO())
   544  					if err != nil {
    545  						return fmt.Errorf("[ERROR] Update Add Disk - Error finding datastore: %v", err)
   546  					}
   547  				} else {
   548  					datastore, err = finder.Datastore(context.TODO(), disk["datastore"].(string))
   549  					if err != nil {
   550  						log.Printf("[ERROR] Couldn't find datastore %v.  %s", disk["datastore"].(string), err)
   551  						return err
   552  					}
   553  				}
   554  
   555  				var size int64
   556  				if disk["size"] == 0 {
   557  					size = 0
   558  				} else {
   559  					size = int64(disk["size"].(int))
   560  				}
   561  				iops := int64(disk["iops"].(int))
   562  				controller_type := disk["controller_type"].(string)
   563  
    564  				var mo mo.VirtualMachine
    565  				err = vm.Properties(context.TODO(), vm.Reference(), []string{"summary", "config"}, &mo)
         				if err != nil {
         					return fmt.Errorf("[ERROR] Update Add Disk - Could not get VM properties: %v", err)
         				}
   566  
   567  				var diskPath string
   568  				switch {
   569  				case disk["vmdk"] != "":
   570  					diskPath = disk["vmdk"].(string)
   571  				case disk["name"] != "":
   572  					snapshotFullDir := mo.Config.Files.SnapshotDirectory
   573  					split := strings.Split(snapshotFullDir, " ")
   574  					if len(split) != 2 {
    575  						return fmt.Errorf("[ERROR] resourceVSphereVirtualMachineUpdate - failed to split snapshot directory: %v", snapshotFullDir)
   576  					}
   577  					vmWorkingPath := split[1]
   578  					diskPath = vmWorkingPath + disk["name"].(string)
   579  				default:
   580  					return fmt.Errorf("[ERROR] resourceVSphereVirtualMachineUpdate - Neither vmdk path nor vmdk name was given")
   581  				}
   582  
   583  				var initType string
   584  				if disk["type"] != "" {
   585  					initType = disk["type"].(string)
   586  				} else {
   587  					initType = "thin"
   588  				}
   589  
   590  				log.Printf("[INFO] Attaching disk: %v", diskPath)
   591  				err = addHardDisk(vm, size, iops, initType, datastore, diskPath, controller_type)
   592  				if err != nil {
   593  					log.Printf("[ERROR] Add Hard Disk Failed: %v", err)
   594  					return err
   595  				}
   596  			}
   597  			if err != nil {
   598  				return err
   599  			}
   600  		}
   601  	}
   602  
   603  	// do nothing if there are no changes
   604  	if !hasChanges {
   605  		return nil
   606  	}
   607  
   608  	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
   609  
   610  	if rebootRequired {
   611  		log.Printf("[INFO] Shutting down virtual machine: %s", d.Id())
   612  
   613  		task, err := vm.PowerOff(context.TODO())
   614  		if err != nil {
   615  			return err
   616  		}
   617  
   618  		err = task.Wait(context.TODO())
   619  		if err != nil {
   620  			return err
   621  		}
   622  	}
   623  
   624  	log.Printf("[INFO] Reconfiguring virtual machine: %s", d.Id())
   625  
    626  	task, err := vm.Reconfigure(context.TODO(), configSpec)
    627  	if err != nil {
    628  		log.Printf("[ERROR] %s", err)
         		return err
    629  	}
    630  
    631  	err = task.Wait(context.TODO())
    632  	if err != nil {
    633  		log.Printf("[ERROR] %s", err)
         		return err
    634  	}
   635  
   636  	if rebootRequired {
   637  		task, err = vm.PowerOn(context.TODO())
   638  		if err != nil {
   639  			return err
   640  		}
   641  
   642  		err = task.Wait(context.TODO())
   643  		if err != nil {
   644  			log.Printf("[ERROR] %s", err)
   645  		}
   646  	}
   647  
   648  	return resourceVSphereVirtualMachineRead(d, meta)
   649  }
   650  
   651  func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error {
   652  	client := meta.(*govmomi.Client)
   653  
   654  	vm := virtualMachine{
   655  		name:     d.Get("name").(string),
   656  		vcpu:     int32(d.Get("vcpu").(int)),
   657  		memoryMb: int64(d.Get("memory").(int)),
   658  		memoryAllocation: memoryAllocation{
   659  			reservation: int64(d.Get("memory_reservation").(int)),
   660  		},
   661  	}
   662  
   663  	if v, ok := d.GetOk("folder"); ok {
   664  		vm.folder = v.(string)
   665  	}
   666  
   667  	if v, ok := d.GetOk("datacenter"); ok {
   668  		vm.datacenter = v.(string)
   669  	}
   670  
   671  	if v, ok := d.GetOk("cluster"); ok {
   672  		vm.cluster = v.(string)
   673  	}
   674  
   675  	if v, ok := d.GetOk("resource_pool"); ok {
   676  		vm.resourcePool = v.(string)
   677  	}
   678  
   679  	if v, ok := d.GetOk("domain"); ok {
   680  		vm.domain = v.(string)
   681  	}
   682  
   683  	if v, ok := d.GetOk("time_zone"); ok {
   684  		vm.timeZone = v.(string)
   685  	}
   686  
   687  	if v, ok := d.GetOk("linked_clone"); ok {
   688  		vm.linkedClone = v.(bool)
   689  	}
   690  
   691  	if v, ok := d.GetOk("skip_customization"); ok {
   692  		vm.skipCustomization = v.(bool)
   693  	}
   694  
   695  	if v, ok := d.GetOk("enable_disk_uuid"); ok {
   696  		vm.enableDiskUUID = v.(bool)
   697  	}
   698  
   699  	if raw, ok := d.GetOk("dns_suffixes"); ok {
   700  		for _, v := range raw.([]interface{}) {
   701  			vm.dnsSuffixes = append(vm.dnsSuffixes, v.(string))
   702  		}
   703  	} else {
   704  		vm.dnsSuffixes = DefaultDNSSuffixes
   705  	}
   706  
   707  	if raw, ok := d.GetOk("dns_servers"); ok {
   708  		for _, v := range raw.([]interface{}) {
   709  			vm.dnsServers = append(vm.dnsServers, v.(string))
   710  		}
   711  	} else {
   712  		vm.dnsServers = DefaultDNSServers
   713  	}
   714  
   715  	if vL, ok := d.GetOk("custom_configuration_parameters"); ok {
   716  		if custom_configs, ok := vL.(map[string]interface{}); ok {
   717  			custom := make(map[string]types.AnyType)
   718  			for k, v := range custom_configs {
   719  				custom[k] = v
   720  			}
   721  			vm.customConfigurations = custom
   722  			log.Printf("[DEBUG] custom_configuration_parameters init: %v", vm.customConfigurations)
   723  		}
   724  	}
   725  
   726  	if vL, ok := d.GetOk("network_interface"); ok {
   727  		networks := make([]networkInterface, len(vL.([]interface{})))
   728  		for i, v := range vL.([]interface{}) {
   729  			network := v.(map[string]interface{})
   730  			networks[i].label = network["label"].(string)
   731  			if v, ok := network["ip_address"].(string); ok && v != "" {
   732  				networks[i].ipv4Address = v
   733  			}
   734  			if v, ok := d.GetOk("gateway"); ok {
   735  				networks[i].ipv4Gateway = v.(string)
   736  			}
   737  			if v, ok := network["subnet_mask"].(string); ok && v != "" {
   738  				ip := net.ParseIP(v).To4()
   739  				if ip != nil {
   740  					mask := net.IPv4Mask(ip[0], ip[1], ip[2], ip[3])
   741  					pl, _ := mask.Size()
   742  					networks[i].ipv4PrefixLength = pl
   743  				} else {
   744  					return fmt.Errorf("subnet_mask parameter is invalid.")
   745  				}
   746  			}
   747  			if v, ok := network["ipv4_address"].(string); ok && v != "" {
   748  				networks[i].ipv4Address = v
   749  			}
   750  			if v, ok := network["ipv4_prefix_length"].(int); ok && v != 0 {
   751  				networks[i].ipv4PrefixLength = v
   752  			}
   753  			if v, ok := network["ipv4_gateway"].(string); ok && v != "" {
   754  				networks[i].ipv4Gateway = v
   755  			}
   756  			if v, ok := network["ipv6_address"].(string); ok && v != "" {
   757  				networks[i].ipv6Address = v
   758  			}
   759  			if v, ok := network["ipv6_prefix_length"].(int); ok && v != 0 {
   760  				networks[i].ipv6PrefixLength = v
   761  			}
   762  			if v, ok := network["ipv6_gateway"].(string); ok && v != "" {
   763  				networks[i].ipv6Gateway = v
   764  			}
   765  			if v, ok := network["mac_address"].(string); ok && v != "" {
   766  				networks[i].macAddress = v
   767  			}
   768  		}
   769  		vm.networkInterfaces = networks
   770  		log.Printf("[DEBUG] network_interface init: %v", networks)
   771  	}
   772  
   773  	if vL, ok := d.GetOk("windows_opt_config"); ok {
   774  		var winOpt windowsOptConfig
   775  		custom_configs := (vL.([]interface{}))[0].(map[string]interface{})
   776  		if v, ok := custom_configs["admin_password"].(string); ok && v != "" {
   777  			winOpt.adminPassword = v
   778  		}
   779  		if v, ok := custom_configs["domain"].(string); ok && v != "" {
   780  			winOpt.domain = v
   781  		}
   782  		if v, ok := custom_configs["domain_user"].(string); ok && v != "" {
   783  			winOpt.domainUser = v
   784  		}
   785  		if v, ok := custom_configs["product_key"].(string); ok && v != "" {
   786  			winOpt.productKey = v
   787  		}
   788  		if v, ok := custom_configs["domain_user_password"].(string); ok && v != "" {
   789  			winOpt.domainUserPassword = v
   790  		}
   791  		vm.windowsOptionalConfig = winOpt
   792  		log.Printf("[DEBUG] windows config init: %v", winOpt)
   793  	}
   794  
   795  	if vL, ok := d.GetOk("disk"); ok {
   796  		if diskSet, ok := vL.(*schema.Set); ok {
   797  
   798  			disks := []hardDisk{}
   799  			for _, value := range diskSet.List() {
   800  				disk := value.(map[string]interface{})
   801  				newDisk := hardDisk{}
   802  
   803  				if v, ok := disk["template"].(string); ok && v != "" {
   804  					if v, ok := disk["name"].(string); ok && v != "" {
   805  						return fmt.Errorf("Cannot specify name of a template")
   806  					}
   807  					vm.template = v
   808  					if vm.hasBootableVmdk {
   809  						return fmt.Errorf("[ERROR] Only one bootable disk or template may be given")
   810  					}
   811  					vm.hasBootableVmdk = true
   812  				}
   813  
   814  				if v, ok := disk["type"].(string); ok && v != "" {
   815  					newDisk.initType = v
   816  				}
   817  
   818  				if v, ok := disk["datastore"].(string); ok && v != "" {
   819  					vm.datastore = v
   820  				}
   821  
   822  				if v, ok := disk["size"].(int); ok && v != 0 {
   823  					if v, ok := disk["template"].(string); ok && v != "" {
   824  						return fmt.Errorf("Cannot specify size of a template")
   825  					}
   826  
   827  					if v, ok := disk["name"].(string); ok && v != "" {
   828  						newDisk.name = v
   829  					} else {
   830  						return fmt.Errorf("[ERROR] Disk name must be provided when creating a new disk")
   831  					}
   832  
   833  					newDisk.size = int64(v)
   834  				}
   835  
   836  				if v, ok := disk["iops"].(int); ok && v != 0 {
   837  					newDisk.iops = int64(v)
   838  				}
   839  
   840  				if v, ok := disk["controller_type"].(string); ok && v != "" {
   841  					newDisk.controller = v
   842  				}
   843  
   844  				if vVmdk, ok := disk["vmdk"].(string); ok && vVmdk != "" {
   845  					if v, ok := disk["template"].(string); ok && v != "" {
   846  						return fmt.Errorf("Cannot specify a vmdk for a template")
   847  					}
    848  					if v, ok := disk["size"].(int); ok && v != 0 {
   849  						return fmt.Errorf("Cannot specify size of a vmdk")
   850  					}
   851  					if v, ok := disk["name"].(string); ok && v != "" {
   852  						return fmt.Errorf("Cannot specify name of a vmdk")
   853  					}
   854  					if vBootable, ok := disk["bootable"].(bool); ok {
   855  						if vBootable && vm.hasBootableVmdk {
   856  							return fmt.Errorf("[ERROR] Only one bootable disk or template may be given")
   857  						}
   858  						newDisk.bootable = vBootable
   859  						vm.hasBootableVmdk = vm.hasBootableVmdk || vBootable
   860  					}
   861  					newDisk.vmdkPath = vVmdk
   862  				}
   863  				// Preserves order so bootable disk is first
    864  				if newDisk.bootable || disk["template"] != "" {
   865  					disks = append([]hardDisk{newDisk}, disks...)
   866  				} else {
   867  					disks = append(disks, newDisk)
   868  				}
   869  			}
   870  			vm.hardDisks = disks
   871  			log.Printf("[DEBUG] disk init: %v", disks)
   872  		}
   873  	}
   874  
   875  	if vL, ok := d.GetOk("cdrom"); ok {
   876  		cdroms := make([]cdrom, len(vL.([]interface{})))
   877  		for i, v := range vL.([]interface{}) {
   878  			c := v.(map[string]interface{})
   879  			if v, ok := c["datastore"].(string); ok && v != "" {
   880  				cdroms[i].datastore = v
   881  			} else {
   882  				return fmt.Errorf("Datastore argument must be specified when attaching a cdrom image.")
   883  			}
   884  			if v, ok := c["path"].(string); ok && v != "" {
   885  				cdroms[i].path = v
   886  			} else {
   887  				return fmt.Errorf("Path argument must be specified when attaching a cdrom image.")
   888  			}
   889  		}
   890  		vm.cdroms = cdroms
   891  		log.Printf("[DEBUG] cdrom init: %v", cdroms)
   892  	}
   893  
   894  	err := vm.setupVirtualMachine(client)
   895  	if err != nil {
   896  		return err
   897  	}
   898  
   899  	d.SetId(vm.Path())
   900  	log.Printf("[INFO] Created virtual machine: %s", d.Id())
   901  
   902  	return resourceVSphereVirtualMachineRead(d, meta)
   903  }
   904  
   905  func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{}) error {
   906  	log.Printf("[DEBUG] virtual machine resource data: %#v", d)
   907  	client := meta.(*govmomi.Client)
   908  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
   909  	if err != nil {
   910  		return err
   911  	}
   912  	finder := find.NewFinder(client.Client, true)
   913  	finder = finder.SetDatacenter(dc)
   914  
   915  	vm, err := finder.VirtualMachine(context.TODO(), d.Id())
   916  	if err != nil {
   917  		d.SetId("")
   918  		return nil
   919  	}
   920  
   921  	state, err := vm.PowerState(context.TODO())
   922  	if err != nil {
   923  		return err
   924  	}
   925  
   926  	if state == types.VirtualMachinePowerStatePoweredOn {
   927  		// wait for interfaces to appear
   928  		_, err = vm.WaitForNetIP(context.TODO(), true)
   929  		if err != nil {
   930  			return err
   931  		}
   932  	}
   933  
   934  	var mvm mo.VirtualMachine
   935  	collector := property.DefaultCollector(client.Client)
   936  	if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"guest", "summary", "datastore", "config"}, &mvm); err != nil {
   937  		return err
   938  	}
   939  
   940  	log.Printf("[DEBUG] Datacenter - %#v", dc)
   941  	log.Printf("[DEBUG] mvm.Summary.Config - %#v", mvm.Summary.Config)
    942  	log.Printf("[DEBUG] mvm.Config - %#v", mvm.Config)
   943  	log.Printf("[DEBUG] mvm.Guest.Net - %#v", mvm.Guest.Net)
   944  
   945  	disks := make([]map[string]interface{}, 0)
   946  	templateDisk := make(map[string]interface{}, 1)
   947  	for _, device := range mvm.Config.Hardware.Device {
   948  		if vd, ok := device.(*types.VirtualDisk); ok {
   949  
   950  			virtualDevice := vd.GetVirtualDevice()
   951  
   952  			backingInfo := virtualDevice.Backing
   953  			var diskFullPath string
   954  			var diskUuid string
   955  			if v, ok := backingInfo.(*types.VirtualDiskFlatVer2BackingInfo); ok {
   956  				diskFullPath = v.FileName
   957  				diskUuid = v.Uuid
   958  			} else if v, ok := backingInfo.(*types.VirtualDiskSparseVer2BackingInfo); ok {
   959  				diskFullPath = v.FileName
   960  				diskUuid = v.Uuid
   961  			}
   962  			log.Printf("[DEBUG] resourceVSphereVirtualMachineRead - Analyzing disk: %v", diskFullPath)
   963  
   964  			// Separate datastore and path
   965  			diskFullPathSplit := strings.Split(diskFullPath, " ")
   966  			if len(diskFullPathSplit) != 2 {
   967  				return fmt.Errorf("[ERROR] Failed trying to parse disk path: %v", diskFullPath)
   968  			}
   969  			diskPath := diskFullPathSplit[1]
   970  			// Isolate filename
   971  			diskNameSplit := strings.Split(diskPath, "/")
   972  			diskName := diskNameSplit[len(diskNameSplit)-1]
   973  			// Remove possible extension
   974  			diskName = strings.Split(diskName, ".")[0]
   975  
   976  			if prevDisks, ok := d.GetOk("disk"); ok {
   977  				if prevDiskSet, ok := prevDisks.(*schema.Set); ok {
   978  					for _, v := range prevDiskSet.List() {
   979  						prevDisk := v.(map[string]interface{})
   980  
   981  						// We're guaranteed only one template disk.  Passing value directly through since templates should be immutable
   982  						if prevDisk["template"] != "" {
   983  							if len(templateDisk) == 0 {
   984  								templateDisk = prevDisk
   985  								disks = append(disks, templateDisk)
   986  								break
   987  							}
   988  						}
   989  
    990  						// prevDisk["name"] is only set when this provider created a new disk
    991  						// for the user.
    992  						// size case: the user set a name, so compare it with the filename parsed from the backing (without path or .vmdk extension).
    993  						// vmdk case: compare prevDisk["vmdk"] with the backing file path.
   994  						if diskName == prevDisk["name"] || diskPath == prevDisk["vmdk"] {
   995  
   996  							prevDisk["key"] = virtualDevice.Key
   997  							prevDisk["uuid"] = diskUuid
   998  
   999  							disks = append(disks, prevDisk)
  1000  							break
  1001  						}
  1002  					}
  1003  				}
  1004  			}
  1005  			log.Printf("[DEBUG] disks: %#v", disks)
  1006  		}
  1007  	}
  1008  	err = d.Set("disk", disks)
  1009  	if err != nil {
  1010  		return fmt.Errorf("Invalid disks to set: %#v", disks)
  1011  	}
  1012  
  1013  	networkInterfaces := make([]map[string]interface{}, 0)
  1014  	for _, v := range mvm.Guest.Net {
  1015  		if v.DeviceConfigId >= 0 {
  1016  			log.Printf("[DEBUG] v.Network - %#v", v.Network)
  1017  			networkInterface := make(map[string]interface{})
  1018  			networkInterface["label"] = v.Network
  1019  			networkInterface["mac_address"] = v.MacAddress
  1020  			for _, ip := range v.IpConfig.IpAddress {
  1021  				p := net.ParseIP(ip.IpAddress)
  1022  				if p.To4() != nil {
  1023  					log.Printf("[DEBUG] p.String - %#v", p.String())
  1024  					log.Printf("[DEBUG] ip.PrefixLength - %#v", ip.PrefixLength)
  1025  					networkInterface["ipv4_address"] = p.String()
  1026  					networkInterface["ipv4_prefix_length"] = ip.PrefixLength
  1027  				} else if p.To16() != nil {
  1028  					log.Printf("[DEBUG] p.String - %#v", p.String())
  1029  					log.Printf("[DEBUG] ip.PrefixLength - %#v", ip.PrefixLength)
  1030  					networkInterface["ipv6_address"] = p.String()
  1031  					networkInterface["ipv6_prefix_length"] = ip.PrefixLength
  1032  				}
  1033  				log.Printf("[DEBUG] networkInterface: %#v", networkInterface)
  1034  			}
  1035  			log.Printf("[DEBUG] networkInterface: %#v", networkInterface)
  1036  			networkInterfaces = append(networkInterfaces, networkInterface)
  1037  		}
  1038  	}
  1039  	if mvm.Guest.IpStack != nil {
  1040  		for _, v := range mvm.Guest.IpStack {
  1041  			if v.IpRouteConfig != nil && v.IpRouteConfig.IpRoute != nil {
  1042  				for _, route := range v.IpRouteConfig.IpRoute {
  1043  					if route.Gateway.Device != "" {
  1044  						gatewaySetting := ""
  1045  						if route.Network == "::" {
  1046  							gatewaySetting = "ipv6_gateway"
  1047  						} else if route.Network == "0.0.0.0" {
  1048  							gatewaySetting = "ipv4_gateway"
  1049  						}
  1050  						if gatewaySetting != "" {
  1051  							deviceID, err := strconv.Atoi(route.Gateway.Device)
  1052  							if err != nil {
  1053  								log.Printf("[WARN] error at processing %s of device id %#v: %#v", gatewaySetting, route.Gateway.Device, err)
  1054  							} else {
  1055  								log.Printf("[DEBUG] %s of device id %d: %s", gatewaySetting, deviceID, route.Gateway.IpAddress)
  1056  								networkInterfaces[deviceID][gatewaySetting] = route.Gateway.IpAddress
  1057  							}
  1058  						}
  1059  					}
  1060  				}
  1061  			}
  1062  		}
  1063  	}
  1064  	log.Printf("[DEBUG] networkInterfaces: %#v", networkInterfaces)
  1065  	err = d.Set("network_interface", networkInterfaces)
  1066  	if err != nil {
  1067  		return fmt.Errorf("Invalid network interfaces to set: %#v", networkInterfaces)
  1068  	}
  1069  
  1070  	if len(networkInterfaces) > 0 {
  1071  		if _, ok := networkInterfaces[0]["ipv4_address"]; ok {
  1072  			log.Printf("[DEBUG] ip address: %v", networkInterfaces[0]["ipv4_address"].(string))
  1073  			d.SetConnInfo(map[string]string{
  1074  				"type": "ssh",
  1075  				"host": networkInterfaces[0]["ipv4_address"].(string),
  1076  			})
  1077  		}
  1078  	}
  1079  
  1080  	var rootDatastore string
  1081  	for _, v := range mvm.Datastore {
  1082  		var md mo.Datastore
  1083  		if err := collector.RetrieveOne(context.TODO(), v, []string{"name", "parent"}, &md); err != nil {
  1084  			return err
  1085  		}
  1086  		if md.Parent.Type == "StoragePod" {
  1087  			var msp mo.StoragePod
  1088  			if err := collector.RetrieveOne(context.TODO(), *md.Parent, []string{"name"}, &msp); err != nil {
  1089  				return err
  1090  			}
  1091  			rootDatastore = msp.Name
  1092  			log.Printf("[DEBUG] %#v", msp.Name)
  1093  		} else {
  1094  			rootDatastore = md.Name
  1095  			log.Printf("[DEBUG] %#v", md.Name)
  1096  		}
  1097  		break
  1098  	}
  1099  
  1100  	d.Set("datacenter", dc)
  1101  	d.Set("memory", mvm.Summary.Config.MemorySizeMB)
  1102  	d.Set("memory_reservation", mvm.Summary.Config.MemoryReservation)
  1103  	d.Set("cpu", mvm.Summary.Config.NumCpu)
  1104  	d.Set("datastore", rootDatastore)
  1105  	d.Set("uuid", mvm.Summary.Config.Uuid)
  1106  
  1107  	return nil
  1108  }
  1109  
  1110  func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error {
  1111  	client := meta.(*govmomi.Client)
  1112  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
  1113  	if err != nil {
  1114  		return err
  1115  	}
  1116  	finder := find.NewFinder(client.Client, true)
  1117  	finder = finder.SetDatacenter(dc)
  1118  
  1119  	vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string)))
  1120  	if err != nil {
  1121  		return err
  1122  	}
  1123  	devices, err := vm.Device(context.TODO())
  1124  	if err != nil {
  1125  		log.Printf("[DEBUG] resourceVSphereVirtualMachineDelete - Failed to get device list: %v", err)
  1126  		return err
  1127  	}
  1128  
  1129  	log.Printf("[INFO] Deleting virtual machine: %s", d.Id())
  1130  	state, err := vm.PowerState(context.TODO())
  1131  	if err != nil {
  1132  		return err
  1133  	}
  1134  
  1135  	if state == types.VirtualMachinePowerStatePoweredOn {
  1136  		task, err := vm.PowerOff(context.TODO())
  1137  		if err != nil {
  1138  			return err
  1139  		}
  1140  
  1141  		err = task.Wait(context.TODO())
  1142  		if err != nil {
  1143  			return err
  1144  		}
  1145  	}
  1146  
  1147  	// Safely eject any disks the user marked as keep_on_remove
  1148  	if vL, ok := d.GetOk("disk"); ok {
  1149  		if diskSet, ok := vL.(*schema.Set); ok {
  1150  
  1151  			for _, value := range diskSet.List() {
  1152  				disk := value.(map[string]interface{})
  1153  
   1154  				if v, ok := disk["keep_on_remove"].(bool); ok && v {
  1155  					log.Printf("[DEBUG] not destroying %v", disk["name"])
  1156  					virtualDisk := devices.FindByKey(int32(disk["key"].(int)))
  1157  					err = vm.RemoveDevice(context.TODO(), true, virtualDisk)
  1158  					if err != nil {
   1159  						log.Printf("[ERROR] resourceVSphereVirtualMachineDelete - Error removing disk: %v", err)
  1160  						return err
  1161  					}
  1162  				}
  1163  			}
  1164  		}
  1165  	}
  1166  
  1167  	task, err := vm.Destroy(context.TODO())
  1168  	if err != nil {
  1169  		return err
  1170  	}
  1171  
  1172  	err = task.Wait(context.TODO())
  1173  	if err != nil {
  1174  		return err
  1175  	}
  1176  
  1177  	d.SetId("")
  1178  	return nil
  1179  }
  1180  
  1181  // addHardDisk adds a new Hard Disk to the VirtualMachine.
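         // size is given in GB (converted to KB below), an iops value of 0 means no
         // storage I/O limit, and diskType selects a "thin", "lazy" (lazy-zeroed), or
         // "eager_zeroed" disk backing.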
  1182  func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string, datastore *object.Datastore, diskPath string, controller_type string) error {
  1183  	devices, err := vm.Device(context.TODO())
  1184  	if err != nil {
  1185  		return err
  1186  	}
  1187  	log.Printf("[DEBUG] vm devices: %#v\n", devices)
  1188  
  1189  	var controller types.BaseVirtualController
  1190  	switch controller_type {
  1191  	case "scsi":
  1192  		controller, err = devices.FindDiskController(controller_type)
  1193  	case "scsi-lsi-parallel":
  1194  		controller = devices.PickController(&types.VirtualLsiLogicController{})
  1195  	case "scsi-buslogic":
  1196  		controller = devices.PickController(&types.VirtualBusLogicController{})
  1197  	case "scsi-paravirtual":
  1198  		controller = devices.PickController(&types.ParaVirtualSCSIController{})
  1199  	case "scsi-lsi-sas":
  1200  		controller = devices.PickController(&types.VirtualLsiLogicSASController{})
  1201  	case "ide":
  1202  		controller, err = devices.FindDiskController(controller_type)
  1203  	default:
  1204  		return fmt.Errorf("[ERROR] Unsupported disk controller provided: %v", controller_type)
  1205  	}
  1206  
  1207  	if err != nil || controller == nil {
  1208  		// Check if max number of scsi controller are already used
  1209  		diskControllers := getSCSIControllers(devices)
  1210  		if len(diskControllers) >= 4 {
  1211  			return fmt.Errorf("[ERROR] Maximum number of SCSI controllers created")
  1212  		}
  1213  
   1214  		log.Printf("[DEBUG] Couldn't find a %v controller. Creating one...", controller_type)
  1215  
  1216  		var c types.BaseVirtualDevice
  1217  		switch controller_type {
  1218  		case "scsi":
  1219  			// Create scsi controller
  1220  			c, err = devices.CreateSCSIController("scsi")
  1221  			if err != nil {
  1222  				return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err)
  1223  			}
  1224  		case "scsi-lsi-parallel":
  1225  			// Create scsi controller
  1226  			c, err = devices.CreateSCSIController("lsilogic")
  1227  			if err != nil {
  1228  				return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err)
  1229  			}
  1230  		case "scsi-buslogic":
  1231  			// Create scsi controller
  1232  			c, err = devices.CreateSCSIController("buslogic")
  1233  			if err != nil {
  1234  				return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err)
  1235  			}
  1236  		case "scsi-paravirtual":
  1237  			// Create scsi controller
  1238  			c, err = devices.CreateSCSIController("pvscsi")
  1239  			if err != nil {
  1240  				return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err)
  1241  			}
  1242  		case "scsi-lsi-sas":
  1243  			// Create scsi controller
  1244  			c, err = devices.CreateSCSIController("lsilogic-sas")
  1245  			if err != nil {
  1246  				return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err)
  1247  			}
  1248  		case "ide":
  1249  			// Create ide controller
  1250  			c, err = devices.CreateIDEController()
  1251  			if err != nil {
  1252  				return fmt.Errorf("[ERROR] Failed creating IDE controller: %v", err)
  1253  			}
  1254  		default:
  1255  			return fmt.Errorf("[ERROR] Unsupported disk controller provided: %v", controller_type)
  1256  		}
  1257  
  1258  		vm.AddDevice(context.TODO(), c)
  1259  		// Update our devices list
  1260  		devices, err := vm.Device(context.TODO())
  1261  		if err != nil {
  1262  			return err
  1263  		}
  1264  		controller = devices.PickController(c.(types.BaseVirtualController))
  1265  		if controller == nil {
  1266  			log.Printf("[ERROR] Could not find the new %v controller", controller_type)
  1267  			return fmt.Errorf("Could not find the new %v controller", controller_type)
  1268  		}
  1269  	}
  1270  
  1271  	log.Printf("[DEBUG] disk controller: %#v\n", controller)
  1272  
  1273  	// TODO Check if diskPath & datastore exist
   1274  	// A disk path is required; it is qualified with the datastore name below.
  1275  	if diskPath == "" {
   1276  		return fmt.Errorf("[ERROR] addHardDisk - No path provided")
  1277  	} else {
  1278  		// TODO Check if diskPath & datastore exist
  1279  		diskPath = fmt.Sprintf("[%v] %v", datastore.Name(), diskPath)
  1280  	}
  1281  	log.Printf("[DEBUG] addHardDisk - diskPath: %v", diskPath)
  1282  	disk := devices.CreateDisk(controller, datastore.Reference(), diskPath)
  1283  
  1284  	if strings.Contains(controller_type, "scsi") {
  1285  		unitNumber, err := getNextUnitNumber(devices, controller)
  1286  		if err != nil {
  1287  			return err
  1288  		}
  1289  		*disk.UnitNumber = unitNumber
  1290  	}
  1291  
  1292  	existing := devices.SelectByBackingInfo(disk.Backing)
  1293  	log.Printf("[DEBUG] disk: %#v\n", disk)
  1294  
  1295  	if len(existing) == 0 {
  1296  		disk.CapacityInKB = int64(size * 1024 * 1024)
  1297  		if iops != 0 {
  1298  			disk.StorageIOAllocation = &types.StorageIOAllocationInfo{
  1299  				Limit: iops,
  1300  			}
  1301  		}
  1302  		backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
  1303  
  1304  		if diskType == "eager_zeroed" {
  1305  			// eager zeroed thick virtual disk
  1306  			backing.ThinProvisioned = types.NewBool(false)
  1307  			backing.EagerlyScrub = types.NewBool(true)
  1308  		} else if diskType == "lazy" {
  1309  			// lazy zeroed thick virtual disk
  1310  			backing.ThinProvisioned = types.NewBool(false)
  1311  			backing.EagerlyScrub = types.NewBool(false)
  1312  		} else if diskType == "thin" {
  1313  			// thin provisioned virtual disk
  1314  			backing.ThinProvisioned = types.NewBool(true)
  1315  		}
  1316  
  1317  		log.Printf("[DEBUG] addHardDisk: %#v\n", disk)
  1318  		log.Printf("[DEBUG] addHardDisk capacity: %#v\n", disk.CapacityInKB)
  1319  
  1320  		return vm.AddDevice(context.TODO(), disk)
  1321  	} else {
  1322  		log.Printf("[DEBUG] addHardDisk: Disk already present.\n")
  1323  
  1324  		return nil
  1325  	}
  1326  }
  1327  
  1328  func getSCSIControllers(vmDevices object.VirtualDeviceList) []*types.VirtualController {
  1329  	// get virtual scsi controllers of all supported types
  1330  	var scsiControllers []*types.VirtualController
  1331  	for _, device := range vmDevices {
  1332  		devType := vmDevices.Type(device)
  1333  		switch devType {
  1334  		case "scsi", "lsilogic", "buslogic", "pvscsi", "lsilogic-sas":
  1335  			if c, ok := device.(types.BaseVirtualController); ok {
  1336  				scsiControllers = append(scsiControllers, c.GetVirtualController())
  1337  			}
  1338  		}
  1339  	}
  1340  	return scsiControllers
  1341  }
  1342  
  1343  func getNextUnitNumber(devices object.VirtualDeviceList, c types.BaseVirtualController) (int32, error) {
  1344  	key := c.GetVirtualController().Key
  1345  
  1346  	var unitNumbers [16]bool
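         	// Unit number 7 is reserved for the SCSI controller itself and is never
         	// assigned to a disk.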
  1347  	unitNumbers[7] = true
  1348  
  1349  	for _, device := range devices {
  1350  		d := device.GetVirtualDevice()
  1351  
  1352  		if d.ControllerKey == key {
  1353  			if d.UnitNumber != nil {
  1354  				unitNumbers[*d.UnitNumber] = true
  1355  			}
  1356  		}
  1357  	}
  1358  	for i, taken := range unitNumbers {
  1359  		if !taken {
  1360  			return int32(i), nil
  1361  		}
  1362  	}
  1363  	return -1, fmt.Errorf("[ERROR] getNextUnitNumber - controller is full")
  1364  }
  1365  
  1366  // addCdrom adds a new virtual cdrom drive to the VirtualMachine and attaches an image (ISO) to it from a datastore path.
  1367  func addCdrom(vm *object.VirtualMachine, datastore, path string) error {
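         // The image is addressed as "[<datastore>] <path>"; for example (illustrative
         // values), datastore "datastore1" and path "isos/example.iso" become
         // "[datastore1] isos/example.iso".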
  1368  	devices, err := vm.Device(context.TODO())
  1369  	if err != nil {
  1370  		return err
  1371  	}
  1372  	log.Printf("[DEBUG] vm devices: %#v", devices)
  1373  
  1374  	var controller *types.VirtualIDEController
  1375  	controller, err = devices.FindIDEController("")
  1376  	if err != nil {
   1377  		log.Printf("[DEBUG] Couldn't find an IDE controller. Creating one...")
  1378  
  1379  		var c types.BaseVirtualDevice
  1380  		c, err := devices.CreateIDEController()
  1381  		if err != nil {
  1382  			return fmt.Errorf("[ERROR] Failed creating IDE controller: %v", err)
  1383  		}
  1384  
  1385  		if v, ok := c.(*types.VirtualIDEController); ok {
  1386  			controller = v
  1387  		} else {
  1388  			return fmt.Errorf("[ERROR] Controller type could not be asserted")
  1389  		}
  1390  		vm.AddDevice(context.TODO(), c)
  1391  		// Update our devices list
  1392  		devices, err := vm.Device(context.TODO())
  1393  		if err != nil {
  1394  			return err
  1395  		}
  1396  		controller, err = devices.FindIDEController("")
  1397  		if err != nil {
  1398  			log.Printf("[ERROR] Could not find the new disk IDE controller: %v", err)
  1399  			return err
  1400  		}
  1401  	}
  1402  	log.Printf("[DEBUG] ide controller: %#v", controller)
  1403  
  1404  	c, err := devices.CreateCdrom(controller)
  1405  	if err != nil {
  1406  		return err
  1407  	}
  1408  
  1409  	c = devices.InsertIso(c, fmt.Sprintf("[%s] %s", datastore, path))
  1410  	log.Printf("[DEBUG] addCdrom: %#v", c)
  1411  
  1412  	return vm.AddDevice(context.TODO(), c)
  1413  }
  1414  
  1415  // buildNetworkDevice builds VirtualDeviceConfigSpec for Network Device.
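         // Supported adapterType values are "vmxnet3" and "e1000". An empty macAddress
         // requests a generated MAC address; otherwise the given address is assigned
         // manually.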
  1416  func buildNetworkDevice(f *find.Finder, label, adapterType string, macAddress string) (*types.VirtualDeviceConfigSpec, error) {
  1417  	network, err := f.Network(context.TODO(), "*"+label)
  1418  	if err != nil {
  1419  		return nil, err
  1420  	}
  1421  
  1422  	backing, err := network.EthernetCardBackingInfo(context.TODO())
  1423  	if err != nil {
  1424  		return nil, err
  1425  	}
  1426  
  1427  	var address_type string
  1428  	if macAddress == "" {
  1429  		address_type = string(types.VirtualEthernetCardMacTypeGenerated)
  1430  	} else {
  1431  		address_type = string(types.VirtualEthernetCardMacTypeManual)
  1432  	}
  1433  
  1434  	if adapterType == "vmxnet3" {
  1435  		return &types.VirtualDeviceConfigSpec{
  1436  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
  1437  			Device: &types.VirtualVmxnet3{
  1438  				VirtualVmxnet: types.VirtualVmxnet{
  1439  					VirtualEthernetCard: types.VirtualEthernetCard{
  1440  						VirtualDevice: types.VirtualDevice{
  1441  							Key:     -1,
  1442  							Backing: backing,
  1443  						},
  1444  						AddressType: address_type,
  1445  						MacAddress:  macAddress,
  1446  					},
  1447  				},
  1448  			},
  1449  		}, nil
  1450  	} else if adapterType == "e1000" {
  1451  		return &types.VirtualDeviceConfigSpec{
  1452  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
  1453  			Device: &types.VirtualE1000{
  1454  				VirtualEthernetCard: types.VirtualEthernetCard{
  1455  					VirtualDevice: types.VirtualDevice{
  1456  						Key:     -1,
  1457  						Backing: backing,
  1458  					},
  1459  					AddressType: address_type,
  1460  					MacAddress:  macAddress,
  1461  				},
  1462  			},
  1463  		}, nil
  1464  	} else {
   1465  		return nil, fmt.Errorf("Invalid network adapter type: %s", adapterType)
  1466  	}
  1467  }
  1468  
  1469  // buildVMRelocateSpec builds VirtualMachineRelocateSpec to set a place for a new VirtualMachine.
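         // A linked clone uses the "createNewChildDiskBacking" move type so the clone
         // shares its parent's disk backings; otherwise all disk backings are moved and
         // sharing is disallowed.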
  1470  func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine, linkedClone bool, initType string) (types.VirtualMachineRelocateSpec, error) {
  1471  	var key int32
  1472  	var moveType string
  1473  	if linkedClone {
  1474  		moveType = "createNewChildDiskBacking"
  1475  	} else {
  1476  		moveType = "moveAllDiskBackingsAndDisallowSharing"
  1477  	}
  1478  	log.Printf("[DEBUG] relocate type: [%s]", moveType)
  1479  
  1480  	devices, err := vm.Device(context.TODO())
  1481  	if err != nil {
  1482  		return types.VirtualMachineRelocateSpec{}, err
  1483  	}
  1484  	for _, d := range devices {
  1485  		if devices.Type(d) == "disk" {
  1486  			key = int32(d.GetVirtualDevice().Key)
  1487  		}
  1488  	}
  1489  
  1490  	isThin := initType == "thin"
  1491  	eagerScrub := initType == "eager_zeroed"
  1492  	rpr := rp.Reference()
  1493  	dsr := ds.Reference()
  1494  	return types.VirtualMachineRelocateSpec{
  1495  		Datastore:    &dsr,
  1496  		Pool:         &rpr,
  1497  		DiskMoveType: moveType,
  1498  		Disk: []types.VirtualMachineRelocateSpecDiskLocator{
  1499  			{
  1500  				Datastore: dsr,
  1501  				DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{
  1502  					DiskMode:        "persistent",
  1503  					ThinProvisioned: types.NewBool(isThin),
  1504  					EagerlyScrub:    types.NewBool(eagerScrub),
  1505  				},
  1506  				DiskId: key,
  1507  			},
  1508  		},
  1509  	}, nil
  1510  }
  1511  
  1512  // getDatastoreObject gets datastore object.
  1513  func getDatastoreObject(client *govmomi.Client, f *object.DatacenterFolders, name string) (types.ManagedObjectReference, error) {
  1514  	s := object.NewSearchIndex(client.Client)
  1515  	ref, err := s.FindChild(context.TODO(), f.DatastoreFolder, name)
  1516  	if err != nil {
  1517  		return types.ManagedObjectReference{}, err
  1518  	}
  1519  	if ref == nil {
  1520  		return types.ManagedObjectReference{}, fmt.Errorf("Datastore '%s' not found.", name)
  1521  	}
  1522  	log.Printf("[DEBUG] getDatastoreObject: reference: %#v", ref)
  1523  	return ref.Reference(), nil
  1524  }
  1525  
  1526  // buildStoragePlacementSpecCreate builds StoragePlacementSpec for create action.
  1527  func buildStoragePlacementSpecCreate(f *object.DatacenterFolders, rp *object.ResourcePool, storagePod object.StoragePod, configSpec types.VirtualMachineConfigSpec) types.StoragePlacementSpec {
  1528  	vmfr := f.VmFolder.Reference()
  1529  	rpr := rp.Reference()
  1530  	spr := storagePod.Reference()
  1531  
  1532  	sps := types.StoragePlacementSpec{
  1533  		Type:       "create",
  1534  		ConfigSpec: &configSpec,
  1535  		PodSelectionSpec: types.StorageDrsPodSelectionSpec{
  1536  			StoragePod: &spr,
  1537  		},
  1538  		Folder:       &vmfr,
  1539  		ResourcePool: &rpr,
  1540  	}
  1541  	log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)
  1542  	return sps
  1543  }
  1544  
  1545  // buildStoragePlacementSpecClone builds StoragePlacementSpec for clone action.
  1546  func buildStoragePlacementSpecClone(c *govmomi.Client, f *object.DatacenterFolders, vm *object.VirtualMachine, rp *object.ResourcePool, storagePod object.StoragePod) types.StoragePlacementSpec {
  1547  	vmr := vm.Reference()
  1548  	vmfr := f.VmFolder.Reference()
  1549  	rpr := rp.Reference()
  1550  	spr := storagePod.Reference()
  1551  
  1552  	var o mo.VirtualMachine
  1553  	err := vm.Properties(context.TODO(), vmr, []string{"datastore"}, &o)
  1554  	if err != nil {
  1555  		return types.StoragePlacementSpec{}
  1556  	}
  1557  	ds := object.NewDatastore(c.Client, o.Datastore[0])
  1558  	log.Printf("[DEBUG] findDatastore: datastore: %#v\n", ds)
  1559  
  1560  	devices, err := vm.Device(context.TODO())
  1561  	if err != nil {
  1562  		return types.StoragePlacementSpec{}
  1563  	}
  1564  
  1565  	var key int32
  1566  	for _, d := range devices.SelectByType((*types.VirtualDisk)(nil)) {
  1567  		key = int32(d.GetVirtualDevice().Key)
  1568  		log.Printf("[DEBUG] findDatastore: virtual devices: %#v\n", d.GetVirtualDevice())
  1569  	}
  1570  
  1571  	sps := types.StoragePlacementSpec{
  1572  		Type: "clone",
  1573  		Vm:   &vmr,
  1574  		PodSelectionSpec: types.StorageDrsPodSelectionSpec{
  1575  			StoragePod: &spr,
  1576  		},
  1577  		CloneSpec: &types.VirtualMachineCloneSpec{
  1578  			Location: types.VirtualMachineRelocateSpec{
  1579  				Disk: []types.VirtualMachineRelocateSpecDiskLocator{
  1580  					{
  1581  						Datastore:       ds.Reference(),
  1582  						DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{},
  1583  						DiskId:          key,
  1584  					},
  1585  				},
  1586  				Pool: &rpr,
  1587  			},
  1588  			PowerOn:  false,
  1589  			Template: false,
  1590  		},
  1591  		CloneName: "dummy",
  1592  		Folder:    &vmfr,
  1593  	}
  1594  	return sps
  1595  }
  1596  
// findDatastore asks Storage DRS for placement recommendations matching the
// given StoragePlacementSpec and returns the recommended Datastore object.
  1598  func findDatastore(c *govmomi.Client, sps types.StoragePlacementSpec) (*object.Datastore, error) {
  1599  	var datastore *object.Datastore
  1600  	log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)
  1601  
  1602  	srm := object.NewStorageResourceManager(c.Client)
  1603  	rds, err := srm.RecommendDatastores(context.TODO(), sps)
  1604  	if err != nil {
  1605  		return nil, err
  1606  	}
  1607  	log.Printf("[DEBUG] findDatastore: recommendDatastores: %#v\n", rds)
  1608  
	if len(rds.Recommendations) == 0 || len(rds.Recommendations[0].Action) == 0 {
		return nil, fmt.Errorf("no datastore recommendations returned for StoragePlacementSpec")
	}
	spa := rds.Recommendations[0].Action[0].(*types.StoragePlacementAction)
  1610  	datastore = object.NewDatastore(c.Client, spa.Destination)
  1611  	log.Printf("[DEBUG] findDatastore: datastore: %#v", datastore)
  1612  
  1613  	return datastore, nil
  1614  }
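
// Illustrative sketch (not used by the provider itself): when the configured
// datastore is actually a datastore cluster (StoragePod), setupVirtualMachine
// below combines getDatastoreObject, buildStoragePlacementSpecCreate and
// findDatastore roughly like this to let Storage DRS pick a member datastore.
// The pod name "my-datastore-cluster" is a placeholder.
func examplePickDatastoreFromPod(c *govmomi.Client, f *object.DatacenterFolders, rp *object.ResourcePool, configSpec types.VirtualMachineConfigSpec) (*object.Datastore, error) {
	podRef, err := getDatastoreObject(c, f, "my-datastore-cluster")
	if err != nil {
		return nil, err
	}
	pod := object.StoragePod{
		Folder: object.NewFolder(c.Client, podRef),
	}
	sps := buildStoragePlacementSpecCreate(f, rp, pod, configSpec)
	return findDatastore(c, sps)
}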
  1615  
  1616  // createCdroms is a helper function to attach virtual cdrom devices (and their attached disk images) to a virtual IDE controller.
  1617  func createCdroms(vm *object.VirtualMachine, cdroms []cdrom) error {
  1618  	log.Printf("[DEBUG] add cdroms: %v", cdroms)
  1619  	for _, cd := range cdroms {
  1620  		log.Printf("[DEBUG] add cdrom (datastore): %v", cd.datastore)
  1621  		log.Printf("[DEBUG] add cdrom (cd path): %v", cd.path)
  1622  		err := addCdrom(vm, cd.datastore, cd.path)
  1623  		if err != nil {
  1624  			return err
  1625  		}
  1626  	}
  1627  
  1628  	return nil
  1629  }
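
// Illustrative sketch (not used by the provider itself): attaching a single ISO
// image to a virtual machine through createCdroms. The datastore name and ISO
// path are placeholders; the actual device work happens in addCdrom, defined
// elsewhere in this package.
func exampleAttachIso(vm *object.VirtualMachine) error {
	return createCdroms(vm, []cdrom{
		{datastore: "datastore1", path: "iso/install.iso"},
	})
}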
  1630  
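// setupVirtualMachine creates a new virtual machine, either from scratch or by
// cloning a template, attaches its network adapters, cdroms and extra disks,
// applies guest customization when appropriate, and powers the machine on.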
  1631  func (vm *virtualMachine) setupVirtualMachine(c *govmomi.Client) error {
	dc, err := getDatacenter(c, vm.datacenter)
	if err != nil {
		return err
	}
  1637  	finder := find.NewFinder(c.Client, true)
  1638  	finder = finder.SetDatacenter(dc)
  1639  
  1640  	var template *object.VirtualMachine
  1641  	var template_mo mo.VirtualMachine
  1642  	var vm_mo mo.VirtualMachine
  1643  	if vm.template != "" {
  1644  		template, err = finder.VirtualMachine(context.TODO(), vm.template)
  1645  		if err != nil {
  1646  			return err
  1647  		}
  1648  		log.Printf("[DEBUG] template: %#v", template)
  1649  
  1650  		err = template.Properties(context.TODO(), template.Reference(), []string{"parent", "config.template", "config.guestId", "resourcePool", "snapshot", "guest.toolsVersionStatus2", "config.guestFullName"}, &template_mo)
  1651  		if err != nil {
  1652  			return err
  1653  		}
  1654  	}
  1655  
  1656  	var resourcePool *object.ResourcePool
  1657  	if vm.resourcePool == "" {
  1658  		if vm.cluster == "" {
  1659  			resourcePool, err = finder.DefaultResourcePool(context.TODO())
  1660  			if err != nil {
  1661  				return err
  1662  			}
  1663  		} else {
  1664  			resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
  1665  			if err != nil {
  1666  				return err
  1667  			}
  1668  		}
  1669  	} else {
  1670  		resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
  1671  		if err != nil {
  1672  			return err
  1673  		}
  1674  	}
  1675  	log.Printf("[DEBUG] resource pool: %#v", resourcePool)
  1676  
  1677  	dcFolders, err := dc.Folders(context.TODO())
  1678  	if err != nil {
  1679  		return err
  1680  	}
  1681  	log.Printf("[DEBUG] folder: %#v", vm.folder)
  1682  
  1683  	folder := dcFolders.VmFolder
  1684  	if len(vm.folder) > 0 {
  1685  		si := object.NewSearchIndex(c.Client)
  1686  		folderRef, err := si.FindByInventoryPath(
  1687  			context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder))
  1688  		if err != nil {
  1689  			return fmt.Errorf("Error reading folder %s: %s", vm.folder, err)
  1690  		} else if folderRef == nil {
  1691  			return fmt.Errorf("Cannot find folder %s", vm.folder)
  1692  		} else {
  1693  			folder = folderRef.(*object.Folder)
  1694  		}
  1695  	}
  1696  
  1697  	// make config spec
  1698  	configSpec := types.VirtualMachineConfigSpec{
  1699  		Name:              vm.name,
  1700  		NumCPUs:           vm.vcpu,
  1701  		NumCoresPerSocket: 1,
  1702  		MemoryMB:          vm.memoryMb,
  1703  		MemoryAllocation: &types.ResourceAllocationInfo{
  1704  			Reservation: vm.memoryAllocation.reservation,
  1705  		},
  1706  		Flags: &types.VirtualMachineFlagInfo{
  1707  			DiskUuidEnabled: &vm.enableDiskUUID,
  1708  		},
  1709  	}
  1710  	if vm.template == "" {
  1711  		configSpec.GuestId = "otherLinux64Guest"
  1712  	}
  1713  	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
  1714  
  1715  	// make ExtraConfig
  1716  	log.Printf("[DEBUG] virtual machine Extra Config spec start")
  1717  	if len(vm.customConfigurations) > 0 {
  1718  		var ov []types.BaseOptionValue
  1719  		for k, v := range vm.customConfigurations {
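			// Copy the range variables so that taking &value below does not alias
			// the same loop variable across iterations.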
  1720  			key := k
  1721  			value := v
  1722  			o := types.OptionValue{
  1723  				Key:   key,
  1724  				Value: &value,
  1725  			}
  1726  			log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k, v)
  1727  			ov = append(ov, &o)
  1728  		}
  1729  		configSpec.ExtraConfig = ov
  1730  		log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
  1731  	}
  1732  
  1733  	var datastore *object.Datastore
  1734  	if vm.datastore == "" {
  1735  		datastore, err = finder.DefaultDatastore(context.TODO())
  1736  		if err != nil {
  1737  			return err
  1738  		}
  1739  	} else {
  1740  		datastore, err = finder.Datastore(context.TODO(), vm.datastore)
  1741  		if err != nil {
  1742  			// TODO: datastore cluster support in govmomi finder function
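			// The finder cannot resolve datastore clusters, so fall back to a
			// SearchIndex lookup; if the result is a StoragePod, Storage DRS is
			// asked for a member datastore below.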
  1743  			d, err := getDatastoreObject(c, dcFolders, vm.datastore)
  1744  			if err != nil {
  1745  				return err
  1746  			}
  1747  
  1748  			if d.Type == "StoragePod" {
  1749  				sp := object.StoragePod{
  1750  					Folder: object.NewFolder(c.Client, d),
  1751  				}
  1752  
  1753  				var sps types.StoragePlacementSpec
  1754  				if vm.template != "" {
  1755  					sps = buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp)
  1756  				} else {
  1757  					sps = buildStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec)
  1758  				}
  1759  
  1760  				datastore, err = findDatastore(c, sps)
  1761  				if err != nil {
  1762  					return err
  1763  				}
  1764  			} else {
  1765  				datastore = object.NewDatastore(c.Client, d)
  1766  			}
  1767  		}
  1768  	}
  1769  
  1770  	log.Printf("[DEBUG] datastore: %#v", datastore)
  1771  
  1772  	// network
  1773  	networkDevices := []types.BaseVirtualDeviceConfigSpec{}
  1774  	networkConfigs := []types.CustomizationAdapterMapping{}
  1775  	for _, network := range vm.networkInterfaces {
  1776  		// network device
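		// VMs created from scratch get an e1000 adapter; VMs cloned from a
		// template get vmxnet3.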
  1777  		var networkDeviceType string
  1778  		if vm.template == "" {
  1779  			networkDeviceType = "e1000"
  1780  		} else {
  1781  			networkDeviceType = "vmxnet3"
  1782  		}
  1783  		nd, err := buildNetworkDevice(finder, network.label, networkDeviceType, network.macAddress)
  1784  		if err != nil {
  1785  			return err
  1786  		}
  1787  		log.Printf("[DEBUG] network device: %+v", nd.Device)
  1788  		networkDevices = append(networkDevices, nd)
  1789  
  1790  		if vm.template != "" {
  1791  			var ipSetting types.CustomizationIPSettings
  1792  			if network.ipv4Address == "" {
  1793  				ipSetting.Ip = &types.CustomizationDhcpIpGenerator{}
  1794  			} else {
  1795  				if network.ipv4PrefixLength == 0 {
					return fmt.Errorf("ipv4_prefix_length argument must be set when ipv4_address is set")
  1797  				}
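				// CustomizationIPSettings expects a dotted-quad subnet mask, so the
				// ipv4_prefix_length (e.g. 24) is converted to its mask form
				// (e.g. 255.255.255.0).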
  1798  				m := net.CIDRMask(network.ipv4PrefixLength, 32)
  1799  				sm := net.IPv4(m[0], m[1], m[2], m[3])
  1800  				subnetMask := sm.String()
  1801  				log.Printf("[DEBUG] ipv4 gateway: %v\n", network.ipv4Gateway)
  1802  				log.Printf("[DEBUG] ipv4 address: %v\n", network.ipv4Address)
  1803  				log.Printf("[DEBUG] ipv4 prefix length: %v\n", network.ipv4PrefixLength)
  1804  				log.Printf("[DEBUG] ipv4 subnet mask: %v\n", subnetMask)
  1805  				ipSetting.Gateway = []string{
  1806  					network.ipv4Gateway,
  1807  				}
  1808  				ipSetting.Ip = &types.CustomizationFixedIp{
  1809  					IpAddress: network.ipv4Address,
  1810  				}
  1811  				ipSetting.SubnetMask = subnetMask
  1812  			}
  1813  
  1814  			ipv6Spec := &types.CustomizationIPSettingsIpV6AddressSpec{}
  1815  			if network.ipv6Address == "" {
  1816  				ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
  1817  					&types.CustomizationDhcpIpV6Generator{},
  1818  				}
  1819  			} else {
  1820  				log.Printf("[DEBUG] ipv6 gateway: %v\n", network.ipv6Gateway)
  1821  				log.Printf("[DEBUG] ipv6 address: %v\n", network.ipv6Address)
  1822  				log.Printf("[DEBUG] ipv6 prefix length: %v\n", network.ipv6PrefixLength)
  1823  
  1824  				ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
  1825  					&types.CustomizationFixedIpV6{
  1826  						IpAddress:  network.ipv6Address,
  1827  						SubnetMask: int32(network.ipv6PrefixLength),
  1828  					},
  1829  				}
  1830  				ipv6Spec.Gateway = []string{network.ipv6Gateway}
  1831  			}
  1832  			ipSetting.IpV6Spec = ipv6Spec
  1833  
  1834  			// network config
  1835  			config := types.CustomizationAdapterMapping{
  1836  				Adapter: ipSetting,
  1837  			}
  1838  			networkConfigs = append(networkConfigs, config)
  1839  		}
  1840  	}
  1841  	log.Printf("[DEBUG] network devices: %#v", networkDevices)
  1842  	log.Printf("[DEBUG] network configs: %#v", networkConfigs)
  1843  
  1844  	var task *object.Task
  1845  	if vm.template == "" {
  1846  		var mds mo.Datastore
  1847  		if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil {
  1848  			return err
  1849  		}
  1850  		log.Printf("[DEBUG] datastore: %#v", mds.Name)
  1851  		scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi")
  1852  		if err != nil {
			log.Printf("[ERROR] %s", err)
			return err
		}
  1855  
  1856  		configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{
  1857  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
  1858  			Device:    scsi,
  1859  		})
  1860  
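		// A bare "[datastore]" path lets vSphere create the VM's directory on
		// that datastore itself.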
  1861  		configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)}
  1862  
		task, err = folder.CreateVM(context.TODO(), configSpec, resourcePool, nil)
		if err != nil {
			log.Printf("[ERROR] %s", err)
			return err
		}

		err = task.Wait(context.TODO())
		if err != nil {
			log.Printf("[ERROR] %s", err)
			return err
		}
  1872  
  1873  	} else {
  1874  
  1875  		relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template, vm.linkedClone, vm.hardDisks[0].initType)
  1876  		if err != nil {
  1877  			return err
  1878  		}
  1879  
  1880  		log.Printf("[DEBUG] relocate spec: %v", relocateSpec)
  1881  
  1882  		// make vm clone spec
  1883  		cloneSpec := types.VirtualMachineCloneSpec{
  1884  			Location: relocateSpec,
  1885  			Template: false,
  1886  			Config:   &configSpec,
  1887  			PowerOn:  false,
  1888  		}
  1889  		if vm.linkedClone {
  1890  			if template_mo.Snapshot == nil {
				return fmt.Errorf("linked_clone is enabled, but the template VM has no snapshot to clone from")
  1892  			}
  1893  			cloneSpec.Snapshot = template_mo.Snapshot.CurrentSnapshot
  1894  		}
  1895  		log.Printf("[DEBUG] clone spec: %v", cloneSpec)
  1896  
  1897  		task, err = template.Clone(context.TODO(), folder, vm.name, cloneSpec)
  1898  		if err != nil {
  1899  			return err
  1900  		}
  1901  	}
  1902  
	err = task.Wait(context.TODO())
	if err != nil {
		log.Printf("[ERROR] %s", err)
		return err
	}
  1907  
  1908  	newVM, err := finder.VirtualMachine(context.TODO(), vm.Path())
  1909  	if err != nil {
  1910  		return err
  1911  	}
  1912  	log.Printf("[DEBUG] new vm: %v", newVM)
  1913  
  1914  	devices, err := newVM.Device(context.TODO())
  1915  	if err != nil {
		log.Printf("[DEBUG] cannot read devices of the new VM: %s", err)
  1917  		return err
  1918  	}
  1919  
  1920  	for _, dvc := range devices {
  1921  		// Issue 3559/3560: Delete all ethernet devices to add the correct ones later
  1922  		if devices.Type(dvc) == "ethernet" {
  1923  			err := newVM.RemoveDevice(context.TODO(), false, dvc)
  1924  			if err != nil {
  1925  				return err
  1926  			}
  1927  		}
  1928  	}
  1929  	// Add Network devices
  1930  	for _, dvc := range networkDevices {
  1931  		err := newVM.AddDevice(
  1932  			context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device)
  1933  		if err != nil {
  1934  			return err
  1935  		}
  1936  	}
  1937  
  1938  	// Create the cdroms if needed.
  1939  	if err := createCdroms(newVM, vm.cdroms); err != nil {
  1940  		return err
  1941  	}
  1942  
	err = newVM.Properties(context.TODO(), newVM.Reference(), []string{"summary", "config"}, &vm_mo)
	if err != nil {
		return err
	}
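
	// When cloning from a template, the first configured disk describes the
	// template's existing system disk, so only the additional disks are added here.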
  1944  	firstDisk := 0
  1945  	if vm.template != "" {
  1946  		firstDisk++
  1947  	}
  1948  	for i := firstDisk; i < len(vm.hardDisks); i++ {
  1949  		log.Printf("[DEBUG] disk index: %v", i)
  1950  
  1951  		var diskPath string
  1952  		switch {
  1953  		case vm.hardDisks[i].vmdkPath != "":
  1954  			diskPath = vm.hardDisks[i].vmdkPath
  1955  		case vm.hardDisks[i].name != "":
  1956  			snapshotFullDir := vm_mo.Config.Files.SnapshotDirectory
  1957  			split := strings.Split(snapshotFullDir, " ")
  1958  			if len(split) != 2 {
  1959  				return fmt.Errorf("[ERROR] setupVirtualMachine - failed to split snapshot directory: %v", snapshotFullDir)
  1960  			}
  1961  			vmWorkingPath := split[1]
  1962  			diskPath = vmWorkingPath + vm.hardDisks[i].name
  1963  		default:
  1964  			return fmt.Errorf("[ERROR] setupVirtualMachine - Neither vmdk path nor vmdk name was given: %#v", vm.hardDisks[i])
  1965  		}
  1966  
		err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, diskPath, vm.hardDisks[i].controller)
		if err != nil {
			// Retry the disk add once before giving up.
			err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, diskPath, vm.hardDisks[i].controller)
			if err != nil {
				return err
			}
		}
  1975  	}
  1976  
  1977  	if vm.skipCustomization || vm.template == "" {
  1978  		log.Printf("[DEBUG] VM customization skipped")
  1979  	} else {
  1980  		var identity_options types.BaseCustomizationIdentitySettings
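		// The template's guest ID determines whether Windows (Sysprep) or
		// Linux (LinuxPrep) customization is applied.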
  1981  		if strings.HasPrefix(template_mo.Config.GuestId, "win") {
  1982  			var timeZone int
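			// Windows guest customization takes a numeric Microsoft time zone index
			// rather than an IANA name; the code maps "Etc/UTC" to index 085 (GMT).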
  1983  			if vm.timeZone == "Etc/UTC" {
  1984  				vm.timeZone = "085"
  1985  			}
  1986  			timeZone, err := strconv.Atoi(vm.timeZone)
  1987  			if err != nil {
  1988  				return fmt.Errorf("Error converting TimeZone: %s", err)
  1989  			}
  1990  
  1991  			guiUnattended := types.CustomizationGuiUnattended{
  1992  				AutoLogon:      false,
  1993  				AutoLogonCount: 1,
  1994  				TimeZone:       int32(timeZone),
  1995  			}
  1996  
  1997  			customIdentification := types.CustomizationIdentification{}
  1998  
  1999  			userData := types.CustomizationUserData{
  2000  				ComputerName: &types.CustomizationFixedName{
  2001  					Name: strings.Split(vm.name, ".")[0],
  2002  				},
  2003  				ProductId: vm.windowsOptionalConfig.productKey,
  2004  				FullName:  "terraform",
  2005  				OrgName:   "terraform",
  2006  			}
  2007  
  2008  			if vm.windowsOptionalConfig.domainUserPassword != "" && vm.windowsOptionalConfig.domainUser != "" && vm.windowsOptionalConfig.domain != "" {
  2009  				customIdentification.DomainAdminPassword = &types.CustomizationPassword{
  2010  					PlainText: true,
  2011  					Value:     vm.windowsOptionalConfig.domainUserPassword,
  2012  				}
  2013  				customIdentification.DomainAdmin = vm.windowsOptionalConfig.domainUser
  2014  				customIdentification.JoinDomain = vm.windowsOptionalConfig.domain
  2015  			}
  2016  
  2017  			if vm.windowsOptionalConfig.adminPassword != "" {
  2018  				guiUnattended.Password = &types.CustomizationPassword{
  2019  					PlainText: true,
  2020  					Value:     vm.windowsOptionalConfig.adminPassword,
  2021  				}
  2022  			}
  2023  
  2024  			identity_options = &types.CustomizationSysprep{
  2025  				GuiUnattended:  guiUnattended,
  2026  				Identification: customIdentification,
  2027  				UserData:       userData,
  2028  			}
  2029  		} else {
  2030  			identity_options = &types.CustomizationLinuxPrep{
  2031  				HostName: &types.CustomizationFixedName{
  2032  					Name: strings.Split(vm.name, ".")[0],
  2033  				},
  2034  				Domain:     vm.domain,
  2035  				TimeZone:   vm.timeZone,
  2036  				HwClockUTC: types.NewBool(true),
  2037  			}
  2038  		}
  2039  
  2040  		// create CustomizationSpec
  2041  		customSpec := types.CustomizationSpec{
  2042  			Identity: identity_options,
  2043  			GlobalIPSettings: types.CustomizationGlobalIPSettings{
  2044  				DnsSuffixList: vm.dnsSuffixes,
  2045  				DnsServerList: vm.dnsServers,
  2046  			},
  2047  			NicSettingMap: networkConfigs,
  2048  		}
  2049  		log.Printf("[DEBUG] custom spec: %v", customSpec)
  2050  
  2051  		log.Printf("[DEBUG] VM customization starting")
  2052  		taskb, err := newVM.Customize(context.TODO(), customSpec)
  2053  		if err != nil {
  2054  			return err
  2055  		}
  2056  		_, err = taskb.WaitForResult(context.TODO(), nil)
  2057  		if err != nil {
  2058  			return err
  2059  		}
  2060  		log.Printf("[DEBUG] VM customization finished")
  2061  	}
  2062  
	if vm.hasBootableVmdk || vm.template != "" {
		if _, err := newVM.PowerOn(context.TODO()); err != nil {
			return err
		}
		err = newVM.WaitForPowerState(context.TODO(), types.VirtualMachinePowerStatePoweredOn)
		if err != nil {
			return err
		}
	}
  2070  	return nil
  2071  }
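
// Illustrative sketch (not used by the provider itself): a caller such as the
// resource's Create function fills in a virtualMachine value from the Terraform
// configuration and then calls setupVirtualMachine. All literal values below are
// placeholders.
func exampleSetupVirtualMachine(c *govmomi.Client) error {
	vm := virtualMachine{
		name:        "example-vm",
		datacenter:  "dc1",
		vcpu:        2,
		memoryMb:    2048,
		template:    "templates/ubuntu-16.04",
		domain:      "vsphere.local",
		timeZone:    "Etc/UTC",
		dnsSuffixes: DefaultDNSSuffixes,
		dnsServers:  DefaultDNSServers,
		hardDisks: []hardDisk{
			// With a template, the first entry describes the template's own system disk.
			{size: 20, initType: "thin"},
		},
		networkInterfaces: []networkInterface{
			// No ipv4Address set, so the adapter is customized for DHCP.
			{label: "VM Network"},
		},
	}
	return vm.setupVirtualMachine(c)
}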