github.com/danp/terraform@v0.9.5-0.20170426144147-39d740081351/builtin/providers/vsphere/resource_vsphere_virtual_machine.go

     1  package vsphere
     2  
     3  import (
     4  	"fmt"
     5  	"log"
     6  	"net"
     7  	"strconv"
     8  	"strings"
     9  
    10  	"github.com/hashicorp/terraform/helper/schema"
    11  	"github.com/vmware/govmomi"
    12  	"github.com/vmware/govmomi/find"
    13  	"github.com/vmware/govmomi/object"
    14  	"github.com/vmware/govmomi/property"
    15  	"github.com/vmware/govmomi/vim25/mo"
    16  	"github.com/vmware/govmomi/vim25/types"
    17  	"golang.org/x/net/context"
    18  )
    19  
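         // DefaultDNSSuffixes is the DNS search suffix list applied when the "dns_suffixes" argument is not set.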
    20  var DefaultDNSSuffixes = []string{
    21  	"vsphere.local",
    22  }
    23  
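         // DefaultDNSServers is the DNS server list applied when the "dns_servers" argument is not set.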
    24  var DefaultDNSServers = []string{
    25  	"8.8.8.8",
    26  	"8.8.4.4",
    27  }
    28  
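         // DiskControllerTypes lists the values accepted by the disk "controller_type" argument.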
    29  var DiskControllerTypes = []string{
    30  	"scsi",
    31  	"scsi-lsi-parallel",
    32  	"scsi-buslogic",
    33  	"scsi-paravirtual",
    34  	"scsi-lsi-sas",
    35  	"ide",
    36  }
    37  
    38  type networkInterface struct {
    39  	deviceName       string
    40  	label            string
    41  	ipv4Address      string
    42  	ipv4PrefixLength int
    43  	ipv4Gateway      string
    44  	ipv6Address      string
    45  	ipv6PrefixLength int
    46  	ipv6Gateway      string
    47  	adapterType      string // TODO: Make "adapter_type" argument
    48  	macAddress       string
    49  }
    50  
    51  type hardDisk struct {
    52  	name       string
    53  	size       int64
    54  	iops       int64
    55  	initType   string
    56  	vmdkPath   string
    57  	controller string
    58  	bootable   bool
    59  }
    60  
    61  // windowsOptConfig holds additional options vSphere can apply when cloning Windows machines.
    62  type windowsOptConfig struct {
    63  	productKey         string
    64  	adminPassword      string
    65  	domainUser         string
    66  	domain             string
    67  	domainUserPassword string
    68  }
    69  
    70  type cdrom struct {
    71  	datastore string
    72  	path      string
    73  }
    74  
    75  type memoryAllocation struct {
    76  	reservation int64
    77  }
    78  
    79  type virtualMachine struct {
    80  	name                  string
    81  	folder                string
    82  	datacenter            string
    83  	cluster               string
    84  	resourcePool          string
    85  	datastore             string
    86  	vcpu                  int32
    87  	memoryMb              int64
    88  	memoryAllocation      memoryAllocation
    89  	template              string
    90  	networkInterfaces     []networkInterface
    91  	hardDisks             []hardDisk
    92  	cdroms                []cdrom
    93  	domain                string
    94  	timeZone              string
    95  	dnsSuffixes           []string
    96  	dnsServers            []string
    97  	hasBootableVmdk       bool
    98  	linkedClone           bool
    99  	skipCustomization     bool
   100  	enableDiskUUID        bool
   101  	windowsOptionalConfig windowsOptConfig
   102  	customConfigurations  map[string](types.AnyType)
   103  }
   104  
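         // Path returns the inventory path of the virtual machine: the folder (if any) joined with the VM name.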
   105  func (v virtualMachine) Path() string {
   106  	return vmPath(v.folder, v.name)
   107  }
   108  
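         // vmPath joins an optional folder and a VM name into an inventory path.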
   109  func vmPath(folder string, name string) string {
   110  	var path string
   111  	if len(folder) > 0 {
   112  		path += folder + "/"
   113  	}
   114  	return path + name
   115  }
   116  
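         // resourceVSphereVirtualMachine defines the schema and CRUD functions for the
         // vsphere_virtual_machine resource. As a rough, illustrative sketch (the resource
         // name and all values below are hypothetical, not taken from this repository),
         // a configuration using this schema might look like:
         //
         //	resource "vsphere_virtual_machine" "example" {
         //	  name   = "example-vm"
         //	  vcpu   = 2
         //	  memory = 4096
         //
         //	  network_interface {
         //	    label = "VM Network"
         //	  }
         //
         //	  disk {
         //	    template = "centos-7-template"
         //	  }
         //	}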
   117  func resourceVSphereVirtualMachine() *schema.Resource {
   118  	return &schema.Resource{
   119  		Create: resourceVSphereVirtualMachineCreate,
   120  		Read:   resourceVSphereVirtualMachineRead,
   121  		Update: resourceVSphereVirtualMachineUpdate,
   122  		Delete: resourceVSphereVirtualMachineDelete,
   123  
   124  		SchemaVersion: 1,
   125  		MigrateState:  resourceVSphereVirtualMachineMigrateState,
   126  
   127  		Schema: map[string]*schema.Schema{
   128  			"name": &schema.Schema{
   129  				Type:     schema.TypeString,
   130  				Required: true,
   131  				ForceNew: true,
   132  			},
   133  
   134  			"folder": &schema.Schema{
   135  				Type:     schema.TypeString,
   136  				Optional: true,
   137  				ForceNew: true,
   138  			},
   139  
   140  			"vcpu": &schema.Schema{
   141  				Type:     schema.TypeInt,
   142  				Required: true,
   143  			},
   144  
   145  			"memory": &schema.Schema{
   146  				Type:     schema.TypeInt,
   147  				Required: true,
   148  			},
   149  
   150  			"memory_reservation": &schema.Schema{
   151  				Type:     schema.TypeInt,
   152  				Optional: true,
   153  				Default:  0,
   154  				ForceNew: true,
   155  			},
   156  
   157  			"datacenter": &schema.Schema{
   158  				Type:     schema.TypeString,
   159  				Optional: true,
   160  				ForceNew: true,
   161  			},
   162  
   163  			"cluster": &schema.Schema{
   164  				Type:     schema.TypeString,
   165  				Optional: true,
   166  				ForceNew: true,
   167  			},
   168  
   169  			"resource_pool": &schema.Schema{
   170  				Type:     schema.TypeString,
   171  				Optional: true,
   172  				ForceNew: true,
   173  			},
   174  
   175  			"linked_clone": &schema.Schema{
   176  				Type:     schema.TypeBool,
   177  				Optional: true,
   178  				Default:  false,
   179  				ForceNew: true,
   180  			},
   181  			"gateway": &schema.Schema{
   182  				Type:       schema.TypeString,
   183  				Optional:   true,
   184  				ForceNew:   true,
   185  				Deprecated: "Please use network_interface.ipv4_gateway",
   186  			},
   187  
   188  			"domain": &schema.Schema{
   189  				Type:     schema.TypeString,
   190  				Optional: true,
   191  				ForceNew: true,
   192  				Default:  "vsphere.local",
   193  			},
   194  
   195  			"time_zone": &schema.Schema{
   196  				Type:     schema.TypeString,
   197  				Optional: true,
   198  				ForceNew: true,
   199  				Default:  "Etc/UTC",
   200  			},
   201  
   202  			"dns_suffixes": &schema.Schema{
   203  				Type:     schema.TypeList,
   204  				Optional: true,
   205  				Elem:     &schema.Schema{Type: schema.TypeString},
   206  				ForceNew: true,
   207  			},
   208  
   209  			"dns_servers": &schema.Schema{
   210  				Type:     schema.TypeList,
   211  				Optional: true,
   212  				Elem:     &schema.Schema{Type: schema.TypeString},
   213  				ForceNew: true,
   214  			},
   215  
   216  			"skip_customization": &schema.Schema{
   217  				Type:     schema.TypeBool,
   218  				Optional: true,
   219  				ForceNew: true,
   220  				Default:  false,
   221  			},
   222  
   223  			"enable_disk_uuid": &schema.Schema{
   224  				Type:     schema.TypeBool,
   225  				Optional: true,
   226  				ForceNew: true,
   227  				Default:  false,
   228  			},
   229  
   230  			"uuid": &schema.Schema{
   231  				Type:     schema.TypeString,
   232  				Computed: true,
   233  			},
   234  
   235  			"custom_configuration_parameters": &schema.Schema{
   236  				Type:     schema.TypeMap,
   237  				Optional: true,
   238  				ForceNew: true,
   239  			},
   240  
   241  			"windows_opt_config": &schema.Schema{
   242  				Type:     schema.TypeList,
   243  				Optional: true,
   244  				ForceNew: true,
   245  				Elem: &schema.Resource{
   246  					Schema: map[string]*schema.Schema{
   247  						"product_key": &schema.Schema{
   248  							Type:     schema.TypeString,
   249  							Optional: true,
   250  							ForceNew: true,
   251  						},
   252  
   253  						"admin_password": &schema.Schema{
   254  							Type:     schema.TypeString,
   255  							Optional: true,
   256  							ForceNew: true,
   257  						},
   258  
   259  						"domain_user": &schema.Schema{
   260  							Type:     schema.TypeString,
   261  							Optional: true,
   262  							ForceNew: true,
   263  						},
   264  
   265  						"domain": &schema.Schema{
   266  							Type:     schema.TypeString,
   267  							Optional: true,
   268  							ForceNew: true,
   269  						},
   270  
   271  						"domain_user_password": &schema.Schema{
   272  							Type:     schema.TypeString,
   273  							Optional: true,
   274  							ForceNew: true,
   275  						},
   276  					},
   277  				},
   278  			},
   279  
   280  			"network_interface": &schema.Schema{
   281  				Type:     schema.TypeList,
   282  				Required: true,
   283  				ForceNew: true,
   284  				Elem: &schema.Resource{
   285  					Schema: map[string]*schema.Schema{
   286  						"label": &schema.Schema{
   287  							Type:     schema.TypeString,
   288  							Required: true,
   289  							ForceNew: true,
   290  						},
   291  
   292  						"ip_address": &schema.Schema{
   293  							Type:       schema.TypeString,
   294  							Optional:   true,
   295  							Computed:   true,
   296  							Deprecated: "Please use ipv4_address",
   297  						},
   298  
   299  						"subnet_mask": &schema.Schema{
   300  							Type:       schema.TypeString,
   301  							Optional:   true,
   302  							Computed:   true,
   303  							Deprecated: "Please use ipv4_prefix_length",
   304  						},
   305  
   306  						"ipv4_address": &schema.Schema{
   307  							Type:     schema.TypeString,
   308  							Optional: true,
   309  							Computed: true,
   310  						},
   311  
   312  						"ipv4_prefix_length": &schema.Schema{
   313  							Type:     schema.TypeInt,
   314  							Optional: true,
   315  							Computed: true,
   316  						},
   317  
   318  						"ipv4_gateway": &schema.Schema{
   319  							Type:     schema.TypeString,
   320  							Optional: true,
   321  							Computed: true,
   322  						},
   323  
   324  						"ipv6_address": &schema.Schema{
   325  							Type:     schema.TypeString,
   326  							Optional: true,
   327  							Computed: true,
   328  						},
   329  
   330  						"ipv6_prefix_length": &schema.Schema{
   331  							Type:     schema.TypeInt,
   332  							Optional: true,
   333  							Computed: true,
   334  						},
   335  
   336  						"ipv6_gateway": &schema.Schema{
   337  							Type:     schema.TypeString,
   338  							Optional: true,
   339  							Computed: true,
   340  						},
   341  
   342  						"adapter_type": &schema.Schema{
   343  							Type:     schema.TypeString,
   344  							Optional: true,
   345  							ForceNew: true,
   346  						},
   347  
   348  						"mac_address": &schema.Schema{
   349  							Type:     schema.TypeString,
   350  							Optional: true,
   351  							Computed: true,
   352  						},
   353  					},
   354  				},
   355  			},
   356  
   357  			"disk": &schema.Schema{
   358  				Type:     schema.TypeSet,
   359  				Required: true,
   360  				Elem: &schema.Resource{
   361  					Schema: map[string]*schema.Schema{
   362  						"uuid": &schema.Schema{
   363  							Type:     schema.TypeString,
   364  							Computed: true,
   365  						},
   366  
   367  						"key": &schema.Schema{
   368  							Type:     schema.TypeInt,
   369  							Computed: true,
   370  						},
   371  
   372  						"template": &schema.Schema{
   373  							Type:     schema.TypeString,
   374  							Optional: true,
   375  						},
   376  
   377  						"type": &schema.Schema{
   378  							Type:     schema.TypeString,
   379  							Optional: true,
   380  							Default:  "eager_zeroed",
   381  							ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
   382  								value := v.(string)
   383  								if value != "thin" && value != "eager_zeroed" && value != "lazy" {
   384  									errors = append(errors, fmt.Errorf(
   385  										"only 'thin', 'eager_zeroed', and 'lazy' are supported values for 'type'"))
   386  								}
   387  								return
   388  							},
   389  						},
   390  
   391  						"datastore": &schema.Schema{
   392  							Type:     schema.TypeString,
   393  							Optional: true,
   394  						},
   395  
   396  						"size": &schema.Schema{
   397  							Type:     schema.TypeInt,
   398  							Optional: true,
   399  						},
   400  
   401  						"name": &schema.Schema{
   402  							Type:     schema.TypeString,
   403  							Optional: true,
   404  						},
   405  
   406  						"iops": &schema.Schema{
   407  							Type:     schema.TypeInt,
   408  							Optional: true,
   409  						},
   410  
   411  						"vmdk": &schema.Schema{
   412  							// TODO: Add ValidateFunc to confirm path exists
   413  							Type:     schema.TypeString,
   414  							Optional: true,
   415  						},
   416  
   417  						"bootable": &schema.Schema{
   418  							Type:     schema.TypeBool,
   419  							Optional: true,
   420  						},
   421  
   422  						"keep_on_remove": &schema.Schema{
   423  							Type:     schema.TypeBool,
   424  							Optional: true,
   425  						},
   426  
   427  						"controller_type": &schema.Schema{
   428  							Type:     schema.TypeString,
   429  							Optional: true,
   430  							Default:  "scsi",
   431  							ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
   432  								value := v.(string)
   433  								found := false
   434  								for _, t := range DiskControllerTypes {
   435  									if t == value {
   436  										found = true
   437  									}
   438  								}
   439  								if !found {
   440  									errors = append(errors, fmt.Errorf(
   441  										"Supported values for 'controller_type' are %v", strings.Join(DiskControllerTypes, ", ")))
   442  								}
   443  								return
   444  							},
   445  						},
   446  					},
   447  				},
   448  			},
   449  
   450  			"detach_unknown_disks_on_delete": &schema.Schema{
   451  				Type:     schema.TypeBool,
   452  				Optional: true,
   453  				Default:  false,
   454  			},
   455  
   456  			"cdrom": &schema.Schema{
   457  				Type:     schema.TypeList,
   458  				Optional: true,
   459  				ForceNew: true,
   460  				Elem: &schema.Resource{
   461  					Schema: map[string]*schema.Schema{
   462  						"datastore": &schema.Schema{
   463  							Type:     schema.TypeString,
   464  							Required: true,
   465  							ForceNew: true,
   466  						},
   467  
   468  						"path": &schema.Schema{
   469  							Type:     schema.TypeString,
   470  							Required: true,
   471  							ForceNew: true,
   472  						},
   473  					},
   474  				},
   475  			},
   476  		},
   477  	}
   478  }
   479  
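         // resourceVSphereVirtualMachineUpdate applies in-place changes (vcpu, memory, disks) to an
         // existing virtual machine, powering it off and back on when the change requires a reboot.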
   480  func resourceVSphereVirtualMachineUpdate(d *schema.ResourceData, meta interface{}) error {
   481  	// flag if changes have to be applied
   482  	hasChanges := false
   483  	// flag if changes have to be done when powered off
   484  	rebootRequired := false
   485  
   486  	// make config spec
   487  	configSpec := types.VirtualMachineConfigSpec{}
   488  
   489  	if d.HasChange("vcpu") {
   490  		configSpec.NumCPUs = int32(d.Get("vcpu").(int))
   491  		hasChanges = true
   492  		rebootRequired = true
   493  	}
   494  
   495  	if d.HasChange("memory") {
   496  		configSpec.MemoryMB = int64(d.Get("memory").(int))
   497  		hasChanges = true
   498  		rebootRequired = true
   499  	}
   500  
   501  	client := meta.(*govmomi.Client)
   502  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
   503  	if err != nil {
   504  		return err
   505  	}
   506  	finder := find.NewFinder(client.Client, true)
   507  	finder = finder.SetDatacenter(dc)
   508  
   509  	vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string)))
   510  	if err != nil {
   511  		return err
   512  	}
   513  
   514  	if d.HasChange("disk") {
   515  		hasChanges = true
   516  		oldDisks, newDisks := d.GetChange("disk")
   517  		oldDiskSet := oldDisks.(*schema.Set)
   518  		newDiskSet := newDisks.(*schema.Set)
   519  
   520  		addedDisks := newDiskSet.Difference(oldDiskSet)
   521  		removedDisks := oldDiskSet.Difference(newDiskSet)
   522  
   523  		// Removed disks
   524  		for _, diskRaw := range removedDisks.List() {
   525  			if disk, ok := diskRaw.(map[string]interface{}); ok {
   526  				devices, err := vm.Device(context.TODO())
   527  				if err != nil {
   528  					return fmt.Errorf("[ERROR] Update Remove Disk - Could not get virtual device list: %v", err)
   529  				}
   530  				virtualDisk := devices.FindByKey(int32(disk["key"].(int)))
   531  
   532  				keep := false
   533  				if v, ok := disk["keep_on_remove"].(bool); ok {
   534  					keep = v
   535  				}
   536  
   537  				err = vm.RemoveDevice(context.TODO(), keep, virtualDisk)
   538  				if err != nil {
   539  					return fmt.Errorf("[ERROR] Update Remove Disk - Error removing disk: %v", err)
   540  				}
   541  			}
   542  		}
   543  		// Added disks
   544  		for _, diskRaw := range addedDisks.List() {
   545  			if disk, ok := diskRaw.(map[string]interface{}); ok {
   546  
   547  				var datastore *object.Datastore
   548  				if disk["datastore"] == "" {
   549  					datastore, err = finder.DefaultDatastore(context.TODO())
   550  					if err != nil {
    551  						return fmt.Errorf("[ERROR] Update Add Disk - Error finding datastore: %v", err)
   552  					}
   553  				} else {
   554  					datastore, err = finder.Datastore(context.TODO(), disk["datastore"].(string))
   555  					if err != nil {
   556  						log.Printf("[ERROR] Couldn't find datastore %v.  %s", disk["datastore"].(string), err)
   557  						return err
   558  					}
   559  				}
   560  
   561  				var size int64
   562  				if disk["size"] == 0 {
   563  					size = 0
   564  				} else {
   565  					size = int64(disk["size"].(int))
   566  				}
   567  				iops := int64(disk["iops"].(int))
   568  				controller_type := disk["controller_type"].(string)
   569  
    570  				var mo mo.VirtualMachine
    571  				if err := vm.Properties(context.TODO(), vm.Reference(), []string{"summary", "config"}, &mo); err != nil {
         					return fmt.Errorf("[ERROR] Update Add Disk - Could not get VM properties: %v", err)
         				}
   572  
   573  				var diskPath string
   574  				switch {
   575  				case disk["vmdk"] != "":
   576  					diskPath = disk["vmdk"].(string)
   577  				case disk["name"] != "":
   578  					snapshotFullDir := mo.Config.Files.SnapshotDirectory
   579  					split := strings.Split(snapshotFullDir, " ")
   580  					if len(split) != 2 {
    581  						return fmt.Errorf("[ERROR] resourceVSphereVirtualMachineUpdate - failed to split snapshot directory: %v", snapshotFullDir)
   582  					}
   583  					vmWorkingPath := split[1]
   584  					diskPath = vmWorkingPath + disk["name"].(string)
   585  				default:
   586  					return fmt.Errorf("[ERROR] resourceVSphereVirtualMachineUpdate - Neither vmdk path nor vmdk name was given")
   587  				}
   588  
   589  				var initType string
   590  				if disk["type"] != "" {
   591  					initType = disk["type"].(string)
   592  				} else {
   593  					initType = "thin"
   594  				}
   595  
   596  				log.Printf("[INFO] Attaching disk: %v", diskPath)
   597  				err = addHardDisk(vm, size, iops, initType, datastore, diskPath, controller_type)
   598  				if err != nil {
   599  					log.Printf("[ERROR] Add Hard Disk Failed: %v", err)
   600  					return err
   601  				}
   602  			}
   603  			if err != nil {
   604  				return err
   605  			}
   606  		}
   607  	}
   608  
   609  	// do nothing if there are no changes
   610  	if !hasChanges {
   611  		return nil
   612  	}
   613  
   614  	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
   615  
   616  	if rebootRequired {
   617  		log.Printf("[INFO] Shutting down virtual machine: %s", d.Id())
   618  
   619  		task, err := vm.PowerOff(context.TODO())
   620  		if err != nil {
   621  			return err
   622  		}
   623  
   624  		err = task.Wait(context.TODO())
   625  		if err != nil {
   626  			return err
   627  		}
   628  	}
   629  
   630  	log.Printf("[INFO] Reconfiguring virtual machine: %s", d.Id())
   631  
   632  	task, err := vm.Reconfigure(context.TODO(), configSpec)
    633  	if err != nil {
    634  		log.Printf("[ERROR] %s", err)
         		// A failed Reconfigure call returns a nil task, so bail out instead of calling Wait on it.
         		return err
    635  	}
   636  
   637  	err = task.Wait(context.TODO())
   638  	if err != nil {
   639  		log.Printf("[ERROR] %s", err)
   640  	}
   641  
   642  	if rebootRequired {
   643  		task, err = vm.PowerOn(context.TODO())
   644  		if err != nil {
   645  			return err
   646  		}
   647  
   648  		err = task.Wait(context.TODO())
   649  		if err != nil {
   650  			log.Printf("[ERROR] %s", err)
   651  		}
   652  	}
   653  
   654  	return resourceVSphereVirtualMachineRead(d, meta)
   655  }
   656  
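         // resourceVSphereVirtualMachineCreate translates the Terraform configuration into a
         // virtualMachine struct, provisions it via setupVirtualMachine, and records the VM path as the ID.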
   657  func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error {
   658  	client := meta.(*govmomi.Client)
   659  
   660  	vm := virtualMachine{
   661  		name:     d.Get("name").(string),
   662  		vcpu:     int32(d.Get("vcpu").(int)),
   663  		memoryMb: int64(d.Get("memory").(int)),
   664  		memoryAllocation: memoryAllocation{
   665  			reservation: int64(d.Get("memory_reservation").(int)),
   666  		},
   667  	}
   668  
   669  	if v, ok := d.GetOk("folder"); ok {
   670  		vm.folder = v.(string)
   671  	}
   672  
   673  	if v, ok := d.GetOk("datacenter"); ok {
   674  		vm.datacenter = v.(string)
   675  	}
   676  
   677  	if v, ok := d.GetOk("cluster"); ok {
   678  		vm.cluster = v.(string)
   679  	}
   680  
   681  	if v, ok := d.GetOk("resource_pool"); ok {
   682  		vm.resourcePool = v.(string)
   683  	}
   684  
   685  	if v, ok := d.GetOk("domain"); ok {
   686  		vm.domain = v.(string)
   687  	}
   688  
   689  	if v, ok := d.GetOk("time_zone"); ok {
   690  		vm.timeZone = v.(string)
   691  	}
   692  
   693  	if v, ok := d.GetOk("linked_clone"); ok {
   694  		vm.linkedClone = v.(bool)
   695  	}
   696  
   697  	if v, ok := d.GetOk("skip_customization"); ok {
   698  		vm.skipCustomization = v.(bool)
   699  	}
   700  
   701  	if v, ok := d.GetOk("enable_disk_uuid"); ok {
   702  		vm.enableDiskUUID = v.(bool)
   703  	}
   704  
   705  	if raw, ok := d.GetOk("dns_suffixes"); ok {
   706  		for _, v := range raw.([]interface{}) {
   707  			vm.dnsSuffixes = append(vm.dnsSuffixes, v.(string))
   708  		}
   709  	} else {
   710  		vm.dnsSuffixes = DefaultDNSSuffixes
   711  	}
   712  
   713  	if raw, ok := d.GetOk("dns_servers"); ok {
   714  		for _, v := range raw.([]interface{}) {
   715  			vm.dnsServers = append(vm.dnsServers, v.(string))
   716  		}
   717  	} else {
   718  		vm.dnsServers = DefaultDNSServers
   719  	}
   720  
   721  	if vL, ok := d.GetOk("custom_configuration_parameters"); ok {
   722  		if custom_configs, ok := vL.(map[string]interface{}); ok {
   723  			custom := make(map[string]types.AnyType)
   724  			for k, v := range custom_configs {
   725  				custom[k] = v
   726  			}
   727  			vm.customConfigurations = custom
   728  			log.Printf("[DEBUG] custom_configuration_parameters init: %v", vm.customConfigurations)
   729  		}
   730  	}
   731  
   732  	if vL, ok := d.GetOk("network_interface"); ok {
   733  		networks := make([]networkInterface, len(vL.([]interface{})))
   734  		for i, v := range vL.([]interface{}) {
   735  			network := v.(map[string]interface{})
   736  			networks[i].label = network["label"].(string)
   737  			if v, ok := network["ip_address"].(string); ok && v != "" {
   738  				networks[i].ipv4Address = v
   739  			}
   740  			if v, ok := d.GetOk("gateway"); ok {
   741  				networks[i].ipv4Gateway = v.(string)
   742  			}
   743  			if v, ok := network["subnet_mask"].(string); ok && v != "" {
   744  				ip := net.ParseIP(v).To4()
   745  				if ip != nil {
   746  					mask := net.IPv4Mask(ip[0], ip[1], ip[2], ip[3])
   747  					pl, _ := mask.Size()
   748  					networks[i].ipv4PrefixLength = pl
   749  				} else {
   750  					return fmt.Errorf("subnet_mask parameter is invalid.")
   751  				}
   752  			}
   753  			if v, ok := network["ipv4_address"].(string); ok && v != "" {
   754  				networks[i].ipv4Address = v
   755  			}
   756  			if v, ok := network["ipv4_prefix_length"].(int); ok && v != 0 {
   757  				networks[i].ipv4PrefixLength = v
   758  			}
   759  			if v, ok := network["ipv4_gateway"].(string); ok && v != "" {
   760  				networks[i].ipv4Gateway = v
   761  			}
   762  			if v, ok := network["ipv6_address"].(string); ok && v != "" {
   763  				networks[i].ipv6Address = v
   764  			}
   765  			if v, ok := network["ipv6_prefix_length"].(int); ok && v != 0 {
   766  				networks[i].ipv6PrefixLength = v
   767  			}
   768  			if v, ok := network["ipv6_gateway"].(string); ok && v != "" {
   769  				networks[i].ipv6Gateway = v
   770  			}
   771  			if v, ok := network["mac_address"].(string); ok && v != "" {
   772  				networks[i].macAddress = v
   773  			}
   774  		}
   775  		vm.networkInterfaces = networks
   776  		log.Printf("[DEBUG] network_interface init: %v", networks)
   777  	}
   778  
   779  	if vL, ok := d.GetOk("windows_opt_config"); ok {
   780  		var winOpt windowsOptConfig
   781  		custom_configs := (vL.([]interface{}))[0].(map[string]interface{})
   782  		if v, ok := custom_configs["admin_password"].(string); ok && v != "" {
   783  			winOpt.adminPassword = v
   784  		}
   785  		if v, ok := custom_configs["domain"].(string); ok && v != "" {
   786  			winOpt.domain = v
   787  		}
   788  		if v, ok := custom_configs["domain_user"].(string); ok && v != "" {
   789  			winOpt.domainUser = v
   790  		}
   791  		if v, ok := custom_configs["product_key"].(string); ok && v != "" {
   792  			winOpt.productKey = v
   793  		}
   794  		if v, ok := custom_configs["domain_user_password"].(string); ok && v != "" {
   795  			winOpt.domainUserPassword = v
   796  		}
   797  		vm.windowsOptionalConfig = winOpt
   798  		log.Printf("[DEBUG] windows config init: %v", winOpt)
   799  	}
   800  
   801  	if vL, ok := d.GetOk("disk"); ok {
   802  		if diskSet, ok := vL.(*schema.Set); ok {
   803  
   804  			disks := []hardDisk{}
   805  			for _, value := range diskSet.List() {
   806  				disk := value.(map[string]interface{})
   807  				newDisk := hardDisk{}
   808  
   809  				if v, ok := disk["template"].(string); ok && v != "" {
   810  					if v, ok := disk["name"].(string); ok && v != "" {
   811  						return fmt.Errorf("Cannot specify name of a template")
   812  					}
   813  					vm.template = v
   814  					if vm.hasBootableVmdk {
   815  						return fmt.Errorf("[ERROR] Only one bootable disk or template may be given")
   816  					}
   817  					vm.hasBootableVmdk = true
   818  				}
   819  
   820  				if v, ok := disk["type"].(string); ok && v != "" {
   821  					newDisk.initType = v
   822  				}
   823  
   824  				if v, ok := disk["datastore"].(string); ok && v != "" {
   825  					vm.datastore = v
   826  				}
   827  
   828  				if v, ok := disk["size"].(int); ok && v != 0 {
   829  					if v, ok := disk["template"].(string); ok && v != "" {
   830  						return fmt.Errorf("Cannot specify size of a template")
   831  					}
   832  
   833  					if v, ok := disk["name"].(string); ok && v != "" {
   834  						newDisk.name = v
   835  					} else {
   836  						return fmt.Errorf("[ERROR] Disk name must be provided when creating a new disk")
   837  					}
   838  
   839  					newDisk.size = int64(v)
   840  				}
   841  
   842  				if v, ok := disk["iops"].(int); ok && v != 0 {
   843  					newDisk.iops = int64(v)
   844  				}
   845  
   846  				if v, ok := disk["controller_type"].(string); ok && v != "" {
   847  					newDisk.controller = v
   848  				}
   849  
   850  				if vVmdk, ok := disk["vmdk"].(string); ok && vVmdk != "" {
   851  					if v, ok := disk["template"].(string); ok && v != "" {
   852  						return fmt.Errorf("Cannot specify a vmdk for a template")
   853  					}
    854  					if v, ok := disk["size"].(int); ok && v != 0 {
   855  						return fmt.Errorf("Cannot specify size of a vmdk")
   856  					}
   857  					if v, ok := disk["name"].(string); ok && v != "" {
   858  						return fmt.Errorf("Cannot specify name of a vmdk")
   859  					}
   860  					if vBootable, ok := disk["bootable"].(bool); ok {
   861  						if vBootable && vm.hasBootableVmdk {
   862  							return fmt.Errorf("[ERROR] Only one bootable disk or template may be given")
   863  						}
   864  						newDisk.bootable = vBootable
   865  						vm.hasBootableVmdk = vm.hasBootableVmdk || vBootable
   866  					}
   867  					newDisk.vmdkPath = vVmdk
   868  				}
   869  				// Preserves order so bootable disk is first
    870  				if newDisk.bootable || disk["template"] != "" {
   871  					disks = append([]hardDisk{newDisk}, disks...)
   872  				} else {
   873  					disks = append(disks, newDisk)
   874  				}
   875  			}
   876  			vm.hardDisks = disks
   877  			log.Printf("[DEBUG] disk init: %v", disks)
   878  		}
   879  	}
   880  
   881  	if vL, ok := d.GetOk("cdrom"); ok {
   882  		cdroms := make([]cdrom, len(vL.([]interface{})))
   883  		for i, v := range vL.([]interface{}) {
   884  			c := v.(map[string]interface{})
   885  			if v, ok := c["datastore"].(string); ok && v != "" {
   886  				cdroms[i].datastore = v
   887  			} else {
   888  				return fmt.Errorf("Datastore argument must be specified when attaching a cdrom image.")
   889  			}
   890  			if v, ok := c["path"].(string); ok && v != "" {
   891  				cdroms[i].path = v
   892  			} else {
   893  				return fmt.Errorf("Path argument must be specified when attaching a cdrom image.")
   894  			}
   895  		}
   896  		vm.cdroms = cdroms
   897  		log.Printf("[DEBUG] cdrom init: %v", cdroms)
   898  	}
   899  
   900  	err := vm.setupVirtualMachine(client)
   901  	if err != nil {
   902  		return err
   903  	}
   904  
   905  	d.SetId(vm.Path())
   906  	log.Printf("[INFO] Created virtual machine: %s", d.Id())
   907  
   908  	return resourceVSphereVirtualMachineRead(d, meta)
   909  }
   910  
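         // resourceVSphereVirtualMachineRead refreshes state (disks, network interfaces, gateways,
         // datastore, memory, CPU, UUID) from vSphere, and clears the ID if the VM no longer exists.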
   911  func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{}) error {
   912  	log.Printf("[DEBUG] virtual machine resource data: %#v", d)
   913  	client := meta.(*govmomi.Client)
   914  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
   915  	if err != nil {
   916  		return err
   917  	}
   918  	finder := find.NewFinder(client.Client, true)
   919  	finder = finder.SetDatacenter(dc)
   920  
   921  	vm, err := finder.VirtualMachine(context.TODO(), d.Id())
   922  	if err != nil {
   923  		d.SetId("")
   924  		return nil
   925  	}
   926  
   927  	state, err := vm.PowerState(context.TODO())
   928  	if err != nil {
   929  		return err
   930  	}
   931  
   932  	if state == types.VirtualMachinePowerStatePoweredOn {
   933  		// wait for interfaces to appear
   934  		log.Printf("[DEBUG] Waiting for interfaces to appear")
   935  
   936  		_, err = vm.WaitForNetIP(context.TODO(), false)
   937  		if err != nil {
   938  			return err
   939  		}
   940  
   941  		log.Printf("[DEBUG] Successfully waited for interfaces to appear")
   942  	}
   943  
   944  	var mvm mo.VirtualMachine
   945  	collector := property.DefaultCollector(client.Client)
   946  	if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"guest", "summary", "datastore", "config"}, &mvm); err != nil {
   947  		return err
   948  	}
   949  
   950  	log.Printf("[DEBUG] Datacenter - %#v", dc)
   951  	log.Printf("[DEBUG] mvm.Summary.Config - %#v", mvm.Summary.Config)
   952  	log.Printf("[DEBUG] mvm.Summary.Config - %#v", mvm.Config)
   953  	log.Printf("[DEBUG] mvm.Guest.Net - %#v", mvm.Guest.Net)
   954  
   955  	disks := make([]map[string]interface{}, 0)
   956  	templateDisk := make(map[string]interface{}, 1)
   957  	for _, device := range mvm.Config.Hardware.Device {
   958  		if vd, ok := device.(*types.VirtualDisk); ok {
   959  
   960  			virtualDevice := vd.GetVirtualDevice()
   961  
   962  			backingInfo := virtualDevice.Backing
   963  			var diskFullPath string
   964  			var diskUuid string
   965  			if v, ok := backingInfo.(*types.VirtualDiskFlatVer2BackingInfo); ok {
   966  				diskFullPath = v.FileName
   967  				diskUuid = v.Uuid
   968  			} else if v, ok := backingInfo.(*types.VirtualDiskSparseVer2BackingInfo); ok {
   969  				diskFullPath = v.FileName
   970  				diskUuid = v.Uuid
   971  			}
   972  			log.Printf("[DEBUG] resourceVSphereVirtualMachineRead - Analyzing disk: %v", diskFullPath)
   973  
   974  			// Separate datastore and path
   975  			diskFullPathSplit := strings.Split(diskFullPath, " ")
   976  			if len(diskFullPathSplit) != 2 {
   977  				return fmt.Errorf("[ERROR] Failed trying to parse disk path: %v", diskFullPath)
   978  			}
   979  			diskPath := diskFullPathSplit[1]
   980  			// Isolate filename
   981  			diskNameSplit := strings.Split(diskPath, "/")
   982  			diskName := diskNameSplit[len(diskNameSplit)-1]
   983  			// Remove possible extension
   984  			diskName = strings.Split(diskName, ".")[0]
   985  
   986  			if prevDisks, ok := d.GetOk("disk"); ok {
   987  				if prevDiskSet, ok := prevDisks.(*schema.Set); ok {
   988  					for _, v := range prevDiskSet.List() {
   989  						prevDisk := v.(map[string]interface{})
   990  
   991  						// We're guaranteed only one template disk.  Passing value directly through since templates should be immutable
   992  						if prevDisk["template"] != "" {
   993  							if len(templateDisk) == 0 {
   994  								templateDisk = prevDisk
   995  								disks = append(disks, templateDisk)
   996  								break
   997  							}
   998  						}
   999  
  1000  						// It is enforced that prevDisk["name"] should only be set in the case
  1001  						// of creating a new disk for the user.
  1002  						// size case:  name was set by user, compare parsed filename from mo.filename (without path or .vmdk extension) with name
  1003  						// vmdk case:  compare prevDisk["vmdk"] and mo.Filename
  1004  						if diskName == prevDisk["name"] || diskPath == prevDisk["vmdk"] {
  1005  
  1006  							prevDisk["key"] = virtualDevice.Key
  1007  							prevDisk["uuid"] = diskUuid
  1008  
  1009  							disks = append(disks, prevDisk)
  1010  							break
  1011  						}
  1012  					}
  1013  				}
  1014  			}
  1015  			log.Printf("[DEBUG] disks: %#v", disks)
  1016  		}
  1017  	}
  1018  	err = d.Set("disk", disks)
  1019  	if err != nil {
  1020  		return fmt.Errorf("Invalid disks to set: %#v", disks)
  1021  	}
  1022  
  1023  	networkInterfaces := make([]map[string]interface{}, 0)
  1024  	for _, v := range mvm.Guest.Net {
  1025  		if v.DeviceConfigId >= 0 {
  1026  			log.Printf("[DEBUG] v.Network - %#v", v.Network)
  1027  			networkInterface := make(map[string]interface{})
  1028  			networkInterface["label"] = v.Network
  1029  			networkInterface["mac_address"] = v.MacAddress
  1030  			for _, ip := range v.IpConfig.IpAddress {
  1031  				p := net.ParseIP(ip.IpAddress)
  1032  				if p.To4() != nil {
  1033  					log.Printf("[DEBUG] p.String - %#v", p.String())
  1034  					log.Printf("[DEBUG] ip.PrefixLength - %#v", ip.PrefixLength)
  1035  					networkInterface["ipv4_address"] = p.String()
  1036  					networkInterface["ipv4_prefix_length"] = ip.PrefixLength
  1037  				} else if p.To16() != nil {
  1038  					log.Printf("[DEBUG] p.String - %#v", p.String())
  1039  					log.Printf("[DEBUG] ip.PrefixLength - %#v", ip.PrefixLength)
  1040  					networkInterface["ipv6_address"] = p.String()
  1041  					networkInterface["ipv6_prefix_length"] = ip.PrefixLength
  1042  				}
  1043  				log.Printf("[DEBUG] networkInterface: %#v", networkInterface)
  1044  			}
  1045  			log.Printf("[DEBUG] networkInterface: %#v", networkInterface)
  1046  			networkInterfaces = append(networkInterfaces, networkInterface)
  1047  		}
  1048  	}
  1049  	if mvm.Guest.IpStack != nil {
  1050  		for _, v := range mvm.Guest.IpStack {
  1051  			if v.IpRouteConfig != nil && v.IpRouteConfig.IpRoute != nil {
  1052  				for _, route := range v.IpRouteConfig.IpRoute {
  1053  					if route.Gateway.Device != "" {
  1054  						gatewaySetting := ""
  1055  						if route.Network == "::" {
  1056  							gatewaySetting = "ipv6_gateway"
  1057  						} else if route.Network == "0.0.0.0" {
  1058  							gatewaySetting = "ipv4_gateway"
  1059  						}
  1060  						if gatewaySetting != "" {
  1061  							deviceID, err := strconv.Atoi(route.Gateway.Device)
  1062  							if len(networkInterfaces) == 1 {
  1063  								deviceID = 0
  1064  							}
  1065  							if err != nil {
  1066  								log.Printf("[WARN] error at processing %s of device id %#v: %#v", gatewaySetting, route.Gateway.Device, err)
   1067  							} else if deviceID < len(networkInterfaces) {
  1068  								log.Printf("[DEBUG] %s of device id %d: %s", gatewaySetting, deviceID, route.Gateway.IpAddress)
  1069  								networkInterfaces[deviceID][gatewaySetting] = route.Gateway.IpAddress
  1070  							}
  1071  						}
  1072  					}
  1073  				}
  1074  			}
  1075  		}
  1076  	}
  1077  	log.Printf("[DEBUG] networkInterfaces: %#v", networkInterfaces)
  1078  	err = d.Set("network_interface", networkInterfaces)
  1079  	if err != nil {
  1080  		return fmt.Errorf("Invalid network interfaces to set: %#v", networkInterfaces)
  1081  	}
  1082  
  1083  	if len(networkInterfaces) > 0 {
  1084  		if _, ok := networkInterfaces[0]["ipv4_address"]; ok {
  1085  			log.Printf("[DEBUG] ip address: %v", networkInterfaces[0]["ipv4_address"].(string))
  1086  			d.SetConnInfo(map[string]string{
  1087  				"type": "ssh",
  1088  				"host": networkInterfaces[0]["ipv4_address"].(string),
  1089  			})
  1090  		}
  1091  	}
  1092  
  1093  	var rootDatastore string
  1094  	for _, v := range mvm.Datastore {
  1095  		var md mo.Datastore
  1096  		if err := collector.RetrieveOne(context.TODO(), v, []string{"name", "parent"}, &md); err != nil {
  1097  			return err
  1098  		}
  1099  		if md.Parent.Type == "StoragePod" {
  1100  			var msp mo.StoragePod
  1101  			if err := collector.RetrieveOne(context.TODO(), *md.Parent, []string{"name"}, &msp); err != nil {
  1102  				return err
  1103  			}
  1104  			rootDatastore = msp.Name
  1105  			log.Printf("[DEBUG] %#v", msp.Name)
  1106  		} else {
  1107  			rootDatastore = md.Name
  1108  			log.Printf("[DEBUG] %#v", md.Name)
  1109  		}
  1110  		break
  1111  	}
  1112  
  1113  	d.Set("datacenter", dc)
  1114  	d.Set("memory", mvm.Summary.Config.MemorySizeMB)
  1115  	d.Set("memory_reservation", mvm.Summary.Config.MemoryReservation)
  1116  	d.Set("cpu", mvm.Summary.Config.NumCpu)
  1117  	d.Set("datastore", rootDatastore)
  1118  	d.Set("uuid", mvm.Summary.Config.Uuid)
  1119  
  1120  	return nil
  1121  }
  1122  
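         // resourceVSphereVirtualMachineDelete powers off the virtual machine, detaches any disks
         // marked keep_on_remove (and, optionally, unmanaged disks), then destroys the VM.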
  1123  func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error {
  1124  	client := meta.(*govmomi.Client)
  1125  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
  1126  	if err != nil {
  1127  		return err
  1128  	}
  1129  	finder := find.NewFinder(client.Client, true)
  1130  	finder = finder.SetDatacenter(dc)
  1131  
  1132  	vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string)))
  1133  	if err != nil {
  1134  		return err
  1135  	}
  1136  	devices, err := vm.Device(context.TODO())
  1137  	if err != nil {
  1138  		log.Printf("[DEBUG] resourceVSphereVirtualMachineDelete - Failed to get device list: %v", err)
  1139  		return err
  1140  	}
  1141  
  1142  	log.Printf("[INFO] Deleting virtual machine: %s", d.Id())
  1143  	state, err := vm.PowerState(context.TODO())
  1144  	if err != nil {
  1145  		return err
  1146  	}
  1147  
  1148  	if state == types.VirtualMachinePowerStatePoweredOn {
  1149  		task, err := vm.PowerOff(context.TODO())
  1150  		if err != nil {
  1151  			return err
  1152  		}
  1153  
  1154  		err = task.Wait(context.TODO())
  1155  		if err != nil {
  1156  			return err
  1157  		}
  1158  	}
  1159  
  1160  	// Safely eject any disks the user marked as keep_on_remove
  1161  	var diskSetList []interface{}
  1162  	if vL, ok := d.GetOk("disk"); ok {
  1163  		if diskSet, ok := vL.(*schema.Set); ok {
  1164  			diskSetList = diskSet.List()
  1165  			for _, value := range diskSetList {
  1166  				disk := value.(map[string]interface{})
  1167  
   1168  				if v, ok := disk["keep_on_remove"].(bool); ok && v {
  1169  					log.Printf("[DEBUG] not destroying %v", disk["name"])
  1170  					virtualDisk := devices.FindByKey(int32(disk["key"].(int)))
  1171  					err = vm.RemoveDevice(context.TODO(), true, virtualDisk)
  1172  					if err != nil {
   1173  						log.Printf("[ERROR] Delete - Error removing disk: %v", err)
  1174  						return err
  1175  					}
  1176  				}
  1177  			}
  1178  		}
  1179  	}
  1180  
  1181  	// Safely eject any disks that are not managed by this resource
  1182  	if v, ok := d.GetOk("detach_unknown_disks_on_delete"); ok && v.(bool) {
  1183  		var disksToRemove object.VirtualDeviceList
  1184  		for _, device := range devices {
  1185  			if devices.TypeName(device) != "VirtualDisk" {
  1186  				continue
  1187  			}
  1188  			vd := device.GetVirtualDevice()
  1189  			var skip bool
  1190  			for _, value := range diskSetList {
  1191  				disk := value.(map[string]interface{})
  1192  				if int32(disk["key"].(int)) == vd.Key {
  1193  					skip = true
  1194  					break
  1195  				}
  1196  			}
  1197  			if skip {
  1198  				continue
  1199  			}
  1200  			disksToRemove = append(disksToRemove, device)
  1201  		}
  1202  		if len(disksToRemove) != 0 {
  1203  			err = vm.RemoveDevice(context.TODO(), true, disksToRemove...)
  1204  			if err != nil {
   1205  				log.Printf("[ERROR] Delete - Error detaching unmanaged disk: %v", err)
  1206  				return err
  1207  			}
  1208  		}
  1209  	}
  1210  
  1211  	task, err := vm.Destroy(context.TODO())
  1212  	if err != nil {
  1213  		return err
  1214  	}
  1215  
  1216  	err = task.Wait(context.TODO())
  1217  	if err != nil {
  1218  		return err
  1219  	}
  1220  
  1221  	d.SetId("")
  1222  	return nil
  1223  }
  1224  
  1225  // addHardDisk adds a new Hard Disk to the VirtualMachine.
  1226  func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string, datastore *object.Datastore, diskPath string, controller_type string) error {
  1227  	devices, err := vm.Device(context.TODO())
  1228  	if err != nil {
  1229  		return err
  1230  	}
  1231  	log.Printf("[DEBUG] vm devices: %#v\n", devices)
  1232  
  1233  	var controller types.BaseVirtualController
  1234  	switch controller_type {
  1235  	case "scsi":
  1236  		controller, err = devices.FindDiskController(controller_type)
  1237  	case "scsi-lsi-parallel":
  1238  		controller = devices.PickController(&types.VirtualLsiLogicController{})
  1239  	case "scsi-buslogic":
  1240  		controller = devices.PickController(&types.VirtualBusLogicController{})
  1241  	case "scsi-paravirtual":
  1242  		controller = devices.PickController(&types.ParaVirtualSCSIController{})
  1243  	case "scsi-lsi-sas":
  1244  		controller = devices.PickController(&types.VirtualLsiLogicSASController{})
  1245  	case "ide":
  1246  		controller, err = devices.FindDiskController(controller_type)
  1247  	default:
  1248  		return fmt.Errorf("[ERROR] Unsupported disk controller provided: %v", controller_type)
  1249  	}
  1250  
  1251  	if err != nil || controller == nil {
  1252  		// Check if max number of scsi controller are already used
  1253  		diskControllers := getSCSIControllers(devices)
  1254  		if len(diskControllers) >= 4 {
  1255  			return fmt.Errorf("[ERROR] Maximum number of SCSI controllers created")
  1256  		}
  1257  
   1258  		log.Printf("[DEBUG] Couldn't find a %v controller. Creating one...", controller_type)
  1259  
  1260  		var c types.BaseVirtualDevice
  1261  		switch controller_type {
  1262  		case "scsi":
  1263  			// Create scsi controller
  1264  			c, err = devices.CreateSCSIController("scsi")
  1265  			if err != nil {
  1266  				return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err)
  1267  			}
  1268  		case "scsi-lsi-parallel":
  1269  			// Create scsi controller
  1270  			c, err = devices.CreateSCSIController("lsilogic")
  1271  			if err != nil {
  1272  				return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err)
  1273  			}
  1274  		case "scsi-buslogic":
  1275  			// Create scsi controller
  1276  			c, err = devices.CreateSCSIController("buslogic")
  1277  			if err != nil {
  1278  				return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err)
  1279  			}
  1280  		case "scsi-paravirtual":
  1281  			// Create scsi controller
  1282  			c, err = devices.CreateSCSIController("pvscsi")
  1283  			if err != nil {
  1284  				return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err)
  1285  			}
  1286  		case "scsi-lsi-sas":
  1287  			// Create scsi controller
  1288  			c, err = devices.CreateSCSIController("lsilogic-sas")
  1289  			if err != nil {
  1290  				return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err)
  1291  			}
  1292  		case "ide":
  1293  			// Create ide controller
  1294  			c, err = devices.CreateIDEController()
  1295  			if err != nil {
  1296  				return fmt.Errorf("[ERROR] Failed creating IDE controller: %v", err)
  1297  			}
  1298  		default:
  1299  			return fmt.Errorf("[ERROR] Unsupported disk controller provided: %v", controller_type)
  1300  		}
  1301  
  1302  		vm.AddDevice(context.TODO(), c)
  1303  		// Update our devices list
  1304  		devices, err := vm.Device(context.TODO())
  1305  		if err != nil {
  1306  			return err
  1307  		}
  1308  		controller = devices.PickController(c.(types.BaseVirtualController))
  1309  		if controller == nil {
  1310  			log.Printf("[ERROR] Could not find the new %v controller", controller_type)
  1311  			return fmt.Errorf("Could not find the new %v controller", controller_type)
  1312  		}
  1313  	}
  1314  
  1315  	log.Printf("[DEBUG] disk controller: %#v\n", controller)
  1316  
  1317  	// TODO Check if diskPath & datastore exist
    318  	// A disk path is required here; qualify it with the datastore so CreateDisk() receives a full datastore path.
    319  	if diskPath == "" {
    320  		return fmt.Errorf("[ERROR] addHardDisk - No path provided")
  1321  	} else {
  1322  		diskPath = datastore.Path(diskPath)
  1323  	}
  1324  	log.Printf("[DEBUG] addHardDisk - diskPath: %v", diskPath)
  1325  	disk := devices.CreateDisk(controller, datastore.Reference(), diskPath)
  1326  
  1327  	if strings.Contains(controller_type, "scsi") {
  1328  		unitNumber, err := getNextUnitNumber(devices, controller)
  1329  		if err != nil {
  1330  			return err
  1331  		}
  1332  		*disk.UnitNumber = unitNumber
  1333  	}
  1334  
  1335  	existing := devices.SelectByBackingInfo(disk.Backing)
  1336  	log.Printf("[DEBUG] disk: %#v\n", disk)
  1337  
  1338  	if len(existing) == 0 {
  1339  		disk.CapacityInKB = int64(size * 1024 * 1024)
  1340  		if iops != 0 {
  1341  			disk.StorageIOAllocation = &types.StorageIOAllocationInfo{
  1342  				Limit: iops,
  1343  			}
  1344  		}
  1345  		backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
  1346  
  1347  		if diskType == "eager_zeroed" {
  1348  			// eager zeroed thick virtual disk
  1349  			backing.ThinProvisioned = types.NewBool(false)
  1350  			backing.EagerlyScrub = types.NewBool(true)
  1351  		} else if diskType == "lazy" {
  1352  			// lazy zeroed thick virtual disk
  1353  			backing.ThinProvisioned = types.NewBool(false)
  1354  			backing.EagerlyScrub = types.NewBool(false)
  1355  		} else if diskType == "thin" {
  1356  			// thin provisioned virtual disk
  1357  			backing.ThinProvisioned = types.NewBool(true)
  1358  		}
  1359  
  1360  		log.Printf("[DEBUG] addHardDisk: %#v\n", disk)
  1361  		log.Printf("[DEBUG] addHardDisk capacity: %#v\n", disk.CapacityInKB)
  1362  
  1363  		return vm.AddDevice(context.TODO(), disk)
  1364  	} else {
  1365  		log.Printf("[DEBUG] addHardDisk: Disk already present.\n")
  1366  
  1367  		return nil
  1368  	}
  1369  }
  1370  
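         // getSCSIControllers returns the SCSI controllers (of any supported flavor) present in the device list.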
  1371  func getSCSIControllers(vmDevices object.VirtualDeviceList) []*types.VirtualController {
  1372  	// get virtual scsi controllers of all supported types
  1373  	var scsiControllers []*types.VirtualController
  1374  	for _, device := range vmDevices {
  1375  		devType := vmDevices.Type(device)
  1376  		switch devType {
  1377  		case "scsi", "lsilogic", "buslogic", "pvscsi", "lsilogic-sas":
  1378  			if c, ok := device.(types.BaseVirtualController); ok {
  1379  				scsiControllers = append(scsiControllers, c.GetVirtualController())
  1380  			}
  1381  		}
  1382  	}
  1383  	return scsiControllers
  1384  }
  1385  
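         // getNextUnitNumber returns the first free unit number on the given controller.
         // Unit 7 is always treated as taken because that SCSI ID is reserved for the controller itself.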
  1386  func getNextUnitNumber(devices object.VirtualDeviceList, c types.BaseVirtualController) (int32, error) {
  1387  	key := c.GetVirtualController().Key
  1388  
  1389  	var unitNumbers [16]bool
  1390  	unitNumbers[7] = true
  1391  
  1392  	for _, device := range devices {
  1393  		d := device.GetVirtualDevice()
  1394  
  1395  		if d.ControllerKey == key {
  1396  			if d.UnitNumber != nil {
  1397  				unitNumbers[*d.UnitNumber] = true
  1398  			}
  1399  		}
  1400  	}
  1401  	for i, taken := range unitNumbers {
  1402  		if !taken {
  1403  			return int32(i), nil
  1404  		}
  1405  	}
  1406  	return -1, fmt.Errorf("[ERROR] getNextUnitNumber - controller is full")
  1407  }
  1408  
  1409  // addCdrom adds a new virtual cdrom drive to the VirtualMachine and attaches an image (ISO) to it from a datastore path.
  1410  func addCdrom(client *govmomi.Client, vm *object.VirtualMachine, datacenter *object.Datacenter, datastore, path string) error {
  1411  	devices, err := vm.Device(context.TODO())
  1412  	if err != nil {
  1413  		return err
  1414  	}
  1415  	log.Printf("[DEBUG] vm devices: %#v", devices)
  1416  
  1417  	var controller *types.VirtualIDEController
  1418  	controller, err = devices.FindIDEController("")
  1419  	if err != nil {
   1420  		log.Printf("[DEBUG] Couldn't find an IDE controller. Creating one...")
  1421  
  1422  		var c types.BaseVirtualDevice
  1423  		c, err := devices.CreateIDEController()
  1424  		if err != nil {
  1425  			return fmt.Errorf("[ERROR] Failed creating IDE controller: %v", err)
  1426  		}
  1427  
  1428  		if v, ok := c.(*types.VirtualIDEController); ok {
  1429  			controller = v
  1430  		} else {
  1431  			return fmt.Errorf("[ERROR] Controller type could not be asserted")
  1432  		}
  1433  		vm.AddDevice(context.TODO(), c)
  1434  		// Update our devices list
  1435  		devices, err := vm.Device(context.TODO())
  1436  		if err != nil {
  1437  			return err
  1438  		}
  1439  		controller, err = devices.FindIDEController("")
  1440  		if err != nil {
  1441  			log.Printf("[ERROR] Could not find the new disk IDE controller: %v", err)
  1442  			return err
  1443  		}
  1444  	}
  1445  	log.Printf("[DEBUG] ide controller: %#v", controller)
  1446  
  1447  	c, err := devices.CreateCdrom(controller)
  1448  	if err != nil {
  1449  		return err
  1450  	}
  1451  
  1452  	finder := find.NewFinder(client.Client, true)
  1453  	finder = finder.SetDatacenter(datacenter)
  1454  	ds, err := getDatastore(finder, datastore)
  1455  	if err != nil {
  1456  		return err
  1457  	}
  1458  
  1459  	c = devices.InsertIso(c, ds.Path(path))
  1460  	log.Printf("[DEBUG] addCdrom: %#v", c)
  1461  
  1462  	return vm.AddDevice(context.TODO(), c)
  1463  }
  1464  
  1465  // buildNetworkDevice builds VirtualDeviceConfigSpec for Network Device.
  1466  func buildNetworkDevice(f *find.Finder, label, adapterType string, macAddress string) (*types.VirtualDeviceConfigSpec, error) {
  1467  	network, err := f.Network(context.TODO(), "*"+label)
  1468  	if err != nil {
  1469  		return nil, err
  1470  	}
  1471  
  1472  	backing, err := network.EthernetCardBackingInfo(context.TODO())
  1473  	if err != nil {
  1474  		return nil, err
  1475  	}
  1476  
  1477  	var address_type string
  1478  	if macAddress == "" {
  1479  		address_type = string(types.VirtualEthernetCardMacTypeGenerated)
  1480  	} else {
  1481  		address_type = string(types.VirtualEthernetCardMacTypeManual)
  1482  	}
  1483  
  1484  	if adapterType == "vmxnet3" {
  1485  		return &types.VirtualDeviceConfigSpec{
  1486  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
  1487  			Device: &types.VirtualVmxnet3{
  1488  				VirtualVmxnet: types.VirtualVmxnet{
  1489  					VirtualEthernetCard: types.VirtualEthernetCard{
  1490  						VirtualDevice: types.VirtualDevice{
  1491  							Key:     -1,
  1492  							Backing: backing,
  1493  						},
  1494  						AddressType: address_type,
  1495  						MacAddress:  macAddress,
  1496  					},
  1497  				},
  1498  			},
  1499  		}, nil
  1500  	} else if adapterType == "e1000" {
  1501  		return &types.VirtualDeviceConfigSpec{
  1502  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
  1503  			Device: &types.VirtualE1000{
  1504  				VirtualEthernetCard: types.VirtualEthernetCard{
  1505  					VirtualDevice: types.VirtualDevice{
  1506  						Key:     -1,
  1507  						Backing: backing,
  1508  					},
  1509  					AddressType: address_type,
  1510  					MacAddress:  macAddress,
  1511  				},
  1512  			},
  1513  		}, nil
  1514  	} else {
  1515  		return nil, fmt.Errorf("Invalid network adapter type.")
  1516  	}
  1517  }
  1518  
  1519  // buildVMRelocateSpec builds VirtualMachineRelocateSpec to set a place for a new VirtualMachine.
  1520  func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine, linkedClone bool, initType string) (types.VirtualMachineRelocateSpec, error) {
  1521  	var key int32
  1522  	var moveType string
  1523  	if linkedClone {
  1524  		moveType = "createNewChildDiskBacking"
  1525  	} else {
  1526  		moveType = "moveAllDiskBackingsAndDisallowSharing"
  1527  	}
  1528  	log.Printf("[DEBUG] relocate type: [%s]", moveType)
  1529  
  1530  	devices, err := vm.Device(context.TODO())
  1531  	if err != nil {
  1532  		return types.VirtualMachineRelocateSpec{}, err
  1533  	}
  1534  	for _, d := range devices {
  1535  		if devices.Type(d) == "disk" {
  1536  			key = int32(d.GetVirtualDevice().Key)
  1537  		}
  1538  	}
  1539  
  1540  	isThin := initType == "thin"
  1541  	eagerScrub := initType == "eager_zeroed"
  1542  	rpr := rp.Reference()
  1543  	dsr := ds.Reference()
  1544  	return types.VirtualMachineRelocateSpec{
  1545  		Datastore:    &dsr,
  1546  		Pool:         &rpr,
  1547  		DiskMoveType: moveType,
  1548  		Disk: []types.VirtualMachineRelocateSpecDiskLocator{
  1549  			{
  1550  				Datastore: dsr,
  1551  				DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{
  1552  					DiskMode:        "persistent",
  1553  					ThinProvisioned: types.NewBool(isThin),
  1554  					EagerlyScrub:    types.NewBool(eagerScrub),
  1555  				},
  1556  				DiskId: key,
  1557  			},
  1558  		},
  1559  	}, nil
  1560  }
  1561  
  1562  // getDatastoreObject gets datastore object.
  1563  func getDatastoreObject(client *govmomi.Client, f *object.DatacenterFolders, name string) (types.ManagedObjectReference, error) {
  1564  	s := object.NewSearchIndex(client.Client)
  1565  	ref, err := s.FindChild(context.TODO(), f.DatastoreFolder, name)
  1566  	if err != nil {
  1567  		return types.ManagedObjectReference{}, err
  1568  	}
  1569  	if ref == nil {
  1570  		return types.ManagedObjectReference{}, fmt.Errorf("Datastore '%s' not found.", name)
  1571  	}
  1572  	log.Printf("[DEBUG] getDatastoreObject: reference: %#v", ref)
  1573  	return ref.Reference(), nil
  1574  }
  1575  
  1576  // buildStoragePlacementSpecCreate builds StoragePlacementSpec for create action.
  1577  func buildStoragePlacementSpecCreate(f *object.DatacenterFolders, rp *object.ResourcePool, storagePod object.StoragePod, configSpec types.VirtualMachineConfigSpec) types.StoragePlacementSpec {
  1578  	vmfr := f.VmFolder.Reference()
  1579  	rpr := rp.Reference()
  1580  	spr := storagePod.Reference()
  1581  
  1582  	sps := types.StoragePlacementSpec{
  1583  		Type:       "create",
  1584  		ConfigSpec: &configSpec,
  1585  		PodSelectionSpec: types.StorageDrsPodSelectionSpec{
  1586  			StoragePod: &spr,
  1587  		},
  1588  		Folder:       &vmfr,
  1589  		ResourcePool: &rpr,
  1590  	}
  1591  	log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)
  1592  	return sps
  1593  }
  1594  
  1595  // buildStoragePlacementSpecClone builds StoragePlacementSpec for clone action.
  1596  func buildStoragePlacementSpecClone(c *govmomi.Client, f *object.DatacenterFolders, vm *object.VirtualMachine, rp *object.ResourcePool, storagePod object.StoragePod) types.StoragePlacementSpec {
  1597  	vmr := vm.Reference()
  1598  	vmfr := f.VmFolder.Reference()
  1599  	rpr := rp.Reference()
  1600  	spr := storagePod.Reference()
  1601  
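	// Look up the source VM's current datastore and disk key for the placement spec.
	// Lookup failures are swallowed here and surface only as an empty
	// StoragePlacementSpec, which fails later when recommendations are requested.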
  1602  	var o mo.VirtualMachine
  1603  	err := vm.Properties(context.TODO(), vmr, []string{"datastore"}, &o)
  1604  	if err != nil {
  1605  		return types.StoragePlacementSpec{}
  1606  	}
  1607  	ds := object.NewDatastore(c.Client, o.Datastore[0])
  1608  	log.Printf("[DEBUG] findDatastore: datastore: %#v\n", ds)
  1609  
  1610  	devices, err := vm.Device(context.TODO())
  1611  	if err != nil {
  1612  		return types.StoragePlacementSpec{}
  1613  	}
  1614  
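	// As in buildVMRelocateSpec, use the key of the VM's (last) virtual disk as the
	// DiskId for the clone's disk locator.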
  1615  	var key int32
  1616  	for _, d := range devices.SelectByType((*types.VirtualDisk)(nil)) {
  1617  		key = int32(d.GetVirtualDevice().Key)
  1618  		log.Printf("[DEBUG] findDatastore: virtual devices: %#v\n", d.GetVirtualDevice())
  1619  	}
  1620  
  1621  	sps := types.StoragePlacementSpec{
  1622  		Type: "clone",
  1623  		Vm:   &vmr,
  1624  		PodSelectionSpec: types.StorageDrsPodSelectionSpec{
  1625  			StoragePod: &spr,
  1626  		},
  1627  		CloneSpec: &types.VirtualMachineCloneSpec{
  1628  			Location: types.VirtualMachineRelocateSpec{
  1629  				Disk: []types.VirtualMachineRelocateSpecDiskLocator{
  1630  					{
  1631  						Datastore:       ds.Reference(),
  1632  						DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{},
  1633  						DiskId:          key,
  1634  					},
  1635  				},
  1636  				Pool: &rpr,
  1637  			},
  1638  			PowerOn:  false,
  1639  			Template: false,
  1640  		},
  1641  		CloneName: "dummy",
  1642  		Folder:    &vmfr,
  1643  	}
  1644  	return sps
  1645  }
  1646  
// findDatastore asks Storage DRS for placement recommendations for the given
// StoragePlacementSpec and returns the datastore from the first recommended action.
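//
// Illustrative use (a sketch only; client, dcFolders, resourcePool, storagePod and
// configSpec stand for values obtained elsewhere in this file):
//
//	sps := buildStoragePlacementSpecCreate(dcFolders, resourcePool, storagePod, configSpec)
//	ds, err := findDatastore(client, sps)
//	if err != nil {
//		return err
//	}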
  1648  func findDatastore(c *govmomi.Client, sps types.StoragePlacementSpec) (*object.Datastore, error) {
  1649  	var datastore *object.Datastore
  1650  	log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)
  1651  
  1652  	srm := object.NewStorageResourceManager(c.Client)
  1653  	rds, err := srm.RecommendDatastores(context.TODO(), sps)
  1654  	if err != nil {
  1655  		return nil, err
  1656  	}
  1657  	log.Printf("[DEBUG] findDatastore: recommendDatastores: %#v\n", rds)
  1658  
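	// Storage DRS returns a list of recommendations; use the destination of the
	// first recommended placement action.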
	if len(rds.Recommendations) == 0 || len(rds.Recommendations[0].Action) == 0 {
		return nil, fmt.Errorf("No datastore recommendations returned for storage placement spec")
	}
	spa := rds.Recommendations[0].Action[0].(*types.StoragePlacementAction)
  1660  	datastore = object.NewDatastore(c.Client, spa.Destination)
  1661  	log.Printf("[DEBUG] findDatastore: datastore: %#v", datastore)
  1662  
  1663  	return datastore, nil
  1664  }
  1665  
  1666  // createCdroms is a helper function to attach virtual cdrom devices (and their attached disk images) to a virtual IDE controller.
  1667  func createCdroms(client *govmomi.Client, vm *object.VirtualMachine, datacenter *object.Datacenter, cdroms []cdrom) error {
  1668  	log.Printf("[DEBUG] add cdroms: %v", cdroms)
  1669  	for _, cd := range cdroms {
  1670  		log.Printf("[DEBUG] add cdrom (datastore): %v", cd.datastore)
  1671  		log.Printf("[DEBUG] add cdrom (cd path): %v", cd.path)
  1672  		err := addCdrom(client, vm, datacenter, cd.datastore, cd.path)
  1673  		if err != nil {
  1674  			return err
  1675  		}
  1676  	}
  1677  
  1678  	return nil
  1679  }
  1680  
  1681  func (vm *virtualMachine) setupVirtualMachine(c *govmomi.Client) error {
	dc, err := getDatacenter(c, vm.datacenter)
	if err != nil {
  1685  		return err
  1686  	}
  1687  	finder := find.NewFinder(c.Client, true)
  1688  	finder = finder.SetDatacenter(dc)
  1689  
  1690  	var template *object.VirtualMachine
  1691  	var template_mo mo.VirtualMachine
  1692  	var vm_mo mo.VirtualMachine
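	// When cloning, resolve the source template and fetch the properties needed later:
	// its snapshot (for linked clones), guest ID (to pick the customization type), and
	// tools status.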
  1693  	if vm.template != "" {
  1694  		template, err = finder.VirtualMachine(context.TODO(), vm.template)
  1695  		if err != nil {
  1696  			return err
  1697  		}
  1698  		log.Printf("[DEBUG] template: %#v", template)
  1699  
  1700  		err = template.Properties(context.TODO(), template.Reference(), []string{"parent", "config.template", "config.guestId", "resourcePool", "snapshot", "guest.toolsVersionStatus2", "config.guestFullName"}, &template_mo)
  1701  		if err != nil {
  1702  			return err
  1703  		}
  1704  	}
  1705  
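	// Resolve the resource pool: an explicit resource_pool wins, otherwise the named
	// cluster's root "Resources" pool, otherwise the datacenter's default pool.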
  1706  	var resourcePool *object.ResourcePool
  1707  	if vm.resourcePool == "" {
  1708  		if vm.cluster == "" {
  1709  			resourcePool, err = finder.DefaultResourcePool(context.TODO())
  1710  			if err != nil {
  1711  				return err
  1712  			}
  1713  		} else {
  1714  			resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
  1715  			if err != nil {
  1716  				return err
  1717  			}
  1718  		}
  1719  	} else {
  1720  		resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
  1721  		if err != nil {
  1722  			return err
  1723  		}
  1724  	}
  1725  	log.Printf("[DEBUG] resource pool: %#v", resourcePool)
  1726  
  1727  	dcFolders, err := dc.Folders(context.TODO())
  1728  	if err != nil {
  1729  		return err
  1730  	}
  1731  	log.Printf("[DEBUG] folder: %#v", vm.folder)
  1732  
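	// Place the VM in the requested folder, resolved by inventory path under
	// <datacenter>/vm; fall back to the datacenter's root VM folder when no folder
	// is configured.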
  1733  	folder := dcFolders.VmFolder
  1734  	if len(vm.folder) > 0 {
  1735  		si := object.NewSearchIndex(c.Client)
  1736  		folderRef, err := si.FindByInventoryPath(
  1737  			context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder))
  1738  		if err != nil {
  1739  			return fmt.Errorf("Error reading folder %s: %s", vm.folder, err)
  1740  		} else if folderRef == nil {
  1741  			return fmt.Errorf("Cannot find folder %s", vm.folder)
  1742  		} else {
  1743  			folder = folderRef.(*object.Folder)
  1744  		}
  1745  	}
  1746  
  1747  	// make config spec
  1748  	configSpec := types.VirtualMachineConfigSpec{
  1749  		Name:              vm.name,
  1750  		NumCPUs:           vm.vcpu,
  1751  		NumCoresPerSocket: 1,
  1752  		MemoryMB:          vm.memoryMb,
  1753  		MemoryAllocation: &types.ResourceAllocationInfo{
  1754  			Reservation: vm.memoryAllocation.reservation,
  1755  		},
  1756  		Flags: &types.VirtualMachineFlagInfo{
  1757  			DiskUuidEnabled: &vm.enableDiskUUID,
  1758  		},
  1759  	}
  1760  	if vm.template == "" {
  1761  		configSpec.GuestId = "otherLinux64Guest"
  1762  	}
  1763  	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
  1764  
  1765  	// make ExtraConfig
  1766  	log.Printf("[DEBUG] virtual machine Extra Config spec start")
  1767  	if len(vm.customConfigurations) > 0 {
  1768  		var ov []types.BaseOptionValue
  1769  		for k, v := range vm.customConfigurations {
  1770  			key := k
  1771  			value := v
  1772  			o := types.OptionValue{
  1773  				Key:   key,
  1774  				Value: &value,
  1775  			}
  1776  			log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k, v)
  1777  			ov = append(ov, &o)
  1778  		}
  1779  		configSpec.ExtraConfig = ov
  1780  		log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
  1781  	}
  1782  
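	// Resolve the datastore. The finder handles plain datastores; when that lookup
	// fails, fall back to a search-index lookup so that datastore clusters
	// (StoragePod) can be resolved through a Storage DRS recommendation.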
  1783  	var datastore *object.Datastore
  1784  	if vm.datastore == "" {
  1785  		datastore, err = finder.DefaultDatastore(context.TODO())
  1786  		if err != nil {
  1787  			return err
  1788  		}
  1789  	} else {
  1790  		datastore, err = finder.Datastore(context.TODO(), vm.datastore)
  1791  		if err != nil {
  1792  			// TODO: datastore cluster support in govmomi finder function
  1793  			d, err := getDatastoreObject(c, dcFolders, vm.datastore)
  1794  			if err != nil {
  1795  				return err
  1796  			}
  1797  
  1798  			if d.Type == "StoragePod" {
  1799  				sp := object.StoragePod{
  1800  					Folder: object.NewFolder(c.Client, d),
  1801  				}
  1802  
  1803  				var sps types.StoragePlacementSpec
  1804  				if vm.template != "" {
  1805  					sps = buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp)
  1806  				} else {
  1807  					sps = buildStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec)
  1808  				}
  1809  
  1810  				datastore, err = findDatastore(c, sps)
  1811  				if err != nil {
  1812  					return err
  1813  				}
  1814  			} else {
  1815  				datastore = object.NewDatastore(c.Client, d)
  1816  			}
  1817  		}
  1818  	}
  1819  
  1820  	log.Printf("[DEBUG] datastore: %#v", datastore)
  1821  
  1822  	// network
  1823  	networkDevices := []types.BaseVirtualDeviceConfigSpec{}
  1824  	networkConfigs := []types.CustomizationAdapterMapping{}
  1825  	for _, network := range vm.networkInterfaces {
  1826  		// network device
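		// The adapter type is currently hard-coded here: e1000 when creating a VM
		// from scratch, vmxnet3 when cloning from a template.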
  1827  		var networkDeviceType string
  1828  		if vm.template == "" {
  1829  			networkDeviceType = "e1000"
  1830  		} else {
  1831  			networkDeviceType = "vmxnet3"
  1832  		}
  1833  		nd, err := buildNetworkDevice(finder, network.label, networkDeviceType, network.macAddress)
  1834  		if err != nil {
  1835  			return err
  1836  		}
  1837  		log.Printf("[DEBUG] network device: %+v", nd.Device)
  1838  		networkDevices = append(networkDevices, nd)
  1839  
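		// Guest customization only applies to clones, so NIC customization mappings
		// are built only when a template is used.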
  1840  		if vm.template != "" {
  1841  			var ipSetting types.CustomizationIPSettings
  1842  			if network.ipv4Address == "" {
  1843  				ipSetting.Ip = &types.CustomizationDhcpIpGenerator{}
  1844  			} else {
  1845  				if network.ipv4PrefixLength == 0 {
					return fmt.Errorf("Error: ipv4_prefix_length argument must be set when ipv4_address is set.")
  1847  				}
  1848  				m := net.CIDRMask(network.ipv4PrefixLength, 32)
  1849  				sm := net.IPv4(m[0], m[1], m[2], m[3])
  1850  				subnetMask := sm.String()
  1851  				log.Printf("[DEBUG] ipv4 gateway: %v\n", network.ipv4Gateway)
  1852  				log.Printf("[DEBUG] ipv4 address: %v\n", network.ipv4Address)
  1853  				log.Printf("[DEBUG] ipv4 prefix length: %v\n", network.ipv4PrefixLength)
  1854  				log.Printf("[DEBUG] ipv4 subnet mask: %v\n", subnetMask)
  1855  				ipSetting.Gateway = []string{
  1856  					network.ipv4Gateway,
  1857  				}
  1858  				ipSetting.Ip = &types.CustomizationFixedIp{
  1859  					IpAddress: network.ipv4Address,
  1860  				}
  1861  				ipSetting.SubnetMask = subnetMask
  1862  			}
  1863  
  1864  			ipv6Spec := &types.CustomizationIPSettingsIpV6AddressSpec{}
  1865  			if network.ipv6Address == "" {
  1866  				ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
  1867  					&types.CustomizationDhcpIpV6Generator{},
  1868  				}
  1869  			} else {
  1870  				log.Printf("[DEBUG] ipv6 gateway: %v\n", network.ipv6Gateway)
  1871  				log.Printf("[DEBUG] ipv6 address: %v\n", network.ipv6Address)
  1872  				log.Printf("[DEBUG] ipv6 prefix length: %v\n", network.ipv6PrefixLength)
  1873  
  1874  				ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
  1875  					&types.CustomizationFixedIpV6{
  1876  						IpAddress:  network.ipv6Address,
  1877  						SubnetMask: int32(network.ipv6PrefixLength),
  1878  					},
  1879  				}
  1880  				ipv6Spec.Gateway = []string{network.ipv6Gateway}
  1881  			}
  1882  			ipSetting.IpV6Spec = ipv6Spec
  1883  
  1884  			// network config
  1885  			config := types.CustomizationAdapterMapping{
  1886  				Adapter: ipSetting,
  1887  			}
  1888  			networkConfigs = append(networkConfigs, config)
  1889  		}
  1890  	}
  1891  	log.Printf("[DEBUG] network devices: %#v", networkDevices)
  1892  	log.Printf("[DEBUG] network configs: %#v", networkConfigs)
  1893  
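	// Either create the VM from scratch (no template configured) or clone the
	// template; both branches yield a task whose completion is awaited below.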
  1894  	var task *object.Task
  1895  	if vm.template == "" {
  1896  		var mds mo.Datastore
  1897  		if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil {
  1898  			return err
  1899  		}
  1900  		log.Printf("[DEBUG] datastore: %#v", mds.Name)
		scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi")
		if err != nil {
			return err
		}
  1905  
  1906  		configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{
  1907  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
  1908  			Device:    scsi,
  1909  		})
  1910  
  1911  		configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)}
  1912  
		task, err = folder.CreateVM(context.TODO(), configSpec, resourcePool, nil)
		if err != nil {
			return err
		}

		err = task.Wait(context.TODO())
		if err != nil {
			return err
		}
  1922  
  1923  	} else {
  1924  
  1925  		relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template, vm.linkedClone, vm.hardDisks[0].initType)
  1926  		if err != nil {
  1927  			return err
  1928  		}
  1929  
  1930  		log.Printf("[DEBUG] relocate spec: %v", relocateSpec)
  1931  
  1932  		// make vm clone spec
  1933  		cloneSpec := types.VirtualMachineCloneSpec{
  1934  			Location: relocateSpec,
  1935  			Template: false,
  1936  			Config:   &configSpec,
  1937  			PowerOn:  false,
  1938  		}
  1939  		if vm.linkedClone {
  1940  			if template_mo.Snapshot == nil {
  1941  				return fmt.Errorf("`linkedClone=true`, but image VM has no snapshots")
  1942  			}
  1943  			cloneSpec.Snapshot = template_mo.Snapshot.CurrentSnapshot
  1944  		}
  1945  		log.Printf("[DEBUG] clone spec: %v", cloneSpec)
  1946  
  1947  		task, err = template.Clone(context.TODO(), folder, vm.name, cloneSpec)
  1948  		if err != nil {
  1949  			return err
  1950  		}
  1951  	}
  1952  
	err = task.Wait(context.TODO())
	if err != nil {
		return err
	}
  1957  
  1958  	newVM, err := finder.VirtualMachine(context.TODO(), vm.Path())
  1959  	if err != nil {
  1960  		return err
  1961  	}
  1962  	log.Printf("[DEBUG] new vm: %v", newVM)
  1963  
  1964  	devices, err := newVM.Device(context.TODO())
  1965  	if err != nil {
		log.Printf("[DEBUG] Cannot read the new virtual machine's devices")
  1967  		return err
  1968  	}
  1969  
  1970  	for _, dvc := range devices {
  1971  		// Issue 3559/3560: Delete all ethernet devices to add the correct ones later
  1972  		if devices.Type(dvc) == "ethernet" {
  1973  			err := newVM.RemoveDevice(context.TODO(), false, dvc)
  1974  			if err != nil {
  1975  				return err
  1976  			}
  1977  		}
  1978  	}
  1979  	// Add Network devices
  1980  	for _, dvc := range networkDevices {
  1981  		err := newVM.AddDevice(
  1982  			context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device)
  1983  		if err != nil {
  1984  			return err
  1985  		}
  1986  	}
  1987  
  1988  	// Create the cdroms if needed.
  1989  	if err := createCdroms(c, newVM, dc, vm.cdroms); err != nil {
  1990  		return err
  1991  	}
  1992  
	err = newVM.Properties(context.TODO(), newVM.Reference(), []string{"summary", "config"}, &vm_mo)
	if err != nil {
		return err
	}
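	// A clone already carries the template's first disk, so start attaching the
	// additional disks at index 1 in that case.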
  1994  	firstDisk := 0
  1995  	if vm.template != "" {
  1996  		firstDisk++
  1997  	}
  1998  	for i := firstDisk; i < len(vm.hardDisks); i++ {
  1999  		log.Printf("[DEBUG] disk index: %v", i)
  2000  
  2001  		var diskPath string
  2002  		switch {
  2003  		case vm.hardDisks[i].vmdkPath != "":
  2004  			diskPath = vm.hardDisks[i].vmdkPath
  2005  		case vm.hardDisks[i].name != "":
  2006  			snapshotFullDir := vm_mo.Config.Files.SnapshotDirectory
  2007  			split := strings.Split(snapshotFullDir, " ")
  2008  			if len(split) != 2 {
  2009  				return fmt.Errorf("[ERROR] setupVirtualMachine - failed to split snapshot directory: %v", snapshotFullDir)
  2010  			}
  2011  			vmWorkingPath := split[1]
  2012  			diskPath = vmWorkingPath + vm.hardDisks[i].name
  2013  		default:
  2014  			return fmt.Errorf("[ERROR] setupVirtualMachine - Neither vmdk path nor vmdk name was given: %#v", vm.hardDisks[i])
  2015  		}
		err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, diskPath, vm.hardDisks[i].controller)
		if err != nil {
			// Retry the add once before giving up; if the retry succeeds, keep going
			// instead of returning the first error.
			err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, diskPath, vm.hardDisks[i].controller)
			if err != nil {
				return err
			}
		}
  2024  	}
  2025  
  2026  	if vm.skipCustomization || vm.template == "" {
  2027  		log.Printf("[DEBUG] VM customization skipped")
  2028  	} else {
  2029  		var identity_options types.BaseCustomizationIdentitySettings
  2030  		if strings.HasPrefix(template_mo.Config.GuestId, "win") {
  2031  			var timeZone int
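			// Windows customization takes a numeric sysprep time zone index rather than
			// an IANA name, so the provider's "Etc/UTC" default is mapped to 085.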
  2032  			if vm.timeZone == "Etc/UTC" {
  2033  				vm.timeZone = "085"
  2034  			}
  2035  			timeZone, err := strconv.Atoi(vm.timeZone)
  2036  			if err != nil {
  2037  				return fmt.Errorf("Error converting TimeZone: %s", err)
  2038  			}
  2039  
  2040  			guiUnattended := types.CustomizationGuiUnattended{
  2041  				AutoLogon:      false,
  2042  				AutoLogonCount: 1,
  2043  				TimeZone:       int32(timeZone),
  2044  			}
  2045  
  2046  			customIdentification := types.CustomizationIdentification{}
  2047  
  2048  			userData := types.CustomizationUserData{
  2049  				ComputerName: &types.CustomizationFixedName{
  2050  					Name: strings.Split(vm.name, ".")[0],
  2051  				},
  2052  				ProductId: vm.windowsOptionalConfig.productKey,
  2053  				FullName:  "terraform",
  2054  				OrgName:   "terraform",
  2055  			}
  2056  
  2057  			if vm.windowsOptionalConfig.domainUserPassword != "" && vm.windowsOptionalConfig.domainUser != "" && vm.windowsOptionalConfig.domain != "" {
  2058  				customIdentification.DomainAdminPassword = &types.CustomizationPassword{
  2059  					PlainText: true,
  2060  					Value:     vm.windowsOptionalConfig.domainUserPassword,
  2061  				}
  2062  				customIdentification.DomainAdmin = vm.windowsOptionalConfig.domainUser
  2063  				customIdentification.JoinDomain = vm.windowsOptionalConfig.domain
  2064  			}
  2065  
  2066  			if vm.windowsOptionalConfig.adminPassword != "" {
  2067  				guiUnattended.Password = &types.CustomizationPassword{
  2068  					PlainText: true,
  2069  					Value:     vm.windowsOptionalConfig.adminPassword,
  2070  				}
  2071  			}
  2072  
  2073  			identity_options = &types.CustomizationSysprep{
  2074  				GuiUnattended:  guiUnattended,
  2075  				Identification: customIdentification,
  2076  				UserData:       userData,
  2077  			}
  2078  		} else {
  2079  			identity_options = &types.CustomizationLinuxPrep{
  2080  				HostName: &types.CustomizationFixedName{
  2081  					Name: strings.Split(vm.name, ".")[0],
  2082  				},
  2083  				Domain:     vm.domain,
  2084  				TimeZone:   vm.timeZone,
  2085  				HwClockUTC: types.NewBool(true),
  2086  			}
  2087  		}
  2088  
  2089  		// create CustomizationSpec
  2090  		customSpec := types.CustomizationSpec{
  2091  			Identity: identity_options,
  2092  			GlobalIPSettings: types.CustomizationGlobalIPSettings{
  2093  				DnsSuffixList: vm.dnsSuffixes,
  2094  				DnsServerList: vm.dnsServers,
  2095  			},
  2096  			NicSettingMap: networkConfigs,
  2097  		}
  2098  		log.Printf("[DEBUG] custom spec: %v", customSpec)
  2099  
  2100  		log.Printf("[DEBUG] VM customization starting")
  2101  		taskb, err := newVM.Customize(context.TODO(), customSpec)
  2102  		if err != nil {
  2103  			return err
  2104  		}
  2105  		_, err = taskb.WaitForResult(context.TODO(), nil)
  2106  		if err != nil {
  2107  			return err
  2108  		}
  2109  		log.Printf("[DEBUG] VM customization finished")
  2110  	}
  2111  
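	// Power the VM on when it was cloned from a template or when one of the attached
	// VMDKs is marked bootable; a from-scratch VM without a bootable disk is left
	// powered off.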
  2112  	if vm.hasBootableVmdk || vm.template != "" {
  2113  		t, err := newVM.PowerOn(context.TODO())
  2114  		if err != nil {
  2115  			return err
  2116  		}
  2117  		_, err = t.WaitForResult(context.TODO(), nil)
  2118  		if err != nil {
  2119  			return err
  2120  		}
  2121  		err = newVM.WaitForPowerState(context.TODO(), types.VirtualMachinePowerStatePoweredOn)
  2122  		if err != nil {
  2123  			return err
  2124  		}
  2125  	}
  2126  	return nil
  2127  }