github.com/sarguru/terraform@v0.6.17-0.20160525232901-8fcdfd7e3dc9/builtin/providers/vsphere/resource_vsphere_virtual_machine.go

     1  package vsphere
     2  
     3  import (
     4  	"fmt"
     5  	"log"
     6  	"net"
     7  	"strconv"
     8  	"strings"
     9  
    10  	"github.com/hashicorp/terraform/helper/schema"
    11  	"github.com/vmware/govmomi"
    12  	"github.com/vmware/govmomi/find"
    13  	"github.com/vmware/govmomi/object"
    14  	"github.com/vmware/govmomi/property"
    15  	"github.com/vmware/govmomi/vim25/mo"
    16  	"github.com/vmware/govmomi/vim25/types"
    17  	"golang.org/x/net/context"
    18  )
    19  
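        // DefaultDNSSuffixes is the DNS search suffix list applied when the dns_suffixes argument is not set.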
    20  var DefaultDNSSuffixes = []string{
    21  	"vsphere.local",
    22  }
    23  
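        // DefaultDNSServers is the list of DNS servers applied when the dns_servers argument is not set.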
    24  var DefaultDNSServers = []string{
    25  	"8.8.8.8",
    26  	"8.8.4.4",
    27  }
    28  
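        // networkInterface mirrors a single network_interface block of the resource configuration.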
    29  type networkInterface struct {
    30  	deviceName       string
    31  	label            string
    32  	ipv4Address      string
    33  	ipv4PrefixLength int
    34  	ipv4Gateway      string
    35  	ipv6Address      string
    36  	ipv6PrefixLength int
    37  	ipv6Gateway      string
    38  	adapterType      string // TODO: Make "adapter_type" argument
    39  }
    40  
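        // hardDisk mirrors a single disk block of the resource configuration.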
    41  type hardDisk struct {
    42  	name       string
    43  	size       int64
    44  	iops       int64
    45  	initType   string
    46  	vmdkPath   string
    47  	controller string
    48  	bootable   bool
    49  }
    50  
    51  // windowsOptConfig holds additional options vSphere can use when cloning Windows machines.
    52  type windowsOptConfig struct {
    53  	productKey         string
    54  	adminPassword      string
    55  	domainUser         string
    56  	domain             string
    57  	domainUserPassword string
    58  }
    59  
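        // cdrom mirrors a single cdrom block: the datastore and path of an ISO image to attach.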
    60  type cdrom struct {
    61  	datastore string
    62  	path      string
    63  }
    64  
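        // memoryAllocation holds the memory reservation applied to the virtual machine.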
    65  type memoryAllocation struct {
    66  	reservation int64
    67  }
    68  
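        // virtualMachine is the internal model of the resource configuration used to create or clone a VM in vSphere.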
    69  type virtualMachine struct {
    70  	name                  string
    71  	folder                string
    72  	datacenter            string
    73  	cluster               string
    74  	resourcePool          string
    75  	datastore             string
    76  	vcpu                  int32
    77  	memoryMb              int64
    78  	memoryAllocation      memoryAllocation
    79  	template              string
    80  	networkInterfaces     []networkInterface
    81  	hardDisks             []hardDisk
    82  	cdroms                []cdrom
    83  	domain                string
    84  	timeZone              string
    85  	dnsSuffixes           []string
    86  	dnsServers            []string
    87  	hasBootableVmdk       bool
    88  	linkedClone           bool
    89  	skipCustomization     bool
    90  	windowsOptionalConfig windowsOptConfig
    91  	customConfigurations  map[string](types.AnyType)
    92  }
    93  
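        // Path returns the inventory path of the virtual machine, prefixed with its folder when one is set.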
    94  func (v virtualMachine) Path() string {
    95  	return vmPath(v.folder, v.name)
    96  }
    97  
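        // vmPath joins an optional folder and a VM name into an inventory path,
        // e.g. vmPath("folder1", "vm1") yields "folder1/vm1" and vmPath("", "vm1") yields "vm1".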
    98  func vmPath(folder string, name string) string {
    99  	var path string
   100  	if len(folder) > 0 {
   101  		path += folder + "/"
   102  	}
   103  	return path + name
   104  }
   105  
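        // resourceVSphereVirtualMachine returns the schema and CRUD functions for the vsphere_virtual_machine resource.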
   106  func resourceVSphereVirtualMachine() *schema.Resource {
   107  	return &schema.Resource{
   108  		Create: resourceVSphereVirtualMachineCreate,
   109  		Read:   resourceVSphereVirtualMachineRead,
   110  		Update: resourceVSphereVirtualMachineUpdate,
   111  		Delete: resourceVSphereVirtualMachineDelete,
   112  
   113  		Schema: map[string]*schema.Schema{
   114  			"name": &schema.Schema{
   115  				Type:     schema.TypeString,
   116  				Required: true,
   117  				ForceNew: true,
   118  			},
   119  
   120  			"folder": &schema.Schema{
   121  				Type:     schema.TypeString,
   122  				Optional: true,
   123  				ForceNew: true,
   124  			},
   125  
   126  			"vcpu": &schema.Schema{
   127  				Type:     schema.TypeInt,
   128  				Required: true,
   129  			},
   130  
   131  			"memory": &schema.Schema{
   132  				Type:     schema.TypeInt,
   133  				Required: true,
   134  			},
   135  
   136  			"memory_reservation": &schema.Schema{
   137  				Type:     schema.TypeInt,
   138  				Optional: true,
   139  				Default:  0,
   140  				ForceNew: true,
   141  			},
   142  
   143  			"datacenter": &schema.Schema{
   144  				Type:     schema.TypeString,
   145  				Optional: true,
   146  				ForceNew: true,
   147  			},
   148  
   149  			"cluster": &schema.Schema{
   150  				Type:     schema.TypeString,
   151  				Optional: true,
   152  				ForceNew: true,
   153  			},
   154  
   155  			"resource_pool": &schema.Schema{
   156  				Type:     schema.TypeString,
   157  				Optional: true,
   158  				ForceNew: true,
   159  			},
   160  
   161  			"linked_clone": &schema.Schema{
   162  				Type:     schema.TypeBool,
   163  				Optional: true,
   164  				Default:  false,
   165  				ForceNew: true,
   166  			},
   167  			"gateway": &schema.Schema{
   168  				Type:       schema.TypeString,
   169  				Optional:   true,
   170  				ForceNew:   true,
   171  				Deprecated: "Please use network_interface.ipv4_gateway",
   172  			},
   173  
   174  			"domain": &schema.Schema{
   175  				Type:     schema.TypeString,
   176  				Optional: true,
   177  				ForceNew: true,
   178  				Default:  "vsphere.local",
   179  			},
   180  
   181  			"time_zone": &schema.Schema{
   182  				Type:     schema.TypeString,
   183  				Optional: true,
   184  				ForceNew: true,
   185  				Default:  "Etc/UTC",
   186  			},
   187  
   188  			"dns_suffixes": &schema.Schema{
   189  				Type:     schema.TypeList,
   190  				Optional: true,
   191  				Elem:     &schema.Schema{Type: schema.TypeString},
   192  				ForceNew: true,
   193  			},
   194  
   195  			"dns_servers": &schema.Schema{
   196  				Type:     schema.TypeList,
   197  				Optional: true,
   198  				Elem:     &schema.Schema{Type: schema.TypeString},
   199  				ForceNew: true,
   200  			},
   201  
   202  			"skip_customization": &schema.Schema{
   203  				Type:     schema.TypeBool,
   204  				Optional: true,
   205  				ForceNew: true,
   206  				Default:  false,
   207  			},
   208  
   209  			"custom_configuration_parameters": &schema.Schema{
   210  				Type:     schema.TypeMap,
   211  				Optional: true,
   212  				ForceNew: true,
   213  			},
   214  			"windows_opt_config": &schema.Schema{
   215  				Type:     schema.TypeList,
   216  				Optional: true,
   217  				ForceNew: true,
   218  				Elem: &schema.Resource{
   219  					Schema: map[string]*schema.Schema{
   220  						"product_key": &schema.Schema{
   221  							Type:     schema.TypeString,
   222  							Required: true,
   223  							ForceNew: true,
   224  						},
   225  
   226  						"admin_password": &schema.Schema{
   227  							Type:     schema.TypeString,
   228  							Optional: true,
   229  							ForceNew: true,
   230  						},
   231  
   232  						"domain_user": &schema.Schema{
   233  							Type:     schema.TypeString,
   234  							Optional: true,
   235  							ForceNew: true,
   236  						},
   237  
   238  						"domain": &schema.Schema{
   239  							Type:     schema.TypeString,
   240  							Optional: true,
   241  							ForceNew: true,
   242  						},
   243  
   244  						"domain_user_password": &schema.Schema{
   245  							Type:     schema.TypeString,
   246  							Optional: true,
   247  							ForceNew: true,
   248  						},
   249  					},
   250  				},
   251  			},
   252  
   253  			"network_interface": &schema.Schema{
   254  				Type:     schema.TypeList,
   255  				Required: true,
   256  				ForceNew: true,
   257  				Elem: &schema.Resource{
   258  					Schema: map[string]*schema.Schema{
   259  						"label": &schema.Schema{
   260  							Type:     schema.TypeString,
   261  							Required: true,
   262  							ForceNew: true,
   263  						},
   264  
   265  						"ip_address": &schema.Schema{
   266  							Type:       schema.TypeString,
   267  							Optional:   true,
   268  							Computed:   true,
   269  							Deprecated: "Please use ipv4_address",
   270  						},
   271  
   272  						"subnet_mask": &schema.Schema{
   273  							Type:       schema.TypeString,
   274  							Optional:   true,
   275  							Computed:   true,
   276  							Deprecated: "Please use ipv4_prefix_length",
   277  						},
   278  
   279  						"ipv4_address": &schema.Schema{
   280  							Type:     schema.TypeString,
   281  							Optional: true,
   282  							Computed: true,
   283  						},
   284  
   285  						"ipv4_prefix_length": &schema.Schema{
   286  							Type:     schema.TypeInt,
   287  							Optional: true,
   288  							Computed: true,
   289  						},
   290  
   291  						"ipv4_gateway": &schema.Schema{
   292  							Type:     schema.TypeString,
   293  							Optional: true,
   294  							Computed: true,
   295  						},
   296  
   297  						"ipv6_address": &schema.Schema{
   298  							Type:     schema.TypeString,
   299  							Optional: true,
   300  							Computed: true,
   301  						},
   302  
   303  						"ipv6_prefix_length": &schema.Schema{
   304  							Type:     schema.TypeInt,
   305  							Optional: true,
   306  							Computed: true,
   307  						},
   308  
   309  						"ipv6_gateway": &schema.Schema{
   310  							Type:     schema.TypeString,
   311  							Optional: true,
   312  							Computed: true,
   313  						},
   314  
   315  						"adapter_type": &schema.Schema{
   316  							Type:     schema.TypeString,
   317  							Optional: true,
   318  							ForceNew: true,
   319  						},
   320  					},
   321  				},
   322  			},
   323  
   324  			"disk": &schema.Schema{
   325  				Type:     schema.TypeSet,
   326  				Required: true,
   327  				Elem: &schema.Resource{
   328  					Schema: map[string]*schema.Schema{
   329  						"uuid": &schema.Schema{
   330  							Type:     schema.TypeString,
   331  							Computed: true,
   332  						},
   333  
   334  						"key": &schema.Schema{
   335  							Type:     schema.TypeInt,
   336  							Computed: true,
   337  						},
   338  
   339  						"template": &schema.Schema{
   340  							Type:     schema.TypeString,
   341  							Optional: true,
   342  						},
   343  
   344  						"type": &schema.Schema{
   345  							Type:     schema.TypeString,
   346  							Optional: true,
   347  							Default:  "eager_zeroed",
   348  							ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
   349  								value := v.(string)
   350  								if value != "thin" && value != "eager_zeroed" {
   351  									errors = append(errors, fmt.Errorf(
   352  										"only 'thin' and 'eager_zeroed' are supported values for 'type'"))
   353  								}
   354  								return
   355  							},
   356  						},
   357  
   358  						"datastore": &schema.Schema{
   359  							Type:     schema.TypeString,
   360  							Optional: true,
   361  						},
   362  
   363  						"size": &schema.Schema{
   364  							Type:     schema.TypeInt,
   365  							Optional: true,
   366  						},
   367  
   368  						"name": &schema.Schema{
   369  							Type:     schema.TypeString,
   370  							Optional: true,
   371  						},
   372  
   373  						"iops": &schema.Schema{
   374  							Type:     schema.TypeInt,
   375  							Optional: true,
   376  						},
   377  
   378  						"vmdk": &schema.Schema{
   379  							// TODO: Add ValidateFunc to confirm path exists
   380  							Type:     schema.TypeString,
   381  							Optional: true,
   382  						},
   383  
   384  						"bootable": &schema.Schema{
   385  							Type:     schema.TypeBool,
   386  							Optional: true,
   387  						},
   388  
   389  						"keep_on_remove": &schema.Schema{
   390  							Type:     schema.TypeBool,
   391  							Optional: true,
   392  						},
   393  
   394  						"controller_type": &schema.Schema{
   395  							Type:     schema.TypeString,
   396  							Optional: true,
   397  							Default:  "scsi",
   398  							ForceNew: true,
   399  							ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
   400  								value := v.(string)
   401  								if value != "scsi" && value != "ide" {
   402  									errors = append(errors, fmt.Errorf(
   403  										"only 'scsi' and 'ide' are supported values for 'controller_type'"))
   404  								}
   405  								return
   406  							},
   407  						},
   408  					},
   409  				},
   410  			},
   411  
   412  			"cdrom": &schema.Schema{
   413  				Type:     schema.TypeList,
   414  				Optional: true,
   415  				ForceNew: true,
   416  				Elem: &schema.Resource{
   417  					Schema: map[string]*schema.Schema{
   418  						"datastore": &schema.Schema{
   419  							Type:     schema.TypeString,
   420  							Required: true,
   421  							ForceNew: true,
   422  						},
   423  
   424  						"path": &schema.Schema{
   425  							Type:     schema.TypeString,
   426  							Required: true,
   427  							ForceNew: true,
   428  						},
   429  					},
   430  				},
   431  			},
   432  		},
   433  	}
   434  }
   435  
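        // resourceVSphereVirtualMachineUpdate applies changes to vcpu, memory and the disk set,
        // powering the virtual machine off and back on when a reboot is required.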
   436  func resourceVSphereVirtualMachineUpdate(d *schema.ResourceData, meta interface{}) error {
   437  	// flag if changes have to be applied
   438  	hasChanges := false
   439  	// flag if changes have to be done when powered off
   440  	rebootRequired := false
   441  
   442  	// make config spec
   443  	configSpec := types.VirtualMachineConfigSpec{}
   444  
   445  	if d.HasChange("vcpu") {
   446  		configSpec.NumCPUs = int32(d.Get("vcpu").(int))
   447  		hasChanges = true
   448  		rebootRequired = true
   449  	}
   450  
   451  	if d.HasChange("memory") {
   452  		configSpec.MemoryMB = int64(d.Get("memory").(int))
   453  		hasChanges = true
   454  		rebootRequired = true
   455  	}
   456  
   457  	client := meta.(*govmomi.Client)
   458  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
   459  	if err != nil {
   460  		return err
   461  	}
   462  	finder := find.NewFinder(client.Client, true)
   463  	finder = finder.SetDatacenter(dc)
   464  
   465  	vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string)))
   466  	if err != nil {
   467  		return err
   468  	}
   469  
   470  	if d.HasChange("disk") {
   471  		hasChanges = true
   472  		oldDisks, newDisks := d.GetChange("disk")
   473  		oldDiskSet := oldDisks.(*schema.Set)
   474  		newDiskSet := newDisks.(*schema.Set)
   475  
   476  		addedDisks := newDiskSet.Difference(oldDiskSet)
   477  		removedDisks := oldDiskSet.Difference(newDiskSet)
   478  
   479  		// Removed disks
   480  		for _, diskRaw := range removedDisks.List() {
   481  			if disk, ok := diskRaw.(map[string]interface{}); ok {
   482  				devices, err := vm.Device(context.TODO())
   483  				if err != nil {
   484  					return fmt.Errorf("[ERROR] Update Remove Disk - Could not get virtual device list: %v", err)
   485  				}
   486  				virtualDisk := devices.FindByKey(int32(disk["key"].(int)))
   487  
   488  				keep := false
   489  				if v, ok := disk["keep_on_remove"].(bool); ok {
   490  					keep = v
   491  				}
   492  
   493  				err = vm.RemoveDevice(context.TODO(), keep, virtualDisk)
   494  				if err != nil {
   495  					return fmt.Errorf("[ERROR] Update Remove Disk - Error removing disk: %v", err)
   496  				}
   497  			}
   498  		}
   499  		// Added disks
   500  		for _, diskRaw := range addedDisks.List() {
   501  			if disk, ok := diskRaw.(map[string]interface{}); ok {
   502  
   503  				var datastore *object.Datastore
   504  				if disk["datastore"] == "" {
   505  					datastore, err = finder.DefaultDatastore(context.TODO())
   506  					if err != nil {
   507  						return fmt.Errorf("[ERROR] Update Add Disk - Error finding datastore: %v", err)
   508  					}
   509  				} else {
   510  					datastore, err = finder.Datastore(context.TODO(), disk["datastore"].(string))
   511  					if err != nil {
   512  						log.Printf("[ERROR] Couldn't find datastore %v.  %s", disk["datastore"].(string), err)
   513  						return err
   514  					}
   515  				}
   516  
   517  				var size int64
   518  				if disk["size"] == 0 {
   519  					size = 0
   520  				} else {
   521  					size = int64(disk["size"].(int))
   522  				}
   523  				iops := int64(disk["iops"].(int))
   524  				controller_type := disk["controller_type"].(string)
   525  
   526  				var vm_mo mo.VirtualMachine
   527  				vm.Properties(context.TODO(), vm.Reference(), []string{"summary", "config"}, &vm_mo)
   528  
   529  				var diskPath string
   530  				switch {
   531  				case disk["vmdk"] != "":
   532  					diskPath = disk["vmdk"].(string)
   533  				case disk["name"] != "":
   534  					snapshotFullDir := vm_mo.Config.Files.SnapshotDirectory
   535  					split := strings.Split(snapshotFullDir, " ")
   536  					if len(split) != 2 {
   537  						return fmt.Errorf("[ERROR] createVirtualMachine - failed to split snapshot directory: %v", snapshotFullDir)
   538  					}
   539  					vmWorkingPath := split[1]
   540  					diskPath = vmWorkingPath + disk["name"].(string)
   541  				default:
   542  					return fmt.Errorf("[ERROR] resourceVSphereVirtualMachineUpdate - Neither vmdk path nor vmdk name was given")
   543  				}
   544  
   545  				log.Printf("[INFO] Attaching disk: %v", diskPath)
   546  				err = addHardDisk(vm, size, iops, "thin", datastore, diskPath, controller_type)
   547  				if err != nil {
   548  					log.Printf("[ERROR] Add Hard Disk Failed: %v", err)
   549  					return err
   550  				}
   551  			}
   552  			if err != nil {
   553  				return err
   554  			}
   555  		}
   556  	}
   557  
   558  	// do nothing if there are no changes
   559  	if !hasChanges {
   560  		return nil
   561  	}
   562  
   563  	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
   564  
   565  	if rebootRequired {
   566  		log.Printf("[INFO] Shutting down virtual machine: %s", d.Id())
   567  
   568  		task, err := vm.PowerOff(context.TODO())
   569  		if err != nil {
   570  			return err
   571  		}
   572  
   573  		err = task.Wait(context.TODO())
   574  		if err != nil {
   575  			return err
   576  		}
   577  	}
   578  
   579  	log.Printf("[INFO] Reconfiguring virtual machine: %s", d.Id())
   580  
   581  	task, err := vm.Reconfigure(context.TODO(), configSpec)
   582  	if err != nil {
   583  		log.Printf("[ERROR] %s", err)
   584  	}
   585  
   586  	err = task.Wait(context.TODO())
   587  	if err != nil {
   588  		log.Printf("[ERROR] %s", err)
   589  	}
   590  
   591  	if rebootRequired {
   592  		task, err = vm.PowerOn(context.TODO())
   593  		if err != nil {
   594  			return err
   595  		}
   596  
   597  		err = task.Wait(context.TODO())
   598  		if err != nil {
   599  			log.Printf("[ERROR] %s", err)
   600  		}
   601  	}
   602  
   603  	ip, err := vm.WaitForIP(context.TODO())
   604  	if err != nil {
   605  		return err
   606  	}
   607  	log.Printf("[DEBUG] ip address: %v", ip)
   608  
   609  	return resourceVSphereVirtualMachineRead(d, meta)
   610  }
   611  
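        // resourceVSphereVirtualMachineCreate builds a virtualMachine from the resource data and
        // provisions it in vSphere, either from scratch or by cloning a template.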
   612  func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error {
   613  	client := meta.(*govmomi.Client)
   614  
   615  	vm := virtualMachine{
   616  		name:     d.Get("name").(string),
   617  		vcpu:     int32(d.Get("vcpu").(int)),
   618  		memoryMb: int64(d.Get("memory").(int)),
   619  		memoryAllocation: memoryAllocation{
   620  			reservation: int64(d.Get("memory_reservation").(int)),
   621  		},
   622  	}
   623  
   624  	if v, ok := d.GetOk("folder"); ok {
   625  		vm.folder = v.(string)
   626  	}
   627  
   628  	if v, ok := d.GetOk("datacenter"); ok {
   629  		vm.datacenter = v.(string)
   630  	}
   631  
   632  	if v, ok := d.GetOk("cluster"); ok {
   633  		vm.cluster = v.(string)
   634  	}
   635  
   636  	if v, ok := d.GetOk("resource_pool"); ok {
   637  		vm.resourcePool = v.(string)
   638  	}
   639  
   640  	if v, ok := d.GetOk("domain"); ok {
   641  		vm.domain = v.(string)
   642  	}
   643  
   644  	if v, ok := d.GetOk("time_zone"); ok {
   645  		vm.timeZone = v.(string)
   646  	}
   647  
   648  	if v, ok := d.GetOk("linked_clone"); ok {
   649  		vm.linkedClone = v.(bool)
   650  	}
   651  
   652  	if v, ok := d.GetOk("skip_customization"); ok {
   653  		vm.skipCustomization = v.(bool)
   654  	}
   655  
   656  	if raw, ok := d.GetOk("dns_suffixes"); ok {
   657  		for _, v := range raw.([]interface{}) {
   658  			vm.dnsSuffixes = append(vm.dnsSuffixes, v.(string))
   659  		}
   660  	} else {
   661  		vm.dnsSuffixes = DefaultDNSSuffixes
   662  	}
   663  
   664  	if raw, ok := d.GetOk("dns_servers"); ok {
   665  		for _, v := range raw.([]interface{}) {
   666  			vm.dnsServers = append(vm.dnsServers, v.(string))
   667  		}
   668  	} else {
   669  		vm.dnsServers = DefaultDNSServers
   670  	}
   671  
   672  	if vL, ok := d.GetOk("custom_configuration_parameters"); ok {
   673  		if custom_configs, ok := vL.(map[string]interface{}); ok {
   674  			custom := make(map[string]types.AnyType)
   675  			for k, v := range custom_configs {
   676  				custom[k] = v
   677  			}
   678  			vm.customConfigurations = custom
   679  			log.Printf("[DEBUG] custom_configuration_parameters init: %v", vm.customConfigurations)
   680  		}
   681  	}
   682  
   683  	if vL, ok := d.GetOk("network_interface"); ok {
   684  		networks := make([]networkInterface, len(vL.([]interface{})))
   685  		for i, v := range vL.([]interface{}) {
   686  			network := v.(map[string]interface{})
   687  			networks[i].label = network["label"].(string)
   688  			if v, ok := network["ip_address"].(string); ok && v != "" {
   689  				networks[i].ipv4Address = v
   690  			}
   691  			if v, ok := d.GetOk("gateway"); ok {
   692  				networks[i].ipv4Gateway = v.(string)
   693  			}
   694  			if v, ok := network["subnet_mask"].(string); ok && v != "" {
   695  				ip := net.ParseIP(v).To4()
   696  				if ip != nil {
   697  					mask := net.IPv4Mask(ip[0], ip[1], ip[2], ip[3])
   698  					pl, _ := mask.Size()
   699  					networks[i].ipv4PrefixLength = pl
   700  				} else {
   701  					return fmt.Errorf("subnet_mask parameter is invalid.")
   702  				}
   703  			}
   704  			if v, ok := network["ipv4_address"].(string); ok && v != "" {
   705  				networks[i].ipv4Address = v
   706  			}
   707  			if v, ok := network["ipv4_prefix_length"].(int); ok && v != 0 {
   708  				networks[i].ipv4PrefixLength = v
   709  			}
   710  			if v, ok := network["ipv4_gateway"].(string); ok && v != "" {
   711  				networks[i].ipv4Gateway = v
   712  			}
   713  			if v, ok := network["ipv6_address"].(string); ok && v != "" {
   714  				networks[i].ipv6Address = v
   715  			}
   716  			if v, ok := network["ipv6_prefix_length"].(int); ok && v != 0 {
   717  				networks[i].ipv6PrefixLength = v
   718  			}
   719  			if v, ok := network["ipv6_gateway"].(string); ok && v != "" {
   720  				networks[i].ipv6Gateway = v
   721  			}
   722  		}
   723  		vm.networkInterfaces = networks
   724  		log.Printf("[DEBUG] network_interface init: %v", networks)
   725  	}
   726  
   727  	if vL, ok := d.GetOk("windows_opt_config"); ok {
   728  		var winOpt windowsOptConfig
   729  		custom_configs := (vL.([]interface{}))[0].(map[string]interface{})
   730  		if v, ok := custom_configs["admin_password"].(string); ok && v != "" {
   731  			winOpt.adminPassword = v
   732  		}
   733  		if v, ok := custom_configs["domain"].(string); ok && v != "" {
   734  			winOpt.domain = v
   735  		}
   736  		if v, ok := custom_configs["domain_user"].(string); ok && v != "" {
   737  			winOpt.domainUser = v
   738  		}
   739  		if v, ok := custom_configs["product_key"].(string); ok && v != "" {
   740  			winOpt.productKey = v
   741  		}
   742  		if v, ok := custom_configs["domain_user_password"].(string); ok && v != "" {
   743  			winOpt.domainUserPassword = v
   744  		}
   745  		vm.windowsOptionalConfig = winOpt
   746  		log.Printf("[DEBUG] windows config init: %v", winOpt)
   747  	}
   748  
   749  	if vL, ok := d.GetOk("disk"); ok {
   750  		if diskSet, ok := vL.(*schema.Set); ok {
   751  
   752  			disks := []hardDisk{}
   753  			hasBootableDisk := false
   754  			for _, value := range diskSet.List() {
   755  				disk := value.(map[string]interface{})
   756  				newDisk := hardDisk{}
   757  
   758  				if v, ok := disk["template"].(string); ok && v != "" {
   759  					if v, ok := disk["name"].(string); ok && v != "" {
   760  						return fmt.Errorf("Cannot specify name of a template")
   761  					}
   762  					vm.template = v
   763  					if hasBootableDisk {
   764  						return fmt.Errorf("[ERROR] Only one bootable disk or template may be given")
   765  					}
   766  					hasBootableDisk = true
   767  				}
   768  
   769  				if v, ok := disk["type"].(string); ok && v != "" {
   770  					newDisk.initType = v
   771  				}
   772  
   773  				if v, ok := disk["datastore"].(string); ok && v != "" {
   774  					vm.datastore = v
   775  				}
   776  
   777  				if v, ok := disk["size"].(int); ok && v != 0 {
   778  					if v, ok := disk["template"].(string); ok && v != "" {
   779  						return fmt.Errorf("Cannot specify size of a template")
   780  					}
   781  
   782  					if v, ok := disk["name"].(string); ok && v != "" {
   783  						newDisk.name = v
   784  					} else {
   785  						return fmt.Errorf("[ERROR] Disk name must be provided when creating a new disk")
   786  					}
   787  
   788  					newDisk.size = int64(v)
   789  				}
   790  
   791  				if v, ok := disk["iops"].(int); ok && v != 0 {
   792  					newDisk.iops = int64(v)
   793  				}
   794  
   795  				if v, ok := disk["controller_type"].(string); ok && v != "" {
   796  					newDisk.controller = v
   797  				}
   798  
   799  				if vVmdk, ok := disk["vmdk"].(string); ok && vVmdk != "" {
   800  					if v, ok := disk["template"].(string); ok && v != "" {
   801  						return fmt.Errorf("Cannot specify a vmdk for a template")
   802  					}
   803  					if v, ok := disk["size"].(int); ok && v != 0 {
   804  						return fmt.Errorf("Cannot specify size of a vmdk")
   805  					}
   806  					if v, ok := disk["name"].(string); ok && v != "" {
   807  						return fmt.Errorf("Cannot specify name of a vmdk")
   808  					}
   809  					if vBootable, ok := disk["bootable"].(bool); ok {
   810  						hasBootableDisk = true
   811  						newDisk.bootable = vBootable
   812  						vm.hasBootableVmdk = vBootable
   813  					}
   814  					newDisk.vmdkPath = vVmdk
   815  				}
   816  				// Preserves order so bootable disk is first
   817  				if newDisk.bootable || disk["template"] != "" {
   818  					disks = append([]hardDisk{newDisk}, disks...)
   819  				} else {
   820  					disks = append(disks, newDisk)
   821  				}
   822  			}
   823  			vm.hardDisks = disks
   824  			log.Printf("[DEBUG] disk init: %v", disks)
   825  		}
   826  	}
   827  
   828  	if vL, ok := d.GetOk("cdrom"); ok {
   829  		cdroms := make([]cdrom, len(vL.([]interface{})))
   830  		for i, v := range vL.([]interface{}) {
   831  			c := v.(map[string]interface{})
   832  			if v, ok := c["datastore"].(string); ok && v != "" {
   833  				cdroms[i].datastore = v
   834  			} else {
   835  				return fmt.Errorf("Datastore argument must be specified when attaching a cdrom image.")
   836  			}
   837  			if v, ok := c["path"].(string); ok && v != "" {
   838  				cdroms[i].path = v
   839  			} else {
   840  				return fmt.Errorf("Path argument must be specified when attaching a cdrom image.")
   841  			}
   842  		}
   843  		vm.cdroms = cdroms
   844  		log.Printf("[DEBUG] cdrom init: %v", cdroms)
   845  	}
   846  
   847  	err := vm.setupVirtualMachine(client)
   848  	if err != nil {
   849  		return err
   850  	}
   851  
   852  	d.SetId(vm.Path())
   853  	log.Printf("[INFO] Created virtual machine: %s", d.Id())
   854  
   855  	return resourceVSphereVirtualMachineRead(d, meta)
   856  }
   857  
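        // resourceVSphereVirtualMachineRead refreshes the resource state from vSphere: disks,
        // network interfaces, gateways, the root datastore and basic hardware settings.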
   858  func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{}) error {
   859  	log.Printf("[DEBUG] virtual machine resource data: %#v", d)
   860  	client := meta.(*govmomi.Client)
   861  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
   862  	if err != nil {
   863  		return err
   864  	}
   865  	finder := find.NewFinder(client.Client, true)
   866  	finder = finder.SetDatacenter(dc)
   867  
   868  	vm, err := finder.VirtualMachine(context.TODO(), d.Id())
   869  	if err != nil {
   870  		d.SetId("")
   871  		return nil
   872  	}
   873  
   874  	var mvm mo.VirtualMachine
   875  
   876  	// wait for interfaces to appear
   877  	_, err = vm.WaitForNetIP(context.TODO(), true)
   878  	if err != nil {
   879  		return err
   880  	}
   881  
   882  	collector := property.DefaultCollector(client.Client)
   883  	if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"guest", "summary", "datastore", "config"}, &mvm); err != nil {
   884  		return err
   885  	}
   886  
   887  	log.Printf("[DEBUG] Datacenter - %#v", dc)
   888  	log.Printf("[DEBUG] mvm.Summary.Config - %#v", mvm.Summary.Config)
   889  	log.Printf("[DEBUG] mvm.Config - %#v", mvm.Config)
   890  	log.Printf("[DEBUG] mvm.Guest.Net - %#v", mvm.Guest.Net)
   891  
   892  	disks := make([]map[string]interface{}, 0)
   893  	templateDisk := make(map[string]interface{}, 1)
   894  	for _, device := range mvm.Config.Hardware.Device {
   895  		if vd, ok := device.(*types.VirtualDisk); ok {
   896  
   897  			virtualDevice := vd.GetVirtualDevice()
   898  
   899  			diskFullPath := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo).FileName
   900  			log.Printf("[DEBUG] resourceVSphereVirtualMachineRead - Analyzing disk: %v", diskFullPath)
   901  
   902  			// Separate datastore and path
   903  			diskFullPathSplit := strings.Split(diskFullPath, " ")
   904  			if len(diskFullPathSplit) != 2 {
   905  				return fmt.Errorf("[ERROR] Failed trying to parse disk path: %v", diskFullPath)
   906  			}
   907  			diskPath := diskFullPathSplit[1]
   908  			// Isolate filename
   909  			diskNameSplit := strings.Split(diskPath, "/")
   910  			diskName := diskNameSplit[len(diskNameSplit)-1]
   911  			// Remove possible extension
   912  			diskName = strings.Split(diskName, ".")[0]
   913  
   914  			if prevDisks, ok := d.GetOk("disk"); ok {
   915  				if prevDiskSet, ok := prevDisks.(*schema.Set); ok {
   916  					for _, v := range prevDiskSet.List() {
   917  						prevDisk := v.(map[string]interface{})
   918  
   919  						// We're guaranteed only one template disk.  Passing value directly through since templates should be immutable
   920  						if prevDisk["template"] != "" {
   921  							if len(templateDisk) == 0 {
   922  								templateDisk = prevDisk
   923  								disks = append(disks, templateDisk)
   924  								break
   925  							}
   926  						}
   927  
   928  						// prevDisk["name"] is only set when a new disk is being created for the user.
   929  						// size case: the name was set by the user, so compare it with the filename parsed
   930  						// from the backing file (without path or .vmdk extension).
   931  						// vmdk case: compare prevDisk["vmdk"] with the backing file name.
   932  						if diskName == prevDisk["name"] || diskPath == prevDisk["vmdk"] {
   933  
   934  							prevDisk["key"] = virtualDevice.Key
   935  							prevDisk["uuid"] = virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo).Uuid
   936  
   937  							disks = append(disks, prevDisk)
   938  							break
   939  						}
   940  					}
   941  				}
   942  			}
   943  			log.Printf("[DEBUG] disks: %#v", disks)
   944  		}
   945  	}
   946  	err = d.Set("disk", disks)
   947  	if err != nil {
   948  		return fmt.Errorf("Invalid disks to set: %#v", disks)
   949  	}
   950  
   951  	networkInterfaces := make([]map[string]interface{}, 0)
   952  	for _, v := range mvm.Guest.Net {
   953  		if v.DeviceConfigId >= 0 {
   954  			log.Printf("[DEBUG] v.Network - %#v", v.Network)
   955  			networkInterface := make(map[string]interface{})
   956  			networkInterface["label"] = v.Network
   957  			for _, ip := range v.IpConfig.IpAddress {
   958  				p := net.ParseIP(ip.IpAddress)
   959  				if p.To4() != nil {
   960  					log.Printf("[DEBUG] p.String - %#v", p.String())
   961  					log.Printf("[DEBUG] ip.PrefixLength - %#v", ip.PrefixLength)
   962  					networkInterface["ipv4_address"] = p.String()
   963  					networkInterface["ipv4_prefix_length"] = ip.PrefixLength
   964  				} else if p.To16() != nil {
   965  					log.Printf("[DEBUG] p.String - %#v", p.String())
   966  					log.Printf("[DEBUG] ip.PrefixLength - %#v", ip.PrefixLength)
   967  					networkInterface["ipv6_address"] = p.String()
   968  					networkInterface["ipv6_prefix_length"] = ip.PrefixLength
   969  				}
   970  				log.Printf("[DEBUG] networkInterface: %#v", networkInterface)
   971  			}
   972  			log.Printf("[DEBUG] networkInterface: %#v", networkInterface)
   973  			networkInterfaces = append(networkInterfaces, networkInterface)
   974  		}
   975  	}
   976  	if mvm.Guest.IpStack != nil {
   977  		for _, v := range mvm.Guest.IpStack {
   978  			if v.IpRouteConfig != nil && v.IpRouteConfig.IpRoute != nil {
   979  				for _, route := range v.IpRouteConfig.IpRoute {
   980  					if route.Gateway.Device != "" {
   981  						gatewaySetting := ""
   982  						if route.Network == "::" {
   983  							gatewaySetting = "ipv6_gateway"
   984  						} else if route.Network == "0.0.0.0" {
   985  							gatewaySetting = "ipv4_gateway"
   986  						}
   987  						if gatewaySetting != "" {
   988  							deviceID, err := strconv.Atoi(route.Gateway.Device)
   989  							if err != nil {
   990  								log.Printf("[WARN] error at processing %s of device id %#v: %#v", gatewaySetting, route.Gateway.Device, err)
   991  							} else {
   992  								log.Printf("[DEBUG] %s of device id %d: %s", gatewaySetting, deviceID, route.Gateway.IpAddress)
   993  								networkInterfaces[deviceID][gatewaySetting] = route.Gateway.IpAddress
   994  							}
   995  						}
   996  					}
   997  				}
   998  			}
   999  		}
  1000  	}
  1001  	log.Printf("[DEBUG] networkInterfaces: %#v", networkInterfaces)
  1002  	err = d.Set("network_interface", networkInterfaces)
  1003  	if err != nil {
  1004  		return fmt.Errorf("Invalid network interfaces to set: %#v", networkInterfaces)
  1005  	}
  1006  
  1007  	log.Printf("[DEBUG] ip address: %v", networkInterfaces[0]["ipv4_address"].(string))
  1008  	d.SetConnInfo(map[string]string{
  1009  		"type": "ssh",
  1010  		"host": networkInterfaces[0]["ipv4_address"].(string),
  1011  	})
  1012  
  1013  	var rootDatastore string
  1014  	for _, v := range mvm.Datastore {
  1015  		var md mo.Datastore
  1016  		if err := collector.RetrieveOne(context.TODO(), v, []string{"name", "parent"}, &md); err != nil {
  1017  			return err
  1018  		}
  1019  		if md.Parent.Type == "StoragePod" {
  1020  			var msp mo.StoragePod
  1021  			if err := collector.RetrieveOne(context.TODO(), *md.Parent, []string{"name"}, &msp); err != nil {
  1022  				return err
  1023  			}
  1024  			rootDatastore = msp.Name
  1025  			log.Printf("[DEBUG] %#v", msp.Name)
  1026  		} else {
  1027  			rootDatastore = md.Name
  1028  			log.Printf("[DEBUG] %#v", md.Name)
  1029  		}
  1030  		break
  1031  	}
  1032  
  1033  	d.Set("datacenter", dc)
  1034  	d.Set("memory", mvm.Summary.Config.MemorySizeMB)
  1035  	d.Set("memory_reservation", mvm.Summary.Config.MemoryReservation)
  1036  	d.Set("vcpu", mvm.Summary.Config.NumCpu)
  1037  	d.Set("datastore", rootDatastore)
  1038  
  1039  	return nil
  1040  }
  1041  
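        // resourceVSphereVirtualMachineDelete powers the virtual machine off if it is running and destroys it.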
  1042  func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error {
  1043  	client := meta.(*govmomi.Client)
  1044  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
  1045  	if err != nil {
  1046  		return err
  1047  	}
  1048  	finder := find.NewFinder(client.Client, true)
  1049  	finder = finder.SetDatacenter(dc)
  1050  
  1051  	vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string)))
  1052  	if err != nil {
  1053  		return err
  1054  	}
  1055  
  1056  	log.Printf("[INFO] Deleting virtual machine: %s", d.Id())
  1057  	state, err := vm.PowerState(context.TODO())
  1058  	if err != nil {
  1059  		return err
  1060  	}
  1061  
  1062  	if state == types.VirtualMachinePowerStatePoweredOn {
  1063  		task, err := vm.PowerOff(context.TODO())
  1064  		if err != nil {
  1065  			return err
  1066  		}
  1067  
  1068  		err = task.Wait(context.TODO())
  1069  		if err != nil {
  1070  			return err
  1071  		}
  1072  	}
  1073  
  1074  	task, err := vm.Destroy(context.TODO())
  1075  	if err != nil {
  1076  		return err
  1077  	}
  1078  
  1079  	err = task.Wait(context.TODO())
  1080  	if err != nil {
  1081  		return err
  1082  	}
  1083  
  1084  	d.SetId("")
  1085  	return nil
  1086  }
  1087  
  1088  // addHardDisk adds a new Hard Disk to the VirtualMachine.
  1089  func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string, datastore *object.Datastore, diskPath string, controller_type string) error {
  1090  	devices, err := vm.Device(context.TODO())
  1091  	if err != nil {
  1092  		return err
  1093  	}
  1094  	log.Printf("[DEBUG] vm devices: %#v\n", devices)
  1095  
  1096  	controller, err := devices.FindDiskController(controller_type)
  1097  	if err != nil {
  1098  		return err
  1099  	}
  1100  	log.Printf("[DEBUG] disk controller: %#v\n", controller)
  1101  
  1102  	// TODO Check if diskPath & datastore exist
  1103  	// A disk path is required; it is qualified with the datastore name below.
  1104  	if diskPath == "" {
  1105  		return fmt.Errorf("[ERROR] addHardDisk - No path provided")
  1106  	} else {
  1107  		// TODO Check if diskPath & datastore exist
  1108  		diskPath = fmt.Sprintf("[%v] %v", datastore.Name(), diskPath)
  1109  	}
  1110  	log.Printf("[DEBUG] addHardDisk - diskPath: %v", diskPath)
  1111  	disk := devices.CreateDisk(controller, datastore.Reference(), diskPath)
  1112  
  1113  	existing := devices.SelectByBackingInfo(disk.Backing)
  1114  	log.Printf("[DEBUG] disk: %#v\n", disk)
  1115  
  1116  	if len(existing) == 0 {
  1117  		disk.CapacityInKB = int64(size * 1024 * 1024)
  1118  		if iops != 0 {
  1119  			disk.StorageIOAllocation = &types.StorageIOAllocationInfo{
  1120  				Limit: iops,
  1121  			}
  1122  		}
  1123  		backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
  1124  
  1125  		if diskType == "eager_zeroed" {
  1126  			// eager zeroed thick virtual disk
  1127  			backing.ThinProvisioned = types.NewBool(false)
  1128  			backing.EagerlyScrub = types.NewBool(true)
  1129  		} else if diskType == "thin" {
  1130  			// thin provisioned virtual disk
  1131  			backing.ThinProvisioned = types.NewBool(true)
  1132  		}
  1133  
  1134  		log.Printf("[DEBUG] addHardDisk: %#v\n", disk)
  1135  		log.Printf("[DEBUG] addHardDisk capacity: %#v\n", disk.CapacityInKB)
  1136  
  1137  		return vm.AddDevice(context.TODO(), disk)
  1138  	} else {
  1139  		log.Printf("[DEBUG] addHardDisk: Disk already present.\n")
  1140  
  1141  		return nil
  1142  	}
  1143  }
  1144  
  1145  // addCdrom adds a new virtual cdrom drive to the VirtualMachine and attaches an image (ISO) to it from a datastore path.
  1146  func addCdrom(vm *object.VirtualMachine, datastore, path string) error {
  1147  	devices, err := vm.Device(context.TODO())
  1148  	if err != nil {
  1149  		return err
  1150  	}
  1151  	log.Printf("[DEBUG] vm devices: %#v", devices)
  1152  
  1153  	controller, err := devices.FindIDEController("")
  1154  	if err != nil {
  1155  		return err
  1156  	}
  1157  	log.Printf("[DEBUG] ide controller: %#v", controller)
  1158  
  1159  	c, err := devices.CreateCdrom(controller)
  1160  	if err != nil {
  1161  		return err
  1162  	}
  1163  
  1164  	c = devices.InsertIso(c, fmt.Sprintf("[%s] %s", datastore, path))
  1165  	log.Printf("[DEBUG] addCdrom: %#v", c)
  1166  
  1167  	return vm.AddDevice(context.TODO(), c)
  1168  }
  1169  
  1170  // buildNetworkDevice builds VirtualDeviceConfigSpec for Network Device.
  1171  func buildNetworkDevice(f *find.Finder, label, adapterType string) (*types.VirtualDeviceConfigSpec, error) {
  1172  	network, err := f.Network(context.TODO(), "*"+label)
  1173  	if err != nil {
  1174  		return nil, err
  1175  	}
  1176  
  1177  	backing, err := network.EthernetCardBackingInfo(context.TODO())
  1178  	if err != nil {
  1179  		return nil, err
  1180  	}
  1181  
  1182  	if adapterType == "vmxnet3" {
  1183  		return &types.VirtualDeviceConfigSpec{
  1184  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
  1185  			Device: &types.VirtualVmxnet3{
  1186  				VirtualVmxnet: types.VirtualVmxnet{
  1187  					VirtualEthernetCard: types.VirtualEthernetCard{
  1188  						VirtualDevice: types.VirtualDevice{
  1189  							Key:     -1,
  1190  							Backing: backing,
  1191  						},
  1192  						AddressType: string(types.VirtualEthernetCardMacTypeGenerated),
  1193  					},
  1194  				},
  1195  			},
  1196  		}, nil
  1197  	} else if adapterType == "e1000" {
  1198  		return &types.VirtualDeviceConfigSpec{
  1199  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
  1200  			Device: &types.VirtualE1000{
  1201  				VirtualEthernetCard: types.VirtualEthernetCard{
  1202  					VirtualDevice: types.VirtualDevice{
  1203  						Key:     -1,
  1204  						Backing: backing,
  1205  					},
  1206  					AddressType: string(types.VirtualEthernetCardMacTypeGenerated),
  1207  				},
  1208  			},
  1209  		}, nil
  1210  	} else {
  1211  		return nil, fmt.Errorf("Invalid network adapter type.")
  1212  	}
  1213  }
  1214  
  1215  // buildVMRelocateSpec builds VirtualMachineRelocateSpec to set a place for a new VirtualMachine.
  1216  func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine, linkedClone bool, initType string) (types.VirtualMachineRelocateSpec, error) {
  1217  	var key int32
  1218  	var moveType string
  1219  	if linkedClone {
  1220  		moveType = "createNewChildDiskBacking"
  1221  	} else {
  1222  		moveType = "moveAllDiskBackingsAndDisallowSharing"
  1223  	}
  1224  	log.Printf("[DEBUG] relocate type: [%s]", moveType)
  1225  
  1226  	devices, err := vm.Device(context.TODO())
  1227  	if err != nil {
  1228  		return types.VirtualMachineRelocateSpec{}, err
  1229  	}
  1230  	for _, d := range devices {
  1231  		if devices.Type(d) == "disk" {
  1232  			key = int32(d.GetVirtualDevice().Key)
  1233  		}
  1234  	}
  1235  
  1236  	isThin := initType == "thin"
  1237  	rpr := rp.Reference()
  1238  	dsr := ds.Reference()
  1239  	return types.VirtualMachineRelocateSpec{
  1240  		Datastore:    &dsr,
  1241  		Pool:         &rpr,
  1242  		DiskMoveType: moveType,
  1243  		Disk: []types.VirtualMachineRelocateSpecDiskLocator{
  1244  			{
  1245  				Datastore: dsr,
  1246  				DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{
  1247  					DiskMode:        "persistent",
  1248  					ThinProvisioned: types.NewBool(isThin),
  1249  					EagerlyScrub:    types.NewBool(!isThin),
  1250  				},
  1251  				DiskId: key,
  1252  			},
  1253  		},
  1254  	}, nil
  1255  }
  1256  
  1257  // getDatastoreObject gets datastore object.
  1258  func getDatastoreObject(client *govmomi.Client, f *object.DatacenterFolders, name string) (types.ManagedObjectReference, error) {
  1259  	s := object.NewSearchIndex(client.Client)
  1260  	ref, err := s.FindChild(context.TODO(), f.DatastoreFolder, name)
  1261  	if err != nil {
  1262  		return types.ManagedObjectReference{}, err
  1263  	}
  1264  	if ref == nil {
  1265  		return types.ManagedObjectReference{}, fmt.Errorf("Datastore '%s' not found.", name)
  1266  	}
  1267  	log.Printf("[DEBUG] getDatastoreObject: reference: %#v", ref)
  1268  	return ref.Reference(), nil
  1269  }
  1270  
  1271  // buildStoragePlacementSpecCreate builds StoragePlacementSpec for create action.
  1272  func buildStoragePlacementSpecCreate(f *object.DatacenterFolders, rp *object.ResourcePool, storagePod object.StoragePod, configSpec types.VirtualMachineConfigSpec) types.StoragePlacementSpec {
  1273  	vmfr := f.VmFolder.Reference()
  1274  	rpr := rp.Reference()
  1275  	spr := storagePod.Reference()
  1276  
  1277  	sps := types.StoragePlacementSpec{
  1278  		Type:       "create",
  1279  		ConfigSpec: &configSpec,
  1280  		PodSelectionSpec: types.StorageDrsPodSelectionSpec{
  1281  			StoragePod: &spr,
  1282  		},
  1283  		Folder:       &vmfr,
  1284  		ResourcePool: &rpr,
  1285  	}
  1286  	log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)
  1287  	return sps
  1288  }
  1289  
  1290  // buildStoragePlacementSpecClone builds StoragePlacementSpec for clone action.
  1291  func buildStoragePlacementSpecClone(c *govmomi.Client, f *object.DatacenterFolders, vm *object.VirtualMachine, rp *object.ResourcePool, storagePod object.StoragePod) types.StoragePlacementSpec {
  1292  	vmr := vm.Reference()
  1293  	vmfr := f.VmFolder.Reference()
  1294  	rpr := rp.Reference()
  1295  	spr := storagePod.Reference()
  1296  
  1297  	var o mo.VirtualMachine
  1298  	err := vm.Properties(context.TODO(), vmr, []string{"datastore"}, &o)
  1299  	if err != nil {
  1300  		return types.StoragePlacementSpec{}
  1301  	}
  1302  	ds := object.NewDatastore(c.Client, o.Datastore[0])
  1303  	log.Printf("[DEBUG] findDatastore: datastore: %#v\n", ds)
  1304  
  1305  	devices, err := vm.Device(context.TODO())
  1306  	if err != nil {
  1307  		return types.StoragePlacementSpec{}
  1308  	}
  1309  
  1310  	var key int32
  1311  	for _, d := range devices.SelectByType((*types.VirtualDisk)(nil)) {
  1312  		key = int32(d.GetVirtualDevice().Key)
  1313  		log.Printf("[DEBUG] findDatastore: virtual devices: %#v\n", d.GetVirtualDevice())
  1314  	}
  1315  
  1316  	sps := types.StoragePlacementSpec{
  1317  		Type: "clone",
  1318  		Vm:   &vmr,
  1319  		PodSelectionSpec: types.StorageDrsPodSelectionSpec{
  1320  			StoragePod: &spr,
  1321  		},
  1322  		CloneSpec: &types.VirtualMachineCloneSpec{
  1323  			Location: types.VirtualMachineRelocateSpec{
  1324  				Disk: []types.VirtualMachineRelocateSpecDiskLocator{
  1325  					{
  1326  						Datastore:       ds.Reference(),
  1327  						DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{},
  1328  						DiskId:          key,
  1329  					},
  1330  				},
  1331  				Pool: &rpr,
  1332  			},
  1333  			PowerOn:  false,
  1334  			Template: false,
  1335  		},
  1336  		CloneName: "dummy",
  1337  		Folder:    &vmfr,
  1338  	}
  1339  	return sps
  1340  }
  1341  
  1342  // findDatastore finds Datastore object.
  1343  func findDatastore(c *govmomi.Client, sps types.StoragePlacementSpec) (*object.Datastore, error) {
  1344  	var datastore *object.Datastore
  1345  	log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)
  1346  
  1347  	srm := object.NewStorageResourceManager(c.Client)
  1348  	rds, err := srm.RecommendDatastores(context.TODO(), sps)
  1349  	if err != nil {
  1350  		return nil, err
  1351  	}
  1352  	log.Printf("[DEBUG] findDatastore: recommendDatastores: %#v\n", rds)
  1353  
  1354  	spa := rds.Recommendations[0].Action[0].(*types.StoragePlacementAction)
  1355  	datastore = object.NewDatastore(c.Client, spa.Destination)
  1356  	log.Printf("[DEBUG] findDatastore: datastore: %#v", datastore)
  1357  
  1358  	return datastore, nil
  1359  }
  1360  
  1361  // createCdroms is a helper function to attach virtual cdrom devices (and their attached disk images) to a virtual IDE controller.
  1362  func createCdroms(vm *object.VirtualMachine, cdroms []cdrom) error {
  1363  	log.Printf("[DEBUG] add cdroms: %v", cdroms)
  1364  	for _, cd := range cdroms {
  1365  		log.Printf("[DEBUG] add cdrom (datastore): %v", cd.datastore)
  1366  		log.Printf("[DEBUG] add cdrom (cd path): %v", cd.path)
  1367  		err := addCdrom(vm, cd.datastore, cd.path)
  1368  		if err != nil {
  1369  			return err
  1370  		}
  1371  	}
  1372  
  1373  	return nil
  1374  }
  1375  
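        // setupVirtualMachine creates the virtual machine, or clones it from a template when one is
        // configured, then attaches the requested network devices, cdroms and additional hard disks.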
  1376  func (vm *virtualMachine) setupVirtualMachine(c *govmomi.Client) error {
  1377  	dc, err := getDatacenter(c, vm.datacenter)
  1378  
  1379  	if err != nil {
  1380  		return err
  1381  	}
  1382  	finder := find.NewFinder(c.Client, true)
  1383  	finder = finder.SetDatacenter(dc)
  1384  
  1385  	var template *object.VirtualMachine
  1386  	var template_mo mo.VirtualMachine
  1387  	var vm_mo mo.VirtualMachine
  1388  	if vm.template != "" {
  1389  		template, err = finder.VirtualMachine(context.TODO(), vm.template)
  1390  		if err != nil {
  1391  			return err
  1392  		}
  1393  		log.Printf("[DEBUG] template: %#v", template)
  1394  
  1395  		err = template.Properties(context.TODO(), template.Reference(), []string{"parent", "config.template", "config.guestId", "resourcePool", "snapshot", "guest.toolsVersionStatus2", "config.guestFullName"}, &template_mo)
  1396  		if err != nil {
  1397  			return err
  1398  		}
  1399  	}
  1400  
  1401  	var resourcePool *object.ResourcePool
  1402  	if vm.resourcePool == "" {
  1403  		if vm.cluster == "" {
  1404  			resourcePool, err = finder.DefaultResourcePool(context.TODO())
  1405  			if err != nil {
  1406  				return err
  1407  			}
  1408  		} else {
  1409  			resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
  1410  			if err != nil {
  1411  				return err
  1412  			}
  1413  		}
  1414  	} else {
  1415  		resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
  1416  		if err != nil {
  1417  			return err
  1418  		}
  1419  	}
  1420  	log.Printf("[DEBUG] resource pool: %#v", resourcePool)
  1421  
  1422  	dcFolders, err := dc.Folders(context.TODO())
  1423  	if err != nil {
  1424  		return err
  1425  	}
  1426  	log.Printf("[DEBUG] folder: %#v", vm.folder)
  1427  
  1428  	folder := dcFolders.VmFolder
  1429  	if len(vm.folder) > 0 {
  1430  		si := object.NewSearchIndex(c.Client)
  1431  		folderRef, err := si.FindByInventoryPath(
  1432  			context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder))
  1433  		if err != nil {
  1434  			return fmt.Errorf("Error reading folder %s: %s", vm.folder, err)
  1435  		} else if folderRef == nil {
  1436  			return fmt.Errorf("Cannot find folder %s", vm.folder)
  1437  		} else {
  1438  			folder = folderRef.(*object.Folder)
  1439  		}
  1440  	}
  1441  
  1442  	// make config spec
  1443  	configSpec := types.VirtualMachineConfigSpec{
  1444  		Name:              vm.name,
  1445  		NumCPUs:           vm.vcpu,
  1446  		NumCoresPerSocket: 1,
  1447  		MemoryMB:          vm.memoryMb,
  1448  		MemoryAllocation: &types.ResourceAllocationInfo{
  1449  			Reservation: vm.memoryAllocation.reservation,
  1450  		},
  1451  	}
  1452  	if vm.template == "" {
  1453  		configSpec.GuestId = "otherLinux64Guest"
  1454  	}
  1455  	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
  1456  
  1457  	// make ExtraConfig
  1458  	log.Printf("[DEBUG] virtual machine Extra Config spec start")
  1459  	if len(vm.customConfigurations) > 0 {
  1460  		var ov []types.BaseOptionValue
  1461  		for k, v := range vm.customConfigurations {
  1462  			key := k
  1463  			value := v
  1464  			o := types.OptionValue{
  1465  				Key:   key,
  1466  				Value: &value,
  1467  			}
  1468  			log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k, v)
  1469  			ov = append(ov, &o)
  1470  		}
  1471  		configSpec.ExtraConfig = ov
  1472  		log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
  1473  	}
  1474  
  1475  	var datastore *object.Datastore
  1476  	if vm.datastore == "" {
  1477  		datastore, err = finder.DefaultDatastore(context.TODO())
  1478  		if err != nil {
  1479  			return err
  1480  		}
  1481  	} else {
  1482  		datastore, err = finder.Datastore(context.TODO(), vm.datastore)
  1483  		if err != nil {
  1484  			// TODO: datastore cluster support in govmomi finder function
  1485  			d, err := getDatastoreObject(c, dcFolders, vm.datastore)
  1486  			if err != nil {
  1487  				return err
  1488  			}
  1489  
  1490  			if d.Type == "StoragePod" {
  1491  				sp := object.StoragePod{
  1492  					Folder: object.NewFolder(c.Client, d),
  1493  				}
  1494  
  1495  				var sps types.StoragePlacementSpec
  1496  				if vm.template != "" {
  1497  					sps = buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp)
  1498  				} else {
  1499  					sps = buildStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec)
  1500  				}
  1501  
  1502  				datastore, err = findDatastore(c, sps)
  1503  				if err != nil {
  1504  					return err
  1505  				}
  1506  			} else {
  1507  				datastore = object.NewDatastore(c.Client, d)
  1508  			}
  1509  		}
  1510  	}
  1511  
  1512  	log.Printf("[DEBUG] datastore: %#v", datastore)
  1513  
  1514  	// network
  1515  	networkDevices := []types.BaseVirtualDeviceConfigSpec{}
  1516  	networkConfigs := []types.CustomizationAdapterMapping{}
  1517  	for _, network := range vm.networkInterfaces {
  1518  		// network device
  1519  		var networkDeviceType string
  1520  		if vm.template == "" {
  1521  			networkDeviceType = "e1000"
  1522  		} else {
  1523  			networkDeviceType = "vmxnet3"
  1524  		}
  1525  		nd, err := buildNetworkDevice(finder, network.label, networkDeviceType)
  1526  		if err != nil {
  1527  			return err
  1528  		}
  1529  		networkDevices = append(networkDevices, nd)
  1530  
  1531  		if vm.template != "" {
  1532  			var ipSetting types.CustomizationIPSettings
  1533  			if network.ipv4Address == "" {
  1534  				ipSetting.Ip = &types.CustomizationDhcpIpGenerator{}
  1535  			} else {
  1536  				if network.ipv4PrefixLength == 0 {
  1537  					return fmt.Errorf("Error: ipv4_prefix_length argument is empty.")
  1538  				}
  1539  				m := net.CIDRMask(network.ipv4PrefixLength, 32)
  1540  				sm := net.IPv4(m[0], m[1], m[2], m[3])
  1541  				subnetMask := sm.String()
  1542  				log.Printf("[DEBUG] ipv4 gateway: %v\n", network.ipv4Gateway)
  1543  				log.Printf("[DEBUG] ipv4 address: %v\n", network.ipv4Address)
  1544  				log.Printf("[DEBUG] ipv4 prefix length: %v\n", network.ipv4PrefixLength)
  1545  				log.Printf("[DEBUG] ipv4 subnet mask: %v\n", subnetMask)
  1546  				ipSetting.Gateway = []string{
  1547  					network.ipv4Gateway,
  1548  				}
  1549  				ipSetting.Ip = &types.CustomizationFixedIp{
  1550  					IpAddress: network.ipv4Address,
  1551  				}
  1552  				ipSetting.SubnetMask = subnetMask
  1553  			}
  1554  
  1555  			ipv6Spec := &types.CustomizationIPSettingsIpV6AddressSpec{}
  1556  			if network.ipv6Address == "" {
  1557  				ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
  1558  					&types.CustomizationDhcpIpV6Generator{},
  1559  				}
  1560  			} else {
  1561  				log.Printf("[DEBUG] ipv6 gateway: %v\n", network.ipv6Gateway)
  1562  				log.Printf("[DEBUG] ipv6 address: %v\n", network.ipv6Address)
  1563  				log.Printf("[DEBUG] ipv6 prefix length: %v\n", network.ipv6PrefixLength)
  1564  
  1565  				ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
  1566  					&types.CustomizationFixedIpV6{
  1567  						IpAddress:  network.ipv6Address,
  1568  						SubnetMask: int32(network.ipv6PrefixLength),
  1569  					},
  1570  				}
  1571  				ipv6Spec.Gateway = []string{network.ipv6Gateway}
  1572  			}
  1573  			ipSetting.IpV6Spec = ipv6Spec
  1574  
  1575  			// network config
  1576  			config := types.CustomizationAdapterMapping{
  1577  				Adapter: ipSetting,
  1578  			}
  1579  			networkConfigs = append(networkConfigs, config)
  1580  		}
  1581  	}
  1582  	log.Printf("[DEBUG] network devices: %v", networkDevices)
  1583  	log.Printf("[DEBUG] network configs: %v", networkConfigs)
  1584  
  1585  	var task *object.Task
  1586  	if vm.template == "" {
  1587  		var mds mo.Datastore
  1588  		if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil {
  1589  			return err
  1590  		}
  1591  		log.Printf("[DEBUG] datastore: %#v", mds.Name)
  1592  		scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi")
  1593  		if err != nil {
  1594  			return err
  1595  		}
  1596  
  1597  		configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{
  1598  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
  1599  			Device:    scsi,
  1600  		})
  1601  
  1602  		configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)}
  1603  
  1604  		task, err = folder.CreateVM(context.TODO(), configSpec, resourcePool, nil)
  1605  		if err != nil {
  1606  			return err
  1607  		}
  1608  
  1609  		err = task.Wait(context.TODO())
  1610  		if err != nil {
  1611  			return err
  1612  		}
  1613  
  1614  	} else {
  1615  
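        		// Build the relocate spec that places the clone on the chosen
        		// resource pool and datastore.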
  1616  		relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template, vm.linkedClone, vm.hardDisks[0].initType)
  1617  		if err != nil {
  1618  			return err
  1619  		}
  1620  
  1621  		log.Printf("[DEBUG] relocate spec: %v", relocateSpec)
  1622  
  1623  		// make vm clone spec
  1624  		cloneSpec := types.VirtualMachineCloneSpec{
  1625  			Location: relocateSpec,
  1626  			Template: false,
  1627  			Config:   &configSpec,
  1628  			PowerOn:  false,
  1629  		}
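        		// Linked clones are based on the template's current snapshot, so
        		// one must exist.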
  1630  		if vm.linkedClone {
  1631  			if template_mo.Snapshot == nil {
  1632  				return fmt.Errorf("`linkedClone=true`, but the source template has no snapshots")
  1633  			}
  1634  			cloneSpec.Snapshot = template_mo.Snapshot.CurrentSnapshot
  1635  		}
  1636  		log.Printf("[DEBUG] clone spec: %v", cloneSpec)
  1637  
  1638  		task, err = template.Clone(context.TODO(), folder, vm.name, cloneSpec)
  1639  		if err != nil {
  1640  			return err
  1641  		}
  1642  	}
  1643  
        	// Wait for the create/clone task to complete.
  1644  	err = task.Wait(context.TODO())
  1645  	if err != nil {
  1646  		return err
  1647  	}
  1648  
  1649  	newVM, err := finder.VirtualMachine(context.TODO(), vm.Path())
  1650  	if err != nil {
  1651  		return err
  1652  	}
  1653  	log.Printf("[DEBUG] new vm: %v", newVM)
  1654  
  1655  	devices, err := newVM.Device(context.TODO())
  1656  	if err != nil {
  1657  		log.Printf("[DEBUG] Couldn't retrieve devices from the new VM")
  1658  		return err
  1659  	}
  1660  
  1661  	for _, dvc := range devices {
  1662  		// Issue 3559/3560: Delete all ethernet devices to add the correct ones later
  1663  		if devices.Type(dvc) == "ethernet" {
  1664  			err := newVM.RemoveDevice(context.TODO(), false, dvc)
  1665  			if err != nil {
  1666  				return err
  1667  			}
  1668  		}
  1669  	}
  1670  	// Add Network devices
  1671  	for _, dvc := range networkDevices {
  1672  		err := newVM.AddDevice(
  1673  			context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device)
  1674  		if err != nil {
  1675  			return err
  1676  		}
  1677  	}
  1678  
  1679  	// Create the cdroms if needed.
  1680  	if err := createCdroms(newVM, vm.cdroms); err != nil {
  1681  		return err
  1682  	}
  1683  
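        	// Refresh the VM's properties so the disk loop below can find its
        	// working directory. A clone already has the template's first disk,
        	// so start attaching extra disks after it.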
  1684  	err = newVM.Properties(context.TODO(), newVM.Reference(), []string{"summary", "config"}, &vm_mo)
        	if err != nil {
        		return err
        	}
  1685  	firstDisk := 0
  1686  	if vm.template != "" {
  1687  		firstDisk++
  1688  	}
  1689  	for i := firstDisk; i < len(vm.hardDisks); i++ {
  1690  		log.Printf("[DEBUG] disk index: %v", i)
  1691  
  1692  		var diskPath string
  1693  		switch {
  1694  		case vm.hardDisks[i].vmdkPath != "":
  1695  			diskPath = vm.hardDisks[i].vmdkPath
  1696  		case vm.hardDisks[i].name != "":
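        			// The snapshot directory is expected to look like
        			// "[datastore] <vm dir>/"; the part after the space is the
        			// VM's working path on the datastore.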
  1697  			snapshotFullDir := vm_mo.Config.Files.SnapshotDirectory
  1698  			split := strings.Split(snapshotFullDir, " ")
  1699  			if len(split) != 2 {
  1700  				return fmt.Errorf("[ERROR] setupVirtualMachine - failed to split snapshot directory: %v", snapshotFullDir)
  1701  			}
  1702  			vmWorkingPath := split[1]
  1703  			diskPath = vmWorkingPath + vm.hardDisks[i].name
  1704  		default:
  1705  			return fmt.Errorf("[ERROR] setupVirtualMachine - Neither vmdk path nor vmdk name was given: %#v", vm.hardDisks[i])
  1706  		}
  1707  
  1708  		err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, diskPath, vm.hardDisks[i].controller)
  1709  		if err != nil {
  1710  			return err
  1711  		}
  1712  	}
  1713  
  1714  	if vm.skipCustomization || vm.template == "" {
  1715  		log.Printf("[DEBUG] VM customization skipped")
  1716  	} else {
  1717  		var identity_options types.BaseCustomizationIdentitySettings
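        		// Windows guests (guest ID prefixed with "win") are customized
        		// with sysprep; everything else uses LinuxPrep.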
  1718  		if strings.HasPrefix(template_mo.Config.GuestId, "win") {
  1719  			// Sysprep expects a numeric Microsoft time zone index rather than
        			// an IANA name, so map "Etc/UTC" to the index used here before
        			// parsing it.
  1720  			if vm.timeZone == "Etc/UTC" {
  1721  				vm.timeZone = "085"
  1722  			}
  1723  			timeZone, err := strconv.Atoi(vm.timeZone)
  1724  			if err != nil {
  1725  				return fmt.Errorf("Error converting TimeZone: %s", err)
  1726  			}
  1727  
  1728  			guiUnattended := types.CustomizationGuiUnattended{
  1729  				AutoLogon:      false,
  1730  				AutoLogonCount: 1,
  1731  				TimeZone:       int32(timeZone),
  1732  			}
  1733  
  1734  			customIdentification := types.CustomizationIdentification{}
  1735  
  1736  			userData := types.CustomizationUserData{
  1737  				ComputerName: &types.CustomizationFixedName{
  1738  					Name: strings.Split(vm.name, ".")[0],
  1739  				},
  1740  				ProductId: vm.windowsOptionalConfig.productKey,
  1741  				FullName:  "terraform",
  1742  				OrgName:   "terraform",
  1743  			}
  1744  
  1745  			if vm.windowsOptionalConfig.domainUserPassword != "" && vm.windowsOptionalConfig.domainUser != "" && vm.windowsOptionalConfig.domain != "" {
  1746  				customIdentification.DomainAdminPassword = &types.CustomizationPassword{
  1747  					PlainText: true,
  1748  					Value:     vm.windowsOptionalConfig.domainUserPassword,
  1749  				}
  1750  				customIdentification.DomainAdmin = vm.windowsOptionalConfig.domainUser
  1751  				customIdentification.JoinDomain = vm.windowsOptionalConfig.domain
  1752  			}
  1753  
  1754  			if vm.windowsOptionalConfig.adminPassword != "" {
  1755  				guiUnattended.Password = &types.CustomizationPassword{
  1756  					PlainText: true,
  1757  					Value:     vm.windowsOptionalConfig.adminPassword,
  1758  				}
  1759  			}
  1760  
  1761  			identity_options = &types.CustomizationSysprep{
  1762  				GuiUnattended:  guiUnattended,
  1763  				Identification: customIdentification,
  1764  				UserData:       userData,
  1765  			}
  1766  		} else {
  1767  			identity_options = &types.CustomizationLinuxPrep{
  1768  				HostName: &types.CustomizationFixedName{
  1769  					Name: strings.Split(vm.name, ".")[0],
  1770  				},
  1771  				Domain:     vm.domain,
  1772  				TimeZone:   vm.timeZone,
  1773  				HwClockUTC: types.NewBool(true),
  1774  			}
  1775  		}
  1776  
  1777  		// create CustomizationSpec
  1778  		customSpec := types.CustomizationSpec{
  1779  			Identity: identity_options,
  1780  			GlobalIPSettings: types.CustomizationGlobalIPSettings{
  1781  				DnsSuffixList: vm.dnsSuffixes,
  1782  				DnsServerList: vm.dnsServers,
  1783  			},
  1784  			NicSettingMap: networkConfigs,
  1785  		}
  1786  		log.Printf("[DEBUG] custom spec: %v", customSpec)
  1787  
  1788  		log.Printf("[DEBUG] VM customization starting")
  1789  		taskb, err := newVM.Customize(context.TODO(), customSpec)
  1790  		if err != nil {
  1791  			return err
  1792  		}
  1793  		_, err = taskb.WaitForResult(context.TODO(), nil)
  1794  		if err != nil {
  1795  			return err
  1796  		}
  1797  		log.Printf("[DEBUG] VM customization finished")
  1798  	}
  1799  
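        	// Power the new VM on if it has a bootable disk or came from a
        	// template.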
  1800  	if vm.hasBootableVmdk || vm.template != "" {
  1801  		if _, err := newVM.PowerOn(context.TODO()); err != nil {
        			return err
        		}
  1802  	}
  1803  	return nil
  1804  }