github.com/jmbataller/terraform@v0.6.8-0.20151125192640-b7a12e3a580c/builtin/providers/vsphere/resource_vsphere_virtual_machine.go

     1  package vsphere
     2  
     3  import (
     4  	"fmt"
     5  	"log"
     6  	"net"
     7  	"strings"
     8  	"time"
     9  
    10  	"github.com/hashicorp/terraform/helper/resource"
    11  	"github.com/hashicorp/terraform/helper/schema"
    12  	"github.com/vmware/govmomi"
    13  	"github.com/vmware/govmomi/find"
    14  	"github.com/vmware/govmomi/object"
    15  	"github.com/vmware/govmomi/property"
    16  	"github.com/vmware/govmomi/vim25/mo"
    17  	"github.com/vmware/govmomi/vim25/types"
    18  	"golang.org/x/net/context"
    19  )
    20  
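        // Defaults applied when dns_suffixes / dns_servers are not set in the configuration.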
    21  var DefaultDNSSuffixes = []string{
    22  	"vsphere.local",
    23  }
    24  
    25  var DefaultDNSServers = []string{
    26  	"8.8.8.8",
    27  	"8.8.4.4",
    28  }
    29  
    30  type networkInterface struct {
    31  	deviceName  string
    32  	label       string
    33  	ipAddress   string
    34  	subnetMask  string
    35  	adapterType string // TODO: Make "adapter_type" argument
    36  }
    37  
    38  type hardDisk struct {
    39  	size int64
    40  	iops int64
    41  }
    42  
    43  type virtualMachine struct {
    44  	name              string
    45  	datacenter        string
    46  	cluster           string
    47  	resourcePool      string
    48  	datastore         string
    49  	vcpu              int
    50  	memoryMb          int64
    51  	template          string
    52  	networkInterfaces []networkInterface
    53  	hardDisks         []hardDisk
    54  	gateway           string
    55  	domain            string
    56  	timeZone          string
    57  	dnsSuffixes       []string
    58  	dnsServers        []string
    59  }
    60  
    61  func resourceVSphereVirtualMachine() *schema.Resource {
    62  	return &schema.Resource{
    63  		Create: resourceVSphereVirtualMachineCreate,
    64  		Read:   resourceVSphereVirtualMachineRead,
    65  		Delete: resourceVSphereVirtualMachineDelete,
    66  
    67  		Schema: map[string]*schema.Schema{
    68  			"name": &schema.Schema{
    69  				Type:     schema.TypeString,
    70  				Required: true,
    71  				ForceNew: true,
    72  			},
    73  
    74  			"vcpu": &schema.Schema{
    75  				Type:     schema.TypeInt,
    76  				Required: true,
    77  				ForceNew: true,
    78  			},
    79  
    80  			"memory": &schema.Schema{
    81  				Type:     schema.TypeInt,
    82  				Required: true,
    83  				ForceNew: true,
    84  			},
    85  
    86  			"datacenter": &schema.Schema{
    87  				Type:     schema.TypeString,
    88  				Optional: true,
    89  				ForceNew: true,
    90  			},
    91  
    92  			"cluster": &schema.Schema{
    93  				Type:     schema.TypeString,
    94  				Optional: true,
    95  				ForceNew: true,
    96  			},
    97  
    98  			"resource_pool": &schema.Schema{
    99  				Type:     schema.TypeString,
   100  				Optional: true,
   101  				ForceNew: true,
   102  			},
   103  
   104  			"gateway": &schema.Schema{
   105  				Type:     schema.TypeString,
   106  				Optional: true,
   107  				ForceNew: true,
   108  			},
   109  
   110  			"domain": &schema.Schema{
   111  				Type:     schema.TypeString,
   112  				Optional: true,
   113  				ForceNew: true,
   114  				Default:  "vsphere.local",
   115  			},
   116  
   117  			"time_zone": &schema.Schema{
   118  				Type:     schema.TypeString,
   119  				Optional: true,
   120  				ForceNew: true,
   121  				Default:  "Etc/UTC",
   122  			},
   123  
   124  			"dns_suffixes": &schema.Schema{
   125  				Type:     schema.TypeList,
   126  				Optional: true,
   127  				Elem:     &schema.Schema{Type: schema.TypeString},
   128  				ForceNew: true,
   129  			},
   130  
   131  			"dns_servers": &schema.Schema{
   132  				Type:     schema.TypeList,
   133  				Optional: true,
   134  				Elem:     &schema.Schema{Type: schema.TypeString},
   135  				ForceNew: true,
   136  			},
   137  
   138  			"network_interface": &schema.Schema{
   139  				Type:     schema.TypeList,
   140  				Required: true,
   141  				ForceNew: true,
   142  				Elem: &schema.Resource{
   143  					Schema: map[string]*schema.Schema{
   144  						"label": &schema.Schema{
   145  							Type:     schema.TypeString,
   146  							Required: true,
   147  							ForceNew: true,
   148  						},
   149  
   150  						"ip_address": &schema.Schema{
   151  							Type:     schema.TypeString,
   152  							Optional: true,
   153  							Computed: true,
   154  							ForceNew: true,
   155  						},
   156  
   157  						"subnet_mask": &schema.Schema{
   158  							Type:     schema.TypeString,
   159  							Optional: true,
   160  							Computed: true,
   161  							ForceNew: true,
   162  						},
   163  
   164  						"adapter_type": &schema.Schema{
   165  							Type:     schema.TypeString,
   166  							Optional: true,
   167  							ForceNew: true,
   168  						},
   169  					},
   170  				},
   171  			},
   172  
   173  			"disk": &schema.Schema{
   174  				Type:     schema.TypeList,
   175  				Required: true,
   176  				ForceNew: true,
   177  				Elem: &schema.Resource{
   178  					Schema: map[string]*schema.Schema{
   179  						"template": &schema.Schema{
   180  							Type:     schema.TypeString,
   181  							Optional: true,
   182  							ForceNew: true,
   183  						},
   184  
   185  						"datastore": &schema.Schema{
   186  							Type:     schema.TypeString,
   187  							Optional: true,
   188  							ForceNew: true,
   189  						},
   190  
   191  						"size": &schema.Schema{
   192  							Type:     schema.TypeInt,
   193  							Optional: true,
   194  							ForceNew: true,
   195  						},
   196  
   197  						"iops": &schema.Schema{
   198  							Type:     schema.TypeInt,
   199  							Optional: true,
   200  							ForceNew: true,
   201  						},
   202  					},
   203  				},
   204  			},
   205  
   206  			"boot_delay": &schema.Schema{
   207  				Type:     schema.TypeInt,
   208  				Optional: true,
   209  				ForceNew: true,
   210  			},
   211  		},
   212  	}
   213  }
   214  
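        // resourceVSphereVirtualMachineCreate reads the configuration into a virtualMachine
        // struct and either clones it from a template (when disk.0.template is set) or
        // creates it from scratch.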
   215  func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error {
   216  	client := meta.(*govmomi.Client)
   217  
   218  	vm := virtualMachine{
   219  		name:     d.Get("name").(string),
   220  		vcpu:     d.Get("vcpu").(int),
   221  		memoryMb: int64(d.Get("memory").(int)),
   222  	}
   223  
   224  	if v, ok := d.GetOk("datacenter"); ok {
   225  		vm.datacenter = v.(string)
   226  	}
   227  
   228  	if v, ok := d.GetOk("cluster"); ok {
   229  		vm.cluster = v.(string)
   230  	}
   231  
   232  	if v, ok := d.GetOk("resource_pool"); ok {
   233  		vm.resourcePool = v.(string)
   234  	}
   235  
   236  	if v, ok := d.GetOk("gateway"); ok {
   237  		vm.gateway = v.(string)
   238  	}
   239  
   240  	if v, ok := d.GetOk("domain"); ok {
   241  		vm.domain = v.(string)
   242  	}
   243  
   244  	if v, ok := d.GetOk("time_zone"); ok {
   245  		vm.timeZone = v.(string)
   246  	}
   247  
   248  	if raw, ok := d.GetOk("dns_suffixes"); ok {
   249  		for _, v := range raw.([]interface{}) {
   250  			vm.dnsSuffixes = append(vm.dnsSuffixes, v.(string))
   251  		}
   252  	} else {
   253  		vm.dnsSuffixes = DefaultDNSSuffixes
   254  	}
   255  
   256  	if raw, ok := d.GetOk("dns_servers"); ok {
   257  		for _, v := range raw.([]interface{}) {
   258  			vm.dnsServers = append(vm.dnsServers, v.(string))
   259  		}
   260  	} else {
   261  		vm.dnsServers = DefaultDNSServers
   262  	}
   263  
   264  	if vL, ok := d.GetOk("network_interface"); ok {
   265  		networks := make([]networkInterface, len(vL.([]interface{})))
   266  		for i, v := range vL.([]interface{}) {
   267  			network := v.(map[string]interface{})
   268  			networks[i].label = network["label"].(string)
   269  			if v, ok := network["ip_address"].(string); ok && v != "" {
   270  				networks[i].ipAddress = v
   271  			}
   272  			if v, ok := network["subnet_mask"].(string); ok && v != "" {
   273  				networks[i].subnetMask = v
   274  			}
   275  		}
   276  		vm.networkInterfaces = networks
   277  		log.Printf("[DEBUG] network_interface init: %v", networks)
   278  	}
   279  
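        	// The first disk entry also carries the optional template and datastore for the
        	// whole VM; every other entry is an additional hard disk and must specify a size.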
   280  	if vL, ok := d.GetOk("disk"); ok {
   281  		disks := make([]hardDisk, len(vL.([]interface{})))
   282  		for i, v := range vL.([]interface{}) {
   283  			disk := v.(map[string]interface{})
   284  			if i == 0 {
   285  				if v, ok := disk["template"].(string); ok && v != "" {
   286  					vm.template = v
   287  				} else {
   288  					if v, ok := disk["size"].(int); ok && v != 0 {
   289  						disks[i].size = int64(v)
   290  					} else {
   291  						return fmt.Errorf("If template argument is not specified, size argument is required.")
   292  					}
   293  				}
   294  				if v, ok := disk["datastore"].(string); ok && v != "" {
   295  					vm.datastore = v
   296  				}
   297  			} else {
   298  				if v, ok := disk["size"].(int); ok && v != 0 {
   299  					disks[i].size = int64(v)
   300  				} else {
   301  					return fmt.Errorf("Size argument is required.")
   302  				}
   303  			}
   304  			if v, ok := disk["iops"].(int); ok && v != 0 {
   305  				disks[i].iops = int64(v)
   306  			}
   307  		}
   308  		vm.hardDisks = disks
   309  		log.Printf("[DEBUG] disk init: %v", disks)
   310  	}
   311  
   312  	if vm.template != "" {
   313  		err := vm.deployVirtualMachine(client)
   314  		if err != nil {
   315  			return err
   316  		}
   317  	} else {
   318  		err := vm.createVirtualMachine(client)
   319  		if err != nil {
   320  			return err
   321  		}
   322  	}
   323  
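        	// Without a static IP on the first interface, optionally wait (after boot_delay
        	// seconds) for the guest to report a DHCP-assigned address.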
   324  	if _, ok := d.GetOk("network_interface.0.ip_address"); !ok {
   325  		if v, ok := d.GetOk("boot_delay"); ok {
   326  			stateConf := &resource.StateChangeConf{
   327  				Pending:    []string{"pending"},
   328  				Target:     "active",
   329  				Refresh:    waitForNetworkingActive(client, vm.datacenter, vm.name),
   330  				Timeout:    600 * time.Second,
   331  				Delay:      time.Duration(v.(int)) * time.Second,
   332  				MinTimeout: 2 * time.Second,
   333  			}
   334  
   335  			_, err := stateConf.WaitForState()
   336  			if err != nil {
   337  				return err
   338  			}
   339  		}
   340  	}
   341  	d.SetId(vm.name)
   342  	log.Printf("[INFO] Created virtual machine: %s", d.Id())
   343  
   344  	return resourceVSphereVirtualMachineRead(d, meta)
   345  }
   346  
   347  func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{}) error {
   348  	client := meta.(*govmomi.Client)
   349  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
   350  	if err != nil {
   351  		return err
   352  	}
   353  	finder := find.NewFinder(client.Client, true)
   354  	finder = finder.SetDatacenter(dc)
   355  
   356  	vm, err := finder.VirtualMachine(context.TODO(), d.Get("name").(string))
   357  	if err != nil {
   358  		log.Printf("[ERROR] Virtual machine not found: %s", d.Get("name").(string))
   359  		d.SetId("")
   360  		return nil
   361  	}
   362  
   363  	var mvm mo.VirtualMachine
   364  
   365  	collector := property.DefaultCollector(client.Client)
   366  	if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"guest", "summary", "datastore"}, &mvm); err != nil {
   367  		return err
   368  	}
   369  
   370  	log.Printf("[DEBUG] %#v", dc)
   371  	log.Printf("[DEBUG] %#v", mvm.Summary.Config)
   372  	log.Printf("[DEBUG] %#v", mvm.Guest.Net)
   373  
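        	// Collect guest NIC information; entries whose DeviceConfigId is negative are not
        	// backed by a virtual device and are skipped.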
   374  	networkInterfaces := make([]map[string]interface{}, 0)
   375  	for _, v := range mvm.Guest.Net {
   376  		if v.DeviceConfigId >= 0 {
   377  			log.Printf("[DEBUG] %#v", v.Network)
   378  			networkInterface := make(map[string]interface{})
   379  			networkInterface["label"] = v.Network
   380  			if len(v.IpAddress) > 0 {
   381  				log.Printf("[DEBUG] %#v", v.IpAddress[0])
   382  				networkInterface["ip_address"] = v.IpAddress[0]
   383  
   384  				m := net.CIDRMask(v.IpConfig.IpAddress[0].PrefixLength, 32)
   385  				subnetMask := net.IPv4(m[0], m[1], m[2], m[3])
   386  				networkInterface["subnet_mask"] = subnetMask.String()
   387  				log.Printf("[DEBUG] %#v", subnetMask.String())
   388  			}
   389  			networkInterfaces = append(networkInterfaces, networkInterface)
   390  		}
   391  	}
   392  	log.Printf("[DEBUG] networkInterfaces: %#v", networkInterfaces)
   393  	err = d.Set("network_interface", networkInterfaces)
   394  	if err != nil {
   395  		return fmt.Errorf("Invalid network interfaces to set: %#v", networkInterfaces)
   396  	}
   397  
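        	// Derive the root datastore name from the first datastore attached to the VM,
        	// using the parent StoragePod (datastore cluster) name when there is one.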
   398  	var rootDatastore string
   399  	for _, v := range mvm.Datastore {
   400  		var md mo.Datastore
   401  		if err := collector.RetrieveOne(context.TODO(), v, []string{"name", "parent"}, &md); err != nil {
   402  			return err
   403  		}
   404  		if md.Parent.Type == "StoragePod" {
   405  			var msp mo.StoragePod
   406  			if err := collector.RetrieveOne(context.TODO(), *md.Parent, []string{"name"}, &msp); err != nil {
   407  				return err
   408  			}
   409  			rootDatastore = msp.Name
   410  			log.Printf("[DEBUG] %#v", msp.Name)
   411  		} else {
   412  			rootDatastore = md.Name
   413  			log.Printf("[DEBUG] %#v", md.Name)
   414  		}
   415  		break
   416  	}
   417  
   418  	d.Set("datacenter", dc)
   419  	d.Set("memory", mvm.Summary.Config.MemorySizeMB)
   420  	d.Set("vcpu", mvm.Summary.Config.NumCpu)
   421  	d.Set("datastore", rootDatastore)
   422  
   423  	// Initialize the connection info
   424  	if len(networkInterfaces) > 0 {
   425  		d.SetConnInfo(map[string]string{
   426  			"type": "ssh",
   427  			"host": networkInterfaces[0]["ip_address"].(string),
   428  		})
   429  	}
   430  
   431  	return nil
   432  }
   433  
   434  func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error {
   435  	client := meta.(*govmomi.Client)
   436  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
   437  	if err != nil {
   438  		return err
   439  	}
   440  	finder := find.NewFinder(client.Client, true)
   441  	finder = finder.SetDatacenter(dc)
   442  
   443  	vm, err := finder.VirtualMachine(context.TODO(), d.Get("name").(string))
   444  	if err != nil {
   445  		return err
   446  	}
   447  
   448  	log.Printf("[INFO] Deleting virtual machine: %s", d.Id())
   449  
   450  	task, err := vm.PowerOff(context.TODO())
   451  	if err != nil {
   452  		return err
   453  	}
   454  
   455  	err = task.Wait(context.TODO())
   456  	if err != nil {
   457  		return err
   458  	}
   459  
   460  	task, err = vm.Destroy(context.TODO())
   461  	if err != nil {
   462  		return err
   463  	}
   464  
   465  	err = task.Wait(context.TODO())
   466  	if err != nil {
   467  		return err
   468  	}
   469  
   470  	d.SetId("")
   471  	return nil
   472  }
   473  
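        // waitForNetworkingActive returns a StateRefreshFunc that polls the virtual machine
        // summary until the guest reports an IP address.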
   474  func waitForNetworkingActive(client *govmomi.Client, datacenter, name string) resource.StateRefreshFunc {
   475  	return func() (interface{}, string, error) {
   476  		dc, err := getDatacenter(client, datacenter)
   477  		if err != nil {
   478  			log.Printf("[ERROR] %#v", err)
   479  			return nil, "", err
   480  		}
   481  		finder := find.NewFinder(client.Client, true)
   482  		finder = finder.SetDatacenter(dc)
   483  
   484  		vm, err := finder.VirtualMachine(context.TODO(), name)
   485  		if err != nil {
   486  			log.Printf("[ERROR] %#v", err)
   487  			return nil, "", err
   488  		}
   489  
   490  		var mvm mo.VirtualMachine
   491  		collector := property.DefaultCollector(client.Client)
   492  		if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"summary"}, &mvm); err != nil {
   493  			log.Printf("[ERROR] %#v", err)
   494  			return nil, "", err
   495  		}
   496  
   497  		if mvm.Summary.Guest.IpAddress != "" {
   498  			log.Printf("[DEBUG] IP address with DHCP: %v", mvm.Summary.Guest.IpAddress)
   499  			return mvm.Summary, "active", err
   500  		} else {
   501  			log.Printf("[DEBUG] Waiting for IP address")
   502  			return nil, "pending", err
   503  		}
   504  	}
   505  }
   506  
   507  // getDatacenter returns the named datacenter, or the default datacenter when dc is empty.
   508  func getDatacenter(c *govmomi.Client, dc string) (*object.Datacenter, error) {
   509  	finder := find.NewFinder(c.Client, true)
   510  	if dc != "" {
   511  		d, err := finder.Datacenter(context.TODO(), dc)
   512  		return d, err
   513  	} else {
   514  		d, err := finder.DefaultDatacenter(context.TODO())
   515  		return d, err
   516  	}
   517  }
   518  
   519  // addHardDisk adds a new Hard Disk to the VirtualMachine.
   520  func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string) error {
   521  	devices, err := vm.Device(context.TODO())
   522  	if err != nil {
   523  		return err
   524  	}
   525  	log.Printf("[DEBUG] vm devices: %#v\n", devices)
   526  
   527  	controller, err := devices.FindDiskController("scsi")
   528  	if err != nil {
   529  		return err
   530  	}
   531  	log.Printf("[DEBUG] disk controller: %#v\n", controller)
   532  
   533  	disk := devices.CreateDisk(controller, "")
   534  	existing := devices.SelectByBackingInfo(disk.Backing)
   535  	log.Printf("[DEBUG] disk: %#v\n", disk)
   536  
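        	// Only add the disk when no existing device shares the same backing file.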
   537  	if len(existing) == 0 {
   538  		disk.CapacityInKB = int64(size * 1024 * 1024)
   539  		if iops != 0 {
   540  			disk.StorageIOAllocation = &types.StorageIOAllocationInfo{
   541  				Limit: iops,
   542  			}
   543  		}
   544  		backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
   545  
   546  		if diskType == "eager_zeroed" {
   547  			// eager zeroed thick virtual disk
   548  			backing.ThinProvisioned = types.NewBool(false)
   549  			backing.EagerlyScrub = types.NewBool(true)
   550  		} else if diskType == "thin" {
   551  			// thin provisioned virtual disk
   552  			backing.ThinProvisioned = types.NewBool(true)
   553  		}
   554  
   555  		log.Printf("[DEBUG] addHardDisk: %#v\n", disk)
   556  		log.Printf("[DEBUG] addHardDisk: %#v\n", disk.CapacityInKB)
   557  
   558  		return vm.AddDevice(context.TODO(), disk)
   559  	} else {
   560  		log.Printf("[DEBUG] addHardDisk: Disk already present.\n")
   561  
   562  		return nil
   563  	}
   564  }
   565  
   566  // buildNetworkDevice builds a VirtualDeviceConfigSpec for a network device of the given adapter type (vmxnet3 or e1000).
   567  func buildNetworkDevice(f *find.Finder, label, adapterType string) (*types.VirtualDeviceConfigSpec, error) {
   568  	network, err := f.Network(context.TODO(), "*"+label)
   569  	if err != nil {
   570  		return nil, err
   571  	}
   572  
   573  	backing, err := network.EthernetCardBackingInfo(context.TODO())
   574  	if err != nil {
   575  		return nil, err
   576  	}
   577  
   578  	if adapterType == "vmxnet3" {
   579  		return &types.VirtualDeviceConfigSpec{
   580  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
   581  			Device: &types.VirtualVmxnet3{
   582  				types.VirtualVmxnet{
   583  					types.VirtualEthernetCard{
   584  						VirtualDevice: types.VirtualDevice{
   585  							Key:     -1,
   586  							Backing: backing,
   587  						},
   588  						AddressType: string(types.VirtualEthernetCardMacTypeGenerated),
   589  					},
   590  				},
   591  			},
   592  		}, nil
   593  	} else if adapterType == "e1000" {
   594  		return &types.VirtualDeviceConfigSpec{
   595  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
   596  			Device: &types.VirtualE1000{
   597  				types.VirtualEthernetCard{
   598  					VirtualDevice: types.VirtualDevice{
   599  						Key:     -1,
   600  						Backing: backing,
   601  					},
   602  					AddressType: string(types.VirtualEthernetCardMacTypeGenerated),
   603  				},
   604  			},
   605  		}, nil
   606  	} else {
   607  		return nil, fmt.Errorf("Invalid network adapter type.")
   608  	}
   609  }
   610  
   611  // buildVMRelocateSpec builds a VirtualMachineRelocateSpec that places the cloned VirtualMachine on the given resource pool and datastore.
   612  func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine) (types.VirtualMachineRelocateSpec, error) {
   613  	var key int
   614  
   615  	devices, err := vm.Device(context.TODO())
   616  	if err != nil {
   617  		return types.VirtualMachineRelocateSpec{}, err
   618  	}
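        	// Record the device key of the source VM's (last) virtual disk so the relocate
        	// spec below can re-back that disk on the target datastore.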
   619  	for _, d := range devices {
   620  		if devices.Type(d) == "disk" {
   621  			key = d.GetVirtualDevice().Key
   622  		}
   623  	}
   624  
   625  	rpr := rp.Reference()
   626  	dsr := ds.Reference()
   627  	return types.VirtualMachineRelocateSpec{
   628  		Datastore: &dsr,
   629  		Pool:      &rpr,
   630  		Disk: []types.VirtualMachineRelocateSpecDiskLocator{
   631  			types.VirtualMachineRelocateSpecDiskLocator{
   632  				Datastore: dsr,
   633  				DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{
   634  					DiskMode:        "persistent",
   635  					ThinProvisioned: types.NewBool(false),
   636  					EagerlyScrub:    types.NewBool(true),
   637  				},
   638  				DiskId: key,
   639  			},
   640  		},
   641  	}, nil
   642  }
   643  
   644  // getDatastoreObject looks up a datastore (or datastore cluster) reference by name in the datacenter's datastore folder.
   645  func getDatastoreObject(client *govmomi.Client, f *object.DatacenterFolders, name string) (types.ManagedObjectReference, error) {
   646  	s := object.NewSearchIndex(client.Client)
   647  	ref, err := s.FindChild(context.TODO(), f.DatastoreFolder, name)
   648  	if err != nil {
   649  		return types.ManagedObjectReference{}, err
   650  	}
   651  	if ref == nil {
   652  		return types.ManagedObjectReference{}, fmt.Errorf("Datastore '%s' not found.", name)
   653  	}
   654  	log.Printf("[DEBUG] getDatastoreObject: reference: %#v", ref)
   655  	return ref.Reference(), nil
   656  }
   657  
   658  // buildStoragePlacementSpecCreate builds StoragePlacementSpec for create action.
   659  func buildStoragePlacementSpecCreate(f *object.DatacenterFolders, rp *object.ResourcePool, storagePod object.StoragePod, configSpec types.VirtualMachineConfigSpec) types.StoragePlacementSpec {
   660  	vmfr := f.VmFolder.Reference()
   661  	rpr := rp.Reference()
   662  	spr := storagePod.Reference()
   663  
   664  	sps := types.StoragePlacementSpec{
   665  		Type:       "create",
   666  		ConfigSpec: &configSpec,
   667  		PodSelectionSpec: types.StorageDrsPodSelectionSpec{
   668  			StoragePod: &spr,
   669  		},
   670  		Folder:       &vmfr,
   671  		ResourcePool: &rpr,
   672  	}
   673  	log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)
   674  	return sps
   675  }
   676  
   677  // buildStoragePlacementSpecClone builds StoragePlacementSpec for clone action.
   678  func buildStoragePlacementSpecClone(c *govmomi.Client, f *object.DatacenterFolders, vm *object.VirtualMachine, rp *object.ResourcePool, storagePod object.StoragePod) types.StoragePlacementSpec {
   679  	vmr := vm.Reference()
   680  	vmfr := f.VmFolder.Reference()
   681  	rpr := rp.Reference()
   682  	spr := storagePod.Reference()
   683  
   684  	var o mo.VirtualMachine
   685  	err := vm.Properties(context.TODO(), vmr, []string{"datastore"}, &o)
   686  	if err != nil {
   687  		return types.StoragePlacementSpec{}
   688  	}
   689  	ds := object.NewDatastore(c.Client, o.Datastore[0])
   690  	log.Printf("[DEBUG] findDatastore: datastore: %#v\n", ds)
   691  
   692  	devices, err := vm.Device(context.TODO())
   693  	if err != nil {
   694  		return types.StoragePlacementSpec{}
   695  	}
   696  
   697  	var key int
   698  	for _, d := range devices.SelectByType((*types.VirtualDisk)(nil)) {
   699  		key = d.GetVirtualDevice().Key
   700  		log.Printf("[DEBUG] findDatastore: virtual devices: %#v\n", d.GetVirtualDevice())
   701  	}
   702  
   703  	sps := types.StoragePlacementSpec{
   704  		Type: "clone",
   705  		Vm:   &vmr,
   706  		PodSelectionSpec: types.StorageDrsPodSelectionSpec{
   707  			StoragePod: &spr,
   708  		},
   709  		CloneSpec: &types.VirtualMachineCloneSpec{
   710  			Location: types.VirtualMachineRelocateSpec{
   711  				Disk: []types.VirtualMachineRelocateSpecDiskLocator{
   712  					types.VirtualMachineRelocateSpecDiskLocator{
   713  						Datastore:       ds.Reference(),
   714  						DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{},
   715  						DiskId:          key,
   716  					},
   717  				},
   718  				Pool: &rpr,
   719  			},
   720  			PowerOn:  false,
   721  			Template: false,
   722  		},
   723  		CloneName: "dummy",
   724  		Folder:    &vmfr,
   725  	}
   726  	return sps
   727  }
   728  
   729  // findDatastore asks Storage DRS for a recommendation and returns the recommended Datastore.
   730  func findDatastore(c *govmomi.Client, sps types.StoragePlacementSpec) (*object.Datastore, error) {
   731  	var datastore *object.Datastore
   732  	log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)
   733  
   734  	srm := object.NewStorageResourceManager(c.Client)
   735  	rds, err := srm.RecommendDatastores(context.TODO(), sps)
   736  	if err != nil {
   737  		return nil, err
   738  	}
   739  	log.Printf("[DEBUG] findDatastore: recommendDatastores: %#v\n", rds)
   740  
   741  	spa := rds.Recommendations[0].Action[0].(*types.StoragePlacementAction)
   742  	datastore = object.NewDatastore(c.Client, spa.Destination)
   743  	log.Printf("[DEBUG] findDatastore: datastore: %#v", datastore)
   744  
   745  	return datastore, nil
   746  }
   747  
   748  // createVirtualMachine creates a new VirtualMachine from scratch.
   749  func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
   750  	dc, err := getDatacenter(c, vm.datacenter)
   751  	if err != nil {
   752  		return err
   753  	}
   754  	finder := find.NewFinder(c.Client, true)
   755  	finder = finder.SetDatacenter(dc)
   756  
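        	// Resolve the resource pool: an explicit pool name, the cluster's root "Resources"
        	// pool, or the finder's default pool.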
   757  	var resourcePool *object.ResourcePool
   758  	if vm.resourcePool == "" {
   759  		if vm.cluster == "" {
   760  			resourcePool, err = finder.DefaultResourcePool(context.TODO())
   761  			if err != nil {
   762  				return err
   763  			}
   764  		} else {
   765  			resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
   766  			if err != nil {
   767  				return err
   768  			}
   769  		}
   770  	} else {
   771  		resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
   772  		if err != nil {
   773  			return err
   774  		}
   775  	}
   776  	log.Printf("[DEBUG] resource pool: %#v", resourcePool)
   777  
   778  	dcFolders, err := dc.Folders(context.TODO())
   779  	if err != nil {
   780  		return err
   781  	}
   782  
   783  	// network
   784  	networkDevices := []types.BaseVirtualDeviceConfigSpec{}
   785  	for _, network := range vm.networkInterfaces {
   786  		// network device
   787  		nd, err := buildNetworkDevice(finder, network.label, "e1000")
   788  		if err != nil {
   789  			return err
   790  		}
   791  		networkDevices = append(networkDevices, nd)
   792  	}
   793  
   794  	// make config spec
   795  	configSpec := types.VirtualMachineConfigSpec{
   796  		GuestId:           "otherLinux64Guest",
   797  		Name:              vm.name,
   798  		NumCPUs:           vm.vcpu,
   799  		NumCoresPerSocket: 1,
   800  		MemoryMB:          vm.memoryMb,
   801  		DeviceChange:      networkDevices,
   802  	}
   803  	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
   804  
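        	// Resolve the datastore; when the name refers to a datastore cluster (StoragePod),
        	// ask Storage DRS for a placement recommendation instead.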
   805  	var datastore *object.Datastore
   806  	if vm.datastore == "" {
   807  		datastore, err = finder.DefaultDatastore(context.TODO())
   808  		if err != nil {
   809  			return err
   810  		}
   811  	} else {
   812  		datastore, err = finder.Datastore(context.TODO(), vm.datastore)
   813  		if err != nil {
   814  			// TODO: datastore cluster support in govmomi finder function
   815  			d, err := getDatastoreObject(c, dcFolders, vm.datastore)
   816  			if err != nil {
   817  				return err
   818  			}
   819  
   820  			if d.Type == "StoragePod" {
   821  				sp := object.StoragePod{
   822  					object.NewFolder(c.Client, d),
   823  				}
   824  				sps := buildStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec)
   825  				datastore, err = findDatastore(c, sps)
   826  				if err != nil {
   827  					return err
   828  				}
   829  			} else {
   830  				datastore = object.NewDatastore(c.Client, d)
   831  			}
   832  		}
   833  	}
   834  
   835  	log.Printf("[DEBUG] datastore: %#v", datastore)
   836  
   837  	var mds mo.Datastore
   838  	if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil {
   839  		return err
   840  	}
   841  	log.Printf("[DEBUG] datastore: %#v", mds.Name)
   842  	scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi")
   843  	if err != nil {
   844  		return err
   845  	}
   846  
   847  	configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{
   848  		Operation: types.VirtualDeviceConfigSpecOperationAdd,
   849  		Device:    scsi,
   850  	})
   851  	configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)}
   852  
   853  	task, err := dcFolders.VmFolder.CreateVM(context.TODO(), configSpec, resourcePool, nil)
   854  	if err != nil {
   855  		return err
   856  	}
   857  
   858  	err = task.Wait(context.TODO())
   859  	if err != nil {
   860  		return err
   861  	}
   862  
   863  	newVM, err := finder.VirtualMachine(context.TODO(), vm.name)
   864  	if err != nil {
   865  		return err
   866  	}
   867  	log.Printf("[DEBUG] new vm: %v", newVM)
   868  
   869  	log.Printf("[DEBUG] add hard disk: %v", vm.hardDisks)
   870  	for _, hd := range vm.hardDisks {
   871  		log.Printf("[DEBUG] add hard disk: %v", hd.size)
   872  		log.Printf("[DEBUG] add hard disk: %v", hd.iops)
   873  		err = addHardDisk(newVM, hd.size, hd.iops, "thin")
   874  		if err != nil {
   875  			return err
   876  		}
   877  	}
   878  	return nil
   879  }
   880  
   881  // deployVirtualMachine deploys a new VirtualMachine by cloning vm.template.
   882  func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
   883  	dc, err := getDatacenter(c, vm.datacenter)
   884  	if err != nil {
   885  		return err
   886  	}
   887  	finder := find.NewFinder(c.Client, true)
   888  	finder = finder.SetDatacenter(dc)
   889  
   890  	template, err := finder.VirtualMachine(context.TODO(), vm.template)
   891  	if err != nil {
   892  		return err
   893  	}
   894  	log.Printf("[DEBUG] template: %#v", template)
   895  
   896  	var resourcePool *object.ResourcePool
   897  	if vm.resourcePool == "" {
   898  		if vm.cluster == "" {
   899  			resourcePool, err = finder.DefaultResourcePool(context.TODO())
   900  			if err != nil {
   901  				return err
   902  			}
   903  		} else {
   904  			resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
   905  			if err != nil {
   906  				return err
   907  			}
   908  		}
   909  	} else {
   910  		resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
   911  		if err != nil {
   912  			return err
   913  		}
   914  	}
   915  	log.Printf("[DEBUG] resource pool: %#v", resourcePool)
   916  
   917  	dcFolders, err := dc.Folders(context.TODO())
   918  	if err != nil {
   919  		return err
   920  	}
   921  
   922  	var datastore *object.Datastore
   923  	if vm.datastore == "" {
   924  		datastore, err = finder.DefaultDatastore(context.TODO())
   925  		if err != nil {
   926  			return err
   927  		}
   928  	} else {
   929  		datastore, err = finder.Datastore(context.TODO(), vm.datastore)
   930  		if err != nil {
   931  			// TODO: datastore cluster support in govmomi finder function
   932  			d, err := getDatastoreObject(c, dcFolders, vm.datastore)
   933  			if err != nil {
   934  				return err
   935  			}
   936  
   937  			if d.Type == "StoragePod" {
   938  				sp := object.StoragePod{
   939  					object.NewFolder(c.Client, d),
   940  				}
   941  				sps := buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp)
   942  				datastore, err = findDatastore(c, sps)
   943  				if err != nil {
   944  					return err
   945  				}
   946  			} else {
   947  				datastore = object.NewDatastore(c.Client, d)
   948  			}
   949  		}
   950  	}
   951  	log.Printf("[DEBUG] datastore: %#v", datastore)
   952  
   953  	relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template)
   954  	if err != nil {
   955  		return err
   956  	}
   957  	log.Printf("[DEBUG] relocate spec: %v", relocateSpec)
   958  
   959  	// network
   960  	networkDevices := []types.BaseVirtualDeviceConfigSpec{}
   961  	networkConfigs := []types.CustomizationAdapterMapping{}
   962  	for _, network := range vm.networkInterfaces {
   963  		// network device
   964  		nd, err := buildNetworkDevice(finder, network.label, "vmxnet3")
   965  		if err != nil {
   966  			return err
   967  		}
   968  		networkDevices = append(networkDevices, nd)
   969  
   970  		var ipSetting types.CustomizationIPSettings
   971  		if network.ipAddress == "" {
   972  			ipSetting = types.CustomizationIPSettings{
   973  				Ip: &types.CustomizationDhcpIpGenerator{},
   974  			}
   975  		} else {
   976  			log.Printf("[DEBUG] gateway: %v", vm.gateway)
   977  			log.Printf("[DEBUG] ip address: %v", network.ipAddress)
   978  			log.Printf("[DEBUG] subnet mask: %v", network.subnetMask)
   979  			ipSetting = types.CustomizationIPSettings{
   980  				Gateway: []string{
   981  					vm.gateway,
   982  				},
   983  				Ip: &types.CustomizationFixedIp{
   984  					IpAddress: network.ipAddress,
   985  				},
   986  				SubnetMask: network.subnetMask,
   987  			}
   988  		}
   989  
   990  		// network config
   991  		config := types.CustomizationAdapterMapping{
   992  			Adapter: ipSetting,
   993  		}
   994  		networkConfigs = append(networkConfigs, config)
   995  	}
   996  	log.Printf("[DEBUG] network configs: %v", networkConfigs[0].Adapter)
   997  
   998  	// make config spec
   999  	configSpec := types.VirtualMachineConfigSpec{
  1000  		NumCPUs:           vm.vcpu,
  1001  		NumCoresPerSocket: 1,
  1002  		MemoryMB:          vm.memoryMb,
  1003  	}
  1004  	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
  1005  
  1006  	// build CustomizationSpec
  1007  	customSpec := types.CustomizationSpec{
  1008  		Identity: &types.CustomizationLinuxPrep{
  1009  			HostName: &types.CustomizationFixedName{
  1010  				Name: strings.Split(vm.name, ".")[0],
  1011  			},
  1012  			Domain:     vm.domain,
  1013  			TimeZone:   vm.timeZone,
  1014  			HwClockUTC: types.NewBool(true),
  1015  		},
  1016  		GlobalIPSettings: types.CustomizationGlobalIPSettings{
  1017  			DnsSuffixList: vm.dnsSuffixes,
  1018  			DnsServerList: vm.dnsServers,
  1019  		},
  1020  		NicSettingMap: networkConfigs,
  1021  	}
  1022  	log.Printf("[DEBUG] custom spec: %v", customSpec)
  1023  
  1024  	// make vm clone spec
  1025  	cloneSpec := types.VirtualMachineCloneSpec{
  1026  		Location: relocateSpec,
  1027  		Template: false,
  1028  		Config:   &configSpec,
  1029  		PowerOn:  false,
  1030  	}
  1031  	log.Printf("[DEBUG] clone spec: %v", cloneSpec)
  1032  
  1033  	task, err := template.Clone(context.TODO(), dcFolders.VmFolder, vm.name, cloneSpec)
  1034  	if err != nil {
  1035  		return err
  1036  	}
  1037  
  1038  	_, err = task.WaitForResult(context.TODO(), nil)
  1039  	if err != nil {
  1040  		return err
  1041  	}
  1042  
  1043  	newVM, err := finder.VirtualMachine(context.TODO(), vm.name)
  1044  	if err != nil {
  1045  		return err
  1046  	}
  1047  	log.Printf("[DEBUG] new vm: %v", newVM)
  1048  
  1049  	devices, err := newVM.Device(context.TODO())
  1050  	if err != nil {
  1051  		log.Printf("[DEBUG] Template devices can't be found")
  1052  		return err
  1053  	}
  1054  
  1055  	for _, dvc := range devices {
  1056  		// Issue 3559/3560: Delete all ethernet devices to add the correct ones later
  1057  		if devices.Type(dvc) == "ethernet" {
  1058  			err := newVM.RemoveDevice(context.TODO(), dvc)
  1059  			if err != nil {
  1060  				return err
  1061  			}
  1062  		}
  1063  	}
  1064  	// Add Network devices
  1065  	for _, dvc := range networkDevices {
  1066  		err := newVM.AddDevice(
  1067  			context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device)
  1068  		if err != nil {
  1069  			return err
  1070  		}
  1071  	}
  1072  
  1073  	taskb, err := newVM.Customize(context.TODO(), customSpec)
  1074  	if err != nil {
  1075  		return err
  1076  	}
  1077  
  1078  	_, err = taskb.WaitForResult(context.TODO(), nil)
  1079  	if err != nil {
  1080  		return err
  1081  	}
  1082  	log.Printf("[DEBUG] VM customization finished")
  1083  
  1084  	if _, err := newVM.PowerOn(context.TODO()); err != nil {
        		return err
        	}
  1085  
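        	// Wait until the guest reports an IP address.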
  1086  	ip, err := newVM.WaitForIP(context.TODO())
  1087  	if err != nil {
  1088  		return err
  1089  	}
  1090  	log.Printf("[DEBUG] ip address: %v", ip)
  1091  
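        	// disk.0 was provided by the cloned template; add any additional disks from the configuration.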
  1092  	for i := 1; i < len(vm.hardDisks); i++ {
  1093  		err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, "eager_zeroed")
  1094  		if err != nil {
  1095  			return err
  1096  		}
  1097  	}
  1098  	return nil
  1099  }