github.com/jsoriano/terraform@v0.6.7-0.20151026070445-8b70867fdd95/builtin/providers/vsphere/resource_vsphere_virtual_machine.go

     1  package vsphere
     2  
     3  import (
     4  	"fmt"
     5  	"log"
     6  	"net"
     7  	"strings"
     8  	"time"
     9  
    10  	"github.com/hashicorp/terraform/helper/resource"
    11  	"github.com/hashicorp/terraform/helper/schema"
    12  	"github.com/vmware/govmomi"
    13  	"github.com/vmware/govmomi/find"
    14  	"github.com/vmware/govmomi/object"
    15  	"github.com/vmware/govmomi/property"
    16  	"github.com/vmware/govmomi/vim25/mo"
    17  	"github.com/vmware/govmomi/vim25/types"
    18  	"golang.org/x/net/context"
    19  )
    20  
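         // DefaultDNSSuffixes is the DNS suffix list used for guest customization when dns_suffixes is not configured.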
    21  var DefaultDNSSuffixes = []string{
    22  	"vsphere.local",
    23  }
    24  
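         // DefaultDNSServers is the DNS server list used for guest customization when dns_servers is not configured.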
    25  var DefaultDNSServers = []string{
    26  	"8.8.8.8",
    27  	"8.8.4.4",
    28  }
    29  
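         // networkInterface holds the values of a single network_interface block from the resource configuration.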
    30  type networkInterface struct {
    31  	deviceName  string
    32  	label       string
    33  	ipAddress   string
    34  	subnetMask  string
    35  	adapterType string // TODO: Make "adapter_type" argument
    36  }
    37  
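         // hardDisk holds the values of a single disk block; size is expressed in GB.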
    38  type hardDisk struct {
    39  	size int64
    40  	iops int64
    41  }
    42  
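         // virtualMachine gathers all arguments needed to create or clone a virtual machine.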
    43  type virtualMachine struct {
    44  	name              string
    45  	datacenter        string
    46  	cluster           string
    47  	resourcePool      string
    48  	datastore         string
    49  	vcpu              int
    50  	memoryMb          int64
    51  	template          string
    52  	networkInterfaces []networkInterface
    53  	hardDisks         []hardDisk
    54  	gateway           string
    55  	domain            string
    56  	timeZone          string
    57  	dnsSuffixes       []string
    58  	dnsServers        []string
    59  }
    60  
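         // resourceVSphereVirtualMachine defines the schema and the create/read/delete
         // functions for the virtual machine resource. Assuming the provider registers it
         // as "vsphere_virtual_machine", a minimal illustrative configuration (all values
         // below are placeholders, not defaults) could look like:
         //
         //	resource "vsphere_virtual_machine" "example" {
         //	  name   = "example-vm"
         //	  vcpu   = 2
         //	  memory = 4096
         //
         //	  network_interface {
         //	    label = "VM Network"
         //	  }
         //
         //	  disk {
         //	    template = "example-template"
         //	  }
         //	}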
    61  func resourceVSphereVirtualMachine() *schema.Resource {
    62  	return &schema.Resource{
    63  		Create: resourceVSphereVirtualMachineCreate,
    64  		Read:   resourceVSphereVirtualMachineRead,
    65  		Delete: resourceVSphereVirtualMachineDelete,
    66  
    67  		Schema: map[string]*schema.Schema{
    68  			"name": &schema.Schema{
    69  				Type:     schema.TypeString,
    70  				Required: true,
    71  				ForceNew: true,
    72  			},
    73  
    74  			"vcpu": &schema.Schema{
    75  				Type:     schema.TypeInt,
    76  				Required: true,
    77  				ForceNew: true,
    78  			},
    79  
    80  			"memory": &schema.Schema{
    81  				Type:     schema.TypeInt,
    82  				Required: true,
    83  				ForceNew: true,
    84  			},
    85  
    86  			"datacenter": &schema.Schema{
    87  				Type:     schema.TypeString,
    88  				Optional: true,
    89  				ForceNew: true,
    90  			},
    91  
    92  			"cluster": &schema.Schema{
    93  				Type:     schema.TypeString,
    94  				Optional: true,
    95  				ForceNew: true,
    96  			},
    97  
    98  			"resource_pool": &schema.Schema{
    99  				Type:     schema.TypeString,
   100  				Optional: true,
   101  				ForceNew: true,
   102  			},
   103  
   104  			"gateway": &schema.Schema{
   105  				Type:     schema.TypeString,
   106  				Optional: true,
   107  				ForceNew: true,
   108  			},
   109  
   110  			"domain": &schema.Schema{
   111  				Type:     schema.TypeString,
   112  				Optional: true,
   113  				ForceNew: true,
   114  				Default:  "vsphere.local",
   115  			},
   116  
   117  			"time_zone": &schema.Schema{
   118  				Type:     schema.TypeString,
   119  				Optional: true,
   120  				ForceNew: true,
   121  				Default:  "Etc/UTC",
   122  			},
   123  
   124  			"dns_suffixes": &schema.Schema{
   125  				Type:     schema.TypeList,
   126  				Optional: true,
   127  				Elem:     &schema.Schema{Type: schema.TypeString},
   128  				ForceNew: true,
   129  			},
   130  
   131  			"dns_servers": &schema.Schema{
   132  				Type:     schema.TypeList,
   133  				Optional: true,
   134  				Elem:     &schema.Schema{Type: schema.TypeString},
   135  				ForceNew: true,
   136  			},
   137  
   138  			"network_interface": &schema.Schema{
   139  				Type:     schema.TypeList,
   140  				Required: true,
   141  				ForceNew: true,
   142  				Elem: &schema.Resource{
   143  					Schema: map[string]*schema.Schema{
   144  						"label": &schema.Schema{
   145  							Type:     schema.TypeString,
   146  							Required: true,
   147  							ForceNew: true,
   148  						},
   149  
   150  						"ip_address": &schema.Schema{
   151  							Type:     schema.TypeString,
   152  							Optional: true,
   153  							Computed: true,
   154  							ForceNew: true,
   155  						},
   156  
   157  						"subnet_mask": &schema.Schema{
   158  							Type:     schema.TypeString,
   159  							Optional: true,
   160  							Computed: true,
   161  							ForceNew: true,
   162  						},
   163  
   164  						"adapter_type": &schema.Schema{
   165  							Type:     schema.TypeString,
   166  							Optional: true,
   167  							ForceNew: true,
   168  						},
   169  					},
   170  				},
   171  			},
   172  
   173  			"disk": &schema.Schema{
   174  				Type:     schema.TypeList,
   175  				Required: true,
   176  				ForceNew: true,
   177  				Elem: &schema.Resource{
   178  					Schema: map[string]*schema.Schema{
   179  						"template": &schema.Schema{
   180  							Type:     schema.TypeString,
   181  							Optional: true,
   182  							ForceNew: true,
   183  						},
   184  
   185  						"datastore": &schema.Schema{
   186  							Type:     schema.TypeString,
   187  							Optional: true,
   188  							ForceNew: true,
   189  						},
   190  
   191  						"size": &schema.Schema{
   192  							Type:     schema.TypeInt,
   193  							Optional: true,
   194  							ForceNew: true,
   195  						},
   196  
   197  						"iops": &schema.Schema{
   198  							Type:     schema.TypeInt,
   199  							Optional: true,
   200  							ForceNew: true,
   201  						},
   202  					},
   203  				},
   204  			},
   205  
   206  			"boot_delay": &schema.Schema{
   207  				Type:     schema.TypeInt,
   208  				Optional: true,
   209  				ForceNew: true,
   210  			},
   211  		},
   212  	}
   213  }
   214  
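         // resourceVSphereVirtualMachineCreate builds a virtualMachine from the resource
         // configuration and either clones it from the configured template (disk.0.template)
         // or creates it from scratch, optionally waiting for DHCP networking before setting the ID.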
   215  func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error {
   216  	client := meta.(*govmomi.Client)
   217  
   218  	vm := virtualMachine{
   219  		name:     d.Get("name").(string),
   220  		vcpu:     d.Get("vcpu").(int),
   221  		memoryMb: int64(d.Get("memory").(int)),
   222  	}
   223  
   224  	if v, ok := d.GetOk("datacenter"); ok {
   225  		vm.datacenter = v.(string)
   226  	}
   227  
   228  	if v, ok := d.GetOk("cluster"); ok {
   229  		vm.cluster = v.(string)
   230  	}
   231  
   232  	if v, ok := d.GetOk("resource_pool"); ok {
   233  		vm.resourcePool = v.(string)
   234  	}
   235  
   236  	if v, ok := d.GetOk("gateway"); ok {
   237  		vm.gateway = v.(string)
   238  	}
   239  
   240  	if v, ok := d.GetOk("domain"); ok {
   241  		vm.domain = v.(string)
   242  	}
   243  
   244  	if v, ok := d.GetOk("time_zone"); ok {
   245  		vm.timeZone = v.(string)
   246  	}
   247  
   248  	if raw, ok := d.GetOk("dns_suffixes"); ok {
   249  		for _, v := range raw.([]interface{}) {
   250  			vm.dnsSuffixes = append(vm.dnsSuffixes, v.(string))
   251  		}
   252  	} else {
   253  		vm.dnsSuffixes = DefaultDNSSuffixes
   254  	}
   255  
   256  	if raw, ok := d.GetOk("dns_servers"); ok {
   257  		for _, v := range raw.([]interface{}) {
   258  			vm.dnsServers = append(vm.dnsServers, v.(string))
   259  		}
   260  	} else {
   261  		vm.dnsServers = DefaultDNSServers
   262  	}
   263  
   264  	if vL, ok := d.GetOk("network_interface"); ok {
   265  		networks := make([]networkInterface, len(vL.([]interface{})))
   266  		for i, v := range vL.([]interface{}) {
   267  			network := v.(map[string]interface{})
   268  			networks[i].label = network["label"].(string)
   269  			if v, ok := network["ip_address"].(string); ok && v != "" {
   270  				networks[i].ipAddress = v
   271  			}
   272  			if v, ok := network["subnet_mask"].(string); ok && v != "" {
   273  				networks[i].subnetMask = v
   274  			}
   275  		}
   276  		vm.networkInterfaces = networks
   277  		log.Printf("[DEBUG] network_interface init: %v", networks)
   278  	}
   279  
   280  	if vL, ok := d.GetOk("disk"); ok {
   281  		disks := make([]hardDisk, len(vL.([]interface{})))
   282  		for i, v := range vL.([]interface{}) {
   283  			disk := v.(map[string]interface{})
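         			// Only the first disk block may specify a template and the datastore used for the virtual machine.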
   284  			if i == 0 {
   285  				if v, ok := disk["template"].(string); ok && v != "" {
   286  					vm.template = v
   287  				} else {
   288  					if v, ok := disk["size"].(int); ok && v != 0 {
   289  						disks[i].size = int64(v)
   290  					} else {
    291  						return fmt.Errorf("size argument is required when template argument is not specified")
   292  					}
   293  				}
   294  				if v, ok := disk["datastore"].(string); ok && v != "" {
   295  					vm.datastore = v
   296  				}
   297  			} else {
   298  				if v, ok := disk["size"].(int); ok && v != 0 {
   299  					disks[i].size = int64(v)
   300  				} else {
    301  					return fmt.Errorf("size argument is required for additional disks")
   302  				}
   303  			}
   304  			if v, ok := disk["iops"].(int); ok && v != 0 {
   305  				disks[i].iops = int64(v)
   306  			}
   307  		}
   308  		vm.hardDisks = disks
   309  		log.Printf("[DEBUG] disk init: %v", disks)
   310  	}
   311  
   312  	if vm.template != "" {
   313  		err := vm.deployVirtualMachine(client)
   314  		if err != nil {
   315  			return err
   316  		}
   317  	} else {
   318  		err := vm.createVirtualMachine(client)
   319  		if err != nil {
   320  			return err
   321  		}
   322  	}
   323  
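         	// If no static IP address was configured for the first network interface, optionally
         	// wait boot_delay seconds and then poll until vSphere reports a DHCP-assigned address.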
   324  	if _, ok := d.GetOk("network_interface.0.ip_address"); !ok {
   325  		if v, ok := d.GetOk("boot_delay"); ok {
   326  			stateConf := &resource.StateChangeConf{
   327  				Pending:    []string{"pending"},
   328  				Target:     "active",
   329  				Refresh:    waitForNetworkingActive(client, vm.datacenter, vm.name),
   330  				Timeout:    600 * time.Second,
   331  				Delay:      time.Duration(v.(int)) * time.Second,
   332  				MinTimeout: 2 * time.Second,
   333  			}
   334  
   335  			_, err := stateConf.WaitForState()
   336  			if err != nil {
   337  				return err
   338  			}
   339  		}
   340  	}
   341  	d.SetId(vm.name)
   342  	log.Printf("[INFO] Created virtual machine: %s", d.Id())
   343  
   344  	return resourceVSphereVirtualMachineRead(d, meta)
   345  }
   346  
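         // resourceVSphereVirtualMachineRead refreshes the resource state from vSphere, reading the
         // guest network interfaces, memory, CPU count and root datastore, and clears the ID when the
         // virtual machine can no longer be found.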
   347  func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{}) error {
   348  	client := meta.(*govmomi.Client)
   349  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
   350  	if err != nil {
   351  		return err
   352  	}
   353  	finder := find.NewFinder(client.Client, true)
   354  	finder = finder.SetDatacenter(dc)
   355  
   356  	vm, err := finder.VirtualMachine(context.TODO(), d.Get("name").(string))
   357  	if err != nil {
   358  		log.Printf("[ERROR] Virtual machine not found: %s", d.Get("name").(string))
   359  		d.SetId("")
   360  		return nil
   361  	}
   362  
   363  	var mvm mo.VirtualMachine
   364  
   365  	collector := property.DefaultCollector(client.Client)
   366  	if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"guest", "summary", "datastore"}, &mvm); err != nil {
   367  		return err
   368  	}
   369  
   370  	log.Printf("[DEBUG] %#v", dc)
   371  	log.Printf("[DEBUG] %#v", mvm.Summary.Config)
   372  	log.Printf("[DEBUG] %#v", mvm.Guest.Net)
   373  
   374  	networkInterfaces := make([]map[string]interface{}, 0)
   375  	for _, v := range mvm.Guest.Net {
   376  		if v.DeviceConfigId >= 0 {
   377  			log.Printf("[DEBUG] %#v", v.Network)
   378  			networkInterface := make(map[string]interface{})
   379  			networkInterface["label"] = v.Network
   380  			if len(v.IpAddress) > 0 {
   381  				log.Printf("[DEBUG] %#v", v.IpAddress[0])
   382  				networkInterface["ip_address"] = v.IpAddress[0]
   383  
   384  				m := net.CIDRMask(v.IpConfig.IpAddress[0].PrefixLength, 32)
   385  				subnetMask := net.IPv4(m[0], m[1], m[2], m[3])
   386  				networkInterface["subnet_mask"] = subnetMask.String()
   387  				log.Printf("[DEBUG] %#v", subnetMask.String())
   388  			}
   389  			networkInterfaces = append(networkInterfaces, networkInterface)
   390  		}
   391  	}
   392  	err = d.Set("network_interface", networkInterfaces)
   393  	if err != nil {
    394  		return fmt.Errorf("invalid network interfaces to set: %#v: %s", networkInterfaces, err)
   395  	}
   396  
   397  	var rootDatastore string
   398  	for _, v := range mvm.Datastore {
   399  		var md mo.Datastore
   400  		if err := collector.RetrieveOne(context.TODO(), v, []string{"name", "parent"}, &md); err != nil {
   401  			return err
   402  		}
   403  		if md.Parent.Type == "StoragePod" {
   404  			var msp mo.StoragePod
   405  			if err := collector.RetrieveOne(context.TODO(), *md.Parent, []string{"name"}, &msp); err != nil {
   406  				return err
   407  			}
   408  			rootDatastore = msp.Name
   409  			log.Printf("[DEBUG] %#v", msp.Name)
   410  		} else {
   411  			rootDatastore = md.Name
   412  			log.Printf("[DEBUG] %#v", md.Name)
   413  		}
   414  		break
   415  	}
   416  
   417  	d.Set("datacenter", dc)
   418  	d.Set("memory", mvm.Summary.Config.MemorySizeMB)
    419  	d.Set("vcpu", mvm.Summary.Config.NumCpu)
   420  	d.Set("datastore", rootDatastore)
   421  
    422  	// Initialize the connection info, guarding against machines that have not reported an IP address yet.
    423  	if len(networkInterfaces) > 0 {
    424  		if ip, ok := networkInterfaces[0]["ip_address"].(string); ok {
    425  			d.SetConnInfo(map[string]string{
    426  				"type": "ssh",
         				"host": ip,
         			})
         		}
         	}
   427  
   428  	return nil
   429  }
   430  
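         // resourceVSphereVirtualMachineDelete powers off the virtual machine and then destroys it.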
   431  func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error {
   432  	client := meta.(*govmomi.Client)
   433  	dc, err := getDatacenter(client, d.Get("datacenter").(string))
   434  	if err != nil {
   435  		return err
   436  	}
   437  	finder := find.NewFinder(client.Client, true)
   438  	finder = finder.SetDatacenter(dc)
   439  
   440  	vm, err := finder.VirtualMachine(context.TODO(), d.Get("name").(string))
   441  	if err != nil {
   442  		return err
   443  	}
   444  
   445  	log.Printf("[INFO] Deleting virtual machine: %s", d.Id())
   446  
   447  	task, err := vm.PowerOff(context.TODO())
   448  	if err != nil {
   449  		return err
   450  	}
   451  
   452  	err = task.Wait(context.TODO())
   453  	if err != nil {
   454  		return err
   455  	}
   456  
   457  	task, err = vm.Destroy(context.TODO())
   458  	if err != nil {
   459  		return err
   460  	}
   461  
   462  	err = task.Wait(context.TODO())
   463  	if err != nil {
   464  		return err
   465  	}
   466  
   467  	d.SetId("")
   468  	return nil
   469  }
   470  
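         // waitForNetworkingActive returns a resource.StateRefreshFunc that reports "active" once
         // vSphere exposes an IP address for the named virtual machine, and "pending" while waiting.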
   471  func waitForNetworkingActive(client *govmomi.Client, datacenter, name string) resource.StateRefreshFunc {
   472  	return func() (interface{}, string, error) {
   473  		dc, err := getDatacenter(client, datacenter)
   474  		if err != nil {
   475  			log.Printf("[ERROR] %#v", err)
   476  			return nil, "", err
   477  		}
   478  		finder := find.NewFinder(client.Client, true)
   479  		finder = finder.SetDatacenter(dc)
   480  
   481  		vm, err := finder.VirtualMachine(context.TODO(), name)
   482  		if err != nil {
   483  			log.Printf("[ERROR] %#v", err)
   484  			return nil, "", err
   485  		}
   486  
   487  		var mvm mo.VirtualMachine
   488  		collector := property.DefaultCollector(client.Client)
   489  		if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"summary"}, &mvm); err != nil {
   490  			log.Printf("[ERROR] %#v", err)
   491  			return nil, "", err
   492  		}
   493  
   494  		if mvm.Summary.Guest.IpAddress != "" {
   495  			log.Printf("[DEBUG] IP address with DHCP: %v", mvm.Summary.Guest.IpAddress)
   496  			return mvm.Summary, "active", err
   497  		} else {
   498  			log.Printf("[DEBUG] Waiting for IP address")
   499  			return nil, "pending", err
   500  		}
   501  	}
   502  }
   503  
    504  // getDatacenter returns the datacenter with the given name, or the default datacenter when dc is empty.
   505  func getDatacenter(c *govmomi.Client, dc string) (*object.Datacenter, error) {
   506  	finder := find.NewFinder(c.Client, true)
   507  	if dc != "" {
   508  		d, err := finder.Datacenter(context.TODO(), dc)
   509  		return d, err
   510  	} else {
   511  		d, err := finder.DefaultDatacenter(context.TODO())
   512  		return d, err
   513  	}
   514  }
   515  
    516  // addHardDisk adds a new hard disk (size in GB) to the VirtualMachine, optionally limiting IOPS and selecting the provisioning type.
   517  func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string) error {
   518  	devices, err := vm.Device(context.TODO())
   519  	if err != nil {
   520  		return err
   521  	}
   522  	log.Printf("[DEBUG] vm devices: %#v\n", devices)
   523  
   524  	controller, err := devices.FindDiskController("scsi")
   525  	if err != nil {
   526  		return err
   527  	}
   528  	log.Printf("[DEBUG] disk controller: %#v\n", controller)
   529  
   530  	disk := devices.CreateDisk(controller, "")
   531  	existing := devices.SelectByBackingInfo(disk.Backing)
   532  	log.Printf("[DEBUG] disk: %#v\n", disk)
   533  
   534  	if len(existing) == 0 {
    535  		disk.CapacityInKB = size * 1024 * 1024 // size is expressed in GB
   536  		if iops != 0 {
   537  			disk.StorageIOAllocation = &types.StorageIOAllocationInfo{
   538  				Limit: iops,
   539  			}
   540  		}
   541  		backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
   542  
   543  		if diskType == "eager_zeroed" {
   544  			// eager zeroed thick virtual disk
   545  			backing.ThinProvisioned = types.NewBool(false)
   546  			backing.EagerlyScrub = types.NewBool(true)
   547  		} else if diskType == "thin" {
   548  			// thin provisioned virtual disk
   549  			backing.ThinProvisioned = types.NewBool(true)
   550  		}
   551  
   552  		log.Printf("[DEBUG] addHardDisk: %#v\n", disk)
   553  		log.Printf("[DEBUG] addHardDisk: %#v\n", disk.CapacityInKB)
   554  
   555  		return vm.AddDevice(context.TODO(), disk)
   556  	} else {
   557  		log.Printf("[DEBUG] addHardDisk: Disk already present.\n")
   558  
   559  		return nil
   560  	}
   561  }
   562  
    563  // createNetworkDevice creates a VirtualDeviceConfigSpec for a network device on the network whose name matches label, using the given adapter type ("vmxnet3" or "e1000").
   564  func createNetworkDevice(f *find.Finder, label, adapterType string) (*types.VirtualDeviceConfigSpec, error) {
   565  	network, err := f.Network(context.TODO(), "*"+label)
   566  	if err != nil {
   567  		return nil, err
   568  	}
   569  
   570  	backing, err := network.EthernetCardBackingInfo(context.TODO())
   571  	if err != nil {
   572  		return nil, err
   573  	}
   574  
   575  	if adapterType == "vmxnet3" {
   576  		return &types.VirtualDeviceConfigSpec{
   577  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
   578  			Device: &types.VirtualVmxnet3{
   579  				types.VirtualVmxnet{
   580  					types.VirtualEthernetCard{
   581  						VirtualDevice: types.VirtualDevice{
   582  							Key:     -1,
   583  							Backing: backing,
   584  						},
   585  						AddressType: string(types.VirtualEthernetCardMacTypeGenerated),
   586  					},
   587  				},
   588  			},
   589  		}, nil
   590  	} else if adapterType == "e1000" {
   591  		return &types.VirtualDeviceConfigSpec{
   592  			Operation: types.VirtualDeviceConfigSpecOperationAdd,
   593  			Device: &types.VirtualE1000{
   594  				types.VirtualEthernetCard{
   595  					VirtualDevice: types.VirtualDevice{
   596  						Key:     -1,
   597  						Backing: backing,
   598  					},
   599  					AddressType: string(types.VirtualEthernetCardMacTypeGenerated),
   600  				},
   601  			},
   602  		}, nil
   603  	} else {
    604  		return nil, fmt.Errorf("invalid network adapter type: %s", adapterType)
   605  	}
   606  }
   607  
    608  // createVMRelocateSpec creates a VirtualMachineRelocateSpec that places the cloned VirtualMachine on the given resource pool and datastore.
   609  func createVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine) (types.VirtualMachineRelocateSpec, error) {
   610  	var key int
   611  
   612  	devices, err := vm.Device(context.TODO())
   613  	if err != nil {
   614  		return types.VirtualMachineRelocateSpec{}, err
   615  	}
   616  	for _, d := range devices {
   617  		if devices.Type(d) == "disk" {
   618  			key = d.GetVirtualDevice().Key
   619  		}
   620  	}
   621  
   622  	rpr := rp.Reference()
   623  	dsr := ds.Reference()
   624  	return types.VirtualMachineRelocateSpec{
   625  		Datastore: &dsr,
   626  		Pool:      &rpr,
   627  		Disk: []types.VirtualMachineRelocateSpecDiskLocator{
   628  			types.VirtualMachineRelocateSpecDiskLocator{
   629  				Datastore: dsr,
   630  				DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{
   631  					DiskMode:        "persistent",
   632  					ThinProvisioned: types.NewBool(false),
   633  					EagerlyScrub:    types.NewBool(true),
   634  				},
   635  				DiskId: key,
   636  			},
   637  		},
   638  	}, nil
   639  }
   640  
    641  // getDatastoreObject looks up a datastore (or datastore cluster) by name under the datacenter's datastore folder and returns its managed object reference.
   642  func getDatastoreObject(client *govmomi.Client, f *object.DatacenterFolders, name string) (types.ManagedObjectReference, error) {
   643  	s := object.NewSearchIndex(client.Client)
   644  	ref, err := s.FindChild(context.TODO(), f.DatastoreFolder, name)
   645  	if err != nil {
   646  		return types.ManagedObjectReference{}, err
   647  	}
   648  	if ref == nil {
   649  		return types.ManagedObjectReference{}, fmt.Errorf("Datastore '%s' not found.", name)
   650  	}
   651  	log.Printf("[DEBUG] getDatastoreObject: reference: %#v", ref)
   652  	return ref.Reference(), nil
   653  }
   654  
    655  // createStoragePlacementSpecCreate creates a StoragePlacementSpec for placing a newly created VirtualMachine on a datastore cluster (StoragePod).
   656  func createStoragePlacementSpecCreate(f *object.DatacenterFolders, rp *object.ResourcePool, storagePod object.StoragePod, configSpec types.VirtualMachineConfigSpec) types.StoragePlacementSpec {
   657  	vmfr := f.VmFolder.Reference()
   658  	rpr := rp.Reference()
   659  	spr := storagePod.Reference()
   660  
   661  	sps := types.StoragePlacementSpec{
   662  		Type:       "create",
   663  		ConfigSpec: &configSpec,
   664  		PodSelectionSpec: types.StorageDrsPodSelectionSpec{
   665  			StoragePod: &spr,
   666  		},
   667  		Folder:       &vmfr,
   668  		ResourcePool: &rpr,
   669  	}
   670  	log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)
   671  	return sps
   672  }
   673  
    674  // createStoragePlacementSpecClone creates a StoragePlacementSpec for cloning an existing VirtualMachine onto a datastore cluster (StoragePod).
   675  func createStoragePlacementSpecClone(c *govmomi.Client, f *object.DatacenterFolders, vm *object.VirtualMachine, rp *object.ResourcePool, storagePod object.StoragePod) types.StoragePlacementSpec {
   676  	vmr := vm.Reference()
   677  	vmfr := f.VmFolder.Reference()
   678  	rpr := rp.Reference()
   679  	spr := storagePod.Reference()
   680  
   681  	var o mo.VirtualMachine
   682  	err := vm.Properties(context.TODO(), vmr, []string{"datastore"}, &o)
   683  	if err != nil {
   684  		return types.StoragePlacementSpec{}
   685  	}
   686  	ds := object.NewDatastore(c.Client, o.Datastore[0])
   687  	log.Printf("[DEBUG] findDatastore: datastore: %#v\n", ds)
   688  
   689  	devices, err := vm.Device(context.TODO())
   690  	if err != nil {
   691  		return types.StoragePlacementSpec{}
   692  	}
   693  
   694  	var key int
   695  	for _, d := range devices.SelectByType((*types.VirtualDisk)(nil)) {
   696  		key = d.GetVirtualDevice().Key
   697  		log.Printf("[DEBUG] findDatastore: virtual devices: %#v\n", d.GetVirtualDevice())
   698  	}
   699  
   700  	sps := types.StoragePlacementSpec{
   701  		Type: "clone",
   702  		Vm:   &vmr,
   703  		PodSelectionSpec: types.StorageDrsPodSelectionSpec{
   704  			StoragePod: &spr,
   705  		},
   706  		CloneSpec: &types.VirtualMachineCloneSpec{
   707  			Location: types.VirtualMachineRelocateSpec{
   708  				Disk: []types.VirtualMachineRelocateSpecDiskLocator{
   709  					types.VirtualMachineRelocateSpecDiskLocator{
   710  						Datastore:       ds.Reference(),
   711  						DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{},
   712  						DiskId:          key,
   713  					},
   714  				},
   715  				Pool: &rpr,
   716  			},
   717  			PowerOn:  false,
   718  			Template: false,
   719  		},
   720  		CloneName: "dummy",
   721  		Folder:    &vmfr,
   722  	}
   723  	return sps
   724  }
   725  
    726  // findDatastore asks Storage DRS for placement recommendations and returns the recommended Datastore.
   727  func findDatastore(c *govmomi.Client, sps types.StoragePlacementSpec) (*object.Datastore, error) {
   728  	var datastore *object.Datastore
   729  	log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)
   730  
   731  	srm := object.NewStorageResourceManager(c.Client)
   732  	rds, err := srm.RecommendDatastores(context.TODO(), sps)
   733  	if err != nil {
   734  		return nil, err
   735  	}
   736  	log.Printf("[DEBUG] findDatastore: recommendDatastores: %#v\n", rds)
   737  
         	if len(rds.Recommendations) == 0 || len(rds.Recommendations[0].Action) == 0 {
         		return nil, fmt.Errorf("no datastore recommendations returned for storage placement")
         	}
    738  	spa := rds.Recommendations[0].Action[0].(*types.StoragePlacementAction)
   739  	datastore = object.NewDatastore(c.Client, spa.Destination)
   740  	log.Printf("[DEBUG] findDatastore: datastore: %#v", datastore)
   741  
   742  	return datastore, nil
   743  }
   744  
    745  // createVirtualMachine creates a new VirtualMachine from scratch (without cloning a template).
   746  func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
   747  	dc, err := getDatacenter(c, vm.datacenter)
   748  	if err != nil {
   749  		return err
   750  	}
   751  	finder := find.NewFinder(c.Client, true)
   752  	finder = finder.SetDatacenter(dc)
   753  
   754  	var resourcePool *object.ResourcePool
   755  	if vm.resourcePool == "" {
   756  		if vm.cluster == "" {
   757  			resourcePool, err = finder.DefaultResourcePool(context.TODO())
   758  			if err != nil {
   759  				return err
   760  			}
   761  		} else {
   762  			resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
   763  			if err != nil {
   764  				return err
   765  			}
   766  		}
   767  	} else {
   768  		resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
   769  		if err != nil {
   770  			return err
   771  		}
   772  	}
   773  	log.Printf("[DEBUG] resource pool: %#v", resourcePool)
   774  
   775  	dcFolders, err := dc.Folders(context.TODO())
   776  	if err != nil {
   777  		return err
   778  	}
   779  
   780  	// network
   781  	networkDevices := []types.BaseVirtualDeviceConfigSpec{}
   782  	for _, network := range vm.networkInterfaces {
   783  		// network device
   784  		nd, err := createNetworkDevice(finder, network.label, "e1000")
   785  		if err != nil {
   786  			return err
   787  		}
   788  		networkDevices = append(networkDevices, nd)
   789  	}
   790  
   791  	// make config spec
   792  	configSpec := types.VirtualMachineConfigSpec{
   793  		GuestId:           "otherLinux64Guest",
   794  		Name:              vm.name,
   795  		NumCPUs:           vm.vcpu,
   796  		NumCoresPerSocket: 1,
   797  		MemoryMB:          vm.memoryMb,
   798  		DeviceChange:      networkDevices,
   799  	}
   800  	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
   801  
   802  	var datastore *object.Datastore
   803  	if vm.datastore == "" {
   804  		datastore, err = finder.DefaultDatastore(context.TODO())
   805  		if err != nil {
   806  			return err
   807  		}
   808  	} else {
   809  		datastore, err = finder.Datastore(context.TODO(), vm.datastore)
   810  		if err != nil {
   811  			// TODO: datastore cluster support in govmomi finder function
   812  			d, err := getDatastoreObject(c, dcFolders, vm.datastore)
   813  			if err != nil {
   814  				return err
   815  			}
   816  
   817  			if d.Type == "StoragePod" {
   818  				sp := object.StoragePod{
   819  					object.NewFolder(c.Client, d),
   820  				}
   821  				sps := createStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec)
   822  				datastore, err = findDatastore(c, sps)
   823  				if err != nil {
   824  					return err
   825  				}
   826  			} else {
   827  				datastore = object.NewDatastore(c.Client, d)
   828  			}
   829  		}
   830  	}
   831  
   832  	log.Printf("[DEBUG] datastore: %#v", datastore)
   833  
   834  	var mds mo.Datastore
   835  	if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil {
   836  		return err
   837  	}
   838  	log.Printf("[DEBUG] datastore: %#v", mds.Name)
    839  	scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi")
    840  	if err != nil {
    841  		return fmt.Errorf("error creating SCSI controller: %s", err)
    842  	}
   843  
   844  	configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{
   845  		Operation: types.VirtualDeviceConfigSpecOperationAdd,
   846  		Device:    scsi,
   847  	})
   848  	configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)}
   849  
    850  	task, err := dcFolders.VmFolder.CreateVM(context.TODO(), configSpec, resourcePool, nil)
    851  	if err != nil {
    852  		return err
    853  	}
    854  
    855  	err = task.Wait(context.TODO())
    856  	if err != nil {
    857  		return err
    858  	}
   859  
   860  	newVM, err := finder.VirtualMachine(context.TODO(), vm.name)
   861  	if err != nil {
   862  		return err
   863  	}
   864  	log.Printf("[DEBUG] new vm: %v", newVM)
   865  
   866  	log.Printf("[DEBUG] add hard disk: %v", vm.hardDisks)
   867  	for _, hd := range vm.hardDisks {
   868  		log.Printf("[DEBUG] add hard disk: %v", hd.size)
   869  		log.Printf("[DEBUG] add hard disk: %v", hd.iops)
   870  		err = addHardDisk(newVM, hd.size, hd.iops, "thin")
   871  		if err != nil {
   872  			return err
   873  		}
   874  	}
   875  	return nil
   876  }
   877  
    878  // deployVirtualMachine deploys a new VirtualMachine by cloning the configured template.
   879  func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
   880  	dc, err := getDatacenter(c, vm.datacenter)
   881  	if err != nil {
   882  		return err
   883  	}
   884  	finder := find.NewFinder(c.Client, true)
   885  	finder = finder.SetDatacenter(dc)
   886  
   887  	template, err := finder.VirtualMachine(context.TODO(), vm.template)
   888  	if err != nil {
   889  		return err
   890  	}
   891  	log.Printf("[DEBUG] template: %#v", template)
   892  
   893  	var resourcePool *object.ResourcePool
   894  	if vm.resourcePool == "" {
   895  		if vm.cluster == "" {
   896  			resourcePool, err = finder.DefaultResourcePool(context.TODO())
   897  			if err != nil {
   898  				return err
   899  			}
   900  		} else {
   901  			resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
   902  			if err != nil {
   903  				return err
   904  			}
   905  		}
   906  	} else {
   907  		resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
   908  		if err != nil {
   909  			return err
   910  		}
   911  	}
   912  	log.Printf("[DEBUG] resource pool: %#v", resourcePool)
   913  
   914  	dcFolders, err := dc.Folders(context.TODO())
   915  	if err != nil {
   916  		return err
   917  	}
   918  
   919  	var datastore *object.Datastore
   920  	if vm.datastore == "" {
   921  		datastore, err = finder.DefaultDatastore(context.TODO())
   922  		if err != nil {
   923  			return err
   924  		}
   925  	} else {
   926  		datastore, err = finder.Datastore(context.TODO(), vm.datastore)
   927  		if err != nil {
   928  			// TODO: datastore cluster support in govmomi finder function
   929  			d, err := getDatastoreObject(c, dcFolders, vm.datastore)
   930  			if err != nil {
   931  				return err
   932  			}
   933  
   934  			if d.Type == "StoragePod" {
   935  				sp := object.StoragePod{
   936  					object.NewFolder(c.Client, d),
   937  				}
   938  				sps := createStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp)
   939  				datastore, err = findDatastore(c, sps)
   940  				if err != nil {
   941  					return err
   942  				}
   943  			} else {
   944  				datastore = object.NewDatastore(c.Client, d)
   945  			}
   946  		}
   947  	}
   948  	log.Printf("[DEBUG] datastore: %#v", datastore)
   949  
   950  	relocateSpec, err := createVMRelocateSpec(resourcePool, datastore, template)
   951  	if err != nil {
   952  		return err
   953  	}
   954  	log.Printf("[DEBUG] relocate spec: %v", relocateSpec)
   955  
   956  	// network
   957  	networkDevices := []types.BaseVirtualDeviceConfigSpec{}
   958  	networkConfigs := []types.CustomizationAdapterMapping{}
   959  	for _, network := range vm.networkInterfaces {
   960  		// network device
   961  		nd, err := createNetworkDevice(finder, network.label, "vmxnet3")
   962  		if err != nil {
   963  			return err
   964  		}
   965  		networkDevices = append(networkDevices, nd)
   966  
   967  		var ipSetting types.CustomizationIPSettings
   968  		if network.ipAddress == "" {
   969  			ipSetting = types.CustomizationIPSettings{
   970  				Ip: &types.CustomizationDhcpIpGenerator{},
   971  			}
   972  		} else {
   973  			log.Printf("[DEBUG] gateway: %v", vm.gateway)
   974  			log.Printf("[DEBUG] ip address: %v", network.ipAddress)
   975  			log.Printf("[DEBUG] subnet mask: %v", network.subnetMask)
   976  			ipSetting = types.CustomizationIPSettings{
   977  				Gateway: []string{
   978  					vm.gateway,
   979  				},
   980  				Ip: &types.CustomizationFixedIp{
   981  					IpAddress: network.ipAddress,
   982  				},
   983  				SubnetMask: network.subnetMask,
   984  			}
   985  		}
   986  
   987  		// network config
   988  		config := types.CustomizationAdapterMapping{
   989  			Adapter: ipSetting,
   990  		}
   991  		networkConfigs = append(networkConfigs, config)
   992  	}
   993  	log.Printf("[DEBUG] network configs: %v", networkConfigs[0].Adapter)
   994  
   995  	// make config spec
   996  	configSpec := types.VirtualMachineConfigSpec{
   997  		NumCPUs:           vm.vcpu,
   998  		NumCoresPerSocket: 1,
   999  		MemoryMB:          vm.memoryMb,
  1000  		DeviceChange:      networkDevices,
  1001  	}
  1002  	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
  1003  
  1004  	// create CustomizationSpec
  1005  	customSpec := types.CustomizationSpec{
  1006  		Identity: &types.CustomizationLinuxPrep{
  1007  			HostName: &types.CustomizationFixedName{
  1008  				Name: strings.Split(vm.name, ".")[0],
  1009  			},
  1010  			Domain:     vm.domain,
  1011  			TimeZone:   vm.timeZone,
  1012  			HwClockUTC: types.NewBool(true),
  1013  		},
  1014  		GlobalIPSettings: types.CustomizationGlobalIPSettings{
  1015  			DnsSuffixList: vm.dnsSuffixes,
  1016  			DnsServerList: vm.dnsServers,
  1017  		},
  1018  		NicSettingMap: networkConfigs,
  1019  	}
  1020  	log.Printf("[DEBUG] custom spec: %v", customSpec)
  1021  
  1022  	// make vm clone spec
  1023  	cloneSpec := types.VirtualMachineCloneSpec{
  1024  		Location:      relocateSpec,
  1025  		Template:      false,
  1026  		Config:        &configSpec,
  1027  		Customization: &customSpec,
  1028  		PowerOn:       true,
  1029  	}
  1030  	log.Printf("[DEBUG] clone spec: %v", cloneSpec)
  1031  
  1032  	task, err := template.Clone(context.TODO(), dcFolders.VmFolder, vm.name, cloneSpec)
  1033  	if err != nil {
  1034  		return err
  1035  	}
  1036  
  1037  	_, err = task.WaitForResult(context.TODO(), nil)
  1038  	if err != nil {
  1039  		return err
  1040  	}
  1041  
  1042  	newVM, err := finder.VirtualMachine(context.TODO(), vm.name)
  1043  	if err != nil {
  1044  		return err
  1045  	}
  1046  	log.Printf("[DEBUG] new vm: %v", newVM)
  1047  
  1048  	ip, err := newVM.WaitForIP(context.TODO())
  1049  	if err != nil {
  1050  		return err
  1051  	}
  1052  	log.Printf("[DEBUG] ip address: %v", ip)
  1053  
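         	// The first disk comes from the cloned template; any additional disks are added as eager-zeroed disks.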
  1054  	for i := 1; i < len(vm.hardDisks); i++ {
  1055  		err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, "eager_zeroed")
  1056  		if err != nil {
  1057  			return err
  1058  		}
  1059  	}
  1060  	return nil
  1061  }