github.com/turtlemonvh/terraform@v0.6.9-0.20151204001754-8e40b6b855e8/builtin/providers/vsphere/resource_vsphere_virtual_machine.go (about) 1 package vsphere 2 3 import ( 4 "fmt" 5 "log" 6 "net" 7 "strings" 8 "time" 9 10 "github.com/hashicorp/terraform/helper/resource" 11 "github.com/hashicorp/terraform/helper/schema" 12 "github.com/vmware/govmomi" 13 "github.com/vmware/govmomi/find" 14 "github.com/vmware/govmomi/object" 15 "github.com/vmware/govmomi/property" 16 "github.com/vmware/govmomi/vim25/mo" 17 "github.com/vmware/govmomi/vim25/types" 18 "golang.org/x/net/context" 19 ) 20 21 var DefaultDNSSuffixes = []string{ 22 "vsphere.local", 23 } 24 25 var DefaultDNSServers = []string{ 26 "8.8.8.8", 27 "8.8.4.4", 28 } 29 30 type networkInterface struct { 31 deviceName string 32 label string 33 ipAddress string 34 subnetMask string 35 adapterType string // TODO: Make "adapter_type" argument 36 } 37 38 type hardDisk struct { 39 size int64 40 iops int64 41 } 42 43 type virtualMachine struct { 44 name string 45 datacenter string 46 cluster string 47 resourcePool string 48 datastore string 49 vcpu int 50 memoryMb int64 51 template string 52 networkInterfaces []networkInterface 53 hardDisks []hardDisk 54 gateway string 55 domain string 56 timeZone string 57 dnsSuffixes []string 58 dnsServers []string 59 customConfigurations map[string](types.AnyType) 60 } 61 62 func resourceVSphereVirtualMachine() *schema.Resource { 63 return &schema.Resource{ 64 Create: resourceVSphereVirtualMachineCreate, 65 Read: resourceVSphereVirtualMachineRead, 66 Delete: resourceVSphereVirtualMachineDelete, 67 68 Schema: map[string]*schema.Schema{ 69 "name": &schema.Schema{ 70 Type: schema.TypeString, 71 Required: true, 72 ForceNew: true, 73 }, 74 75 "vcpu": &schema.Schema{ 76 Type: schema.TypeInt, 77 Required: true, 78 ForceNew: true, 79 }, 80 81 "memory": &schema.Schema{ 82 Type: schema.TypeInt, 83 Required: true, 84 ForceNew: true, 85 }, 86 87 "datacenter": &schema.Schema{ 88 Type: schema.TypeString, 89 Optional: true, 90 ForceNew: true, 91 }, 92 93 "cluster": &schema.Schema{ 94 Type: schema.TypeString, 95 Optional: true, 96 ForceNew: true, 97 }, 98 99 "resource_pool": &schema.Schema{ 100 Type: schema.TypeString, 101 Optional: true, 102 ForceNew: true, 103 }, 104 105 "gateway": &schema.Schema{ 106 Type: schema.TypeString, 107 Optional: true, 108 ForceNew: true, 109 }, 110 111 "domain": &schema.Schema{ 112 Type: schema.TypeString, 113 Optional: true, 114 ForceNew: true, 115 Default: "vsphere.local", 116 }, 117 118 "time_zone": &schema.Schema{ 119 Type: schema.TypeString, 120 Optional: true, 121 ForceNew: true, 122 Default: "Etc/UTC", 123 }, 124 125 "dns_suffixes": &schema.Schema{ 126 Type: schema.TypeList, 127 Optional: true, 128 Elem: &schema.Schema{Type: schema.TypeString}, 129 ForceNew: true, 130 }, 131 132 "dns_servers": &schema.Schema{ 133 Type: schema.TypeList, 134 Optional: true, 135 Elem: &schema.Schema{Type: schema.TypeString}, 136 ForceNew: true, 137 }, 138 139 "custom_configuration_parameters": &schema.Schema{ 140 Type: schema.TypeMap, 141 Optional: true, 142 ForceNew: true, 143 }, 144 145 "network_interface": &schema.Schema{ 146 Type: schema.TypeList, 147 Required: true, 148 ForceNew: true, 149 Elem: &schema.Resource{ 150 Schema: map[string]*schema.Schema{ 151 "label": &schema.Schema{ 152 Type: schema.TypeString, 153 Required: true, 154 ForceNew: true, 155 }, 156 157 "ip_address": &schema.Schema{ 158 Type: schema.TypeString, 159 Optional: true, 160 Computed: true, 161 ForceNew: true, 162 }, 163 164 "subnet_mask": &schema.Schema{ 165 Type: schema.TypeString, 166 Optional: true, 167 Computed: true, 168 ForceNew: true, 169 }, 170 171 "adapter_type": &schema.Schema{ 172 Type: schema.TypeString, 173 Optional: true, 174 ForceNew: true, 175 }, 176 }, 177 }, 178 }, 179 180 "disk": &schema.Schema{ 181 Type: schema.TypeList, 182 Required: true, 183 ForceNew: true, 184 Elem: &schema.Resource{ 185 Schema: map[string]*schema.Schema{ 186 "template": &schema.Schema{ 187 Type: schema.TypeString, 188 Optional: true, 189 ForceNew: true, 190 }, 191 192 "datastore": &schema.Schema{ 193 Type: schema.TypeString, 194 Optional: true, 195 ForceNew: true, 196 }, 197 198 "size": &schema.Schema{ 199 Type: schema.TypeInt, 200 Optional: true, 201 ForceNew: true, 202 }, 203 204 "iops": &schema.Schema{ 205 Type: schema.TypeInt, 206 Optional: true, 207 ForceNew: true, 208 }, 209 }, 210 }, 211 }, 212 213 "boot_delay": &schema.Schema{ 214 Type: schema.TypeInt, 215 Optional: true, 216 ForceNew: true, 217 }, 218 }, 219 } 220 } 221 222 func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error { 223 client := meta.(*govmomi.Client) 224 225 vm := virtualMachine{ 226 name: d.Get("name").(string), 227 vcpu: d.Get("vcpu").(int), 228 memoryMb: int64(d.Get("memory").(int)), 229 } 230 231 if v, ok := d.GetOk("datacenter"); ok { 232 vm.datacenter = v.(string) 233 } 234 235 if v, ok := d.GetOk("cluster"); ok { 236 vm.cluster = v.(string) 237 } 238 239 if v, ok := d.GetOk("resource_pool"); ok { 240 vm.resourcePool = v.(string) 241 } 242 243 if v, ok := d.GetOk("gateway"); ok { 244 vm.gateway = v.(string) 245 } 246 247 if v, ok := d.GetOk("domain"); ok { 248 vm.domain = v.(string) 249 } 250 251 if v, ok := d.GetOk("time_zone"); ok { 252 vm.timeZone = v.(string) 253 } 254 255 if raw, ok := d.GetOk("dns_suffixes"); ok { 256 for _, v := range raw.([]interface{}) { 257 vm.dnsSuffixes = append(vm.dnsSuffixes, v.(string)) 258 } 259 } else { 260 vm.dnsSuffixes = DefaultDNSSuffixes 261 } 262 263 if raw, ok := d.GetOk("dns_servers"); ok { 264 for _, v := range raw.([]interface{}) { 265 vm.dnsServers = append(vm.dnsServers, v.(string)) 266 } 267 } else { 268 vm.dnsServers = DefaultDNSServers 269 } 270 271 if vL, ok := d.GetOk("custom_configuration_parameters"); ok { 272 if custom_configs, ok := vL.(map[string]interface{}); ok { 273 custom := make(map[string]types.AnyType) 274 for k,v := range custom_configs { 275 custom[k] = v 276 } 277 vm.customConfigurations = custom 278 log.Printf("[DEBUG] custom_configuration_parameters init: %v", vm.customConfigurations) 279 } 280 } 281 282 if vL, ok := d.GetOk("network_interface"); ok { 283 networks := make([]networkInterface, len(vL.([]interface{}))) 284 for i, v := range vL.([]interface{}) { 285 network := v.(map[string]interface{}) 286 networks[i].label = network["label"].(string) 287 if v, ok := network["ip_address"].(string); ok && v != "" { 288 networks[i].ipAddress = v 289 } 290 if v, ok := network["subnet_mask"].(string); ok && v != "" { 291 networks[i].subnetMask = v 292 } 293 } 294 vm.networkInterfaces = networks 295 log.Printf("[DEBUG] network_interface init: %v", networks) 296 } 297 298 if vL, ok := d.GetOk("disk"); ok { 299 disks := make([]hardDisk, len(vL.([]interface{}))) 300 for i, v := range vL.([]interface{}) { 301 disk := v.(map[string]interface{}) 302 if i == 0 { 303 if v, ok := disk["template"].(string); ok && v != "" { 304 vm.template = v 305 } else { 306 if v, ok := disk["size"].(int); ok && v != 0 { 307 disks[i].size = int64(v) 308 } else { 309 return fmt.Errorf("If template argument is not specified, size argument is required.") 310 } 311 } 312 if v, ok := disk["datastore"].(string); ok && v != "" { 313 vm.datastore = v 314 } 315 } else { 316 if v, ok := disk["size"].(int); ok && v != 0 { 317 disks[i].size = int64(v) 318 } else { 319 return fmt.Errorf("Size argument is required.") 320 } 321 } 322 if v, ok := disk["iops"].(int); ok && v != 0 { 323 disks[i].iops = int64(v) 324 } 325 } 326 vm.hardDisks = disks 327 log.Printf("[DEBUG] disk init: %v", disks) 328 } 329 330 if vm.template != "" { 331 err := vm.deployVirtualMachine(client) 332 if err != nil { 333 return err 334 } 335 } else { 336 err := vm.createVirtualMachine(client) 337 if err != nil { 338 return err 339 } 340 } 341 342 if _, ok := d.GetOk("network_interface.0.ip_address"); !ok { 343 if v, ok := d.GetOk("boot_delay"); ok { 344 stateConf := &resource.StateChangeConf{ 345 Pending: []string{"pending"}, 346 Target: "active", 347 Refresh: waitForNetworkingActive(client, vm.datacenter, vm.name), 348 Timeout: 600 * time.Second, 349 Delay: time.Duration(v.(int)) * time.Second, 350 MinTimeout: 2 * time.Second, 351 } 352 353 _, err := stateConf.WaitForState() 354 if err != nil { 355 return err 356 } 357 } 358 } 359 d.SetId(vm.name) 360 log.Printf("[INFO] Created virtual machine: %s", d.Id()) 361 362 return resourceVSphereVirtualMachineRead(d, meta) 363 } 364 365 func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{}) error { 366 client := meta.(*govmomi.Client) 367 dc, err := getDatacenter(client, d.Get("datacenter").(string)) 368 if err != nil { 369 return err 370 } 371 finder := find.NewFinder(client.Client, true) 372 finder = finder.SetDatacenter(dc) 373 374 vm, err := finder.VirtualMachine(context.TODO(), d.Get("name").(string)) 375 if err != nil { 376 log.Printf("[ERROR] Virtual machine not found: %s", d.Get("name").(string)) 377 d.SetId("") 378 return nil 379 } 380 381 var mvm mo.VirtualMachine 382 383 collector := property.DefaultCollector(client.Client) 384 if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"guest", "summary", "datastore"}, &mvm); err != nil { 385 return err 386 } 387 388 log.Printf("[DEBUG] %#v", dc) 389 log.Printf("[DEBUG] %#v", mvm.Summary.Config) 390 log.Printf("[DEBUG] %#v", mvm.Guest.Net) 391 392 networkInterfaces := make([]map[string]interface{}, 0) 393 for _, v := range mvm.Guest.Net { 394 if v.DeviceConfigId >= 0 { 395 log.Printf("[DEBUG] %#v", v.Network) 396 networkInterface := make(map[string]interface{}) 397 networkInterface["label"] = v.Network 398 if len(v.IpAddress) > 0 { 399 log.Printf("[DEBUG] %#v", v.IpAddress[0]) 400 networkInterface["ip_address"] = v.IpAddress[0] 401 402 m := net.CIDRMask(v.IpConfig.IpAddress[0].PrefixLength, 32) 403 subnetMask := net.IPv4(m[0], m[1], m[2], m[3]) 404 networkInterface["subnet_mask"] = subnetMask.String() 405 log.Printf("[DEBUG] %#v", subnetMask.String()) 406 } 407 networkInterfaces = append(networkInterfaces, networkInterface) 408 } 409 } 410 log.Printf("[DEBUG] networkInterfaces: %#v", networkInterfaces) 411 err = d.Set("network_interface", networkInterfaces) 412 if err != nil { 413 return fmt.Errorf("Invalid network interfaces to set: %#v", networkInterfaces) 414 } 415 416 var rootDatastore string 417 for _, v := range mvm.Datastore { 418 var md mo.Datastore 419 if err := collector.RetrieveOne(context.TODO(), v, []string{"name", "parent"}, &md); err != nil { 420 return err 421 } 422 if md.Parent.Type == "StoragePod" { 423 var msp mo.StoragePod 424 if err := collector.RetrieveOne(context.TODO(), *md.Parent, []string{"name"}, &msp); err != nil { 425 return err 426 } 427 rootDatastore = msp.Name 428 log.Printf("[DEBUG] %#v", msp.Name) 429 } else { 430 rootDatastore = md.Name 431 log.Printf("[DEBUG] %#v", md.Name) 432 } 433 break 434 } 435 436 d.Set("datacenter", dc) 437 d.Set("memory", mvm.Summary.Config.MemorySizeMB) 438 d.Set("cpu", mvm.Summary.Config.NumCpu) 439 d.Set("datastore", rootDatastore) 440 441 // Initialize the connection info 442 if len(networkInterfaces) > 0 { 443 d.SetConnInfo(map[string]string{ 444 "type": "ssh", 445 "host": networkInterfaces[0]["ip_address"].(string), 446 }) 447 } 448 449 return nil 450 } 451 452 func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error { 453 client := meta.(*govmomi.Client) 454 dc, err := getDatacenter(client, d.Get("datacenter").(string)) 455 if err != nil { 456 return err 457 } 458 finder := find.NewFinder(client.Client, true) 459 finder = finder.SetDatacenter(dc) 460 461 vm, err := finder.VirtualMachine(context.TODO(), d.Get("name").(string)) 462 if err != nil { 463 return err 464 } 465 466 log.Printf("[INFO] Deleting virtual machine: %s", d.Id()) 467 468 task, err := vm.PowerOff(context.TODO()) 469 if err != nil { 470 return err 471 } 472 473 err = task.Wait(context.TODO()) 474 if err != nil { 475 return err 476 } 477 478 task, err = vm.Destroy(context.TODO()) 479 if err != nil { 480 return err 481 } 482 483 err = task.Wait(context.TODO()) 484 if err != nil { 485 return err 486 } 487 488 d.SetId("") 489 return nil 490 } 491 492 func waitForNetworkingActive(client *govmomi.Client, datacenter, name string) resource.StateRefreshFunc { 493 return func() (interface{}, string, error) { 494 dc, err := getDatacenter(client, datacenter) 495 if err != nil { 496 log.Printf("[ERROR] %#v", err) 497 return nil, "", err 498 } 499 finder := find.NewFinder(client.Client, true) 500 finder = finder.SetDatacenter(dc) 501 502 vm, err := finder.VirtualMachine(context.TODO(), name) 503 if err != nil { 504 log.Printf("[ERROR] %#v", err) 505 return nil, "", err 506 } 507 508 var mvm mo.VirtualMachine 509 collector := property.DefaultCollector(client.Client) 510 if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"summary"}, &mvm); err != nil { 511 log.Printf("[ERROR] %#v", err) 512 return nil, "", err 513 } 514 515 if mvm.Summary.Guest.IpAddress != "" { 516 log.Printf("[DEBUG] IP address with DHCP: %v", mvm.Summary.Guest.IpAddress) 517 return mvm.Summary, "active", err 518 } else { 519 log.Printf("[DEBUG] Waiting for IP address") 520 return nil, "pending", err 521 } 522 } 523 } 524 525 // getDatacenter gets datacenter object 526 func getDatacenter(c *govmomi.Client, dc string) (*object.Datacenter, error) { 527 finder := find.NewFinder(c.Client, true) 528 if dc != "" { 529 d, err := finder.Datacenter(context.TODO(), dc) 530 return d, err 531 } else { 532 d, err := finder.DefaultDatacenter(context.TODO()) 533 return d, err 534 } 535 } 536 537 // addHardDisk adds a new Hard Disk to the VirtualMachine. 538 func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string) error { 539 devices, err := vm.Device(context.TODO()) 540 if err != nil { 541 return err 542 } 543 log.Printf("[DEBUG] vm devices: %#v\n", devices) 544 545 controller, err := devices.FindDiskController("scsi") 546 if err != nil { 547 return err 548 } 549 log.Printf("[DEBUG] disk controller: %#v\n", controller) 550 551 disk := devices.CreateDisk(controller, "") 552 existing := devices.SelectByBackingInfo(disk.Backing) 553 log.Printf("[DEBUG] disk: %#v\n", disk) 554 555 if len(existing) == 0 { 556 disk.CapacityInKB = int64(size * 1024 * 1024) 557 if iops != 0 { 558 disk.StorageIOAllocation = &types.StorageIOAllocationInfo{ 559 Limit: iops, 560 } 561 } 562 backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo) 563 564 if diskType == "eager_zeroed" { 565 // eager zeroed thick virtual disk 566 backing.ThinProvisioned = types.NewBool(false) 567 backing.EagerlyScrub = types.NewBool(true) 568 } else if diskType == "thin" { 569 // thin provisioned virtual disk 570 backing.ThinProvisioned = types.NewBool(true) 571 } 572 573 log.Printf("[DEBUG] addHardDisk: %#v\n", disk) 574 log.Printf("[DEBUG] addHardDisk: %#v\n", disk.CapacityInKB) 575 576 return vm.AddDevice(context.TODO(), disk) 577 } else { 578 log.Printf("[DEBUG] addHardDisk: Disk already present.\n") 579 580 return nil 581 } 582 } 583 584 // buildNetworkDevice builds VirtualDeviceConfigSpec for Network Device. 585 func buildNetworkDevice(f *find.Finder, label, adapterType string) (*types.VirtualDeviceConfigSpec, error) { 586 network, err := f.Network(context.TODO(), "*"+label) 587 if err != nil { 588 return nil, err 589 } 590 591 backing, err := network.EthernetCardBackingInfo(context.TODO()) 592 if err != nil { 593 return nil, err 594 } 595 596 if adapterType == "vmxnet3" { 597 return &types.VirtualDeviceConfigSpec{ 598 Operation: types.VirtualDeviceConfigSpecOperationAdd, 599 Device: &types.VirtualVmxnet3{ 600 types.VirtualVmxnet{ 601 types.VirtualEthernetCard{ 602 VirtualDevice: types.VirtualDevice{ 603 Key: -1, 604 Backing: backing, 605 }, 606 AddressType: string(types.VirtualEthernetCardMacTypeGenerated), 607 }, 608 }, 609 }, 610 }, nil 611 } else if adapterType == "e1000" { 612 return &types.VirtualDeviceConfigSpec{ 613 Operation: types.VirtualDeviceConfigSpecOperationAdd, 614 Device: &types.VirtualE1000{ 615 types.VirtualEthernetCard{ 616 VirtualDevice: types.VirtualDevice{ 617 Key: -1, 618 Backing: backing, 619 }, 620 AddressType: string(types.VirtualEthernetCardMacTypeGenerated), 621 }, 622 }, 623 }, nil 624 } else { 625 return nil, fmt.Errorf("Invalid network adapter type.") 626 } 627 } 628 629 // buildVMRelocateSpec builds VirtualMachineRelocateSpec to set a place for a new VirtualMachine. 630 func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine) (types.VirtualMachineRelocateSpec, error) { 631 var key int 632 633 devices, err := vm.Device(context.TODO()) 634 if err != nil { 635 return types.VirtualMachineRelocateSpec{}, err 636 } 637 for _, d := range devices { 638 if devices.Type(d) == "disk" { 639 key = d.GetVirtualDevice().Key 640 } 641 } 642 643 rpr := rp.Reference() 644 dsr := ds.Reference() 645 return types.VirtualMachineRelocateSpec{ 646 Datastore: &dsr, 647 Pool: &rpr, 648 Disk: []types.VirtualMachineRelocateSpecDiskLocator{ 649 types.VirtualMachineRelocateSpecDiskLocator{ 650 Datastore: dsr, 651 DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{ 652 DiskMode: "persistent", 653 ThinProvisioned: types.NewBool(false), 654 EagerlyScrub: types.NewBool(true), 655 }, 656 DiskId: key, 657 }, 658 }, 659 }, nil 660 } 661 662 // getDatastoreObject gets datastore object. 663 func getDatastoreObject(client *govmomi.Client, f *object.DatacenterFolders, name string) (types.ManagedObjectReference, error) { 664 s := object.NewSearchIndex(client.Client) 665 ref, err := s.FindChild(context.TODO(), f.DatastoreFolder, name) 666 if err != nil { 667 return types.ManagedObjectReference{}, err 668 } 669 if ref == nil { 670 return types.ManagedObjectReference{}, fmt.Errorf("Datastore '%s' not found.", name) 671 } 672 log.Printf("[DEBUG] getDatastoreObject: reference: %#v", ref) 673 return ref.Reference(), nil 674 } 675 676 // buildStoragePlacementSpecCreate builds StoragePlacementSpec for create action. 677 func buildStoragePlacementSpecCreate(f *object.DatacenterFolders, rp *object.ResourcePool, storagePod object.StoragePod, configSpec types.VirtualMachineConfigSpec) types.StoragePlacementSpec { 678 vmfr := f.VmFolder.Reference() 679 rpr := rp.Reference() 680 spr := storagePod.Reference() 681 682 sps := types.StoragePlacementSpec{ 683 Type: "create", 684 ConfigSpec: &configSpec, 685 PodSelectionSpec: types.StorageDrsPodSelectionSpec{ 686 StoragePod: &spr, 687 }, 688 Folder: &vmfr, 689 ResourcePool: &rpr, 690 } 691 log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps) 692 return sps 693 } 694 695 // buildStoragePlacementSpecClone builds StoragePlacementSpec for clone action. 696 func buildStoragePlacementSpecClone(c *govmomi.Client, f *object.DatacenterFolders, vm *object.VirtualMachine, rp *object.ResourcePool, storagePod object.StoragePod) types.StoragePlacementSpec { 697 vmr := vm.Reference() 698 vmfr := f.VmFolder.Reference() 699 rpr := rp.Reference() 700 spr := storagePod.Reference() 701 702 var o mo.VirtualMachine 703 err := vm.Properties(context.TODO(), vmr, []string{"datastore"}, &o) 704 if err != nil { 705 return types.StoragePlacementSpec{} 706 } 707 ds := object.NewDatastore(c.Client, o.Datastore[0]) 708 log.Printf("[DEBUG] findDatastore: datastore: %#v\n", ds) 709 710 devices, err := vm.Device(context.TODO()) 711 if err != nil { 712 return types.StoragePlacementSpec{} 713 } 714 715 var key int 716 for _, d := range devices.SelectByType((*types.VirtualDisk)(nil)) { 717 key = d.GetVirtualDevice().Key 718 log.Printf("[DEBUG] findDatastore: virtual devices: %#v\n", d.GetVirtualDevice()) 719 } 720 721 sps := types.StoragePlacementSpec{ 722 Type: "clone", 723 Vm: &vmr, 724 PodSelectionSpec: types.StorageDrsPodSelectionSpec{ 725 StoragePod: &spr, 726 }, 727 CloneSpec: &types.VirtualMachineCloneSpec{ 728 Location: types.VirtualMachineRelocateSpec{ 729 Disk: []types.VirtualMachineRelocateSpecDiskLocator{ 730 types.VirtualMachineRelocateSpecDiskLocator{ 731 Datastore: ds.Reference(), 732 DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{}, 733 DiskId: key, 734 }, 735 }, 736 Pool: &rpr, 737 }, 738 PowerOn: false, 739 Template: false, 740 }, 741 CloneName: "dummy", 742 Folder: &vmfr, 743 } 744 return sps 745 } 746 747 // findDatastore finds Datastore object. 748 func findDatastore(c *govmomi.Client, sps types.StoragePlacementSpec) (*object.Datastore, error) { 749 var datastore *object.Datastore 750 log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps) 751 752 srm := object.NewStorageResourceManager(c.Client) 753 rds, err := srm.RecommendDatastores(context.TODO(), sps) 754 if err != nil { 755 return nil, err 756 } 757 log.Printf("[DEBUG] findDatastore: recommendDatastores: %#v\n", rds) 758 759 spa := rds.Recommendations[0].Action[0].(*types.StoragePlacementAction) 760 datastore = object.NewDatastore(c.Client, spa.Destination) 761 log.Printf("[DEBUG] findDatastore: datastore: %#v", datastore) 762 763 return datastore, nil 764 } 765 766 // createVirtualMchine creates a new VirtualMachine. 767 func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error { 768 dc, err := getDatacenter(c, vm.datacenter) 769 if err != nil { 770 return err 771 } 772 finder := find.NewFinder(c.Client, true) 773 finder = finder.SetDatacenter(dc) 774 775 var resourcePool *object.ResourcePool 776 if vm.resourcePool == "" { 777 if vm.cluster == "" { 778 resourcePool, err = finder.DefaultResourcePool(context.TODO()) 779 if err != nil { 780 return err 781 } 782 } else { 783 resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources") 784 if err != nil { 785 return err 786 } 787 } 788 } else { 789 resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool) 790 if err != nil { 791 return err 792 } 793 } 794 log.Printf("[DEBUG] resource pool: %#v", resourcePool) 795 796 dcFolders, err := dc.Folders(context.TODO()) 797 if err != nil { 798 return err 799 } 800 801 // network 802 networkDevices := []types.BaseVirtualDeviceConfigSpec{} 803 for _, network := range vm.networkInterfaces { 804 // network device 805 nd, err := buildNetworkDevice(finder, network.label, "e1000") 806 if err != nil { 807 return err 808 } 809 networkDevices = append(networkDevices, nd) 810 } 811 812 // make config spec 813 configSpec := types.VirtualMachineConfigSpec{ 814 GuestId: "otherLinux64Guest", 815 Name: vm.name, 816 NumCPUs: vm.vcpu, 817 NumCoresPerSocket: 1, 818 MemoryMB: vm.memoryMb, 819 DeviceChange: networkDevices, 820 } 821 log.Printf("[DEBUG] virtual machine config spec: %v", configSpec) 822 823 // make ExtraConfig 824 log.Printf("[DEBUG] virtual machine Extra Config spec start") 825 if len(vm.customConfigurations) > 0 { 826 var ov []types.BaseOptionValue 827 for k, v := range vm.customConfigurations { 828 key := k 829 value := v 830 o := types.OptionValue{ 831 Key: key, 832 Value: &value, 833 } 834 log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k,v) 835 ov = append(ov, &o) 836 } 837 configSpec.ExtraConfig = ov 838 log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig) 839 } 840 841 var datastore *object.Datastore 842 if vm.datastore == "" { 843 datastore, err = finder.DefaultDatastore(context.TODO()) 844 if err != nil { 845 return err 846 } 847 } else { 848 datastore, err = finder.Datastore(context.TODO(), vm.datastore) 849 if err != nil { 850 // TODO: datastore cluster support in govmomi finder function 851 d, err := getDatastoreObject(c, dcFolders, vm.datastore) 852 if err != nil { 853 return err 854 } 855 856 if d.Type == "StoragePod" { 857 sp := object.StoragePod{ 858 object.NewFolder(c.Client, d), 859 } 860 sps := buildStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec) 861 datastore, err = findDatastore(c, sps) 862 if err != nil { 863 return err 864 } 865 } else { 866 datastore = object.NewDatastore(c.Client, d) 867 } 868 } 869 } 870 871 log.Printf("[DEBUG] datastore: %#v", datastore) 872 873 var mds mo.Datastore 874 if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil { 875 return err 876 } 877 log.Printf("[DEBUG] datastore: %#v", mds.Name) 878 scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi") 879 if err != nil { 880 log.Printf("[ERROR] %s", err) 881 } 882 883 configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{ 884 Operation: types.VirtualDeviceConfigSpecOperationAdd, 885 Device: scsi, 886 }) 887 configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)} 888 889 task, err := dcFolders.VmFolder.CreateVM(context.TODO(), configSpec, resourcePool, nil) 890 if err != nil { 891 log.Printf("[ERROR] %s", err) 892 } 893 894 err = task.Wait(context.TODO()) 895 if err != nil { 896 log.Printf("[ERROR] %s", err) 897 } 898 899 newVM, err := finder.VirtualMachine(context.TODO(), vm.name) 900 if err != nil { 901 return err 902 } 903 log.Printf("[DEBUG] new vm: %v", newVM) 904 905 log.Printf("[DEBUG] add hard disk: %v", vm.hardDisks) 906 for _, hd := range vm.hardDisks { 907 log.Printf("[DEBUG] add hard disk: %v", hd.size) 908 log.Printf("[DEBUG] add hard disk: %v", hd.iops) 909 err = addHardDisk(newVM, hd.size, hd.iops, "thin") 910 if err != nil { 911 return err 912 } 913 } 914 return nil 915 } 916 917 // deployVirtualMchine deploys a new VirtualMachine. 918 func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error { 919 dc, err := getDatacenter(c, vm.datacenter) 920 if err != nil { 921 return err 922 } 923 finder := find.NewFinder(c.Client, true) 924 finder = finder.SetDatacenter(dc) 925 926 template, err := finder.VirtualMachine(context.TODO(), vm.template) 927 if err != nil { 928 return err 929 } 930 log.Printf("[DEBUG] template: %#v", template) 931 932 var resourcePool *object.ResourcePool 933 if vm.resourcePool == "" { 934 if vm.cluster == "" { 935 resourcePool, err = finder.DefaultResourcePool(context.TODO()) 936 if err != nil { 937 return err 938 } 939 } else { 940 resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources") 941 if err != nil { 942 return err 943 } 944 } 945 } else { 946 resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool) 947 if err != nil { 948 return err 949 } 950 } 951 log.Printf("[DEBUG] resource pool: %#v", resourcePool) 952 953 dcFolders, err := dc.Folders(context.TODO()) 954 if err != nil { 955 return err 956 } 957 958 var datastore *object.Datastore 959 if vm.datastore == "" { 960 datastore, err = finder.DefaultDatastore(context.TODO()) 961 if err != nil { 962 return err 963 } 964 } else { 965 datastore, err = finder.Datastore(context.TODO(), vm.datastore) 966 if err != nil { 967 // TODO: datastore cluster support in govmomi finder function 968 d, err := getDatastoreObject(c, dcFolders, vm.datastore) 969 if err != nil { 970 return err 971 } 972 973 if d.Type == "StoragePod" { 974 sp := object.StoragePod{ 975 object.NewFolder(c.Client, d), 976 } 977 sps := buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp) 978 datastore, err = findDatastore(c, sps) 979 if err != nil { 980 return err 981 } 982 } else { 983 datastore = object.NewDatastore(c.Client, d) 984 } 985 } 986 } 987 log.Printf("[DEBUG] datastore: %#v", datastore) 988 989 relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template) 990 if err != nil { 991 return err 992 } 993 log.Printf("[DEBUG] relocate spec: %v", relocateSpec) 994 995 // network 996 networkDevices := []types.BaseVirtualDeviceConfigSpec{} 997 networkConfigs := []types.CustomizationAdapterMapping{} 998 for _, network := range vm.networkInterfaces { 999 // network device 1000 nd, err := buildNetworkDevice(finder, network.label, "vmxnet3") 1001 if err != nil { 1002 return err 1003 } 1004 networkDevices = append(networkDevices, nd) 1005 1006 var ipSetting types.CustomizationIPSettings 1007 if network.ipAddress == "" { 1008 ipSetting = types.CustomizationIPSettings{ 1009 Ip: &types.CustomizationDhcpIpGenerator{}, 1010 } 1011 } else { 1012 log.Printf("[DEBUG] gateway: %v", vm.gateway) 1013 log.Printf("[DEBUG] ip address: %v", network.ipAddress) 1014 log.Printf("[DEBUG] subnet mask: %v", network.subnetMask) 1015 ipSetting = types.CustomizationIPSettings{ 1016 Gateway: []string{ 1017 vm.gateway, 1018 }, 1019 Ip: &types.CustomizationFixedIp{ 1020 IpAddress: network.ipAddress, 1021 }, 1022 SubnetMask: network.subnetMask, 1023 } 1024 } 1025 1026 // network config 1027 config := types.CustomizationAdapterMapping{ 1028 Adapter: ipSetting, 1029 } 1030 networkConfigs = append(networkConfigs, config) 1031 } 1032 log.Printf("[DEBUG] network configs: %v", networkConfigs[0].Adapter) 1033 1034 // make config spec 1035 configSpec := types.VirtualMachineConfigSpec{ 1036 NumCPUs: vm.vcpu, 1037 NumCoresPerSocket: 1, 1038 MemoryMB: vm.memoryMb, 1039 } 1040 log.Printf("[DEBUG] virtual machine config spec: %v", configSpec) 1041 1042 log.Printf("[DEBUG] starting extra custom config spec: %v", vm.customConfigurations) 1043 1044 // make ExtraConfig 1045 if len(vm.customConfigurations) > 0 { 1046 var ov []types.BaseOptionValue 1047 for k, v := range vm.customConfigurations { 1048 key := k 1049 value := v 1050 o := types.OptionValue{ 1051 Key: key, 1052 Value: &value, 1053 } 1054 ov = append(ov, &o) 1055 } 1056 configSpec.ExtraConfig = ov 1057 log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig) 1058 } 1059 1060 // create CustomizationSpec 1061 customSpec := types.CustomizationSpec{ 1062 Identity: &types.CustomizationLinuxPrep{ 1063 HostName: &types.CustomizationFixedName{ 1064 Name: strings.Split(vm.name, ".")[0], 1065 }, 1066 Domain: vm.domain, 1067 TimeZone: vm.timeZone, 1068 HwClockUTC: types.NewBool(true), 1069 }, 1070 GlobalIPSettings: types.CustomizationGlobalIPSettings{ 1071 DnsSuffixList: vm.dnsSuffixes, 1072 DnsServerList: vm.dnsServers, 1073 }, 1074 NicSettingMap: networkConfigs, 1075 } 1076 log.Printf("[DEBUG] custom spec: %v", customSpec) 1077 1078 // make vm clone spec 1079 cloneSpec := types.VirtualMachineCloneSpec{ 1080 Location: relocateSpec, 1081 Template: false, 1082 Config: &configSpec, 1083 PowerOn: false, 1084 } 1085 log.Printf("[DEBUG] clone spec: %v", cloneSpec) 1086 1087 task, err := template.Clone(context.TODO(), dcFolders.VmFolder, vm.name, cloneSpec) 1088 if err != nil { 1089 return err 1090 } 1091 1092 _, err = task.WaitForResult(context.TODO(), nil) 1093 if err != nil { 1094 return err 1095 } 1096 1097 newVM, err := finder.VirtualMachine(context.TODO(), vm.name) 1098 if err != nil { 1099 return err 1100 } 1101 log.Printf("[DEBUG] new vm: %v", newVM) 1102 1103 devices, err := newVM.Device(context.TODO()) 1104 if err != nil { 1105 log.Printf("[DEBUG] Template devices can't be found") 1106 return err 1107 } 1108 1109 for _, dvc := range devices { 1110 // Issue 3559/3560: Delete all ethernet devices to add the correct ones later 1111 if devices.Type(dvc) == "ethernet" { 1112 err := newVM.RemoveDevice(context.TODO(), dvc) 1113 if err != nil { 1114 return err 1115 } 1116 } 1117 } 1118 // Add Network devices 1119 for _, dvc := range networkDevices { 1120 err := newVM.AddDevice( 1121 context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device) 1122 if err != nil { 1123 return err 1124 } 1125 } 1126 1127 taskb, err := newVM.Customize(context.TODO(), customSpec) 1128 if err != nil { 1129 return err 1130 } 1131 1132 _, err = taskb.WaitForResult(context.TODO(), nil) 1133 if err != nil { 1134 return err 1135 } 1136 log.Printf("[DEBUG]VM customization finished") 1137 1138 newVM.PowerOn(context.TODO()) 1139 1140 ip, err := newVM.WaitForIP(context.TODO()) 1141 if err != nil { 1142 return err 1143 } 1144 log.Printf("[DEBUG] ip address: %v", ip) 1145 1146 for i := 1; i < len(vm.hardDisks); i++ { 1147 err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, "eager_zeroed") 1148 if err != nil { 1149 return err 1150 } 1151 } 1152 log.Printf("[DEBUG] virtual machine config spec: %v", configSpec) 1153 return nil 1154 }