github.com/vmware/govmomi@v0.51.0/simulator/folder.go (about)

     1  // © Broadcom. All Rights Reserved.
     2  // The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
     3  // SPDX-License-Identifier: Apache-2.0
     4  
     5  package simulator
     6  
import (
	"errors"
	"fmt"
	"math/rand"
	"net/url"
	"path"
	"slices"
	"strings"
	"time"

	"github.com/google/uuid"

	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/vim25/methods"
	"github.com/vmware/govmomi/vim25/mo"
	"github.com/vmware/govmomi/vim25/soap"
	"github.com/vmware/govmomi/vim25/types"
)
    24  
// Folder is the simulator implementation of the vSphere Folder managed
// object, embedding the generated mo.Folder property struct.
type Folder struct {
	mo.Folder
}
    28  
    29  func asFolderMO(obj mo.Reference) (*mo.Folder, bool) {
    30  	if obj == nil {
    31  		return nil, false
    32  	}
    33  	f, ok := getManagedObject(obj).Addr().Interface().(*mo.Folder)
    34  	return f, ok
    35  }
    36  
    37  func folderEventArgument(f *mo.Folder) types.FolderEventArgument {
    38  	return types.FolderEventArgument{
    39  		Folder:              f.Self,
    40  		EntityEventArgument: types.EntityEventArgument{Name: f.Name},
    41  	}
    42  }
    43  
    44  // update references when objects are added/removed from a Folder
    45  func folderUpdate(ctx *Context, f *mo.Folder, o mo.Reference, u func(*Context, mo.Reference, *[]types.ManagedObjectReference, types.ManagedObjectReference)) {
    46  	ref := o.Reference()
    47  
    48  	if f.Parent == nil {
    49  		return // this is the root folder
    50  	}
    51  
    52  	switch ref.Type {
    53  	case "Datacenter", "Folder":
    54  		return // nothing to update
    55  	}
    56  
    57  	dc := ctx.Map.getEntityDatacenter(f)
    58  
    59  	switch ref.Type {
    60  	case "Network", "DistributedVirtualSwitch", "DistributedVirtualPortgroup":
    61  		u(ctx, dc, &dc.Network, ref)
    62  	case "Datastore":
    63  		u(ctx, dc, &dc.Datastore, ref)
    64  	}
    65  }
    66  
    67  func networkSummary(n *mo.Network) types.BaseNetworkSummary {
    68  	if n.Summary != nil {
    69  		return n.Summary
    70  	}
    71  	return &types.NetworkSummary{
    72  		Network:    &n.Self,
    73  		Name:       n.Name,
    74  		Accessible: true,
    75  	}
    76  }
    77  
// folderPutChild adds o as a child of f, registers it with the inventory
// map, updates the owning Datacenter's reference lists and, for network
// types, ensures a default Summary is populated. Both f and o are locked
// for the duration of the update.
func folderPutChild(ctx *Context, f *mo.Folder, o mo.Entity) {
	ctx.WithLock(f, func() {
		ctx.WithLock(o, func() {
			// Need to update ChildEntity before Map.Put for ContainerView updates to work properly
			f.ChildEntity = append(f.ChildEntity, ctx.Map.reference(o))
			ctx.Map.PutEntity(f, o)

			folderUpdate(ctx, f, o, ctx.Map.AddReference)

			// Populate a default Summary for network types that lack one.
			switch e := o.(type) {
			case *mo.Network:
				e.Summary = networkSummary(e)
			case *mo.OpaqueNetwork:
				e.Summary = networkSummary(&e.Network)
			case *DistributedVirtualPortgroup:
				e.Summary = networkSummary(&e.Network)
			}
		})
	})
}
    98  
// folderRemoveChild removes o from the inventory map entirely and then
// drops the child reference from f.
func folderRemoveChild(ctx *Context, f *mo.Folder, o mo.Reference) {
	ctx.Map.Remove(ctx, o.Reference())
	folderRemoveReference(ctx, f, o)
}
   103  
// folderRemoveReference removes o from f.ChildEntity (under f's lock) and
// updates the Datacenter reference lists, without removing o from the map.
func folderRemoveReference(ctx *Context, f *mo.Folder, o mo.Reference) {
	ctx.WithLock(f, func() {
		RemoveReference(&f.ChildEntity, o.Reference())

		folderUpdate(ctx, f, o, ctx.Map.RemoveReference)
	})
}
   111  
   112  func folderHasChildType(f *mo.Folder, kind string) bool {
   113  	for _, t := range f.ChildType {
   114  		if t == kind {
   115  			return true
   116  		}
   117  	}
   118  	return false
   119  }
   120  
// typeNotSupported returns a NotSupported fault whose message lists the
// child types this folder does accept.
func (f *Folder) typeNotSupported() *soap.Fault {
	return Fault(fmt.Sprintf("%s supports types: %#v", f.Self, f.ChildType), &types.NotSupported{})
}
   124  
   125  // AddOpaqueNetwork adds an OpaqueNetwork type to the inventory, with default backing to that of an nsx.LogicalSwitch.
   126  // The vSphere API does not have a method to add this directly, so it must either be called directly or via Model.OpaqueNetwork setting.
   127  func (f *Folder) AddOpaqueNetwork(ctx *Context, summary types.OpaqueNetworkSummary) error {
   128  	if !folderHasChildType(&f.Folder, "Network") {
   129  		return errors.New("not a network folder")
   130  	}
   131  
   132  	if summary.OpaqueNetworkId == "" {
   133  		summary.OpaqueNetworkId = uuid.New().String()
   134  	}
   135  	if summary.OpaqueNetworkType == "" {
   136  		summary.OpaqueNetworkType = "nsx.LogicalSwitch"
   137  	}
   138  	if summary.Name == "" {
   139  		summary.Name = summary.OpaqueNetworkType + "-" + summary.OpaqueNetworkId
   140  	}
   141  
   142  	net := new(mo.OpaqueNetwork)
   143  	if summary.Network == nil {
   144  		summary.Network = &net.Self
   145  	} else {
   146  		net.Self = *summary.Network
   147  	}
   148  	summary.Accessible = true
   149  	net.Network.Name = summary.Name
   150  	net.Summary = &summary
   151  
   152  	folderPutChild(ctx, &f.Folder, net)
   153  
   154  	return nil
   155  }
   156  
// addStandaloneHost is the task body for Folder.AddStandaloneHostTask.
type addStandaloneHost struct {
	*Folder
	ctx *Context
	req *types.AddStandaloneHost_Task
}
   162  
   163  func (add *addStandaloneHost) Run(task *Task) (types.AnyType, types.BaseMethodFault) {
   164  	host, err := CreateStandaloneHost(add.ctx, add.Folder, add.req.Spec)
   165  	if err != nil {
   166  		return nil, err
   167  	}
   168  
   169  	if add.req.AddConnected {
   170  		host.Runtime.ConnectionState = types.HostSystemConnectionStateConnected
   171  	}
   172  
   173  	return host.Reference(), nil
   174  }
   175  
   176  func (f *Folder) AddStandaloneHostTask(ctx *Context, a *types.AddStandaloneHost_Task) soap.HasFault {
   177  	r := &methods.AddStandaloneHost_TaskBody{}
   178  
   179  	if folderHasChildType(&f.Folder, "ComputeResource") && folderHasChildType(&f.Folder, "Folder") {
   180  		r.Res = &types.AddStandaloneHost_TaskResponse{
   181  			Returnval: NewTask(&addStandaloneHost{f, ctx, a}).Run(ctx),
   182  		}
   183  	} else {
   184  		r.Fault_ = f.typeNotSupported()
   185  	}
   186  
   187  	return r
   188  }
   189  
   190  func (f *Folder) CreateFolder(ctx *Context, c *types.CreateFolder) soap.HasFault {
   191  	r := &methods.CreateFolderBody{}
   192  
   193  	if folderHasChildType(&f.Folder, "Folder") {
   194  		name := escapeSpecialCharacters(c.Name)
   195  
   196  		if obj := ctx.Map.FindByName(name, f.ChildEntity); obj != nil {
   197  			r.Fault_ = Fault("", &types.DuplicateName{
   198  				Name:   name,
   199  				Object: obj.Reference(),
   200  			})
   201  
   202  			return r
   203  		}
   204  
   205  		folder := &Folder{}
   206  
   207  		folder.Name = name
   208  		folder.ChildType = f.ChildType
   209  
   210  		folderPutChild(ctx, &f.Folder, folder)
   211  
   212  		r.Res = &types.CreateFolderResponse{
   213  			Returnval: folder.Self,
   214  		}
   215  	} else {
   216  		r.Fault_ = f.typeNotSupported()
   217  	}
   218  
   219  	return r
   220  }
   221  
   222  func escapeSpecialCharacters(name string) string {
   223  	name = strings.ReplaceAll(name, `%`, strings.ToLower(url.QueryEscape(`%`)))
   224  	name = strings.ReplaceAll(name, `/`, strings.ToLower(url.QueryEscape(`/`)))
   225  	name = strings.ReplaceAll(name, `\`, strings.ToLower(url.QueryEscape(`\`)))
   226  	return name
   227  }
   228  
// StoragePod aka "Datastore Cluster" is the simulator implementation of
// the StoragePod managed object, embedding the generated mo.StoragePod.
type StoragePod struct {
	mo.StoragePod
}
   233  
   234  func (f *Folder) CreateStoragePod(ctx *Context, c *types.CreateStoragePod) soap.HasFault {
   235  	r := &methods.CreateStoragePodBody{}
   236  
   237  	if folderHasChildType(&f.Folder, "StoragePod") {
   238  		if obj := ctx.Map.FindByName(c.Name, f.ChildEntity); obj != nil {
   239  			r.Fault_ = Fault("", &types.DuplicateName{
   240  				Name:   c.Name,
   241  				Object: obj.Reference(),
   242  			})
   243  
   244  			return r
   245  		}
   246  
   247  		pod := &StoragePod{}
   248  
   249  		pod.Name = c.Name
   250  		pod.ChildType = []string{"Datastore"}
   251  		pod.Summary = new(types.StoragePodSummary)
   252  		pod.PodStorageDrsEntry = new(types.PodStorageDrsEntry)
   253  		pod.PodStorageDrsEntry.StorageDrsConfig.PodConfig.Enabled = true
   254  
   255  		folderPutChild(ctx, &f.Folder, pod)
   256  
   257  		r.Res = &types.CreateStoragePodResponse{
   258  			Returnval: pod.Self,
   259  		}
   260  	} else {
   261  		r.Fault_ = f.typeNotSupported()
   262  	}
   263  
   264  	return r
   265  }
   266  
// MoveIntoFolderTask moves entities into this datastore cluster by
// delegating to Folder.MoveIntoFolderTask on a Folder copy of the pod and,
// on success, appending the moved references to the pod's ChildEntity.
func (p *StoragePod) MoveIntoFolderTask(ctx *Context, c *types.MoveIntoFolder_Task) soap.HasFault {
	task := CreateTask(p, "moveIntoFolder", func(*Task) (types.AnyType, types.BaseMethodFault) {
		// Run the folder-level move and block until it completes.
		f := &Folder{Folder: p.Folder}
		id := f.MoveIntoFolderTask(ctx, c).(*methods.MoveIntoFolder_TaskBody).Res.Returnval
		ftask := ctx.Map.Get(id).(*Task)
		ftask.Wait()
		if ftask.Info.Error != nil {
			return nil, ftask.Info.Error.Fault
		}
		// Propagate the children gained by the copy back onto the pod.
		p.ChildEntity = append(p.ChildEntity, f.ChildEntity...)
		return nil, nil
	})
	return &methods.MoveIntoFolder_TaskBody{
		Res: &types.MoveIntoFolder_TaskResponse{
			Returnval: task.Run(ctx),
		},
	}
}
   285  
   286  func (f *Folder) CreateDatacenter(ctx *Context, c *types.CreateDatacenter) soap.HasFault {
   287  	r := &methods.CreateDatacenterBody{}
   288  
   289  	if folderHasChildType(&f.Folder, "Datacenter") && folderHasChildType(&f.Folder, "Folder") {
   290  		dc := NewDatacenter(ctx, &f.Folder)
   291  
   292  		ctx.Update(dc, []types.PropertyChange{
   293  			{Name: "name", Val: c.Name},
   294  		})
   295  
   296  		r.Res = &types.CreateDatacenterResponse{
   297  			Returnval: dc.Self,
   298  		}
   299  
   300  		ctx.postEvent(&types.DatacenterCreatedEvent{
   301  			DatacenterEvent: types.DatacenterEvent{
   302  				Event: types.Event{
   303  					Datacenter: datacenterEventArgument(ctx, dc),
   304  				},
   305  			},
   306  			Parent: folderEventArgument(&f.Folder),
   307  		})
   308  	} else {
   309  		r.Fault_ = f.typeNotSupported()
   310  	}
   311  
   312  	return r
   313  }
   314  
   315  func (f *Folder) CreateClusterEx(ctx *Context, c *types.CreateClusterEx) soap.HasFault {
   316  	r := &methods.CreateClusterExBody{}
   317  
   318  	if folderHasChildType(&f.Folder, "ComputeResource") && folderHasChildType(&f.Folder, "Folder") {
   319  		cluster, err := CreateClusterComputeResource(ctx, f, c.Name, c.Spec)
   320  		if err != nil {
   321  			r.Fault_ = Fault("", err)
   322  			return r
   323  		}
   324  
   325  		r.Res = &types.CreateClusterExResponse{
   326  			Returnval: cluster.Self,
   327  		}
   328  	} else {
   329  		r.Fault_ = f.typeNotSupported()
   330  	}
   331  
   332  	return r
   333  }
   334  
// createVM is the task body shared by CreateVM_Task and RegisterVM_Task.
type createVM struct {
	*Folder

	ctx *Context
	req *types.CreateVM_Task

	// register distinguishes RegisterVM_Task (true) from CreateVM_Task
	// (false); it is forwarded to VirtualMachine.create.
	register bool
}
   343  
   344  // hostsWithDatastore returns hosts that have access to the given datastore path
   345  func hostsWithDatastore(ctx *Context, hosts []types.ManagedObjectReference, path string) []types.ManagedObjectReference {
   346  	attached := hosts[:0]
   347  	var p object.DatastorePath
   348  	p.FromString(path)
   349  
   350  	for _, host := range hosts {
   351  		h := ctx.Map.Get(host).(*HostSystem)
   352  		if ctx.Map.FindByName(p.Datastore, h.Datastore) != nil {
   353  			attached = append(attached, host)
   354  		}
   355  	}
   356  
   357  	return attached
   358  }
   359  
// Run implements the CreateVM/RegisterVM task: it builds the VirtualMachine,
// selects a host, applies encryption key defaults, wires the VM into the
// host/datastore/pool inventory lists and posts the creation events.
func (c *createVM) Run(task *Task) (types.AnyType, types.BaseMethodFault) {
	config := &c.req.Config
	// escape special characters in vm name
	if config.Name != escapeSpecialCharacters(config.Name) {
		deepCopy(c.req.Config, config)
		config.Name = escapeSpecialCharacters(config.Name)
	}

	vm, err := NewVirtualMachine(c.ctx, c.Folder.Self, &c.req.Config)
	if err != nil {
		return nil, err
	}

	vm.ResourcePool = &c.req.Pool

	// When no host is given, pick a random host of the pool's compute
	// resource that has access to the VM's datastore.
	if c.req.Host == nil {
		pool := c.ctx.Map.Get(c.req.Pool).(mo.Entity)
		cr := c.ctx.Map.getEntityComputeResource(pool)

		c.ctx.WithLock(cr, func() {
			var hosts []types.ManagedObjectReference
			switch cr := cr.(type) {
			case *mo.ComputeResource:
				hosts = cr.Host
			case *ClusterComputeResource:
				hosts = cr.Host
			}

			hosts = hostsWithDatastore(c.ctx, hosts, c.req.Config.Files.VmPathName)
			host := hosts[rand.Intn(len(hosts))]
			vm.Runtime.Host = &host
		})
	} else {
		vm.Runtime.Host = c.req.Host
	}

	// For an encrypted VM without an explicit key, generate one from the
	// default (or the requested) key provider.
	if cryptoSpec, ok := c.req.Config.Crypto.(*types.CryptoSpecEncrypt); ok {
		if cryptoSpec.CryptoKeyId.KeyId == "" {
			if cryptoSpec.CryptoKeyId.ProviderId == nil {
				providerID, keyID := getDefaultProvider(c.ctx, vm, true)
				if providerID == "" {
					return nil, &types.InvalidVmConfig{Property: "configSpec.crypto"}
				}
				vm.Config.KeyId = &types.CryptoKeyId{
					KeyId: keyID,
					ProviderId: &types.KeyProviderId{
						Id: providerID,
					},
				}
			} else {
				providerID := cryptoSpec.CryptoKeyId.ProviderId.Id
				keyID := generateKeyForProvider(c.ctx, providerID)
				vm.Config.KeyId = &types.CryptoKeyId{
					KeyId: keyID,
					ProviderId: &types.KeyProviderId{
						Id: providerID,
					},
				}
			}
		}
	}

	// A freshly created VM reports no guest tools.
	vm.Guest = &types.GuestInfo{
		ToolsStatus:        types.VirtualMachineToolsStatusToolsNotInstalled,
		ToolsVersion:       "0",
		ToolsRunningStatus: string(types.VirtualMachineToolsRunningStatusGuestToolsNotRunning),
	}

	vm.Summary.Guest = &types.VirtualMachineGuestSummary{
		ToolsStatus: vm.Guest.ToolsStatus,
	}
	vm.Summary.Config.VmPathName = vm.Config.Files.VmPathName
	vm.Summary.Runtime.Host = vm.Runtime.Host

	err = vm.create(c.ctx, &c.req.Config, c.register)
	if err != nil {
		// Roll back: drop the partially created VM from the folder.
		folderRemoveChild(c.ctx, &c.Folder.Folder, vm)
		return nil, err
	}

	host := c.ctx.Map.Get(*vm.Runtime.Host).(*HostSystem)
	c.ctx.Map.AppendReference(c.ctx, host, &host.Vm, vm.Self)
	vm.EnvironmentBrowser = *hostParent(task.ctx, &host.HostSystem).EnvironmentBrowser

	// Register the VM with each datastore it resides on.
	for i := range vm.Datastore {
		ds := c.ctx.Map.Get(vm.Datastore[i]).(*Datastore)
		c.ctx.Map.AppendReference(c.ctx, ds, &ds.Vm, vm.Self)
	}

	pool := c.ctx.Map.Get(*vm.ResourcePool)
	// This can be an internal call from VirtualApp.CreateChildVMTask, where pool is already locked.
	c.ctx.WithLock(pool, func() {
		if rp, ok := asResourcePoolMO(pool); ok {
			rp.Vm = append(rp.Vm, vm.Self)
		}
		if vapp, ok := pool.(*VirtualApp); ok {
			vapp.Vm = append(vapp.Vm, vm.Self)
		}
	})

	// Post the same sequence of events vCenter emits for a new VM.
	event := vm.event(c.ctx)
	c.ctx.postEvent(
		&types.VmBeingCreatedEvent{
			VmEvent:    event,
			ConfigSpec: &c.req.Config,
		},
		&types.VmInstanceUuidAssignedEvent{
			VmEvent:      event,
			InstanceUuid: vm.Config.InstanceUuid,
		},
		&types.VmUuidAssignedEvent{
			VmEvent: event,
			Uuid:    vm.Config.Uuid,
		},
		&types.VmCreatedEvent{
			VmEvent: event,
		},
	)

	vm.RefreshStorageInfo(c.ctx, nil)

	c.ctx.Update(vm, []types.PropertyChange{
		{Name: "name", Val: c.req.Config.Name},
	})

	return vm.Reference(), nil
}
   487  
   488  func (f *Folder) CreateVMTask(ctx *Context, c *types.CreateVM_Task) soap.HasFault {
   489  	return &methods.CreateVM_TaskBody{
   490  		Res: &types.CreateVM_TaskResponse{
   491  			Returnval: NewTask(&createVM{f, ctx, c, false}).Run(ctx),
   492  		},
   493  	}
   494  }
   495  
// registerVM is the task body for Folder.RegisterVMTask.
type registerVM struct {
	*Folder

	ctx *Context
	req *types.RegisterVM_Task
}
   502  
// Run validates the register request, resolves the resource pool, rejects
// already-registered paths, defaults the VM name from its path, and then
// delegates to the createVM task body in register mode.
func (c *registerVM) Run(task *Task) (types.AnyType, types.BaseMethodFault) {
	host := c.req.Host
	pool := c.req.Pool

	if c.req.AsTemplate {
		// Registering as a template requires a host and forbids a pool;
		// the host's parent compute resource pool is used instead.
		if host == nil {
			return nil, &types.InvalidArgument{InvalidProperty: "host"}
		} else if pool != nil {
			return nil, &types.InvalidArgument{InvalidProperty: "pool"}
		}

		pool = hostParent(c.ctx, &c.ctx.Map.Get(*host).(*HostSystem).HostSystem).ResourcePool
	} else {
		if pool == nil {
			return nil, &types.InvalidArgument{InvalidProperty: "pool"}
		}
	}

	if c.req.Path == "" {
		return nil, &types.InvalidArgument{InvalidProperty: "path"}
	}

	// Fail if a VM with this datastore path is already registered in the
	// folder's datacenter.
	s := c.ctx.Map.SearchIndex()
	r := s.FindByDatastorePath(c.ctx, &types.FindByDatastorePath{
		This:       s.Reference(),
		Path:       c.req.Path,
		Datacenter: c.ctx.Map.getEntityDatacenter(c.Folder).Reference(),
	})

	if ref := r.(*methods.FindByDatastorePathBody).Res.Returnval; ref != nil {
		return nil, &types.AlreadyExists{Name: ref.Value}
	}

	if c.req.Name == "" {
		p, err := parseDatastorePath(c.req.Path)
		if err != nil {
			return nil, err
		}

		// Default the name to the VM's directory within the datastore.
		c.req.Name = path.Dir(p.Path)
	}

	// Reuse the createVM task body with register=true.
	create := NewTask(&createVM{
		Folder:   c.Folder,
		register: true,
		ctx:      c.ctx,
		req: &types.CreateVM_Task{
			This: c.Folder.Reference(),
			Config: types.VirtualMachineConfigSpec{
				Name: c.req.Name,
				Files: &types.VirtualMachineFileInfo{
					VmPathName: c.req.Path,
				},
			},
			Pool: *pool,
			Host: host,
		},
	})

	create.RunBlocking(c.ctx)

	if create.Info.Error != nil {
		return nil, create.Info.Error.Fault
	}

	return create.Info.Result, nil
}
   570  
   571  func (f *Folder) RegisterVMTask(ctx *Context, c *types.RegisterVM_Task) soap.HasFault {
   572  	return &methods.RegisterVM_TaskBody{
   573  		Res: &types.RegisterVM_TaskResponse{
   574  			Returnval: NewTask(&registerVM{f, ctx, c}).Run(ctx),
   575  		},
   576  	}
   577  }
   578  
   579  func (f *Folder) MoveIntoFolderTask(ctx *Context, c *types.MoveIntoFolder_Task) soap.HasFault {
   580  	task := CreateTask(f, "moveIntoFolder", func(t *Task) (types.AnyType, types.BaseMethodFault) {
   581  		for _, ref := range c.List {
   582  			obj := ctx.Map.Get(ref).(mo.Entity)
   583  
   584  			parent, ok := ctx.Map.Get(*(obj.Entity()).Parent).(*Folder)
   585  
   586  			if !ok || !folderHasChildType(&f.Folder, ref.Type) {
   587  				return nil, &types.NotSupported{}
   588  			}
   589  
   590  			folderRemoveReference(ctx, &parent.Folder, ref)
   591  			folderPutChild(ctx, &f.Folder, obj)
   592  		}
   593  
   594  		return nil, nil
   595  	})
   596  
   597  	return &methods.MoveIntoFolder_TaskBody{
   598  		Res: &types.MoveIntoFolder_TaskResponse{
   599  			Returnval: task.Run(ctx),
   600  		},
   601  	}
   602  }
   603  
// CreateDVSTask creates a DistributedVirtualSwitch in this folder from the
// given spec, builds its summary/config info, posts a DvsCreatedEvent and
// creates the default uplink portgroup.
func (f *Folder) CreateDVSTask(ctx *Context, req *types.CreateDVS_Task) soap.HasFault {
	task := CreateTask(f, "createDVS", func(t *Task) (types.AnyType, types.BaseMethodFault) {
		spec := req.Spec.ConfigSpec.GetDVSConfigSpec()
		dvs := &DistributedVirtualSwitch{}
		dvs.Name = spec.Name
		dvs.Entity().Name = dvs.Name

		// Switch names must be unique within the folder.
		if ctx.Map.FindByName(dvs.Name, f.ChildEntity) != nil {
			return nil, &types.InvalidArgument{InvalidProperty: "name"}
		}

		dvs.Uuid = newUUID(dvs.Name)

		folderPutChild(ctx, &f.Folder, dvs)

		dvs.Summary = types.DVSSummary{
			Name:        dvs.Name,
			Uuid:        dvs.Uuid,
			NumPorts:    spec.NumStandalonePorts,
			ProductInfo: req.Spec.ProductInfo,
			Description: spec.Description,
		}

		// Map the generic DVSConfigSpec fields into the config info.
		configInfo := &types.VMwareDVSConfigInfo{
			DVSConfigInfo: types.DVSConfigInfo{
				Uuid:                                dvs.Uuid,
				Name:                                spec.Name,
				ConfigVersion:                       spec.ConfigVersion,
				NumStandalonePorts:                  spec.NumStandalonePorts,
				MaxPorts:                            spec.MaxPorts,
				UplinkPortPolicy:                    spec.UplinkPortPolicy,
				UplinkPortgroup:                     spec.UplinkPortgroup,
				DefaultPortConfig:                   spec.DefaultPortConfig,
				ExtensionKey:                        spec.ExtensionKey,
				Description:                         spec.Description,
				Policy:                              spec.Policy,
				VendorSpecificConfig:                spec.VendorSpecificConfig,
				SwitchIpAddress:                     spec.SwitchIpAddress,
				DefaultProxySwitchMaxNumPorts:       spec.DefaultProxySwitchMaxNumPorts,
				InfrastructureTrafficResourceConfig: spec.InfrastructureTrafficResourceConfig,
				NetworkResourceControlVersion:       spec.NetworkResourceControlVersion,
			},
		}

		// Copy VMware-specific settings when the spec is the VMware variant.
		if spec, ok := req.Spec.ConfigSpec.(*types.VMwareDVSConfigSpec); ok {
			configInfo.LinkDiscoveryProtocolConfig = spec.LinkDiscoveryProtocolConfig
			configInfo.MaxMtu = spec.MaxMtu
			configInfo.IpfixConfig = spec.IpfixConfig
			configInfo.LacpApiVersion = spec.LacpApiVersion
			configInfo.MulticastFilteringMode = spec.MulticastFilteringMode
			configInfo.NetworkOffloadSpecId = spec.NetworkOffloadSpecId
		}

		if spec.Contact != nil {
			configInfo.Contact = *spec.Contact
		}

		dvs.Config = configInfo

		// Default the product info from the service content's About info.
		if dvs.Summary.ProductInfo == nil {
			product := ctx.Map.content().About
			dvs.Summary.ProductInfo = &types.DistributedVirtualSwitchProductSpec{
				Name:            "DVS",
				Vendor:          product.Vendor,
				Version:         product.Version,
				Build:           product.Build,
				ForwardingClass: "etherswitch",
			}
		}

		ctx.postEvent(&types.DvsCreatedEvent{
			DvsEvent: dvs.event(ctx),
			Parent:   folderEventArgument(&f.Folder),
		})

		// Create the default "-DVUplinks-" portgroup, trunking all VLANs.
		dvs.AddDVPortgroupTask(ctx, &types.AddDVPortgroup_Task{
			Spec: []types.DVPortgroupConfigSpec{{
				Name:     dvs.Name + "-DVUplinks" + strings.TrimPrefix(dvs.Self.Value, "dvs"),
				Type:     string(types.DistributedVirtualPortgroupPortgroupTypeEarlyBinding),
				NumPorts: 1,
				DefaultPortConfig: &types.VMwareDVSPortSetting{
					Vlan: &types.VmwareDistributedVirtualSwitchTrunkVlanSpec{
						VlanId: []types.NumericRange{{Start: 0, End: 4094}},
					},
					UplinkTeamingPolicy: &types.VmwareUplinkPortTeamingPolicy{
						Policy: &types.StringPolicy{
							Value: "loadbalance_srcid",
						},
						ReversePolicy: &types.BoolPolicy{
							Value: types.NewBool(true),
						},
						NotifySwitches: &types.BoolPolicy{
							Value: types.NewBool(true),
						},
						RollingOrder: &types.BoolPolicy{
							Value: types.NewBool(true),
						},
					},
				},
			}},
		})

		return dvs.Reference(), nil
	})

	return &methods.CreateDVS_TaskBody{
		Res: &types.CreateDVS_TaskResponse{
			Returnval: task.Run(ctx),
		},
	}
}
   715  
// RenameTask renames this folder via the shared RenameTask helper.
func (f *Folder) RenameTask(ctx *Context, r *types.Rename_Task) soap.HasFault {
	return RenameTask(ctx, f, r)
}
   719  
// DestroyTask destroys this folder: it first destroys every child that
// supports DestroyTask (failing fast on the first child fault), then
// removes the folder from its parent.
func (f *Folder) DestroyTask(ctx *Context, req *types.Destroy_Task) soap.HasFault {
	// destroyer matches any managed object exposing a DestroyTask method.
	type destroyer interface {
		mo.Reference
		DestroyTask(*types.Destroy_Task) soap.HasFault
	}

	task := CreateTask(f, "destroy", func(*Task) (types.AnyType, types.BaseMethodFault) {
		// Attempt to destroy all children
		for _, c := range f.ChildEntity {
			obj, ok := ctx.Map.Get(c).(destroyer)
			if !ok {
				continue
			}

			var fault types.BaseMethodFault
			ctx.WithLock(obj, func() {
				// Run the child's own destroy task and wait for it.
				id := obj.DestroyTask(&types.Destroy_Task{
					This: c,
				}).(*methods.Destroy_TaskBody).Res.Returnval

				t := ctx.Map.Get(id).(*Task)
				t.Wait()
				if t.Info.Error != nil {
					fault = t.Info.Error.Fault // For example, can't destroy a powered on VM
				}
			})
			if fault != nil {
				return nil, fault
			}
		}

		// Remove the folder itself
		folderRemoveChild(ctx, &ctx.Map.Get(*f.Parent).(*Folder).Folder, f.Self)
		return nil, nil
	})

	return &methods.Destroy_TaskBody{
		Res: &types.Destroy_TaskResponse{
			Returnval: task.Run(ctx),
		},
	}
}
   762  
   763  func addPlacementFault(body *methods.PlaceVmsXClusterBody, vmName string, vmRef *types.ManagedObjectReference, poolRef types.ManagedObjectReference) {
   764  	faults := types.PlaceVmsXClusterResultPlacementFaults{
   765  		VmName:       vmName,
   766  		ResourcePool: poolRef,
   767  		Vm:           vmRef,
   768  		Faults: []types.LocalizedMethodFault{
   769  			{
   770  				Fault: &types.GenericDrsFault{},
   771  			},
   772  		},
   773  	}
   774  	body.Res.Returnval.Faults = append(body.Res.Returnval.Faults, faults)
   775  }
   776  
   777  func generateRelocatePlacementAction(ctx *Context, inputRelocateSpec *types.VirtualMachineRelocateSpec, pool *ResourcePool,
   778  	cluster *ClusterComputeResource, hostRequired, datastoreRequired bool) *types.ClusterClusterRelocatePlacementAction {
   779  	var relocateSpec *types.VirtualMachineRelocateSpec
   780  
   781  	placementAction := types.ClusterClusterRelocatePlacementAction{
   782  		Pool: pool.Self,
   783  	}
   784  
   785  	if hostRequired {
   786  		randomHost := cluster.Host[rand.Intn(len(cluster.Host))]
   787  		placementAction.TargetHost = &randomHost
   788  	}
   789  
   790  	if datastoreRequired {
   791  		relocateSpec = inputRelocateSpec
   792  
   793  		ds := ctx.Map.Get(cluster.Datastore[rand.Intn(len(cluster.Datastore))]).(*Datastore)
   794  
   795  		relocateSpec.Datastore = types.NewReference(ds.Reference())
   796  
   797  		for _, diskLocator := range relocateSpec.Disk {
   798  			diskLocator.Datastore = ds.Reference()
   799  		}
   800  	}
   801  
   802  	placementAction.RelocateSpec = relocateSpec
   803  	return &placementAction
   804  }
   805  
// generateRecommendationForRelocate produces one placement recommendation
// per VmPlacementSpec for a cross-cluster relocate request, picking a
// random candidate resource pool for each VM. Specs missing a RelocateSpec
// or Vm reference fault the whole request; a pool whose cluster has no
// hosts yields a placement fault for that VM only.
func generateRecommendationForRelocate(ctx *Context, req *types.PlaceVmsXCluster) *methods.PlaceVmsXClusterBody {

	pools := req.PlacementSpec.ResourcePools
	specs := req.PlacementSpec.VmPlacementSpecs

	body := new(methods.PlaceVmsXClusterBody)
	body.Res = new(types.PlaceVmsXClusterResponse)
	hostRequired := req.PlacementSpec.HostRecommRequired != nil && *req.PlacementSpec.HostRecommRequired
	datastoreRequired := req.PlacementSpec.DatastoreRecommRequired != nil && *req.PlacementSpec.DatastoreRecommRequired

	for _, spec := range specs {

		// The RelocateSpec must be set.
		if spec.RelocateSpec == nil {
			body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "relocateSpec"})
			return body
		}

		// The VM Reference must be set.
		if spec.Vm == nil {
			body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "vm"})
			return body
		}

		vmRef := ctx.Map.Get(*spec.Vm)
		if vmRef == nil {
			body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "vm"})
			return body
		}

		vm := vmRef.(*VirtualMachine)
		// Pick a random candidate pool and resolve its owning cluster.
		pool := ctx.Map.Get(pools[rand.Intn(len(pools))]).(*ResourcePool)
		cluster := ctx.Map.Get(pool.Owner).(*ClusterComputeResource)

		// A cluster without hosts cannot place the VM; record a fault.
		if len(cluster.Host) == 0 {
			addPlacementFault(body, spec.ConfigSpec.Name, &vm.Self, pool.Self)
			continue
		}

		reco := types.ClusterRecommendation{
			Key:        "1",
			Type:       "V1",
			Time:       time.Now(),
			Rating:     1,
			Reason:     string(types.RecommendationReasonCodeXClusterPlacement),
			ReasonText: string(types.RecommendationReasonCodeXClusterPlacement),
			Target:     &cluster.Self,
		}

		placementAction := generateRelocatePlacementAction(ctx, spec.RelocateSpec, pool, cluster, hostRequired, datastoreRequired)

		reco.Action = append(reco.Action, placementAction)

		body.Res.Returnval.PlacementInfos = append(body.Res.Returnval.PlacementInfos,
			types.PlaceVmsXClusterResultPlacementInfo{
				VmName:         vm.Name,
				Recommendation: reco,
				Vm:             &vm.Self,
			},
		)
	}
	return body
}
   869  
// generateReconfigurePlacementAction builds a reconfigure placement action
// for the given pool, optionally picking a random target host and datastore
// from the cluster. The ConfigSpec is only attached when a datastore
// recommendation is required.
func generateReconfigurePlacementAction(ctx *Context, inputConfigSpec *types.VirtualMachineConfigSpec, pool *ResourcePool,
	cluster *ClusterComputeResource, hostRequired, datastoreRequired bool) *types.ClusterClusterReconfigurePlacementAction {
	var configSpec *types.VirtualMachineConfigSpec

	placementAction := types.ClusterClusterReconfigurePlacementAction{
		Pool: pool.Self,
	}

	if hostRequired {
		randomHost := cluster.Host[rand.Intn(len(cluster.Host))]
		placementAction.TargetHost = &randomHost
	}

	if datastoreRequired {
		configSpec = inputConfigSpec

		ds := ctx.Map.Get(cluster.Datastore[rand.Intn(len(cluster.Datastore))]).(*Datastore)

		// NOTE(review): configSpec aliases inputConfigSpec here, so source
		// and destination passed to fillConfigSpecWithDatastore are the
		// same object — presumably an in-place update; confirm against the
		// helper's definition.
		fillConfigSpecWithDatastore(ctx, inputConfigSpec, configSpec, ds)
	}

	placementAction.ConfigSpec = configSpec
	return &placementAction
}
   894  
// generateRecommendationForReconfigure handles the Reconfigure placement type
// of PlaceVmsXCluster: each VM stays on its current host's cluster, and a
// reconfigure placement action is generated against that cluster's root
// resource pool. Validation faults short-circuit the whole request, while
// per-VM placement failures are reported via addPlacementFault and processing
// continues with the next spec.
func generateRecommendationForReconfigure(ctx *Context, req *types.PlaceVmsXCluster) *methods.PlaceVmsXClusterBody {

	pools := req.PlacementSpec.ResourcePools
	specs := req.PlacementSpec.VmPlacementSpecs

	body := new(methods.PlaceVmsXClusterBody)
	body.Res = new(types.PlaceVmsXClusterResponse)
	// Optional flags: a nil pointer means the recommendation was not requested.
	hostRequired := req.PlacementSpec.HostRecommRequired != nil && *req.PlacementSpec.HostRecommRequired
	datastoreRequired := req.PlacementSpec.DatastoreRecommRequired != nil && *req.PlacementSpec.DatastoreRecommRequired

	for _, spec := range specs {

		// Only a single pool must be set
		if len(pools) != 1 {
			body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "resourcePools"})
			return body
		}

		// The RelocateSpec must not be set.
		if spec.RelocateSpec != nil {
			body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "relocateSpec"})
			return body
		}

		// The VM Reference must be set.
		if spec.Vm == nil {
			body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "vm"})
			return body
		}

		// The referenced VM must exist in the inventory.
		vmRef := ctx.Map.Get(*spec.Vm)
		if vmRef == nil {
			body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "vm"})
			return body
		}

		vm := vmRef.(*VirtualMachine)

		// Use VM's current host
		host := ctx.Map.Get(vm.Runtime.Host.Reference()).(*HostSystem)

		// Reconfigure placement only applies to VMs whose host is part of a
		// cluster; report a per-VM fault and keep going otherwise.
		if host.Parent.Type != "ClusterComputeResource" {
			addPlacementFault(body, spec.ConfigSpec.Name, &vm.Self, host.Self)
			continue
		}

		cluster := ctx.Map.Get(*host.Parent).(*ClusterComputeResource)
		pool := ctx.Map.Get(*cluster.ResourcePool).(*ResourcePool)

		// The simulator always returns a single, fixed-rating recommendation
		// targeting the VM's current cluster.
		reco := types.ClusterRecommendation{
			Key:        "1",
			Type:       "V1",
			Time:       time.Now(),
			Rating:     1,
			Reason:     string(types.RecommendationReasonCodeXClusterPlacement),
			ReasonText: string(types.RecommendationReasonCodeXClusterPlacement),
			Target:     &cluster.Self,
		}

		placementAction := generateReconfigurePlacementAction(ctx, &spec.ConfigSpec, pool, cluster, hostRequired, datastoreRequired)

		reco.Action = append(reco.Action, placementAction)

		body.Res.Returnval.PlacementInfos = append(body.Res.Returnval.PlacementInfos,
			types.PlaceVmsXClusterResultPlacementInfo{
				VmName:         vm.Name,
				Recommendation: reco,
				Vm:             &vm.Self,
			},
		)
	}
	return body
}
   968  
   969  func fillConfigSpecWithDatastore(ctx *Context, inputConfigSpec, configSpec *types.VirtualMachineConfigSpec, ds *Datastore) {
   970  	if configSpec.Files == nil {
   971  		configSpec.Files = new(types.VirtualMachineFileInfo)
   972  	}
   973  	configSpec.Files.VmPathName = fmt.Sprintf("[%[1]s] %[2]s/%[2]s.vmx", ds.Name, inputConfigSpec.Name)
   974  
   975  	for _, change := range configSpec.DeviceChange {
   976  		dspec := change.GetVirtualDeviceConfigSpec()
   977  
   978  		if dspec.FileOperation != types.VirtualDeviceConfigSpecFileOperationCreate {
   979  			continue
   980  		}
   981  
   982  		switch dspec.Operation {
   983  		case types.VirtualDeviceConfigSpecOperationAdd:
   984  			device := dspec.Device
   985  			d := device.GetVirtualDevice()
   986  
   987  			switch device.(type) {
   988  			case *types.VirtualDisk:
   989  				switch b := d.Backing.(type) {
   990  				case types.BaseVirtualDeviceFileBackingInfo:
   991  					info := b.GetVirtualDeviceFileBackingInfo()
   992  					info.Datastore = types.NewReference(ds.Reference())
   993  
   994  					var dsPath object.DatastorePath
   995  					if dsPath.FromString(info.FileName) {
   996  						dsPath.Datastore = ds.Name
   997  						info.FileName = dsPath.String()
   998  					}
   999  				}
  1000  			}
  1001  		}
  1002  	}
  1003  }
  1004  
  1005  func generateInitialPlacementAction(ctx *Context, inputConfigSpec *types.VirtualMachineConfigSpec, pool *ResourcePool,
  1006  	cluster *ClusterComputeResource, hostRequired, datastoreRequired bool) *types.ClusterClusterInitialPlacementAction {
  1007  	var configSpec *types.VirtualMachineConfigSpec
  1008  
  1009  	placementAction := types.ClusterClusterInitialPlacementAction{
  1010  		Pool: pool.Self,
  1011  	}
  1012  
  1013  	if hostRequired {
  1014  		randomHost := cluster.Host[rand.Intn(len(cluster.Host))]
  1015  		placementAction.TargetHost = &randomHost
  1016  	}
  1017  
  1018  	if datastoreRequired {
  1019  		configSpec = inputConfigSpec
  1020  
  1021  		// TODO: This is just an initial implementation aimed at returning some data but it is not
  1022  		// necessarily fully consistent, like we should ensure the host, if also required, has the
  1023  		// datastore mounted.
  1024  		ds := ctx.Map.Get(cluster.Datastore[rand.Intn(len(cluster.Datastore))]).(*Datastore)
  1025  
  1026  		fillConfigSpecWithDatastore(ctx, inputConfigSpec, configSpec, ds)
  1027  	}
  1028  
  1029  	placementAction.ConfigSpec = configSpec
  1030  	return &placementAction
  1031  }
  1032  
// generateRecommendationForCreateAndPowerOn handles the CreateAndPowerOn
// (default) placement type of PlaceVmsXCluster: a random resource pool is
// chosen for each spec and an initial placement action is generated against
// its owning cluster. Validation faults short-circuit the whole request,
// while a cluster without hosts is reported via addPlacementFault and
// processing continues with the next spec.
func generateRecommendationForCreateAndPowerOn(ctx *Context, req *types.PlaceVmsXCluster) *methods.PlaceVmsXClusterBody {

	pools := req.PlacementSpec.ResourcePools
	specs := req.PlacementSpec.VmPlacementSpecs

	body := new(methods.PlaceVmsXClusterBody)
	body.Res = new(types.PlaceVmsXClusterResponse)
	// Optional flags: a nil pointer means the recommendation was not requested.
	hostRequired := req.PlacementSpec.HostRecommRequired != nil && *req.PlacementSpec.HostRecommRequired
	datastoreRequired := req.PlacementSpec.DatastoreRecommRequired != nil && *req.PlacementSpec.DatastoreRecommRequired

	for _, spec := range specs {

		// The RelocateSpec must not be set.
		if spec.RelocateSpec != nil {
			body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "relocateSpec"})
			return body
		}

		// The name in the ConfigSpec must be set.
		if spec.ConfigSpec.Name == "" {
			body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "configSpec.name"})
			return body
		}

		// Pick a random candidate pool; its owner is the target cluster.
		pool := ctx.Map.Get(pools[rand.Intn(len(pools))]).(*ResourcePool)
		cluster := ctx.Map.Get(pool.Owner).(*ClusterComputeResource)

		// A cluster with no hosts cannot place a VM; report a per-spec fault
		// and keep going.
		if len(cluster.Host) == 0 {
			addPlacementFault(body, spec.ConfigSpec.Name, nil, pool.Self)
			continue
		}

		// The simulator always returns a single, fixed-rating recommendation
		// targeting the chosen cluster.
		reco := types.ClusterRecommendation{
			Key:        "1",
			Type:       "V1",
			Time:       time.Now(),
			Rating:     1,
			Reason:     string(types.RecommendationReasonCodeXClusterPlacement),
			ReasonText: string(types.RecommendationReasonCodeXClusterPlacement),
			Target:     &cluster.Self,
		}

		placementAction := generateInitialPlacementAction(ctx, &spec.ConfigSpec, pool, cluster, hostRequired, datastoreRequired)

		reco.Action = append(reco.Action, placementAction)

		// No Vm reference here: the VM does not exist yet for CreateAndPowerOn.
		body.Res.Returnval.PlacementInfos = append(body.Res.Returnval.PlacementInfos,
			types.PlaceVmsXClusterResultPlacementInfo{
				VmName:         spec.ConfigSpec.Name,
				Recommendation: reco,
			},
		)
	}
	return body
}
  1088  
  1089  func (f *Folder) PlaceVmsXCluster(ctx *Context, req *types.PlaceVmsXCluster) soap.HasFault {
  1090  	body := new(methods.PlaceVmsXClusterBody)
  1091  
  1092  	// Reject the request if it is against any folder other than the root folder.
  1093  	if req.This != ctx.Map.content().RootFolder {
  1094  		body.Fault_ = Fault("", new(types.InvalidRequest))
  1095  		return body
  1096  	}
  1097  
  1098  	pools := req.PlacementSpec.ResourcePools
  1099  	specs := req.PlacementSpec.VmPlacementSpecs
  1100  
  1101  	if len(pools) == 0 {
  1102  		body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "resourcePools"})
  1103  		return body
  1104  	}
  1105  
  1106  	// Do not allow duplicate clusters.
  1107  	clusters := map[mo.Reference]struct{}{}
  1108  	for _, obj := range pools {
  1109  		o := ctx.Map.Get(obj)
  1110  		pool, ok := o.(*ResourcePool)
  1111  		if !ok {
  1112  			body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "resourcePool"})
  1113  			return body
  1114  		}
  1115  		if _, exists := clusters[pool.Owner]; exists {
  1116  			body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "clusters"})
  1117  			return body
  1118  		}
  1119  		clusters[pool.Owner] = struct{}{}
  1120  	}
  1121  
  1122  	// MVP: Only a single VM placement spec is supported.
  1123  	if len(specs) != 1 {
  1124  		body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "vmPlacementSpecs"})
  1125  		return body
  1126  	}
  1127  
  1128  	placementType := types.PlaceVmsXClusterSpecPlacementType(req.PlacementSpec.PlacementType)
  1129  
  1130  	// An empty placement type defaults to CreateAndPowerOn.
  1131  	if req.PlacementSpec.PlacementType == "" {
  1132  		placementType = types.PlaceVmsXClusterSpecPlacementTypeCreateAndPowerOn
  1133  	}
  1134  
  1135  	switch placementType {
  1136  	case types.PlaceVmsXClusterSpecPlacementTypeCreateAndPowerOn:
  1137  		return generateRecommendationForCreateAndPowerOn(ctx, req)
  1138  	case types.PlaceVmsXClusterSpecPlacementTypeRelocate:
  1139  		return generateRecommendationForRelocate(ctx, req)
  1140  	case types.PlaceVmsXClusterSpecPlacementTypeReconfigure:
  1141  		return generateRecommendationForReconfigure(ctx, req)
  1142  	}
  1143  
  1144  	body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "placementType"})
  1145  	return body
  1146  }