github.com/vmware/govmomi@v0.51.0/simulator/host_system.go (about)

     1  // © Broadcom. All Rights Reserved.
     2  // The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
     3  // SPDX-License-Identifier: Apache-2.0
     4  
     5  package simulator
     6  
     7  import (
     8  	"fmt"
     9  	"net"
    10  	"os"
    11  	"sync"
    12  	"time"
    13  
    14  	"github.com/vmware/govmomi/simulator/esx"
    15  	"github.com/vmware/govmomi/vim25/methods"
    16  	"github.com/vmware/govmomi/vim25/mo"
    17  	"github.com/vmware/govmomi/vim25/soap"
    18  	"github.com/vmware/govmomi/vim25/types"
    19  )
    20  
var (
	// hostPortUnique, when enabled via the VCSIM_HOST_PORT_UNIQUE environment
	// variable, causes each simulated host to be assigned a unique port.
	hostPortUnique = os.Getenv("VCSIM_HOST_PORT_UNIQUE") == "true"

	// globalLock guards globalHostCount.
	globalLock sync.Mutex
	// globalHostCount is used to construct unique hostnames. Should be consumed under globalLock.
	globalHostCount = 0
)
    28  
// HostSystem simulates the vim.HostSystem managed object.
type HostSystem struct {
	mo.HostSystem

	sh  *simHost // container-backed simulation host; set by configure
	mme *ManagedMethodExecuter
	dtm *DynamicTypeManager

	// canned response returned by QueryTpmAttestationReport when simulating VC
	types.QueryTpmAttestationReportResponse
}
    38  
    39  func asHostSystemMO(obj mo.Reference) (*mo.HostSystem, bool) {
    40  	h, ok := getManagedObject(obj).Addr().Interface().(*mo.HostSystem)
    41  	return h, ok
    42  }
    43  
// NewHostSystem constructs a HostSystem from the given template, giving the
// new host its own copies of the mutable template state (Summary.Hardware,
// Hardware, Capability, Config) and registering its per-host config manager
// objects (datastore/network/storage systems, etc.) with ctx.Map.
func NewHostSystem(ctx *Context, host mo.HostSystem) *HostSystem {
	if hostPortUnique { // configure unique port for each host
		// NOTE(review): this increments the shared esx.HostSystem template
		// without holding a lock — presumably callers are serialized; confirm.
		port := &esx.HostSystem.Summary.Config.Port
		*port++
		host.Summary.Config.Port = *port
	}

	now := time.Now()

	hs := &HostSystem{
		HostSystem: host,
	}

	hs.Name = hs.Summary.Config.Name
	// Summary.Runtime aliases Runtime so the two views never diverge.
	hs.Summary.Runtime = &hs.Runtime
	hs.Summary.Runtime.BootTime = &now

	// shallow copy Summary.Hardware, as each host will be assigned its own .Uuid
	hardware := *host.Summary.Hardware
	hs.Summary.Hardware = &hardware

	if hs.Hardware == nil {
		// shallow copy Hardware, as each host will be assigned its own .Uuid
		info := *esx.HostHardwareInfo
		hs.Hardware = &info
	}
	if hs.Capability == nil {
		capability := *esx.HostCapability
		hs.Capability = &capability
	}

	// deep copy Config so later per-host mutation cannot leak into the template
	cfg := new(types.HostConfigInfo)
	deepCopy(hs.Config, cfg)
	hs.Config = cfg

	// copy over the reference advanced options so each host can have its own, allowing hosts to be configured for
	// container backing individually
	deepCopy(esx.AdvancedOptions, &cfg.Option)

	// add a supported option to the AdvancedOption manager
	simOption := types.OptionDef{ElementDescription: types.ElementDescription{Key: advOptContainerBackingImage}}
	// TODO: how do we enter patterns here? Or should we stick to a list in the value?
	// patterns become necessary if we want to enforce correctness on options for RUN.underlay.<pnic> or allow RUN.port.xxx
	hs.Config.OptionDef = append(hs.Config.OptionDef, simOption)

	// each ConfigManager field gets a freshly registered managed object whose
	// reference is written back through the double pointer
	config := []struct {
		ref **types.ManagedObjectReference
		obj mo.Reference
	}{
		{&hs.ConfigManager.DatastoreSystem, &HostDatastoreSystem{Host: &hs.HostSystem}},
		{&hs.ConfigManager.NetworkSystem, NewHostNetworkSystem(&hs.HostSystem)},
		{&hs.ConfigManager.VirtualNicManager, NewHostVirtualNicManager(&hs.HostSystem)},
		{&hs.ConfigManager.AdvancedOption, NewOptionManager(nil, nil, &hs.Config.Option)},
		{&hs.ConfigManager.FirewallSystem, NewHostFirewallSystem(&hs.HostSystem)},
		{&hs.ConfigManager.StorageSystem, NewHostStorageSystem(&hs.HostSystem)},
		{&hs.ConfigManager.CertificateManager, NewHostCertificateManager(ctx, &hs.HostSystem)},
	}

	for _, c := range config {
		ref := ctx.Map.Put(c.obj).Reference()

		*c.ref = &ref
	}

	return hs
}
   110  
   111  func (h *HostSystem) configure(ctx *Context, spec types.HostConnectSpec, connected bool) {
   112  	h.Runtime.ConnectionState = types.HostSystemConnectionStateDisconnected
   113  	if connected {
   114  		h.Runtime.ConnectionState = types.HostSystemConnectionStateConnected
   115  	}
   116  
   117  	// lets us construct non-conflicting hostname automatically if omitted
   118  	// does not use the unique port instead to avoid constraints on port, such as >1024
   119  
   120  	globalLock.Lock()
   121  	instanceID := globalHostCount
   122  	globalHostCount++
   123  	globalLock.Unlock()
   124  
   125  	if spec.HostName == "" {
   126  		spec.HostName = fmt.Sprintf("esx-%d", instanceID)
   127  	} else if net.ParseIP(spec.HostName) != nil {
   128  		h.Config.Network.Vnic[0].Spec.Ip.IpAddress = spec.HostName
   129  	}
   130  
   131  	h.Summary.Config.Name = spec.HostName
   132  	h.Name = h.Summary.Config.Name
   133  	id := newUUID(h.Name)
   134  	h.Summary.Hardware.Uuid = id
   135  	h.Hardware.SystemInfo.Uuid = id
   136  
   137  	var err error
   138  	h.sh, err = createSimulationHost(ctx, h)
   139  	if err != nil {
   140  		panic("failed to create simulation host and no path to return error: " + err.Error())
   141  	}
   142  }
   143  
   144  // configureContainerBacking sets up _this_ host for simulation using a container backing.
   145  // Args:
   146  //
   147  //		image - the container image with which to simulate the host
   148  //		mounts - array of mount info that should be translated into /vmfs/volumes/... mounts backed by container volumes
   149  //	 	networks - names of bridges to use for underlays. Will create a pNIC for each. The first will be treated as the management network.
   150  //
   151  // Restrictions adopted from createSimulationHost:
   152  // * no mock of VLAN connectivity
   153  // * only a single vmknic, used for "the management IP"
   154  // * pNIC connectivity does not directly impact VMs/vmks using it as uplink
   155  //
   156  // The pnics will be named using standard pattern, ie. vmnic0, vmnic1, ...
   157  // This will sanity check the NetConfig for "management" nicType to ensure that it maps through PortGroup->vSwitch->pNIC to vmnic0.
   158  func (h *HostSystem) configureContainerBacking(ctx *Context, image string, mounts []types.HostFileSystemMountInfo, networks ...string) error {
   159  	option := &types.OptionValue{
   160  		Key:   advOptContainerBackingImage,
   161  		Value: image,
   162  	}
   163  
   164  	advOpts := ctx.Map.Get(h.ConfigManager.AdvancedOption.Reference()).(*OptionManager)
   165  	fault := advOpts.UpdateOptions(&types.UpdateOptions{ChangedValue: []types.BaseOptionValue{option}}).Fault()
   166  	if fault != nil {
   167  		panic(fault)
   168  	}
   169  
   170  	h.Config.FileSystemVolume = nil
   171  	if mounts != nil {
   172  		h.Config.FileSystemVolume = &types.HostFileSystemVolumeInfo{
   173  			VolumeTypeList: []string{"VMFS", "OTHER"},
   174  			MountInfo:      mounts,
   175  		}
   176  	}
   177  
   178  	// force at least a management network
   179  	if len(networks) == 0 {
   180  		networks = []string{defaultUnderlayBridgeName}
   181  	}
   182  
   183  	// purge pNICs from the template - it makes no sense to keep them for a sim host
   184  	h.Config.Network.Pnic = make([]types.PhysicalNic, len(networks))
   185  
   186  	// purge any IPs and MACs associated with existing NetConfigs for the host
   187  	for cfgIdx := range h.Config.VirtualNicManagerInfo.NetConfig {
   188  		config := &h.Config.VirtualNicManagerInfo.NetConfig[cfgIdx]
   189  		for candidateIdx := range config.CandidateVnic {
   190  			candidate := &config.CandidateVnic[candidateIdx]
   191  			candidate.Spec.Ip.IpAddress = "0.0.0.0"
   192  			candidate.Spec.Ip.SubnetMask = "0.0.0.0"
   193  			candidate.Spec.Mac = "00:00:00:00:00:00"
   194  		}
   195  	}
   196  
   197  	// The presence of a pNIC is used to indicate connectivity to a specific underlay. We construct an empty pNIC entry and specify the underly via
   198  	// host.ConfigManager.AdvancedOptions. The pNIC will be populated with the MAC (accurate) and IP (divergence - we need to stash it somewhere) for the veth.
   199  	// We create a NetConfig "management" entry for the first pNIC - this will be populated with the IP of the "host" container.
   200  
   201  	// create a pNIC for each underlay
   202  	for i, net := range networks {
   203  		name := fmt.Sprintf("vmnic%d", i)
   204  
   205  		// we don't have a natural field for annotating which pNIC is connected to which network, so stash it in an adv option.
   206  		option := &types.OptionValue{
   207  			Key:   advOptPrefixPnicToUnderlayPrefix + name,
   208  			Value: net,
   209  		}
   210  		fault = advOpts.UpdateOptions(&types.UpdateOptions{ChangedValue: []types.BaseOptionValue{option}}).Fault()
   211  		if fault != nil {
   212  			panic(fault)
   213  		}
   214  
   215  		h.Config.Network.Pnic[i] = types.PhysicalNic{
   216  			Key:             "key-vim.host.PhysicalNic-" + name,
   217  			Device:          name,
   218  			Pci:             fmt.Sprintf("0000:%2d:00.0", i+1),
   219  			Driver:          "vcsim-bridge",
   220  			DriverVersion:   "1.2.10.0",
   221  			FirmwareVersion: "1.57, 0x80000185",
   222  			LinkSpeed: &types.PhysicalNicLinkInfo{
   223  				SpeedMb: 10000,
   224  				Duplex:  true,
   225  			},
   226  			ValidLinkSpecification: []types.PhysicalNicLinkInfo{
   227  				{
   228  					SpeedMb: 10000,
   229  					Duplex:  true,
   230  				},
   231  			},
   232  			Spec: types.PhysicalNicSpec{
   233  				Ip:                            &types.HostIpConfig{},
   234  				LinkSpeed:                     (*types.PhysicalNicLinkInfo)(nil),
   235  				EnableEnhancedNetworkingStack: types.NewBool(false),
   236  				EnsInterruptEnabled:           types.NewBool(false),
   237  			},
   238  			WakeOnLanSupported: false,
   239  			Mac:                "00:00:00:00:00:00",
   240  			FcoeConfiguration: &types.FcoeConfig{
   241  				PriorityClass: 3,
   242  				SourceMac:     "00:00:00:00:00:00",
   243  				VlanRange: []types.FcoeConfigVlanRange{
   244  					{},
   245  				},
   246  				Capabilities: types.FcoeConfigFcoeCapabilities{},
   247  				FcoeActive:   false,
   248  			},
   249  			VmDirectPathGen2Supported:             types.NewBool(false),
   250  			VmDirectPathGen2SupportedMode:         "",
   251  			ResourcePoolSchedulerAllowed:          types.NewBool(false),
   252  			ResourcePoolSchedulerDisallowedReason: nil,
   253  			AutoNegotiateSupported:                types.NewBool(true),
   254  			EnhancedNetworkingStackSupported:      types.NewBool(false),
   255  			EnsInterruptSupported:                 types.NewBool(false),
   256  			RdmaDevice:                            "",
   257  			DpuId:                                 "",
   258  		}
   259  	}
   260  
   261  	// sanity check that everything's hung together sufficiently well
   262  	details, err := h.getNetConfigInterface(ctx, "management")
   263  	if err != nil {
   264  		return err
   265  	}
   266  
   267  	if details.uplink == nil || details.uplink.Device != "vmnic0" {
   268  		return fmt.Errorf("Config provided for host %s does not result in a consistent 'management' NetConfig that's bound to 'vmnic0'", h.Name)
   269  	}
   270  
   271  	return nil
   272  }
   273  
// netConfigDetails packages up all the related network entities associated with a NetConfig binding,
// cross-linking the serialized (pointer-free) structures held in HostSystem.Config.
type netConfigDetails struct {
	nicType   string                             // the NetConfig nicType queried (eg. "management")
	netconfig *types.VirtualNicManagerNetConfig  // matching NetConfig entry
	vmk       *types.HostVirtualNic              // selected vNIC for the NetConfig
	netstack  *types.HostNetStackInstance        // netstack the vNIC belongs to
	portgroup *types.HostPortGroup               // portgroup the vNIC is attached to
	vswitch   *types.HostVirtualSwitch           // vSwitch backing the portgroup
	uplink    *types.PhysicalNic                 // pNIC uplink of the vSwitch
}
   284  
   285  // getNetConfigInterface returns the set of constructs active for a given nicType (eg. "management", "vmotion")
   286  // This method is provided because the Config structure held by HostSystem is heavily interconnected but serialized and not cross-linked with pointers.
   287  // As such there's a _lot_ of cross-referencing that needs to be done to navigate.
   288  // The pNIC returned is the uplink associated with the vSwitch for the netconfig
   289  func (h *HostSystem) getNetConfigInterface(ctx *Context, nicType string) (*netConfigDetails, error) {
   290  	details := &netConfigDetails{
   291  		nicType: nicType,
   292  	}
   293  
   294  	for i := range h.Config.VirtualNicManagerInfo.NetConfig {
   295  		if h.Config.VirtualNicManagerInfo.NetConfig[i].NicType == nicType {
   296  			details.netconfig = &h.Config.VirtualNicManagerInfo.NetConfig[i]
   297  			break
   298  		}
   299  	}
   300  	if details.netconfig == nil {
   301  		return nil, fmt.Errorf("no matching NetConfig for NicType=%s", nicType)
   302  	}
   303  
   304  	if details.netconfig.SelectedVnic == nil {
   305  		return details, nil
   306  	}
   307  
   308  	vnicKey := details.netconfig.SelectedVnic[0]
   309  	for i := range details.netconfig.CandidateVnic {
   310  		if details.netconfig.CandidateVnic[i].Key == vnicKey {
   311  			details.vmk = &details.netconfig.CandidateVnic[i]
   312  			break
   313  		}
   314  	}
   315  	if details.vmk == nil {
   316  		panic(fmt.Sprintf("NetConfig for host %s references non-existant vNIC key %s for %s nicType", h.Name, vnicKey, nicType))
   317  	}
   318  
   319  	portgroupName := details.vmk.Portgroup
   320  	netstackKey := details.vmk.Spec.NetStackInstanceKey
   321  
   322  	for i := range h.Config.Network.NetStackInstance {
   323  		if h.Config.Network.NetStackInstance[i].Key == netstackKey {
   324  			details.netstack = &h.Config.Network.NetStackInstance[i]
   325  			break
   326  		}
   327  	}
   328  	if details.netstack == nil {
   329  		panic(fmt.Sprintf("NetConfig for host %s references non-existant NetStack key %s for %s nicType", h.Name, netstackKey, nicType))
   330  	}
   331  
   332  	for i := range h.Config.Network.Portgroup {
   333  		// TODO: confirm correctness of this - seems weird it references the Spec.Name instead of the key like everything else.
   334  		if h.Config.Network.Portgroup[i].Spec.Name == portgroupName {
   335  			details.portgroup = &h.Config.Network.Portgroup[i]
   336  			break
   337  		}
   338  	}
   339  	if details.portgroup == nil {
   340  		panic(fmt.Sprintf("NetConfig for host %s references non-existant PortGroup name %s for %s nicType", h.Name, portgroupName, nicType))
   341  	}
   342  
   343  	vswitchKey := details.portgroup.Vswitch
   344  	for i := range h.Config.Network.Vswitch {
   345  		if h.Config.Network.Vswitch[i].Key == vswitchKey {
   346  			details.vswitch = &h.Config.Network.Vswitch[i]
   347  			break
   348  		}
   349  	}
   350  	if details.vswitch == nil {
   351  		panic(fmt.Sprintf("NetConfig for host %s references non-existant vSwitch key %s for %s nicType", h.Name, vswitchKey, nicType))
   352  	}
   353  
   354  	if len(details.vswitch.Pnic) != 1 {
   355  		// to change this, look at the Active NIC in the NicTeamingPolicy, but for now not worth it
   356  		panic(fmt.Sprintf("vSwitch %s for host %s has multiple pNICs associated which is not supported.", vswitchKey, h.Name))
   357  	}
   358  
   359  	pnicKey := details.vswitch.Pnic[0]
   360  	for i := range h.Config.Network.Pnic {
   361  		if h.Config.Network.Pnic[i].Key == pnicKey {
   362  			details.uplink = &h.Config.Network.Pnic[i]
   363  			break
   364  		}
   365  	}
   366  	if details.uplink == nil {
   367  		panic(fmt.Sprintf("NetConfig for host %s references non-existant pNIC key %s for %s nicType", h.Name, pnicKey, nicType))
   368  	}
   369  
   370  	return details, nil
   371  }
   372  
   373  func (h *HostSystem) event(ctx *Context) types.HostEvent {
   374  	return types.HostEvent{
   375  		Event: types.Event{
   376  			Datacenter:      datacenterEventArgument(ctx, h),
   377  			ComputeResource: h.eventArgumentParent(ctx),
   378  			Host:            h.eventArgument(),
   379  		},
   380  	}
   381  }
   382  
   383  func (h *HostSystem) eventArgument() *types.HostEventArgument {
   384  	return &types.HostEventArgument{
   385  		Host:                h.Self,
   386  		EntityEventArgument: types.EntityEventArgument{Name: h.Name},
   387  	}
   388  }
   389  
   390  func (h *HostSystem) eventArgumentParent(ctx *Context) *types.ComputeResourceEventArgument {
   391  	parent := hostParent(ctx, &h.HostSystem)
   392  
   393  	return &types.ComputeResourceEventArgument{
   394  		ComputeResource:     parent.Self,
   395  		EntityEventArgument: types.EntityEventArgument{Name: parent.Name},
   396  	}
   397  }
   398  
   399  func hostParent(ctx *Context, host *mo.HostSystem) *mo.ComputeResource {
   400  	switch parent := ctx.Map.Get(*host.Parent).(type) {
   401  	case *mo.ComputeResource:
   402  		return parent
   403  	case *ClusterComputeResource:
   404  		return &parent.ComputeResource
   405  	default:
   406  		return nil
   407  	}
   408  }
   409  
   410  func addComputeResource(s *types.ComputeResourceSummary, h *HostSystem) {
   411  	s.TotalCpu += h.Summary.Hardware.CpuMhz
   412  	s.TotalMemory += h.Summary.Hardware.MemorySize
   413  	s.NumCpuCores += h.Summary.Hardware.NumCpuCores
   414  	s.NumCpuThreads += h.Summary.Hardware.NumCpuThreads
   415  	s.EffectiveCpu += h.Summary.Hardware.CpuMhz
   416  	s.EffectiveMemory += h.Summary.Hardware.MemorySize
   417  	s.NumHosts++
   418  	s.NumEffectiveHosts++
   419  	s.OverallStatus = types.ManagedEntityStatusGreen
   420  }
   421  
// CreateDefaultESX creates a standalone ESX
// Adds objects of type: Datacenter, Network, ComputeResource, ResourcePool and HostSystem
func CreateDefaultESX(ctx *Context, f *Folder) {
	dc := NewDatacenter(ctx, &f.Folder)

	host := NewHostSystem(ctx, esx.HostSystem)

	summary := new(types.ComputeResourceSummary)
	addComputeResource(summary, host)

	// the ComputeResource reuses the template host's parent reference and name
	cr := &mo.ComputeResource{
		Summary: summary,
		Network: esx.Datacenter.Network,
	}
	cr.Self = *host.Parent
	cr.Name = host.Name
	cr.Host = append(cr.Host, host.Reference())
	host.Network = cr.Network
	ctx.Map.PutEntity(cr, host)
	cr.EnvironmentBrowser = newEnvironmentBrowser(ctx, host.Reference())

	// the ResourcePool and ComputeResource cross-reference each other
	pool := NewResourcePool(ctx)
	cr.ResourcePool = &pool.Self
	ctx.Map.PutEntity(cr, pool)
	pool.Owner = cr.Self

	folderPutChild(ctx, &ctx.Map.Get(dc.HostFolder).(*Folder).Folder, cr)
}
   450  
// CreateStandaloneHost uses esx.HostSystem as a template, applying the given spec
// and creating the ComputeResource parent and ResourcePool sibling.
// Returns NoHost if spec.HostName is empty.
func CreateStandaloneHost(ctx *Context, f *Folder, spec types.HostConnectSpec) (*HostSystem, types.BaseMethodFault) {
	if spec.HostName == "" {
		return nil, &types.NoHost{}
	}

	template := esx.HostSystem
	network := ctx.Map.getEntityDatacenter(f).defaultNetwork()

	// NOTE(review): spec.UserName appears to be repurposed here to name an
	// existing ComputeResource whose host is used as the clone template —
	// confirm this vcsim-specific convention with callers.
	if p := ctx.Map.FindByName(spec.UserName, f.ChildEntity); p != nil {
		cr := p.(*mo.ComputeResource)
		h := ctx.Map.Get(cr.Host[0])
		// "clone" an existing host from the inventory
		template = h.(*HostSystem).HostSystem
		template.Vm = nil
		network = cr.Network
	}

	pool := NewResourcePool(ctx)
	host := NewHostSystem(ctx, template)
	host.configure(ctx, spec, false)

	summary := new(types.ComputeResourceSummary)
	addComputeResource(summary, host)

	cr := &mo.ComputeResource{
		ConfigurationEx: &types.ComputeResourceConfigInfo{
			VmSwapPlacement: string(types.VirtualMachineConfigInfoSwapPlacementTypeVmDirectory),
		},
		Summary: summary,
	}

	ctx.Map.PutEntity(cr, ctx.Map.NewEntity(host))
	cr.EnvironmentBrowser = newEnvironmentBrowser(ctx, host.Reference())

	// self-references can only be filled in once the entity has been assigned a reference
	host.Summary.Host = &host.Self
	host.Config.Host = host.Self

	ctx.Map.PutEntity(cr, ctx.Map.NewEntity(pool))

	cr.Name = host.Name
	cr.Network = network
	cr.Host = append(cr.Host, host.Reference())
	cr.ResourcePool = &pool.Self

	folderPutChild(ctx, &f.Folder, cr)
	pool.Owner = cr.Self
	host.Network = cr.Network

	return host, nil
}
   503  
   504  func (h *HostSystem) DestroyTask(ctx *Context, req *types.Destroy_Task) soap.HasFault {
   505  	task := CreateTask(h, "destroy", func(t *Task) (types.AnyType, types.BaseMethodFault) {
   506  		if len(h.Vm) > 0 {
   507  			return nil, &types.ResourceInUse{}
   508  		}
   509  
   510  		ctx.postEvent(&types.HostRemovedEvent{HostEvent: h.event(ctx)})
   511  
   512  		f := ctx.Map.getEntityParent(h, "Folder").(*Folder)
   513  		folderRemoveChild(ctx, &f.Folder, h.Reference())
   514  		err := h.sh.remove(ctx)
   515  
   516  		if err != nil {
   517  			return nil, &types.RuntimeFault{
   518  				MethodFault: types.MethodFault{
   519  					FaultCause: &types.LocalizedMethodFault{
   520  						Fault:            &types.SystemErrorFault{Reason: err.Error()},
   521  						LocalizedMessage: err.Error()}}}
   522  		}
   523  
   524  		// TODO: should there be events on lifecycle operations as with VMs?
   525  
   526  		return nil, nil
   527  	})
   528  
   529  	return &methods.Destroy_TaskBody{
   530  		Res: &types.Destroy_TaskResponse{
   531  			Returnval: task.Run(ctx),
   532  		},
   533  	}
   534  }
   535  
   536  func (h *HostSystem) EnterMaintenanceModeTask(ctx *Context, spec *types.EnterMaintenanceMode_Task) soap.HasFault {
   537  	task := CreateTask(h, "enterMaintenanceMode", func(t *Task) (types.AnyType, types.BaseMethodFault) {
   538  		h.Runtime.InMaintenanceMode = true
   539  		return nil, nil
   540  	})
   541  
   542  	return &methods.EnterMaintenanceMode_TaskBody{
   543  		Res: &types.EnterMaintenanceMode_TaskResponse{
   544  			Returnval: task.Run(ctx),
   545  		},
   546  	}
   547  }
   548  
   549  func (h *HostSystem) ExitMaintenanceModeTask(ctx *Context, spec *types.ExitMaintenanceMode_Task) soap.HasFault {
   550  	task := CreateTask(h, "exitMaintenanceMode", func(t *Task) (types.AnyType, types.BaseMethodFault) {
   551  		h.Runtime.InMaintenanceMode = false
   552  		return nil, nil
   553  	})
   554  
   555  	return &methods.ExitMaintenanceMode_TaskBody{
   556  		Res: &types.ExitMaintenanceMode_TaskResponse{
   557  			Returnval: task.Run(ctx),
   558  		},
   559  	}
   560  }
   561  
   562  func (h *HostSystem) DisconnectHostTask(ctx *Context, spec *types.DisconnectHost_Task) soap.HasFault {
   563  	task := CreateTask(h, "disconnectHost", func(t *Task) (types.AnyType, types.BaseMethodFault) {
   564  		h.Runtime.ConnectionState = types.HostSystemConnectionStateDisconnected
   565  		return nil, nil
   566  	})
   567  
   568  	return &methods.DisconnectHost_TaskBody{
   569  		Res: &types.DisconnectHost_TaskResponse{
   570  			Returnval: task.Run(ctx),
   571  		},
   572  	}
   573  }
   574  
   575  func (h *HostSystem) ReconnectHostTask(ctx *Context, spec *types.ReconnectHost_Task) soap.HasFault {
   576  	task := CreateTask(h, "reconnectHost", func(t *Task) (types.AnyType, types.BaseMethodFault) {
   577  		h.Runtime.ConnectionState = types.HostSystemConnectionStateConnected
   578  		return nil, nil
   579  	})
   580  
   581  	return &methods.ReconnectHost_TaskBody{
   582  		Res: &types.ReconnectHost_TaskResponse{
   583  			Returnval: task.Run(ctx),
   584  		},
   585  	}
   586  }
   587  
   588  func (s *HostSystem) QueryTpmAttestationReport(ctx *Context, req *types.QueryTpmAttestationReport) soap.HasFault {
   589  	body := new(methods.QueryTpmAttestationReportBody)
   590  
   591  	if ctx.Map.IsVPX() {
   592  		body.Res = &s.QueryTpmAttestationReportResponse
   593  	} else {
   594  		body.Fault_ = Fault("", new(types.NotSupported))
   595  	}
   596  
   597  	return body
   598  }