github.com/jlmucb/cloudproxy@v0.0.0-20170830161738-b5aa0b619bc4/cpvmm/vmm/vmx/vmcs_sw_object.c (about)

     1  /*
     2   * Copyright (c) 2013 Intel Corporation
     3   *
     4   * Licensed under the Apache License, Version 2.0 (the "License");
     5   * you may not use this file except in compliance with the License.
     6   * You may obtain a copy of the License at
     7   *     http://www.apache.org/licenses/LICENSE-2.0
     8   * Unless required by applicable law or agreed to in writing, software
     9   * distributed under the License is distributed on an "AS IS" BASIS,
    10   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    11   * See the License for the specific language governing permissions and
    12   * limitations under the License.
    13   */
    14  
    15  #include "file_codes.h"
    16  #define VMM_DEADLOOP()          VMM_DEADLOOP_LOG(VMCS_SW_OBJECT_C)
    17  #define VMM_ASSERT(__condition) VMM_ASSERT_LOG(VMCS_SW_OBJECT_C, __condition)
    18  #include "vmm_defs.h"
    19  #include "vmm_dbg.h"
    20  #include "memory_allocator.h"
    21  #include "cache64.h"
    22  #include "vmm_objects.h"
    23  #include "guest.h"
    24  #include "guest_cpu.h"
    25  #include "gpm_api.h"
    26  #include "host_memory_manager_api.h"
    27  #include "vmcs_api.h"
    28  #include "vmcs_internal.h"
    29  #ifdef JLMDEBUG
    30  #include "jlmdebug.h"
    31  #endif
    32  
    33  #pragma warning( disable: 4100 )
    34  
// Software-only VMCS representation: field reads/writes go through a 64-bit
// per-field cache rather than real VMX hardware.
typedef struct _VMCS_SOFTWARE_OBJECT {
    struct _VMCS_OBJECT vmcs_base[1];   // base object; must stay first so VMCS_OBJECT* casts work
    CACHE64_OBJECT      cache;          // per-field value + dirty-bit cache (VMCS_FIELD_COUNT entries)
    GUEST_CPU_HANDLE    gcpu;           // owning guest CPU (returned by vmcs_sw_get_owner)
    ADDRESS             gpa;    // if !=0 then it's original GPA
} VMCS_SOFTWARE_OBJECT;
    41  
// ---- VMCS_OBJECT callbacks implemented by the software (cache-backed) VMCS ----
static UINT64   vmcs_sw_read(const struct _VMCS_OBJECT *vmcs, VMCS_FIELD field_id);
static void     vmcs_sw_write(struct _VMCS_OBJECT *vmcs, VMCS_FIELD field_id, UINT64 value);
static void     vmcs_sw_flush_to_cpu(const struct _VMCS_OBJECT *vmcs);
static BOOLEAN  vmcs_sw_is_dirty(const struct _VMCS_OBJECT *vmcs);
static GUEST_CPU_HANDLE vmcs_sw_get_owner(const struct _VMCS_OBJECT *vmcs);

// MSR-list manipulation callbacks; each is a thin adapter over the
// corresponding *_internal helper from vmcs_internal.h.
static void     vmcs_sw_add_msr_to_vmexit_store_list(struct _VMCS_OBJECT *vmcs, UINT32 msr_index, UINT64 value);
static void     vmcs_sw_add_msr_to_vmexit_load_list(struct _VMCS_OBJECT *vmcs, UINT32 msr_index, UINT64 value);
//static void     vmcs_sw_add_msr_to_vmenter_load_list(struct _VMCS_OBJECT *vmcs, UINT32 msr_index, UINT64 value);
static void     vmcs_sw_add_msr_to_vmexit_store_and_vmenter_load_lists(struct _VMCS_OBJECT *vmcs, UINT32 msr_index, UINT64 value);

static void     vmcs_sw_delete_msr_from_vmexit_store_list(struct _VMCS_OBJECT *vmcs, UINT32 msr_index);
static void     vmcs_sw_delete_msr_from_vmexit_load_list(struct _VMCS_OBJECT *vmcs, UINT32 msr_index);
static void     vmcs_sw_delete_msr_from_vmenter_load_list(struct _VMCS_OBJECT *vmcs, UINT32 msr_index);
static void     vmcs_sw_delete_msr_from_vmexit_store_and_vmenter_load_lists(struct _VMCS_OBJECT *vmcs, UINT32 msr_index);

// Level-specific flush/destroy entry points (level 0 = pure software clone,
// level 1 = backed by a guest page).
static void     vmcs_0_flush_to_memory(struct _VMCS_OBJECT *vmcs);
static void     vmcs_1_flush_to_memory(struct _VMCS_OBJECT *vmcs);
static void     vmcs_0_destroy(struct _VMCS_OBJECT *vmcs);
static void     vmcs_1_destroy(struct _VMCS_OBJECT *vmcs);
// Copy the buffer referenced (by HPA) from a VMCS field into 'dst'.
static void     vmcs_copy_extra_buffer( void *dst, const struct _VMCS_OBJECT *vmcs_src,
                    VMCS_FIELD field, UINT32 bytes_to_copy);
    64  
    65  static void vmcs_0_copy_msr_list_to_merged(struct _VMCS_OBJECT *merged_vmcs,
    66                          struct _VMCS_SOFTWARE_OBJECT* sw_vmcs, VMCS_FIELD address_field,
    67                          VMCS_FIELD count_field, VMCS_ADD_MSR_FUNC add_msr_func)
    68  {
    69      IA32_VMX_MSR_ENTRY* msr_list_ptr = (IA32_VMX_MSR_ENTRY*)vmcs_read(sw_vmcs->vmcs_base, address_field);
    70      UINT32 msr_list_count = (UINT32)vmcs_read(sw_vmcs->vmcs_base, count_field);
    71      UINT32 i;
    72  
    73      for (i = 0; i < msr_list_count; i++) {
    74          add_msr_func(merged_vmcs, msr_list_ptr[i].MsrIndex, msr_list_ptr[i].MsrData);
    75      }
    76  }
    77  
    78  static void vmcs_0_take_msr_list_from_merged(VMCS_SOFTWARE_OBJECT *vmcs_0,
    79                    struct _VMCS_OBJECT *merged_vmcs, VMCS_FIELD address_field,
    80                    VMCS_FIELD count_field)
    81  {
    82      UINT64 addr_hpa = vmcs_read(merged_vmcs, address_field);
    83      UINT64 addr_hva;
    84      UINT64 count_value;
    85  
    86      if (VMCS_INVALID_ADDRESS == addr_hpa) {
    87          addr_hva = VMCS_INVALID_ADDRESS;
    88          count_value = 0;
    89      }
    90      else if ( !hmm_hpa_to_hva(addr_hpa, &addr_hva)) {
    91          VMM_LOG(mask_anonymous, level_trace,"%s: Failed translate HPA(%P) to HVA\n", __FUNCTION__);
    92          VMM_DEADLOOP();
    93          addr_hva = VMCS_INVALID_ADDRESS;
    94          count_value = 0;
    95      }
    96      else {
    97          count_value = vmcs_read(merged_vmcs, count_field);
    98          VMM_ASSERT(addr_hva == ALIGN_BACKWARD(addr_hva, sizeof(IA32_VMX_MSR_ENTRY)));
    99      }
   100  
   101      vmcs_write(vmcs_0->vmcs_base, address_field, addr_hva);
   102      vmcs_write(vmcs_0->vmcs_base, count_field, count_value);
   103  }
   104  
   105  struct _VMCS_OBJECT * vmcs_0_create(struct _VMCS_OBJECT *vmcs_origin)
   106  {
   107      VMCS_SOFTWARE_OBJECT   *vmcs_clone;
   108      void                   *io_a_page = NULL;
   109      void                   *io_b_page = NULL;
   110      void                   *msr_page = NULL;
   111      VMCS_FIELD              field_id;
   112  
   113      vmcs_clone = vmm_malloc(sizeof(*vmcs_clone));
   114      if (NULL == vmcs_clone) {
   115          VMM_LOG(mask_anonymous, level_trace,"[vmcs] %s: Allocation failed\n", __FUNCTION__);
   116          return NULL;
   117      }
   118  
   119      vmcs_clone->cache = cache64_create(VMCS_FIELD_COUNT);
   120      if (NULL == vmcs_clone->cache) {
   121          vmm_mfree(vmcs_clone);
   122          VMM_LOG(mask_anonymous, level_trace,"[vmcs] %s: Allocation failed\n", __FUNCTION__);
   123          return NULL;
   124      }
   125  
   126      // allocate VMCS extra pages, which exist at origin VMCS
   127      // and write them back into clone vmcs
   128      // translation HVA->HPA is not necessary, since
   129      // these pages are never applied to hardware
   130  
   131      if (NULL == (io_a_page = vmm_page_alloc(1)) ||  NULL == (io_b_page = vmm_page_alloc(1))
   132            ||  NULL == (msr_page = vmm_page_alloc(1))) {
   133          VMM_LOG(mask_anonymous, level_trace,"[vmcs] %s: Allocation of extra pages failed\n", __FUNCTION__);
   134          if (NULL != io_a_page)              vmm_page_free(io_a_page);
   135          if (NULL != io_b_page)              vmm_page_free(io_b_page);
   136          if (NULL != msr_page)               vmm_page_free(msr_page);
   137          cache64_destroy(vmcs_clone->cache);
   138  
   139          vmm_mfree(vmcs_clone);
   140  
   141          return NULL;
   142      }
   143  
   144      vmcs_clone->gcpu = vmcs_get_owner(vmcs_origin);
   145      vmcs_clone->gpa  = 0;
   146  
   147  #ifdef JLMDEBUG
   148      bprint("about to set vmcs entries in object\n");
   149  #endif
   150      vmcs_clone->vmcs_base->vmcs_read = vmcs_sw_read;
   151      vmcs_clone->vmcs_base->vmcs_write = vmcs_sw_write;
   152      vmcs_clone->vmcs_base->vmcs_flush_to_cpu = vmcs_sw_flush_to_cpu;
   153      vmcs_clone->vmcs_base->vmcs_is_dirty = vmcs_sw_is_dirty;
   154      vmcs_clone->vmcs_base->vmcs_get_owner = vmcs_sw_get_owner;
   155      vmcs_clone->vmcs_base->vmcs_flush_to_memory = vmcs_0_flush_to_memory;
   156      vmcs_clone->vmcs_base->vmcs_destroy = vmcs_0_destroy;
   157  
   158      vmcs_clone->vmcs_base->vmcs_add_msr_to_vmexit_store_list = vmcs_sw_add_msr_to_vmexit_store_list;
   159      vmcs_clone->vmcs_base->vmcs_add_msr_to_vmexit_load_list = vmcs_sw_add_msr_to_vmexit_load_list;
   160      vmcs_clone->vmcs_base->vmcs_add_msr_to_vmenter_load_list = vmcs_sw_add_msr_to_vmexit_load_list;
   161      vmcs_clone->vmcs_base->vmcs_add_msr_to_vmexit_store_and_vmenter_load_list  = vmcs_sw_add_msr_to_vmexit_store_and_vmenter_load_lists;
   162  
   163      vmcs_clone->vmcs_base->vmcs_delete_msr_from_vmexit_store_list = vmcs_sw_delete_msr_from_vmexit_store_list;
   164      vmcs_clone->vmcs_base->vmcs_delete_msr_from_vmexit_load_list = vmcs_sw_delete_msr_from_vmexit_load_list;
   165      vmcs_clone->vmcs_base->vmcs_delete_msr_from_vmenter_load_list = vmcs_sw_delete_msr_from_vmenter_load_list;
   166      vmcs_clone->vmcs_base->vmcs_delete_msr_from_vmexit_store_and_vmenter_load_list  = vmcs_sw_delete_msr_from_vmexit_store_and_vmenter_load_lists;
   167  
   168      vmcs_clone->vmcs_base->level                  = VMCS_LEVEL_0;
   169      vmcs_clone->vmcs_base->skip_access_checking   = FALSE;
   170      vmcs_clone->vmcs_base->signature              = VMCS_SIGNATURE;
   171  
   172      vmcs_init_all_msr_lists(vmcs_clone->vmcs_base);
   173  
   174      // copy all fields as is
   175      for (field_id = (VMCS_FIELD)0; field_id < VMCS_FIELD_COUNT; (VMCS_FIELD)++field_id) {
   176          if (vmcs_field_is_supported(field_id)) {
   177              UINT64 value = vmcs_read(vmcs_origin, field_id);
   178              vmcs_write_nocheck(vmcs_clone->vmcs_base, field_id, value);
   179          }
   180      }
   181  
   182      /* Copy host bitmaps into newly created VMCS#0.
   183      *  Host HPA must be translated to HVA
   184      */
   185      vmcs_copy_extra_buffer(io_a_page, vmcs_origin, VMCS_IO_BITMAP_ADDRESS_A, PAGE_4KB_SIZE);
   186      vmcs_copy_extra_buffer(io_b_page, vmcs_origin, VMCS_IO_BITMAP_ADDRESS_B, PAGE_4KB_SIZE);
   187      vmcs_copy_extra_buffer(msr_page, vmcs_origin, VMCS_MSR_BITMAP_ADDRESS, PAGE_4KB_SIZE);
   188  
   189      // TODO: Copy MSR lists
   190      //vmcs_copy_extra_buffer(msr_vmexit_load_page, vmcs_origin, VMCS_EXIT_MSR_STORE_ADDRESS, 2*PAGE_4KB_SIZE);
   191      //vmcs_copy_extra_buffer(msr_vmexit_store_page, vmcs_origin, VMCS_EXIT_MSR_LOAD_ADDRESS, 2*PAGE_4KB_SIZE);
   192      //vmcs_copy_extra_buffer(msr_vmenter_load_page, vmcs_origin, VMCS_ENTER_MSR_LOAD_ADDRESS, 2*PAGE_4KB_SIZE);
   193  
   194  
   195      // Take all MSR lists from merged
   196      VMM_ASSERT(vmcs_origin->level == VMCS_MERGED);// Assuming that creation is from merged vmcs
   197  
   198      vmcs_0_take_msr_list_from_merged(vmcs_clone, vmcs_origin, VMCS_EXIT_MSR_STORE_ADDRESS, VMCS_EXIT_MSR_STORE_COUNT);
   199      vmcs_0_take_msr_list_from_merged(vmcs_clone, vmcs_origin, VMCS_EXIT_MSR_LOAD_ADDRESS, VMCS_EXIT_MSR_LOAD_COUNT);
   200      vmcs_0_take_msr_list_from_merged(vmcs_clone, vmcs_origin, VMCS_ENTER_MSR_LOAD_ADDRESS, VMCS_ENTER_MSR_LOAD_COUNT);
   201  
   202      vmcs_init_all_msr_lists(vmcs_origin);
   203  
   204      VMM_ASSERT(vmcs_get_owner(vmcs_origin) != NULL);
   205  
   206      // Fill anew MSR lists for merged vmcs
   207      vmcs_0_copy_msr_list_to_merged(vmcs_origin, vmcs_clone, VMCS_EXIT_MSR_STORE_ADDRESS, VMCS_EXIT_MSR_STORE_COUNT, vmcs_add_msr_to_vmexit_store_list);
   208      vmcs_0_copy_msr_list_to_merged(vmcs_origin, vmcs_clone, VMCS_EXIT_MSR_LOAD_ADDRESS, VMCS_EXIT_MSR_LOAD_COUNT, vmcs_add_msr_to_vmexit_load_list);
   209      vmcs_0_copy_msr_list_to_merged(vmcs_origin, vmcs_clone, VMCS_ENTER_MSR_LOAD_ADDRESS, VMCS_ENTER_MSR_LOAD_COUNT, vmcs_add_msr_to_vmenter_load_list);
   210  
   211  
   212      /* update extra pages, which are different for vmcs-0.
   213      * translation HVA->HPA is not necessary, since
   214      * these pages are never applied to hardware.
   215      */
   216      vmcs_write(vmcs_clone->vmcs_base, VMCS_IO_BITMAP_ADDRESS_A,    (UINT64) io_a_page);
   217      vmcs_write(vmcs_clone->vmcs_base, VMCS_IO_BITMAP_ADDRESS_B,    (UINT64) io_b_page);
   218      vmcs_write(vmcs_clone->vmcs_base, VMCS_MSR_BITMAP_ADDRESS,     (UINT64) msr_page);
   219  
   220      return vmcs_clone->vmcs_base;
   221  }
   222  
   223  
   224  void vmcs_copy_extra_buffer( void *dst, const struct _VMCS_OBJECT *vmcs_src,
   225                               VMCS_FIELD field, UINT32 bytes_to_copy)
   226  {
   227      ADDRESS hpa, hva;
   228  
   229      hpa = vmcs_read(vmcs_src, field);
   230      if (TRUE == hmm_hpa_to_hva(hpa, &hva)) {
   231          vmm_memcpy(dst, (void *) hva, bytes_to_copy);
   232      }
   233      else {
   234          vmm_memset(dst, 0, PAGE_4KB_SIZE);
   235      }
   236  }
   237  
   238  
   239  void vmcs_0_destroy(struct _VMCS_OBJECT *vmcs)
   240  {
   241      struct _VMCS_SOFTWARE_OBJECT *p_vmcs = (struct _VMCS_SOFTWARE_OBJECT *) vmcs;
   242      void *page;
   243  
   244      VMM_ASSERT(p_vmcs);
   245  
   246      page = (void *) vmcs_read(vmcs, VMCS_IO_BITMAP_ADDRESS_A);
   247      if (NULL != page) vmm_page_free(page);
   248      page = (void *) vmcs_read(vmcs, VMCS_IO_BITMAP_ADDRESS_B);
   249      if (NULL != page) vmm_page_free(page);
   250      page = (void *) vmcs_read(vmcs, VMCS_MSR_BITMAP_ADDRESS);
   251      if (NULL != page) vmm_page_free(page);
   252      vmcs_destroy_all_msr_lists_internal(vmcs, FALSE);
   253      cache64_destroy(p_vmcs->cache);
   254  }
   255  
   256  
   257  void vmcs_0_flush_to_memory(struct _VMCS_OBJECT *vmcs)
   258  {
   259      VMM_ASSERT(vmcs);
   260  }
   261  
   262  
   263  struct _VMCS_OBJECT * vmcs_1_create(GUEST_CPU_HANDLE gcpu, ADDRESS gpa)
   264  {
   265      struct _VMCS_SOFTWARE_OBJECT *p_vmcs;
   266      ADDRESS hva;
   267      BOOLEAN status;
   268      GUEST_HANDLE guest;
   269  
   270      VMM_ASSERT(gcpu);
   271  
   272      guest = gcpu_guest_handle(gcpu);
   273      VMM_ASSERT(guest);
   274  
   275      if (0 != gpa) {  // gpa==0 means that VMCS-1 creation was requested for emulated guest
   276          // validate alignment
   277          if (0 != (gpa & PAGE_4KB_MASK)) {
   278              VMM_LOG(mask_anonymous, level_trace,"[vmcs] %s: GPA is NOT 4K aligned\n", __FUNCTION__);
   279              return NULL;
   280          }
   281          // map to host address space
   282          status = gpm_gpa_to_hva(gcpu_get_current_gpm(guest), gpa, &hva);
   283  
   284          if (TRUE != status) {
   285              VMM_LOG(mask_anonymous, level_trace,"[vmcs] %s: Failed to translate GPA to HVA\n", __FUNCTION__);
   286              return NULL;
   287          }
   288          // check memory type TBD
   289      }
   290  
   291      p_vmcs = vmm_malloc(sizeof(*p_vmcs));
   292      if (NULL == p_vmcs) {
   293          VMM_LOG(mask_anonymous, level_trace,"[vmcs] %s: Allocation failed\n", __FUNCTION__);
   294          return NULL;
   295      }
   296  
   297      p_vmcs->cache = cache64_create(VMCS_FIELD_COUNT);
   298      if (NULL == p_vmcs->cache) {
   299          vmm_mfree(p_vmcs);
   300          VMM_LOG(mask_anonymous, level_trace,"[vmcs] %s: Allocation failed\n", __FUNCTION__);
   301          return NULL;
   302      }
   303  
   304      p_vmcs->gcpu = gcpu;
   305      p_vmcs->gpa  = gpa;
   306  
   307      p_vmcs->vmcs_base->vmcs_read = vmcs_sw_read;
   308      p_vmcs->vmcs_base->vmcs_write = vmcs_sw_write;
   309      p_vmcs->vmcs_base->vmcs_flush_to_cpu = vmcs_sw_flush_to_cpu;
   310      p_vmcs->vmcs_base->vmcs_is_dirty = vmcs_sw_is_dirty;
   311      p_vmcs->vmcs_base->vmcs_get_owner = vmcs_sw_get_owner;
   312      p_vmcs->vmcs_base->vmcs_flush_to_memory   = vmcs_1_flush_to_memory;
   313      p_vmcs->vmcs_base->vmcs_destroy           = vmcs_1_destroy;
   314      p_vmcs->vmcs_base->vmcs_add_msr_to_vmexit_store_list = NULL; // should not be used for level1
   315      p_vmcs->vmcs_base->vmcs_add_msr_to_vmexit_load_list = NULL; // should not be used for level1
   316      p_vmcs->vmcs_base->vmcs_add_msr_to_vmenter_load_list = NULL; // should not be used for level1
   317      p_vmcs->vmcs_base->level                  = VMCS_LEVEL_1;
   318      p_vmcs->vmcs_base->skip_access_checking   = TRUE;
   319      p_vmcs->vmcs_base->signature              = VMCS_SIGNATURE;
   320  
   321      vmcs_init_all_msr_lists(p_vmcs->vmcs_base);
   322  
   323      return p_vmcs->vmcs_base;
   324  }
   325  
   326  
   327  void vmcs_1_destroy(struct _VMCS_OBJECT *vmcs)
   328  {
   329      struct _VMCS_SOFTWARE_OBJECT *p_vmcs = (struct _VMCS_SOFTWARE_OBJECT *) vmcs;
   330      VMM_ASSERT(p_vmcs);
   331      vmcs_1_flush_to_memory(vmcs);
   332      cache64_destroy(p_vmcs->cache);
   333      // return VMCS page to the guest. TBD
   334  }
   335  
   336  void vmcs_1_flush_to_memory(struct _VMCS_OBJECT *vmcs)
   337  {
   338      struct _VMCS_SOFTWARE_OBJECT *p_vmcs = (struct _VMCS_SOFTWARE_OBJECT *) vmcs;
   339  
   340      VMM_ASSERT(p_vmcs);
   341  
   342      do {    // do only once
   343          GUEST_HANDLE guest;
   344          ADDRESS      hva;
   345          BOOLEAN      status;
   346  
   347          if (0 == p_vmcs->gpa) {
   348              break;  // nothing to do. It's an emulated guest
   349          }
   350  
   351          if (NULL == p_vmcs->gcpu) {
   352              VMM_LOG(mask_anonymous, level_trace,"[vmcs] %s: GCPU is NULL\n", __FUNCTION__);
   353  	    VMM_ASSERT(p_vmcs->gcpu);
   354              break;
   355          }
   356  
   357          guest = gcpu_guest_handle(p_vmcs->gcpu);
   358          if (NULL == guest) {
   359              VMM_LOG(mask_anonymous, level_trace,"[vmcs] %s: Guest is NULL\n", __FUNCTION__);
   360  	    VMM_ASSERT(guest);
   361              break;
   362          }
   363  
   364          status = gpm_gpa_to_hva(gcpu_get_current_gpm(guest), p_vmcs->gpa, &hva);
   365  
   366          VMM_ASSERT(TRUE == status);
   367          VMM_ASSERT(p_vmcs->gpa & PAGE_4KB_MASK);
   368  
   369          // check memory type TBD
   370          if (TRUE == status && 0 == (p_vmcs->gpa & PAGE_4KB_MASK)) {
   371              cache64_flush_to_memory(p_vmcs->cache, (void *)hva, PAGE_4KB_SIZE);
   372          }
   373          else {
   374              VMM_LOG(mask_anonymous, level_trace,"[vmcs] %s: Failed to map GPA to HVA\n", __FUNCTION__);
   375          }
   376  
   377      } while (0);
   378  
   379  }
   380  
   381  
   382  void vmcs_sw_write(struct _VMCS_OBJECT *vmcs, VMCS_FIELD field_id, UINT64 value)
   383  {
   384      struct _VMCS_SOFTWARE_OBJECT *p_vmcs = (struct _VMCS_SOFTWARE_OBJECT *) vmcs;
   385      VMM_ASSERT(p_vmcs);
   386      cache64_write(p_vmcs->cache, value, (UINT32 )field_id);
   387  }
   388  
   389  
   390  UINT64 vmcs_sw_read(const struct _VMCS_OBJECT *vmcs, VMCS_FIELD field_id)
   391  {
   392      struct _VMCS_SOFTWARE_OBJECT *p_vmcs = (struct _VMCS_SOFTWARE_OBJECT *) vmcs;
   393      UINT64 value;
   394      VMM_ASSERT(p_vmcs);
   395      if (FALSE == cache64_read(p_vmcs->cache, &value, (UINT32 )field_id)) {
   396          value = 0;
   397      }
   398      return value;
   399  }
   400  
   401  
   402  void vmcs_sw_flush_to_cpu(const struct _VMCS_OBJECT *vmcs)
   403  {
   404      struct _VMCS_SOFTWARE_OBJECT *p_vmcs = (struct _VMCS_SOFTWARE_OBJECT *) vmcs;
   405      VMM_ASSERT(p_vmcs);
   406      cache64_flush_dirty(p_vmcs->cache, VMCS_FIELD_COUNT, NULL, NULL);    // just clean dirty bits
   407  }
   408  
   409  
   410  BOOLEAN vmcs_sw_is_dirty(const struct _VMCS_OBJECT *vmcs)
   411  {
   412      struct _VMCS_SOFTWARE_OBJECT *p_vmcs = (struct _VMCS_SOFTWARE_OBJECT *) vmcs;
   413      VMM_ASSERT(p_vmcs);
   414      return cache64_is_dirty(p_vmcs->cache);
   415  }
   416  
   417  
   418  GUEST_CPU_HANDLE vmcs_sw_get_owner(const struct _VMCS_OBJECT *vmcs)
   419  {
   420      struct _VMCS_SOFTWARE_OBJECT *p_vmcs = (struct _VMCS_SOFTWARE_OBJECT *) vmcs;
   421      VMM_ASSERT(p_vmcs);
   422      return p_vmcs->gcpu;
   423  }
   424  
// VMCS_OBJECT callback: add an MSR to the VMEXIT store list.
// Thin adapter over the internal helper; the FALSE flag's semantics are
// defined by vmcs_internal.h (NOTE(review): confirm meaning there).
static void vmcs_sw_add_msr_to_vmexit_store_list(struct _VMCS_OBJECT *vmcs, UINT32 msr_index, 
                                                UINT64 value)
{
    vmcs_add_msr_to_vmexit_store_list_internal(vmcs, msr_index, value, FALSE);
}
   430  
// VMCS_OBJECT callback: add an MSR to the VMEXIT load list (thin adapter
// over the internal helper from vmcs_internal.h).
static void vmcs_sw_add_msr_to_vmexit_load_list(struct _VMCS_OBJECT *vmcs, UINT32 msr_index, 
                                                UINT64 value)
{
    vmcs_add_msr_to_vmexit_load_list_internal(vmcs, msr_index, value, FALSE);
}
   436  
#if 0   // Debug support
// Disabled wrapper for adding an MSR to the VMENTER load list; its forward
// declaration near the top of the file is commented out as well.
void vmcs_sw_add_msr_to_vmenter_load_list(struct _VMCS_OBJECT *vmcs, 
                                          UINT32 msr_index, UINT64 value)
{
    vmcs_add_msr_to_vmenter_load_list_internal(vmcs, msr_index, value, FALSE);
}
#endif
   444  
// VMCS_OBJECT callback: add an MSR to both the VMEXIT store list and the
// VMENTER load list in one operation (thin adapter over the internal helper).
static void vmcs_sw_add_msr_to_vmexit_store_and_vmenter_load_lists(struct _VMCS_OBJECT *vmcs, 
                                                UINT32 msr_index, UINT64 value)
{
    vmcs_add_msr_to_vmexit_store_and_vmenter_load_lists_internal(vmcs, msr_index, value, FALSE);
}
   450  
// VMCS_OBJECT callback: remove an MSR from the VMEXIT store list.
static void vmcs_sw_delete_msr_from_vmexit_store_list(struct _VMCS_OBJECT *vmcs, UINT32 msr_index)
{
    vmcs_delete_msr_from_vmexit_store_list_internal(vmcs, msr_index, FALSE);
}
   455  
// VMCS_OBJECT callback: remove an MSR from the VMEXIT load list.
static void vmcs_sw_delete_msr_from_vmexit_load_list(struct _VMCS_OBJECT *vmcs, UINT32 msr_index)
{
    vmcs_delete_msr_from_vmexit_load_list_internal(vmcs, msr_index, FALSE);
}
   460  
// VMCS_OBJECT callback: remove an MSR from the VMENTER load list.
static void vmcs_sw_delete_msr_from_vmenter_load_list(struct _VMCS_OBJECT *vmcs, UINT32 msr_index)
{
    vmcs_delete_msr_from_vmenter_load_list_internal(vmcs, msr_index, FALSE);
}
   465  
// VMCS_OBJECT callback: remove an MSR from both the VMEXIT store list and
// the VMENTER load list in one operation.
static void vmcs_sw_delete_msr_from_vmexit_store_and_vmenter_load_lists(struct _VMCS_OBJECT *vmcs, UINT32 msr_index)
{
    vmcs_delete_msr_from_vmexit_store_and_vmenter_load_lists_internal(vmcs, msr_index, FALSE);
}
   470