github.com/jlmucb/cloudproxy@v0.0.0-20170830161738-b5aa0b619bc4/cpvmm/vmm/vmx/vmcs_actual.c (about)

     1  /*
     2   * Copyright (c) 2013 Intel Corporation
     3   *
     4   * Licensed under the Apache License, Version 2.0 (the "License");
     5   * you may not use this file except in compliance with the License.
     6   * You may obtain a copy of the License at
     7   *     http://www.apache.org/licenses/LICENSE-2.0
     8   * Unless required by applicable law or agreed to in writing, software
     9   * distributed under the License is distributed on an "AS IS" BASIS,
    10   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    11   * See the License for the specific language governing permissions and
    12   * limitations under the License.
    13   */
    14  
    15  
    16  #include "file_codes.h"
    17  #define VMM_DEADLOOP()          VMM_DEADLOOP_LOG(VMCS_ACTUAL_C)
    18  #define VMM_ASSERT(__condition) VMM_ASSERT_LOG(VMCS_ACTUAL_C, __condition)
    19  #include "vmm_defs.h"
    20  #include "vmm_dbg.h"
    21  #include "memory_allocator.h"
    22  #include "cache64.h"
    23  #include "vmm_objects.h"
    24  #include "guest.h"
    25  #include "gpm_api.h"
    26  #include "vmcs_init.h"
    27  #include "hw_vmx_utils.h"
    28  #include "hw_utils.h"
    29  #include "hw_interlocked.h"
    30  #include "gdt.h"
    31  #include "libc.h"
    32  #include "vmcs_actual.h"
    33  #include "vmcs_internal.h"
    34  #include "vmx_nmi.h"
    35  #ifdef JLMDEBUG
    36  #include "jlmdebug.h"
    37  #endif
    38  
    39  #define UPDATE_SUCCEEDED    0
    40  #define UPDATE_FINISHED     1
    41  #define UPDATE_FAILED       2
    42  #define LAUNCHED_FLAG 1
    43  #define ACTIVATED_FLAG 2
    44  #define NEVER_ACTIVATED_FLAG 4
    45  
    46  #define FIELD_IS_HW_WRITABLE(__access) (VMCS_WRITABLE & (__access))
    47  #define NMI_WINDOW_BIT  22
    48  
// A "real" VMCS object: base vtable object plus the hardware VMCS region
// and a software field cache.  Code throughout this file casts
// VMCS_OBJECT* to VMCS_ACTUAL_OBJECT*, so vmcs_base must stay the first member.
typedef struct _VMCS_ACTUAL_OBJECT {
    struct _VMCS_OBJECT     vmcs_base[1];  // base object with function table; MUST be first
    CACHE64_OBJECT      cache;             // software cache of VMCS field values
    ADDRESS             hpa;               // host physical address of hw VMCS region
    ADDRESS             hva;               // host virtual address of hw VMCS region
    GUEST_CPU_HANDLE    gcpu_owner;        // guest CPU owning this VMCS
    UINT32              update_status;     // UPDATE_* transaction state for NMI-safe flush
    UINT16              flags;             // LAUNCHED/ACTIVATED/NEVER_ACTIVATED flags
    CPU_ID              owning_host_cpu; // the VMCS object was launched in this cpu
    UINT8               pad[6];
} VMCS_ACTUAL_OBJECT;
    60  
    61  #define CPU_NEVER_USED ((CPU_ID)-1)
    62  #define HW_VMCS_IS_EMPTY ((UINT64)-1)
    63  
// Human-readable text for VM-instruction error codes, indexed by the
// numeric value read from VMCS_EXIT_INFO_INSTRUCTION_ERROR_CODE
// (see vmcs_last_instruction_error_code and error_processing).
static const char* g_instr_error_message[] = {
    "VMCS_INSTR_NO_INSTRUCTION_ERROR",                                  // VMxxxxx
    "VMCS_INSTR_VMCALL_IN_ROOT_ERROR",                                  // VMCALL
    "VMCS_INSTR_VMCLEAR_INVALID_PHYSICAL_ADDRESS_ERROR",                // VMCLEAR
    "VMCS_INSTR_VMCLEAR_WITH_CURRENT_CONTROLLING_PTR_ERROR",            // VMCLEAR
    "VMCS_INSTR_VMLAUNCH_WITH_NON_CLEAR_VMCS_ERROR",                    // VMLAUNCH
    "VMCS_INSTR_VMRESUME_WITH_NON_LAUNCHED_VMCS_ERROR",                 // VMRESUME
    "VMCS_INSTR_VMRESUME_WITH_NON_CHILD_VMCS_ERROR",                    // VMRESUME
    "VMCS_INSTR_VMENTER_BAD_CONTROL_FIELD_ERROR",                       // VMENTER
    "VMCS_INSTR_VMENTER_BAD_MONITOR_STATE_ERROR",                       // VMENTER
    "VMCS_INSTR_VMPTRLD_INVALID_PHYSICAL_ADDRESS_ERROR",                // VMPTRLD
    "VMCS_INSTR_VMPTRLD_WITH_CURRENT_CONTROLLING_PTR_ERROR",            // VMPTRLD
    "VMCS_INSTR_VMPTRLD_WITH_BAD_REVISION_ID_ERROR",                    // VMPTRLD
    "VMCS_INSTR_VMREAD_OR_VMWRITE_OF_UNSUPPORTED_COMPONENT_ERROR",      // VMREAD
    "VMCS_INSTR_VMWRITE_OF_READ_ONLY_COMPONENT_ERROR",                  // VMWRITE
    "VMCS_INSTR_VMWRITE_INVALID_FIELD_VALUE_ERROR",                     // VMWRITE
    "VMCS_INSTR_VMXON_IN_VMX_ROOT_OPERATION_ERROR",                     // VMXON
    "VMCS_INSTR_VMENTRY_WITH_BAD_OSV_CONTROLLING_VMCS_ERROR",           // VMENTER
    "VMCS_INSTR_VMENTRY_WITH_NON_LAUNCHED_OSV_CONTROLLING_VMCS_ERROR",  // VMENTER
    "VMCS_INSTR_VMENTRY_WITH_NON_ROOT_OSV_CONTROLLING_VMCS_ERROR",      // VMENTER
    "VMCS_INSTR_VMCALL_WITH_NON_CLEAR_VMCS_ERROR",                      // VMCALL
    "VMCS_INSTR_VMCALL_WITH_BAD_VMEXIT_FIELDS_ERROR",                   // VMCALL
    "VMCS_INSTR_VMCALL_WITH_INVALID_MSEG_MSR_ERROR",                    // VMCALL
    "VMCS_INSTR_VMCALL_WITH_INVALID_MSEG_REVISION_ERROR",               // VMCALL
    "VMCS_INSTR_VMXOFF_WITH_CONFIGURED_SMM_MONITOR_ERROR",              // VMXOFF
    "VMCS_INSTR_VMCALL_WITH_BAD_SMM_MONITOR_FEATURES_ERROR",            // VMCALL
    "VMCS_INSTR_RETURN_FROM_SMM_WITH_BAD_VM_EXECUTION_CONTROLS_ERROR",  // Return from SMM
    "VMCS_INSTR_VMENTRY_WITH_EVENTS_BLOCKED_BY_MOV_SS_ERROR",           // VMENTER
    "VMCS_INSTR_BAD_ERROR_CODE",                                        // Bad error code
    "VMCS_INSTR_INVALIDATION_WITH_INVALID_OPERAND"                      // INVEPT, INVVPID
};
    95  
    96  
    97  static UINT64 vmcs_act_read(const struct _VMCS_OBJECT *vmcs, VMCS_FIELD field_id);
    98  static void vmcs_act_write(struct _VMCS_OBJECT *vmcs, VMCS_FIELD field_id, 
    99                              UINT64 value);
   100  static void vmcs_act_flush_to_cpu(const struct _VMCS_OBJECT *vmcs);
   101  static void vmcs_act_flush_to_memory(struct _VMCS_OBJECT *vmcs);
   102  static BOOLEAN vmcs_act_is_dirty(const struct _VMCS_OBJECT *vmcs);
   103  static GUEST_CPU_HANDLE vmcs_act_get_owner(const struct _VMCS_OBJECT *vmcs);
   104  static void vmcs_act_destroy(struct _VMCS_OBJECT *vmcs);
   105  static void vmcs_act_add_msr_to_vmexit_store_list(struct _VMCS_OBJECT *vmcs, 
   106                              UINT32 msr_index, UINT64 value);
   107  static void vmcs_act_add_msr_to_vmexit_load_list(struct _VMCS_OBJECT *vmcs, 
   108                              UINT32 msr_index, UINT64 value);
   109  static void vmcs_act_add_msr_to_vmenter_load_list(struct _VMCS_OBJECT *vmcs, 
   110                              UINT32 msr_index, UINT64 value);
   111  static void vmcs_act_add_msr_to_vmexit_store_and_vmenter_load_lists(
   112                          struct _VMCS_OBJECT *vmcs, UINT32 msr_index, UINT64 value);
   113  static void vmcs_act_delete_msr_from_vmexit_store_list(struct _VMCS_OBJECT *vmcs, 
   114                          UINT32 msr_index);
   115  static void vmcs_act_delete_msr_from_vmexit_load_list(struct _VMCS_OBJECT *vmcs, 
   116                          UINT32 msr_index);
   117  static void vmcs_act_delete_msr_from_vmenter_load_list(struct _VMCS_OBJECT *vmcs, 
   118                          UINT32 msr_index);
   119  static void vmcs_act_delete_msr_from_vmexit_store_and_vmenter_load_lists(
   120                          struct _VMCS_OBJECT *vmcs, UINT32 msr_index);
   121  
   122  static void vmcs_act_flush_field_to_cpu(UINT32 entry_no, VMCS_ACTUAL_OBJECT *p_vmcs);
   123  static void vmcs_act_flush_nmi_depended_field_to_cpu(VMCS_ACTUAL_OBJECT *p_vmcs, 
   124                          UINT64 value);
   125  static UINT64 vmcs_act_read_from_hardware(VMCS_ACTUAL_OBJECT *p_vmcs, 
   126                          VMCS_FIELD field_id);
   127  static void vmcs_act_write_to_hardware(VMCS_ACTUAL_OBJECT *p_vmcs, 
   128                          VMCS_FIELD field_id, UINT64 value);
   129  
   130  static UINT64  temp_replace_vmcs_ptr(UINT64 new_ptr);
   131  static void    restore_previous_vmcs_ptr(UINT64 ptr_to_restore);
   132  static void    error_processing(UINT64 vmcs, int ret_val,
   133                                const char* operation, VMCS_FIELD  field);
   134  static BOOLEAN nmi_window[VMM_MAX_CPU_SUPPORTED]; // stores NMI Windows which should be injected per CPU
   135  
   136  
   137  // JLM:added
   138  extern int hw_vmx_read_current_vmcs(UINT64 field_id, UINT64 *value );
   139  extern int hw_vmx_flush_current_vmcs(UINT64 *address);
   140  extern int hw_vmx_write_current_vmcs(UINT64 field_id, UINT64 value);
   141  
   142  /*----------------------------------------------------------------------------*
   143  **                              NMI Handling
   144  **  When NMI occured:
   145  **    FS := non zero value        ; mark that NMI occured during VMEXIT
   146  **    nmi_window[cpu-no] := TRUE  ; mark that NMI Window should be injected on next VMENTER
   147  **    spoil transaction status (see below).
   148  **
   149  **  When NMI-Window is set - like ordinar VMCS field
   150  **  When NMI-Window is clear - clear it, but then check FS !=0 and if so, set NMI-Window back
   151  **  When flushing VMCS cache into CPU:
   152  **    do it in transactional way, i.e.
   153  **        set start transaction flage
   154  **        do the job
   155  **        check if succeeded
   156  **        if not repeat
   157  *----------------------------------------------------------------------------*/
   158  
// TRUE if an NMI was recorded on this CPU.  The FS selector is (ab)used as
// a per-CPU flag: nmi_remember_occured_nmi() writes a non-zero value there.
INLINE BOOLEAN nmi_is_nmi_occured(void) {
    return (0 != hw_read_fs());
}
   162  
// Request injection of an NMI window on the next VMENTER for this CPU.
INLINE void nmi_window_set(void)
{
    nmi_window[hw_cpu_id()] = TRUE;
}
   167  
// Clear the per-CPU NMI-window request, then re-set it if an NMI has
// already been recorded (FS != 0).  The clear-then-check order closes
// the race with an NMI arriving between the two statements: the NMI
// handler marks FS, so the final check restores the request.
INLINE void nmi_window_clear(void)
{
    nmi_window[hw_cpu_id()] = FALSE;
    if (nmi_is_nmi_occured()) {
        nmi_window[hw_cpu_id()] = TRUE;
    }
}
   175  
// Record that an NMI occurred: write a non-zero selector into FS (read
// back by nmi_is_nmi_occured) and request an NMI window for this CPU.
INLINE void nmi_remember_occured_nmi(void) {
    hw_write_fs(DATA32_GDT_ENTRY_OFFSET);
    nmi_window_set();
}
   180  
// TRUE when an NMI window should be injected on this CPU: either an NMI
// was recorded in FS or an explicit request is stored in nmi_window[].
INLINE BOOLEAN nmi_window_is_requested(void)
{
    return nmi_is_nmi_occured() || nmi_window[hw_cpu_id()];
}
   185  
// NMI-time hook for a VMCS object.  Ensures the NMI-window exiting control
// ends up set regardless of where the cache-to-hardware flush currently is:
// it records the request for a future flush, spoils an in-progress flush
// transaction, and also writes the bit straight into the hardware VMCS.
void vmcs_nmi_handler(struct _VMCS_OBJECT *vmcs)
{
    struct _VMCS_ACTUAL_OBJECT *p_vmcs = (struct _VMCS_ACTUAL_OBJECT *) vmcs;
    UINT64  value;
    VMM_ASSERT(p_vmcs);

    // mark that NMI Window must be set, in case that SW still did not flush VMCS to hardware
    nmi_remember_occured_nmi();

    // spoil VMCS flush process in case it is in progress
    // (vmcs_act_flush_nmi_depended_field_to_cpu detects this and retries)
    p_vmcs->update_status = UPDATE_FAILED;

    // write directly into hardware in case that SW already did flush to CPU
    value = vmcs_act_read_from_hardware(p_vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS);
    BIT_SET64(value, NMI_WINDOW_BIT);
    vmcs_act_write_to_hardware(p_vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS, value);
}
   203  
// Called just before VMRESUME: if an NMI was recorded (or one is pending
// per nmi_is_pending_this()), force the NMI-window bit directly into the
// hardware VMCS and remember the request for this CPU.
void nmi_window_update_before_vmresume(struct _VMCS_OBJECT *vmcs)
{
    struct _VMCS_ACTUAL_OBJECT *p_vmcs = (struct _VMCS_ACTUAL_OBJECT *) vmcs;
    UINT64 value;

    if(nmi_is_nmi_occured() || nmi_is_pending_this()) {
        VMM_ASSERT(p_vmcs);
        value = vmcs_act_read_from_hardware(p_vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS);
        BIT_SET64(value, NMI_WINDOW_BIT);
        vmcs_act_write_to_hardware(p_vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS, value);
        nmi_window_set();
    }
}
   217  
   218  void vmcs_write_nmi_window_bit(struct _VMCS_OBJECT *vmcs, BOOLEAN value)
   219  {
   220      vmcs_update(vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS,
   221                  FALSE == value ? 0 : (UINT64) -1, BIT_VALUE(NMI_WINDOW_BIT));
   222      if (value)
   223          nmi_window_set();
   224      else
   225          nmi_window_clear();
   226  }
   227  
   228  
   229  BOOLEAN vmcs_read_nmi_window_bit(struct _VMCS_OBJECT *vmcs)
   230  {
   231      UINT64 value = vmcs_read(vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS);
   232      return (0 != BIT_GET64(value, NMI_WINDOW_BIT));
   233  }
   234  
   235  
   236  struct _VMCS_OBJECT * vmcs_act_create(GUEST_CPU_HANDLE gcpu)
   237  {
   238      struct _VMCS_ACTUAL_OBJECT *p_vmcs;
   239  
   240  #ifdef JLMDEBUG
   241      bprint("vmcs_act_create\n");
   242  #endif
   243      p_vmcs = vmm_malloc(sizeof(*p_vmcs));
   244      if (NULL == p_vmcs) {
   245          VMM_LOG(mask_anonymous, level_trace,"[vmcs] %s: Allocation failed\n", __FUNCTION__);
   246          return NULL;
   247      }
   248      p_vmcs->cache = cache64_create(VMCS_FIELD_COUNT);
   249      if (NULL == p_vmcs->cache) {
   250          vmm_mfree(p_vmcs);
   251          VMM_LOG(mask_anonymous, level_trace,"[vmcs] %s: Allocation failed\n", __FUNCTION__);
   252          return NULL;
   253      }
   254  #ifdef JLMDEBUG
   255      bprint("about to set vmcs entries in vmcs create\n");
   256  #endif
   257      p_vmcs->hva = vmcs_hw_allocate_region(&p_vmcs->hpa);    // validate it's ok TBD
   258      p_vmcs->flags|= NEVER_ACTIVATED_FLAG;
   259      p_vmcs->owning_host_cpu = CPU_NEVER_USED;
   260      p_vmcs->gcpu_owner = gcpu;
   261      p_vmcs->vmcs_base->vmcs_read = vmcs_act_read;
   262      p_vmcs->vmcs_base->vmcs_write = vmcs_act_write;
   263      p_vmcs->vmcs_base->vmcs_flush_to_cpu = vmcs_act_flush_to_cpu;
   264      p_vmcs->vmcs_base->vmcs_flush_to_memory = vmcs_act_flush_to_memory;
   265      p_vmcs->vmcs_base->vmcs_is_dirty = vmcs_act_is_dirty;
   266      p_vmcs->vmcs_base->vmcs_get_owner = vmcs_act_get_owner;
   267      p_vmcs->vmcs_base->vmcs_destroy = vmcs_act_destroy;
   268      p_vmcs->vmcs_base->vmcs_add_msr_to_vmexit_store_list = 
   269          vmcs_act_add_msr_to_vmexit_store_list;
   270      p_vmcs->vmcs_base->vmcs_add_msr_to_vmexit_load_list = 
   271          vmcs_act_add_msr_to_vmexit_load_list;
   272      p_vmcs->vmcs_base->vmcs_add_msr_to_vmenter_load_list = 
   273          vmcs_act_add_msr_to_vmenter_load_list;
   274      p_vmcs->vmcs_base->vmcs_add_msr_to_vmexit_store_and_vmenter_load_list  = 
   275          vmcs_act_add_msr_to_vmexit_store_and_vmenter_load_lists;
   276      p_vmcs->vmcs_base->vmcs_delete_msr_from_vmexit_store_list = 
   277          vmcs_act_delete_msr_from_vmexit_store_list;
   278      p_vmcs->vmcs_base->vmcs_delete_msr_from_vmexit_load_list = 
   279          vmcs_act_delete_msr_from_vmexit_load_list;
   280      p_vmcs->vmcs_base->vmcs_delete_msr_from_vmenter_load_list = 
   281          vmcs_act_delete_msr_from_vmenter_load_list;
   282      p_vmcs->vmcs_base->vmcs_delete_msr_from_vmexit_store_and_vmenter_load_list  = 
   283          vmcs_act_delete_msr_from_vmexit_store_and_vmenter_load_lists;
   284      p_vmcs->vmcs_base->level = VMCS_MERGED;
   285      p_vmcs->vmcs_base->skip_access_checking = FALSE;
   286      p_vmcs->vmcs_base->signature = VMCS_SIGNATURE;
   287      vmcs_init_all_msr_lists(p_vmcs->vmcs_base);
   288      return p_vmcs->vmcs_base;
   289  }
   290  
   291  
   292  BOOLEAN vmcs_act_is_dirty(const struct _VMCS_OBJECT *vmcs)
   293  {
   294      struct _VMCS_ACTUAL_OBJECT *p_vmcs = (struct _VMCS_ACTUAL_OBJECT *) vmcs;
   295      VMM_ASSERT(p_vmcs);
   296      return cache64_is_dirty(p_vmcs->cache);
   297  }
   298  
   299  GUEST_CPU_HANDLE vmcs_act_get_owner(const struct _VMCS_OBJECT *vmcs)
   300  {
   301      struct _VMCS_ACTUAL_OBJECT *p_vmcs = (struct _VMCS_ACTUAL_OBJECT *) vmcs;
   302      VMM_ASSERT(p_vmcs);
   303      return p_vmcs->gcpu_owner;
   304  }
   305  
   306  extern BOOLEAN vmcs_sw_shadow_disable[];
   307  void vmcs_act_write(struct _VMCS_OBJECT *vmcs, VMCS_FIELD field_id, UINT64 value)
   308  {
   309      struct _VMCS_ACTUAL_OBJECT *p_vmcs = (struct _VMCS_ACTUAL_OBJECT *) vmcs;
   310      VMM_ASSERT(p_vmcs);
   311      if (!vmcs_sw_shadow_disable[hw_cpu_id()])
   312          cache64_write(p_vmcs->cache, value, (UINT32 )field_id);
   313      else
   314          vmcs_act_write_to_hardware(p_vmcs, field_id, value);
   315  }
   316  
   317  
// Read a VMCS field, preferring the software cache.  On a cache miss the
// value is fetched from the hardware VMCS and cached; a VMCS that was
// never loaded into hardware is treated as all-zero.
UINT64 vmcs_act_read(const struct _VMCS_OBJECT *vmcs, VMCS_FIELD field_id)
{
    struct _VMCS_ACTUAL_OBJECT *p_vmcs = (struct _VMCS_ACTUAL_OBJECT *) vmcs;
    UINT64           value;

    VMM_ASSERT(p_vmcs);
    VMM_ASSERT(field_id < VMCS_FIELD_COUNT);
    if (TRUE != cache64_read(p_vmcs->cache, &value, (UINT32) field_id)) {
        // special case - if hw VMCS was never filled, there is nothing to read
        // from HW
        if (p_vmcs->flags&NEVER_ACTIVATED_FLAG) {
            // assume the init was with all 0
            cache64_write(p_vmcs->cache, 0, (UINT32) field_id);
            return 0;
        }
        value = vmcs_act_read_from_hardware(p_vmcs, field_id);
        cache64_write(p_vmcs->cache, value, (UINT32) field_id); // update cache
    }
    return value;
}
   338  
   339  
// Read a field directly from the hardware VMCS (VMREAD).
// If this VMCS is not the CPU's "current" VMCS it is made current
// temporarily, and the previously current VMCS is restored afterwards.
UINT64 vmcs_act_read_from_hardware(VMCS_ACTUAL_OBJECT *p_vmcs, VMCS_FIELD field_id)
{
    UINT64           value;
    int              ret_val;
    UINT64           previous_vmcs = 0; // 0 - not replaced
    UINT32           encoding;

    // debug-only check: a VMCS may only be accessed from the CPU that owns it
    VMM_DEBUG_CODE(
        if ((p_vmcs->owning_host_cpu != CPU_NEVER_USED) && (p_vmcs->owning_host_cpu != hw_cpu_id())) {
            VMM_LOG(mask_anonymous, level_trace,
                    "Trying to access VMCS, used on another CPU\n");
            VMM_DEADLOOP();
        }
    )
    encoding = vmcs_get_field_encoding(field_id, NULL);
    VMM_ASSERT(encoding != VMCS_NO_COMPONENT);
    // if VMCS is not "current" now, make it current temporary
    if (0 == (p_vmcs->flags&ACTIVATED_FLAG)) {
        previous_vmcs = temp_replace_vmcs_ptr(p_vmcs->hpa);
    }
    ret_val = hw_vmx_read_current_vmcs(encoding, &value);
    if (ret_val != HW_VMX_SUCCESS) {
        error_processing(p_vmcs->hpa, ret_val, "hw_vmx_read_current_vmcs", field_id);
    }
    // flush current VMCS if it was never used on this CPU
    if (p_vmcs->owning_host_cpu == CPU_NEVER_USED) {
        ret_val = hw_vmx_flush_current_vmcs(&p_vmcs->hpa);
        if (ret_val != HW_VMX_SUCCESS) {
            error_processing(p_vmcs->hpa, ret_val, "hw_vmx_flush_current_vmcs", 
                             VMCS_FIELD_COUNT);
        }
    }
    // restore the previous "current" VMCS
    if (0 != previous_vmcs) {
        restore_previous_vmcs_ptr( previous_vmcs );
    }
    return value;
}
   378  
   379  
// Write a field directly into the current hardware VMCS (VMWRITE).
// Fields that are not hardware-writable (per their RW_ACCESS) are
// silently skipped.
void vmcs_act_write_to_hardware(VMCS_ACTUAL_OBJECT *p_vmcs, VMCS_FIELD field_id, UINT64 value)
{
    int              ret_val;
    UINT32           encoding;
    RW_ACCESS        access_type;

    // debug-only check: a VMCS may only be accessed from the CPU that owns it
    VMM_DEBUG_CODE(
        if ((p_vmcs->owning_host_cpu != CPU_NEVER_USED) &&
            (p_vmcs->owning_host_cpu != hw_cpu_id())) {
            VMM_LOG(mask_anonymous, level_trace,"Trying to access VMCS, used on another CPU\n");
            VMM_DEADLOOP();
        }
    )
    encoding = vmcs_get_field_encoding( field_id, &access_type);
    VMM_ASSERT(encoding != VMCS_NO_COMPONENT);

    // read-only fields (e.g. exit information) cannot be VMWRITE'd
    if (0 == FIELD_IS_HW_WRITABLE(access_type)) {
        return;
    }
    ret_val = hw_vmx_write_current_vmcs(encoding, value);
    if (ret_val != HW_VMX_SUCCESS) {
        error_processing(p_vmcs->hpa, ret_val, "hw_vmx_write_current_vmcs",
                         field_id);
    }
}
   405  
   406  
// Flush all dirty cached fields into the hardware VMCS.
// Must run on the CPU owning this VMCS.  If an NMI window was requested
// (possibly while the guest was scheduled on another VMCS), the request is
// merged into this VMCS's cached controls before flushing.
void vmcs_act_flush_to_cpu(const struct _VMCS_OBJECT *vmcs)
{
    struct _VMCS_ACTUAL_OBJECT *p_vmcs = (struct _VMCS_ACTUAL_OBJECT *) vmcs;

#ifdef JLMDEBUG1
    bprint("vmcs_act_flush_to_cpu\n");
#endif
    // TEST VMM_ASSERT((p_vmcs->flags&ACTIVATED_FLAG)!=0);
    VMM_ASSERT(p_vmcs->owning_host_cpu == hw_cpu_id());

    /* in case the guest was re-scheduled, NMI Window is set in other VMCS
    ** To speed the handling up, set NMI-Window in current VMCS if needed.
    */
    if (nmi_window_is_requested()) {
        vmcs_update((struct _VMCS_OBJECT *)vmcs,
            VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS,
            UINT64_ALL_ONES, BIT_VALUE64(NMI_WINDOW_BIT));
    }

#ifdef JLMDEBUG1
    bprint("Halfway through vmcs_act_flush_to_cpu\n");
#endif
    // write every dirty cache entry to hardware via the per-field callback
    if (cache64_is_dirty(p_vmcs->cache)) {
        cache64_flush_dirty(p_vmcs->cache, CACHE_ALL_ENTRIES,
            (CACHE64_FIELD_PROCESS_FUNCTION) vmcs_act_flush_field_to_cpu, p_vmcs);
    }
#ifdef JLMDEBUG1
    bprint("vmcs_act_flush_to_cpu, done\n");
#endif
}
   437  
   438  
   439  void vmcs_act_flush_field_to_cpu(UINT32 field_id, VMCS_ACTUAL_OBJECT *p_vmcs)
   440  {
   441      UINT64 value;
   442  
   443      if(FALSE == cache64_read(p_vmcs->cache, &value, field_id)) {
   444          VMM_LOG(mask_anonymous, level_trace,"Read field %d from cache failed.\n", field_id);
   445          return;
   446      }
   447      if (VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS != field_id) {
   448          vmcs_act_write_to_hardware(p_vmcs, (VMCS_FIELD)field_id, value);
   449      }
   450      else {
   451          vmcs_act_flush_nmi_depended_field_to_cpu(p_vmcs, value);
   452      }
   453  }
   454  
   455  
// Write the processor-events control transactionally so a racing NMI
// cannot be lost:
//  - update_status is set to UPDATE_SUCCEEDED before the write;
//  - vmcs_nmi_handler() spoils it to UPDATE_FAILED if an NMI arrives;
//  - the compare-exchange detects the spoil, and the write is retried,
//    this time with the NMI-window bit merged in via nmi_window_is_requested().
void vmcs_act_flush_nmi_depended_field_to_cpu(VMCS_ACTUAL_OBJECT *p_vmcs, UINT64 value)
{
    BOOLEAN success = FALSE;

    while (FALSE == success) {
        p_vmcs->update_status = UPDATE_SUCCEEDED;
        if (nmi_window_is_requested()) {
            BIT_SET64(value, NMI_WINDOW_BIT);
        }
        vmcs_act_write_to_hardware(p_vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS,
                                    value);
        // atomically close the transaction; fails if an NMI changed the status
        if (UPDATE_SUCCEEDED == hw_interlocked_compare_exchange(
				    (INT32 *)&p_vmcs->update_status, UPDATE_SUCCEEDED,
                                    UPDATE_FINISHED)) {
            success = TRUE;
        }
        else {
            VMM_DEBUG_CODE( VMM_LOG(mask_anonymous, level_trace,"NMI Occured during update\n"); );
        }
    }
}
   477  
// Flush the VMCS from the CPU to its memory region (VMCLEAR):
// temporarily activate it, flush cached modifications, VMCLEAR it, then
// restore the previously current VMCS.  Afterwards the VMCS is unlaunched
// and unowned, so it may later be used on a different CPU.
void vmcs_act_flush_to_memory(struct _VMCS_OBJECT *vmcs)
{
    struct _VMCS_ACTUAL_OBJECT *p_vmcs = (struct _VMCS_ACTUAL_OBJECT *) vmcs;
    int ret_val;
    UINT64           previous_vmcs;

    VMM_ASSERT(p_vmcs);
    // TEST VMM_ASSERT((p_vmcs->flags&ACTIVATED_FLAG) == 0);
    // never loaded into any CPU - nothing to flush
    if (p_vmcs->owning_host_cpu == CPU_NEVER_USED) {
        return;
    }
    VMM_ASSERT(hw_cpu_id() == p_vmcs->owning_host_cpu);
    vmx_vmptrst(&previous_vmcs);
    // make my active temporary
    vmcs_activate(vmcs);
    // flush all modifications from cache to CPU
    vmcs_act_flush_to_cpu(vmcs);
    // now flush from hardware
    ret_val = hw_vmx_flush_current_vmcs(&p_vmcs->hpa);

    if (ret_val != HW_VMX_SUCCESS) {
        error_processing(p_vmcs->hpa, ret_val, 
        "hw_vmx_flush_current_vmcs", VMCS_FIELD_COUNT);
    }
    vmcs_deactivate(vmcs);
    // reset launching field
    p_vmcs->flags&= (UINT16)(~LAUNCHED_FLAG);
    p_vmcs->owning_host_cpu = CPU_NEVER_USED;
    // restore previous
    restore_previous_vmcs_ptr(previous_vmcs);
}
   509  
   510  
// Destroy the VMCS object: flush it to memory, free its MSR lists, the
// field cache and the hardware VMCS region.
// NOTE(review): the VMCS_ACTUAL_OBJECT allocated in vmcs_act_create() is
// not freed here - presumably the caller owns that allocation; verify
// against the users of the vmcs_destroy vtable entry.
void vmcs_act_destroy(struct _VMCS_OBJECT *vmcs)
{
    struct _VMCS_ACTUAL_OBJECT *p_vmcs = (struct _VMCS_ACTUAL_OBJECT *) vmcs;
    VMM_ASSERT(p_vmcs);

    vmcs_act_flush_to_memory(vmcs);
    vmcs_destroy_all_msr_lists_internal(vmcs, TRUE);
    cache64_destroy(p_vmcs->cache);
    vmm_mfree((void *) p_vmcs->hva);
}
   521  
   522  
   523  // Handle temporary VMCS PTR replacements
   524  UINT64 temp_replace_vmcs_ptr( UINT64 new_ptr ) // return previous ptr
   525  {
   526      int ret_val;
   527      UINT64           previous_vmcs;
   528  
   529      vmx_vmptrst(&previous_vmcs);
   530      ret_val = vmx_vmptrld( &new_ptr );
   531      if (ret_val != HW_VMX_SUCCESS) {
   532          error_processing(new_ptr, ret_val, "vmx_vmptrld", VMCS_FIELD_COUNT);
   533      }
   534      return previous_vmcs;
   535  }
   536  
   537  
// Restore a previously saved "current" VMCS pointer.
// If the saved value is HW_VMCS_IS_EMPTY (no current VMCS existed when it
// was saved), flush whatever VMCS is now current so the CPU ends up with
// no current VMCS.
void restore_previous_vmcs_ptr( UINT64 ptr_to_restore )
{
    int ret_val;
    UINT64           temp_vmcs_ptr;

    // restore previous VMCS pointer
    if (ptr_to_restore != HW_VMCS_IS_EMPTY) {
        ret_val = vmx_vmptrld( &ptr_to_restore );

        if (ret_val != HW_VMX_SUCCESS) {
            error_processing(ptr_to_restore, ret_val,
                             "vmx_vmptrld", VMCS_FIELD_COUNT);
        }
    }
    else {
        // reset hw VMCS pointer
        vmx_vmptrst( &temp_vmcs_ptr );

        if (temp_vmcs_ptr != HW_VMCS_IS_EMPTY) {
            ret_val = hw_vmx_flush_current_vmcs( &temp_vmcs_ptr );

            if (ret_val != HW_VMX_SUCCESS) {
                error_processing(temp_vmcs_ptr, ret_val, "hw_vmx_flush_current_vmcs",
                                 VMCS_FIELD_COUNT);
            }
        }
    }
}
   566  
   567  
   568  // Reset all read caching. MUST NOT be called with modifications not flushed to hw
   569  void vmcs_clear_cache( VMCS_OBJECT *obj)
   570  {
   571      struct _VMCS_ACTUAL_OBJECT *p_vmcs = (struct _VMCS_ACTUAL_OBJECT *) obj;
   572  
   573      VMM_ASSERT(p_vmcs);
   574      cache64_invalidate(p_vmcs->cache, CACHE_ALL_ENTRIES);
   575  }
   576  
   577  
   578  // Activate
// Make this VMCS the current VMCS of this CPU (VMPTRLD).
// On the very first activation the hardware region is initialized with a
// VMCLEAR before loading.  The VMCS becomes owned by this CPU.
void vmcs_activate(VMCS_OBJECT* obj)
{
    struct _VMCS_ACTUAL_OBJECT *p_vmcs = (struct _VMCS_ACTUAL_OBJECT *) obj;
    CPU_ID                      this_cpu = hw_cpu_id();
    int            ret_val;

#ifdef JLMDEBUG1
    bprint("vmcs_activate\n");
#endif
    VMM_ASSERT(obj);
    VMM_ASSERT(p_vmcs->hpa);
    VMM_ASSERT((p_vmcs->flags&ACTIVATED_FLAG) == 0);
    // debug-only check: a VMCS may only be activated on its owning CPU
    VMM_DEBUG_CODE(
        if ((p_vmcs->owning_host_cpu != CPU_NEVER_USED) && 
            (p_vmcs->owning_host_cpu != this_cpu)) {
            VMM_LOG(mask_anonymous, level_trace,"Trying to activate VMCS, used on another CPU\n");
            VMM_DEADLOOP();
        }
    )

    // special case - if VMCS is still in the initialization state (first load)
    // init the hw before activating it
    if (p_vmcs->flags&NEVER_ACTIVATED_FLAG) {
        ret_val = hw_vmx_flush_current_vmcs(&p_vmcs->hpa);
        if (ret_val != HW_VMX_SUCCESS) {
            error_processing(p_vmcs->hpa, ret_val, 
                             "hw_vmx_flush_current_vmcs", VMCS_FIELD_COUNT);
        }
    }
    ret_val = vmx_vmptrld(&p_vmcs->hpa);
    if (ret_val != HW_VMX_SUCCESS) {
        error_processing(p_vmcs->hpa, ret_val, "vmx_vmptrld", VMCS_FIELD_COUNT);
    }
    p_vmcs->owning_host_cpu = this_cpu;
    p_vmcs->flags|= ACTIVATED_FLAG;
    // TEST 1 VMM_ASSERT((p_vmcs->flags&ACTIVATED_FLAG) == 1);
    p_vmcs->flags&= (UINT16)(~NEVER_ACTIVATED_FLAG);
}
   617  
   618  
   619  // Deactivate
// Mark this VMCS as no longer active on this CPU.  Only the ACTIVATED
// flag is dropped; owning_host_cpu is left unchanged, so the VMCS stays
// bound to this CPU until vmcs_act_flush_to_memory() releases it.
void vmcs_deactivate( VMCS_OBJECT* obj )
{
    struct _VMCS_ACTUAL_OBJECT *p_vmcs = (struct _VMCS_ACTUAL_OBJECT *) obj;

    VMM_ASSERT(obj);
    VMM_ASSERT(hw_cpu_id() == p_vmcs->owning_host_cpu);
    p_vmcs->flags&= (UINT16)(~ACTIVATED_FLAG);
}
   628  
   629  BOOLEAN vmcs_launch_required(const VMCS_OBJECT* obj)
   630  {
   631      struct _VMCS_ACTUAL_OBJECT *p_vmcs = (struct _VMCS_ACTUAL_OBJECT *) obj;
   632      VMM_ASSERT(p_vmcs);
   633      return((p_vmcs->flags&LAUNCHED_FLAG)==0);
   634  }
   635  
   636  void vmcs_set_launched(VMCS_OBJECT* obj)
   637  {
   638      struct _VMCS_ACTUAL_OBJECT *p_vmcs = (struct _VMCS_ACTUAL_OBJECT *) obj;
   639  
   640  #ifdef JLMDEBUG1
   641     bprint("set launched\n");
   642  #endif
   643      VMM_ASSERT(p_vmcs);
   644      p_vmcs->flags|= LAUNCHED_FLAG;
   645  }
   646  
   647  void vmcs_set_launch_required(VMCS_OBJECT* obj)
   648  {
   649      struct _VMCS_ACTUAL_OBJECT *p_vmcs = (struct _VMCS_ACTUAL_OBJECT *) obj;
   650  #ifdef JLMDEBUG1
   651      bprint("set launch required\n");
   652  #endif
   653      VMM_ASSERT(p_vmcs);
   654      p_vmcs->flags&= (UINT16)(~LAUNCHED_FLAG);
   655  }
   656  
   657  
   658  // Error message
// Return the last VM-instruction error code stored in the VMCS and,
// optionally, a human-readable message for it.
// NOTE(review): codes above VMCS_INSTR_BAD_ERROR_CODE map to "UNKNOWN"
// even though g_instr_error_message has one further entry
// (VMCS_INSTR_INVALIDATION_WITH_INVALID_OPERAND) - confirm the bound is
// intentional.
VMCS_INSTRUCTION_ERROR vmcs_last_instruction_error_code(const VMCS_OBJECT* obj,
                                const char** error_message)
{
    UINT32 err = (UINT32)vmcs_read( obj, VMCS_EXIT_INFO_INSTRUCTION_ERROR_CODE );

    if (error_message) {
        *error_message = (err <= VMCS_INSTR_BAD_ERROR_CODE) ?
            g_instr_error_message[err] : "UNKNOWN VMCS_EXIT_INFO_INSTRUCTION_ERROR_CODE";
    }
    return (VMCS_INSTRUCTION_ERROR)err;
}
   670  
   671  
   672  #pragma warning( push )
   673  #pragma warning( disable : 4100 )
   674  void error_processing(UINT64 vmcs, int ret_val,
   675                        const char* operation, VMCS_FIELD  field)
   676  {
   677      const char* error_message = 0;
   678      UINT64      err = 0;
   679      int my_err;
   680  
   681      switch (ret_val) {
   682          case HW_VMX_SUCCESS:
   683              return;
   684          case HW_VMX_FAILED_WITH_STATUS:
   685              my_err = hw_vmx_read_current_vmcs(
   686                  VM_EXIT_INFO_INSTRUCTION_ERROR_CODE,   // use hard-coded encoding
   687                  &err);
   688  
   689              if (my_err == HW_VMX_SUCCESS) {
   690                  error_message = g_instr_error_message[(UINT32)err];
   691                  break;
   692              }
   693              // fall through
   694          case HW_VMX_FAILED:
   695          default:
   696              error_message = "operation FAILED";
   697      }
   698      if (field == VMCS_FIELD_COUNT) {
   699          VMM_ASSERT(operation != 0);
   700          VMM_ASSERT(vmcs != 0);
   701          VMM_ASSERT(error_message != 0);
   702  #ifdef JLMDEBUG
   703          bprint("%s ( %llx ) failed with the error: %s\n", operation, vmcs,
   704                  error_message ? error_message : "unknown error");
   705  #endif
   706  #if 0   // Debug Support
   707          VMM_LOG(mask_anonymous, level_trace,"%s( %P ) failed with the error: %s\n",
   708                   operation, vmcs, error_message ? error_message : "unknown error");
   709  #endif
   710      }
   711      else {
   712  #ifdef JLMDEBUG
   713          bprint("%s( %llx, %s ) failed with the error: %s\n", operation, vmcs,
   714                  vmcs_get_field_name(field),
   715                  error_message ? error_message : "unknown error");
   716  #endif
   717  #if 0   // Debug Support
   718          VMM_LOG(mask_anonymous, level_trace,"%s( %P, %s ) failed with the error: %s\n",
   719                   operation, vmcs, vmcs_get_field_name(field),
   720                   error_message ? error_message : "unknown error");
   721  #endif
   722      }
   723  
   724  #ifdef JLMDEBUG
   725      LOOP_FOREVER
   726  #endif
   727      VMM_DEADLOOP();
   728      return;
   729  }
   730  #pragma warning( pop )
   731  
// Vtable adapter: delegate to the internal implementation (trailing TRUE
// flag semantics defined in vmcs_internal.h).
static void vmcs_act_add_msr_to_vmexit_store_list(struct _VMCS_OBJECT *vmcs, 
                        UINT32 msr_index, UINT64 value)
{
    vmcs_add_msr_to_vmexit_store_list_internal(vmcs, msr_index, value, TRUE);
}
   737  
// Vtable adapter: delegate to the internal implementation (trailing TRUE
// flag semantics defined in vmcs_internal.h).
static void vmcs_act_add_msr_to_vmexit_load_list(struct _VMCS_OBJECT *vmcs, 
                        UINT32 msr_index, UINT64 value)
{
    vmcs_add_msr_to_vmexit_load_list_internal(vmcs, msr_index, value, TRUE);
}
   743  
// Vtable adapter: delegate to the internal implementation (trailing TRUE
// flag semantics defined in vmcs_internal.h).
static void vmcs_act_add_msr_to_vmenter_load_list(struct _VMCS_OBJECT *vmcs, 
                        UINT32 msr_index, UINT64 value)
{
    vmcs_add_msr_to_vmenter_load_list_internal(vmcs, msr_index, value, TRUE);
}
   749  
// Vtable adapter: delegate to the internal implementation (trailing TRUE
// flag semantics defined in vmcs_internal.h).
static void vmcs_act_add_msr_to_vmexit_store_and_vmenter_load_lists(
            struct _VMCS_OBJECT *vmcs, UINT32 msr_index, UINT64 value)
{
    vmcs_add_msr_to_vmexit_store_and_vmenter_load_lists_internal(vmcs, msr_index, 
                                            value, TRUE);
}
   756  
// Vtable adapter: delegate to the internal implementation (trailing TRUE
// flag semantics defined in vmcs_internal.h).
static void vmcs_act_delete_msr_from_vmexit_store_list(struct _VMCS_OBJECT *vmcs, 
            UINT32 msr_index)
{
    vmcs_delete_msr_from_vmexit_store_list_internal(vmcs, msr_index, TRUE);
}
   762  
// Vtable adapter: delegate to the internal implementation (trailing TRUE
// flag semantics defined in vmcs_internal.h).
static void vmcs_act_delete_msr_from_vmexit_load_list(struct _VMCS_OBJECT *vmcs, 
            UINT32 msr_index)
{
    vmcs_delete_msr_from_vmexit_load_list_internal(vmcs, msr_index, TRUE);
}
   768  
// Vtable adapter: delegate to the internal implementation (trailing TRUE
// flag semantics defined in vmcs_internal.h).
static void vmcs_act_delete_msr_from_vmenter_load_list(struct _VMCS_OBJECT *vmcs, 
            UINT32 msr_index)
{
    vmcs_delete_msr_from_vmenter_load_list_internal(vmcs, msr_index, TRUE);
}
   774  
// Vtable adapter: delegate to the internal implementation (trailing TRUE
// flag semantics defined in vmcs_internal.h).
static void vmcs_act_delete_msr_from_vmexit_store_and_vmenter_load_lists(
            struct _VMCS_OBJECT *vmcs, UINT32 msr_index)
{
    vmcs_delete_msr_from_vmexit_store_and_vmenter_load_lists_internal(vmcs, 
            msr_index, TRUE);
}
   781