github.com/jlmucb/cloudproxy@v0.0.0-20170830161738-b5aa0b619bc4/cpvmm/vmm/vmexit/vmexit_msr.c

     1  /*
     2   * Copyright (c) 2013 Intel Corporation
     3   *
     4   * Licensed under the Apache License, Version 2.0 (the "License");
     5   * you may not use this file except in compliance with the License.
     6   * You may obtain a copy of the License at
     7   *     http://www.apache.org/licenses/LICENSE-2.0
     8   * Unless required by applicable law or agreed to in writing, software
     9   * distributed under the License is distributed on an "AS IS" BASIS,
    10   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    11   * See the License for the specific language governing permissions and
    12   * limitations under the License.
    13   */
    14  
    15  #include "file_codes.h"
    16  #define VMM_DEADLOOP()          VMM_DEADLOOP_LOG(VMEXIT_MSR_C)
    17  #define VMM_ASSERT(__condition) VMM_ASSERT_LOG(VMEXIT_MSR_C, __condition)
    18  #include "vmm_defs.h"
    19  #include "heap.h"
    20  #include "memory_allocator.h"
    21  #include "hw_utils.h"
    22  #include "isr.h"
    23  #include "guest.h"
    24  #include "guest_cpu.h"
    25  #include "guest_cpu_vmenter_event.h"
    26  #include "vmx_ctrl_msrs.h"
    27  #include "vmcs_api.h"
    28  #include "vmexit.h"
    29  #include "vmexit_msr.h"
    30  #include "vmm_dbg.h"
    31  #include "mtrrs_abstraction.h"
    32  #include "host_memory_manager_api.h"
    33  #include "vmm_events_data.h"
    34  #include "pat_manager.h"
    35  #include "local_apic.h"
    36  #include "unrestricted_guest.h"
    37  #include "vmm_callback.h"
    38  #include "memory_dump.h"
    39  #ifdef JLMDEBUG
    40  #include "jlmdebug.h"
    41  #endif
    42  
    43  #define MSR_LOW_RANGE_IN_BITS   ((MSR_LOW_LAST - MSR_LOW_FIRST + 1) / 8)
    44  #define MSR_HIGH_RANGE_IN_BITS  ((MSR_HIGH_LAST - MSR_HIGH_FIRST + 1) / 8)
    45  #define MSR_READ_LOW_OFFSET     0
    46  #define MSR_READ_HIGH_OFFSET    (MSR_READ_LOW_OFFSET  + MSR_LOW_RANGE_IN_BITS)
    47  #define MSR_WRITE_LOW_OFFSET    (MSR_READ_HIGH_OFFSET + MSR_LOW_RANGE_IN_BITS)
    48  #define MSR_WRITE_HIGH_OFFSET   (MSR_WRITE_LOW_OFFSET + MSR_HIGH_RANGE_IN_BITS)
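// Layout of the 4KB MSR bitmap page (per the Intel SDM MSR-bitmap description,
// assuming the conventional 0x00000000-0x00001FFF low and 0xC0000000-0xC0001FFF
// high MSR ranges referenced elsewhere in this file):
//   bytes 0x000-0x3FF   read bitmap, low MSRs
//   bytes 0x400-0x7FF   read bitmap, high MSRs
//   bytes 0x800-0xBFF   write bitmap, low MSRs
//   bytes 0xC00-0xFFF   write bitmap, high MSRs
// Note that despite the *_IN_BITS names, the two range macros above evaluate
// to byte counts (0x2000 bits / 8 = 0x400 bytes), so the offsets are byte
// offsets into the bitmap page.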
    49  
    50  /*
     51   * *** Hyper-V MSR access workaround ***
     52   * When we ran our PnP launch driver with the Alpha4_882 IBAgent, we saw an
     53   * MSR read at 0x40000081. Since it isn't a real hardware MSR, we got the
     54   * "RDMSR[0x40000081] failed. FaultVector=0x0000000D ErrCode=0x00000000"
     55   * message on the serial port, then a BSOD. After injecting GP to the guest
     56   * on this MSR read, our PnP driver works with the IBAgent. The address range
     57   * of the Hyper-V synthetic MSRs is 0x40000000 to 0x400000F0. We need to
     58   * investigate this workaround and check whether it is necessary to extend it
     59   * to any MSR read/write outside 0x00000000 to 0x00001FFF and 0xC0000000 to 0xC0001FFF.
    60   */
    61  
    62  #define HYPER_V_MSR_MIN 0x40000000
    63  #define HYPER_V_MSR_MAX 0x400000F0
    64  #define LOW_BITS_32_MASK    ((UINT64)UINT32_ALL_ONES)
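#if 0 // illustrative only, not part of this file's API
// A minimal sketch of the synthetic-MSR range test performed inline by
// vmexit_msr_read() and vmexit_msr_write() below; the helper name is
// hypothetical.
static BOOLEAN msr_is_hyperv_synthetic(MSR_ID msr_id)
{
    return (msr_id >= HYPER_V_MSR_MIN) && (msr_id <= HYPER_V_MSR_MAX);
}
#endif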
    65  
    66  typedef struct {
    67      MSR_ID              msr_id;
    68      UINT8               pad[4];
    69      MSR_ACCESS_HANDLER  msr_read_handler;
    70      MSR_ACCESS_HANDLER  msr_write_handler;
    71      void               *msr_context;
    72      LIST_ELEMENT        msr_list;
    73  } MSR_VMEXIT_DESCRIPTOR;
    74  
    75  
    76  static struct {
    77      UINT32      msr_id;
    78      VMCS_FIELD  vmcs_field_id;
    79  } vmcs_resident_guest_msrs[] = {
    80      { IA32_MSR_SYSENTER_CS,     VMCS_GUEST_SYSENTER_CS },
    81      { IA32_MSR_SYSENTER_ESP,    VMCS_GUEST_SYSENTER_ESP },
    82      { IA32_MSR_SYSENTER_EIP,    VMCS_GUEST_SYSENTER_EIP },
    83      { IA32_MSR_DEBUGCTL,        VMCS_GUEST_DEBUG_CONTROL },
    84      { IA32_MSR_PERF_GLOBAL_CTRL,VMCS_GUEST_IA32_PERF_GLOBAL_CTRL },
    85      { IA32_MSR_FS_BASE,         VMCS_GUEST_FS_BASE },
    86      { IA32_MSR_GS_BASE,         VMCS_GUEST_GS_BASE }
    87  };
    88  
    89  
    90  static MSR_VMEXIT_DESCRIPTOR *msr_descriptor_lookup(LIST_ELEMENT *msr_list, MSR_ID msr_id);
    91  VMM_STATUS msr_vmexit_bits_config(UINT8 *p_bitmap, MSR_ID msr_id, RW_ACCESS access, BOOLEAN set);
    92  static BOOLEAN  msr_common_vmexit_handler(GUEST_CPU_HANDLE gcpu, RW_ACCESS access, 
    93                      UINT64 *msr_value);
    94  static BOOLEAN  msr_unsupported_access_handler(GUEST_CPU_HANDLE gcpu, MSR_ID msr_id, 
    95                      UINT64 *value, void *context);
    96  static VMEXIT_HANDLING_STATUS vmexit_msr_read(GUEST_CPU_HANDLE  gcpu);
    97  static VMEXIT_HANDLING_STATUS vmexit_msr_write(GUEST_CPU_HANDLE gcpu);
    98  static BOOLEAN  msr_efer_write_handler(GUEST_CPU_HANDLE gcpu, MSR_ID msr_id, 
    99                                         UINT64 *msr_value, void *context);
   100  static BOOLEAN  msr_efer_read_handler(GUEST_CPU_HANDLE gcpu, MSR_ID msr_id, 
   101                                        UINT64 *msr_value, void *context);
   102  
   103  #if 0 // unused
   104  static BOOLEAN  msr_pat_read_handler(GUEST_CPU_HANDLE gcpu, MSR_ID msr_id, 
   105                                       UINT64 *msr_value, void *context);
   106  static BOOLEAN  msr_pat_write_handler(GUEST_CPU_HANDLE gcpu, MSR_ID msr_id, 
   107                                        UINT64 *msr_value, void *context);
   108  #endif
   109  
   110  static BOOLEAN  msr_lapic_base_write_handler(GUEST_CPU_HANDLE gcpu, MSR_ID msr_id, 
   111                                               UINT64 *msr_value, void *context);
   112  static BOOLEAN  msr_feature_control_read_handler(GUEST_CPU_HANDLE gcpu, 
   113                                  MSR_ID msr_id, UINT64 *msr_value, void *context);
   114  static BOOLEAN  msr_feature_control_write_handler(GUEST_CPU_HANDLE gcpu, 
   115                              MSR_ID msr_id, UINT64 *msr_value, void *context);
   116  static BOOLEAN  msr_mtrr_write_handler(GUEST_CPU_HANDLE gcpu, MSR_ID msr_id, 
   117                                         UINT64 *msr_value, void *context);
   118  static BOOLEAN  msr_vmcs_resident_default_handler(GUEST_CPU_HANDLE gcpu, 
   119                          MSR_ID msr_id, RW_ACCESS access, UINT64 *msr_value);
   120  static BOOLEAN  msr_misc_enable_write_handler(GUEST_CPU_HANDLE gcpu, MSR_ID msr_id, 
   121                                              UINT64 *msr_value, void *context);
   122  
   123  // FUNCTION : msr_vmexit_on_all()
    124  // PURPOSE  : Turns VMEXIT on all MSR accesses ON/OFF
   125  // ARGUMENTS: GUEST_CPU_HANDLE gcpu
   126  //          : BOOLEAN enable
   127  // RETURNS  : none, must succeed.
   128  void msr_vmexit_on_all(GUEST_CPU_HANDLE gcpu, BOOLEAN enable)
   129  {
   130      PROCESSOR_BASED_VM_EXECUTION_CONTROLS exec_controls_mask;
   131      VMEXIT_CONTROL                        vmexit_request;
   132  
   133  #ifdef JLMDEBUG
   134      bprint("msr_vmexit_on_all\n");
   135  #endif
   136      VMM_ASSERT(gcpu);
   137      VMM_LOG(mask_uvmm,level_trace,"[msr] VMEXIT on %s\n", enable ? "all" : "bitmap");
   138      exec_controls_mask.Uint32 = 0;
   139      exec_controls_mask.Bits.UseMsrBitmaps = 1;
   140      vmm_memset(&vmexit_request, 0 , sizeof(vmexit_request));
   141      vmexit_request.proc_ctrls.bit_request = enable ? 0 : UINT64_ALL_ONES;
   142      vmexit_request.proc_ctrls.bit_mask = exec_controls_mask.Uint32;
   143      gcpu_control_setup( gcpu, &vmexit_request );
   144  }
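// Note on the inverted request above: enable==TRUE asks for UseMsrBitmaps to
// be cleared, so every guest RDMSR/WRMSR causes a VMEXIT; enable==FALSE asks
// for UseMsrBitmaps to be set, so only MSRs marked in the bitmap page exit.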
   145  
   146  // VS2010 generates bad code for BITARRAY_SET() in release mode
   147  // workaround by turning off optimization
   148  #pragma optimize("",off)
   149  VMM_STATUS msr_vmexit_bits_config( UINT8 *p_bitmap, MSR_ID msr_id,
   150                  RW_ACCESS access, BOOLEAN set)
   151  {
   152      UINT8       *p_bitarray;
   153      MSR_ID      bitno;
   154      RW_ACCESS   access_index;
   155  
   156  #ifdef JLMDEBUG
   157      bprint("msr_vmexit_bits_config\n");
   158  #endif
   159      for (access_index = WRITE_ACCESS; access_index <= READ_ACCESS; ++access_index) {
    160          if (access_index & access) {  // is this access of interest?
   161              if (msr_id <= MSR_LOW_LAST) {
   162                  bitno = msr_id;
   163                  p_bitarray = READ_ACCESS == access_index ?
   164                      &p_bitmap[MSR_READ_LOW_OFFSET] :
   165                      &p_bitmap[MSR_WRITE_LOW_OFFSET];
   166              }
   167              else if (MSR_HIGH_FIRST <= msr_id && msr_id <= MSR_HIGH_LAST) {
   168                  bitno = msr_id - MSR_HIGH_FIRST;
   169                  p_bitarray = READ_ACCESS == access_index ?
   170                      &p_bitmap[MSR_READ_HIGH_OFFSET] :
   171                      &p_bitmap[MSR_WRITE_HIGH_OFFSET];
   172              }
   173              else {
   174                  VMM_ASSERT(0);  // wrong MSR ID
   175                  return VMM_ERROR;
   176              }
   177              if (set) {
   178                  BITARRAY_SET(p_bitarray, bitno);
   179              }
   180              else {
   181                  BITARRAY_CLR(p_bitarray, bitno);
   182              }
   183          }
   184      }
   185      return VMM_OK;
   186  }
   187  #pragma optimize("",on)
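// Worked example, assuming MSR_HIGH_FIRST == 0xC0000000 as suggested by the
// Hyper-V comment above: configuring a write exit for IA32_MSR_EFER
// (0xC0000080) selects the high range, so bitno = 0xC0000080 - MSR_HIGH_FIRST
// = 0x80 and that bit is set in the array at &p_bitmap[MSR_WRITE_HIGH_OFFSET].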
   188  
   189  
   190  MSR_VMEXIT_DESCRIPTOR * msr_descriptor_lookup( LIST_ELEMENT *msr_list, MSR_ID msr_id)
   191  {
   192      MSR_VMEXIT_DESCRIPTOR   *p_msr_desc;
   193      LIST_ELEMENT            *list_iterator;
   194  
   195  #ifdef JLMDEBUG1
   196      bprint("msr_descriptor_lookup\n");
   197  #endif
   198      LIST_FOR_EACH(msr_list, list_iterator) {
   199          p_msr_desc = LIST_ENTRY(list_iterator, MSR_VMEXIT_DESCRIPTOR, msr_list);
   200          if (p_msr_desc->msr_id == msr_id) {
   201              return p_msr_desc;  // found
   202          }
   203      }
   204      return NULL;
   205  }
   206  
   207  static void msr_vmexit_register_mtrr_accesses_handler(GUEST_HANDLE guest) {
   208  
   209      UINT32 i,msr_addr;
   210  
   211  #ifdef JLMDEBUG
   212      bprint("msr_vmexit_register_mtrr_accesses_handler\n");
   213  #endif
   214      msr_vmexit_handler_register( guest, IA32_MTRRCAP_ADDR, msr_mtrr_write_handler,
   215          WRITE_ACCESS, NULL);
   216      msr_vmexit_handler_register( guest, IA32_MTRR_DEF_TYPE_ADDR, msr_mtrr_write_handler,
   217          WRITE_ACCESS, NULL);
   218      msr_vmexit_handler_register( guest, IA32_MTRR_FIX64K_00000_ADDR, msr_mtrr_write_handler,
   219          WRITE_ACCESS, NULL);
   220      msr_vmexit_handler_register( guest, IA32_MTRR_FIX16K_80000_ADDR, msr_mtrr_write_handler,
   221          WRITE_ACCESS, NULL);
   222      msr_vmexit_handler_register( guest, IA32_MTRR_FIX16K_A0000_ADDR, msr_mtrr_write_handler,
   223          WRITE_ACCESS, NULL);
   224      msr_vmexit_handler_register( guest, IA32_MTRR_FIX4K_C0000_ADDR, msr_mtrr_write_handler,
   225          WRITE_ACCESS, NULL);
   226      msr_vmexit_handler_register( guest, IA32_MTRR_FIX4K_C8000_ADDR, msr_mtrr_write_handler,
   227          WRITE_ACCESS, NULL);
   228      msr_vmexit_handler_register( guest, IA32_MTRR_FIX4K_D0000_ADDR, msr_mtrr_write_handler,
   229          WRITE_ACCESS, NULL);
   230      msr_vmexit_handler_register( guest, IA32_MTRR_FIX4K_D8000_ADDR, msr_mtrr_write_handler,
   231          WRITE_ACCESS, NULL);
   232      msr_vmexit_handler_register( guest, IA32_MTRR_FIX4K_E0000_ADDR, msr_mtrr_write_handler,
   233          WRITE_ACCESS, NULL);
   234      msr_vmexit_handler_register( guest, IA32_MTRR_FIX4K_E8000_ADDR, msr_mtrr_write_handler,
   235          WRITE_ACCESS, NULL);
   236      msr_vmexit_handler_register( guest, IA32_MTRR_FIX4K_F0000_ADDR, msr_mtrr_write_handler,
   237          WRITE_ACCESS, NULL);
   238      msr_vmexit_handler_register( guest, IA32_MTRR_FIX4K_F8000_ADDR, msr_mtrr_write_handler,
   239          WRITE_ACCESS, NULL);
   240  
   241      // all other MTRR registers are sequential
   242      for (msr_addr = IA32_MTRR_PHYSBASE0_ADDR, i=0; 
   243                  i < mtrrs_abstraction_get_num_of_variable_range_regs(); 
   244                  msr_addr += 2, i++) {
   245          if(msr_addr > IA32_MTRR_MAX_PHYSMASK_ADDR ) {
   246                  VMM_LOG(mask_uvmm, level_error, "Error: No. of Variable MTRRs is incorrect\n");
   247          }
   248          /* Register all MTRR PHYSBASE */
   249          msr_vmexit_handler_register( guest, msr_addr, msr_mtrr_write_handler,
   250              WRITE_ACCESS, NULL);
   251          /* Register all MTRR PHYSMASK*/
   252          msr_vmexit_handler_register( guest, msr_addr + 1, msr_mtrr_write_handler,
   253              WRITE_ACCESS, NULL);
   254      }
   255  }
   256  
   257  
    258  // Allocates structures for MSR virtualization.
    259  // Must be called prior to any other function from this package on the gcpu,
    260  //  but after the gcpu VMCS has been loaded.
   261  // ARGUMENTS: GUEST_HANDLE guest
   262  void msr_vmexit_guest_setup(GUEST_HANDLE guest)
   263  {
   264      MSR_VMEXIT_CONTROL *p_msr_ctrl;
   265      MSR_ID msr_id;
   266  #ifdef JLMDEBUG1
   267      bprint(" msr_vmexit_guest_setup\n");
   268      // LOOP_FOREVER
   269  #endif
   270  
   271      VMM_ASSERT(guest);
   272      VMM_LOG(mask_uvmm, level_trace,"[msr] Setup for Guest\n");
   273      p_msr_ctrl = guest_get_msr_control(guest);
   274  
   275      // allocate zero-filled 4K-page to store MSR VMEXIT bitmap
   276      p_msr_ctrl->msr_bitmap = vmm_memory_alloc(PAGE_4KB_SIZE);
   277      VMM_ASSERT(p_msr_ctrl->msr_bitmap);
   278      vmexit_install_handler(guest_get_id(guest), vmexit_msr_read,  
   279                             Ia32VmxExitBasicReasonMsrRead);
   280      vmexit_install_handler(guest_get_id(guest), vmexit_msr_write, 
   281                             Ia32VmxExitBasicReasonMsrWrite);
   282  
   283      for (msr_id = IA32_MSR_VMX_FIRST; msr_id <= IA32_MSR_VMX_LAST; ++msr_id) {
   284          msr_guest_access_inhibit(guest, msr_id);
   285      }
   286      if( !is_unrestricted_guest_supported() ) {      
   287          msr_vmexit_handler_register( guest, IA32_MSR_EFER,
   288                      msr_efer_write_handler, WRITE_ACCESS, NULL);
   289          msr_vmexit_handler_register( guest, IA32_MSR_EFER,
   290                      msr_efer_read_handler, READ_ACCESS, NULL);
   291      }
   292      msr_vmexit_handler_register( guest, IA32_MSR_APIC_BASE,
   293          msr_lapic_base_write_handler, WRITE_ACCESS, NULL);
   294  
   295      msr_vmexit_handler_register( guest, IA32_MSR_FEATURE_CONTROL,
   296          msr_feature_control_read_handler, READ_ACCESS, NULL);
   297  
   298      msr_vmexit_handler_register( guest, IA32_MSR_FEATURE_CONTROL,
   299          msr_feature_control_write_handler, WRITE_ACCESS, NULL);
   300  
   301      msr_vmexit_handler_register( guest, IA32_MSR_MISC_ENABLE,
   302          msr_misc_enable_write_handler, WRITE_ACCESS, NULL);
   303      msr_vmexit_register_mtrr_accesses_handler(guest);
   304  }
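#if 0 // illustrative only
// Typical bring-up order implied by the comments in this file: guest-wide
// setup once, then per-vcpu activation after the gcpu VMCS is loaded. This
// wrapper is a sketch; it is not part of the vmexit_msr API.
static void example_msr_virtualization_bringup(GUEST_HANDLE guest, GUEST_CPU_HANDLE gcpu)
{
    msr_vmexit_guest_setup(guest); // allocate bitmap, install VMEXIT handlers
    msr_vmexit_activate(gcpu);     // point VMCS_MSR_BITMAP_ADDRESS at the bitmap
}
#endif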
   305  
   306  
   307  // Register MSR related structures with HW (VMCS)
   308  // ARGUMENTS: GUEST_CPU_HANDLE gcpu
   309  // RETURNS  : none, must succeed.
   310  void msr_vmexit_activate(GUEST_CPU_HANDLE gcpu)
   311  {
   312      VMCS_OBJECT            *vmcs = gcpu_get_vmcs(gcpu);
   313      GUEST_HANDLE           guest;
   314      MSR_VMEXIT_CONTROL     *p_msr_ctrl;
   315      UINT64                  msr_bitmap;
   316  
   317  #ifdef JLMDEBUG1
   318      bprint("msr_vmexit_activate\n");
   319  #endif
   320      VMM_ASSERT(gcpu);
   321      VMM_LOG(mask_uvmm, level_trace,"[msr] Activated on GCPU\n");
   322      guest = gcpu_guest_handle(gcpu);
   323      VMM_ASSERT(guest);
   324      p_msr_ctrl = guest_get_msr_control(guest);
   325      msr_bitmap= (UINT64) p_msr_ctrl->msr_bitmap;
   326      msr_vmexit_on_all(gcpu, FALSE);
   327      if (NULL != p_msr_ctrl->msr_bitmap) {
   328          hmm_hva_to_hpa(msr_bitmap, &msr_bitmap);
   329          vmcs_write(vmcs, VMCS_MSR_BITMAP_ADDRESS, msr_bitmap);
   330      }
   331  }
   332  
   333  
   334  // Register specific MSR handler with VMEXIT
   335  // GUEST_HANDLE        guest
   336  // MSR_ID              msr_id
   337  // MSR_ACCESS_HANDLER  msr_handler,
   338  // RW_ACCESS           access
   339  // RETURNS  : VMM_OK if succeeded
   340  VMM_STATUS msr_vmexit_handler_register( GUEST_HANDLE guest, MSR_ID msr_id,
   341                  MSR_ACCESS_HANDLER  msr_handler, RW_ACCESS access, void *context)
   342  {
   343      MSR_VMEXIT_DESCRIPTOR *p_desc;
   344      VMM_STATUS status = VMM_OK;
   345      MSR_VMEXIT_CONTROL *p_msr_ctrl = guest_get_msr_control(guest);
   346  
   347  #ifdef JLMDEBUG
   348      if(msr_id==0x1b)
   349          bprint("msr_vmexit_handler_register 0x1b\n");
   350  #endif
   351      // check first if it already registered
   352      p_desc = msr_descriptor_lookup(p_msr_ctrl->msr_list, msr_id);
   353      if (NULL == p_desc) {
   354          // allocate new descriptor and chain it to the list
    355          p_desc = vmm_malloc(sizeof(*p_desc));
    356          if (NULL != p_desc) {
   357              vmm_memset(p_desc, 0, sizeof(*p_desc));
   358              list_add(p_msr_ctrl->msr_list, &p_desc->msr_list);
   359          }
   360      }
   361      else {
   362          VMM_LOG(mask_uvmm, level_trace,"MSR(%p) handler already registered. Update...\n", msr_id);
   363      }
   364  
   365      if (NULL != p_desc) {
   366          status = msr_vmexit_bits_config(p_msr_ctrl->msr_bitmap, msr_id, access, TRUE);
   367          if (VMM_OK == status) {
   368              p_desc->msr_id = msr_id;
   369              if (access & WRITE_ACCESS) p_desc->msr_write_handler = msr_handler;
   370              if (access & READ_ACCESS)  p_desc->msr_read_handler = msr_handler;
   371              p_desc->msr_context = context;
   372              // VMM_LOG(mask_uvmm, level_trace,"%s: [msr] Handler(%P) Registered\n", __FUNCTION__, msr_id);
   373          }
   374          else {
    375              VMM_LOG(mask_uvmm, level_trace,"MSR(%p) handler registration failed due to bad ID\n", msr_id);
   376          }
   377      }
   378      else {
   379          status = VMM_ERROR;
   380          VMM_LOG(mask_uvmm, level_trace,"MSR(%p) handler registration failed due to lack of space\n", msr_id);
   381      }
   382      return status;
   383  }
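#if 0 // illustrative only
// Hypothetical registration sketch: MY_MSR_ID and my_msr_read_handler are
// placeholders, not part of this file. A handler returns TRUE when the access
// should count as executed, in which case msr_common_vmexit_handler()
// advances the guest RIP past the RDMSR/WRMSR instruction.
static BOOLEAN my_msr_read_handler(GUEST_CPU_HANDLE gcpu, MSR_ID msr_id,
                UINT64 *msr_value, void *context)
{
    *msr_value = 0;  // present a fully virtualized value to the guest
    return TRUE;
}
// ... msr_vmexit_handler_register(guest, MY_MSR_ID, my_msr_read_handler,
//                                 READ_ACCESS, NULL);
#endif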
   384  
   385  
   386  // Unregister specific MSR VMEXIT handler
   387  // GUEST_HANDLE  guest
   388  // MSR_ID        msr_id
   389  // RETURNS  : VMM_OK if succeeded, VMM_ERROR if no descriptor for MSR
   390  VMM_STATUS msr_vmexit_handler_unregister( GUEST_HANDLE guest,
   391                  MSR_ID msr_id, RW_ACCESS  access)
   392  {
   393      MSR_VMEXIT_DESCRIPTOR *p_desc;
   394      VMM_STATUS status = VMM_OK;
   395      MSR_VMEXIT_CONTROL *p_msr_ctrl = guest_get_msr_control(guest);
   396  
   397  #ifdef JLMDEBUG1
   398      bprint("msr_vmexit_handler_unregister\n");
   399  #endif
   400      p_desc = msr_descriptor_lookup(p_msr_ctrl->msr_list, msr_id);
   401      if (NULL == p_desc) {
   402          status = VMM_ERROR;
   403          VMM_LOG(mask_uvmm, level_trace,"MSR(%p) handler is not registered\n", msr_id);
   404      }
   405      else {
   406          msr_vmexit_bits_config( p_msr_ctrl->msr_bitmap, msr_id,
   407              access, FALSE);
   408  
   409          if (access & WRITE_ACCESS) p_desc->msr_write_handler = NULL;
   410          if (access & READ_ACCESS)  p_desc->msr_read_handler = NULL;
   411  
   412          if (NULL == p_desc->msr_write_handler && NULL == p_desc->msr_read_handler) {
   413              list_remove(&p_desc->msr_list);
   414              vmm_mfree(p_desc);
   415          }
   416      }
   417      return status;
   418  }
   419  
   420  
    421  // Read handler, called upon VMEXITs resulting from MSR read access
    422  //          : Reads the MSR value from HW and, if OK, stores the result in EDX:EAX
    423  // ARGUMENTS: GUEST_CPU_HANDLE gcpu
   424  VMEXIT_HANDLING_STATUS vmexit_msr_read(GUEST_CPU_HANDLE gcpu)
   425  {
   426      UINT64 msr_value = 0;
   427      MSR_ID msr_id = (MSR_ID) gcpu_get_native_gp_reg(gcpu, IA32_REG_RCX);
   428  
   429  #ifdef JLMDEBUG
   430      if(msr_id==0x1b)
   431          bprint("vmexit_msr_read 0x1b\n");
   432  #endif
    433      /* hypervisor synthetic MSRs are not hardware MSRs; inject GP to guest */
   434      if( (msr_id >= HYPER_V_MSR_MIN) && (msr_id <= HYPER_V_MSR_MAX)) {
   435          gcpu_inject_gp0(gcpu);
   436          return VMEXIT_HANDLED;
   437      }
   438      if (TRUE == msr_common_vmexit_handler(gcpu, READ_ACCESS, &msr_value)) {
   439          // write back to the guest. store MSR value in EDX:EAX
   440          gcpu_set_native_gp_reg(gcpu, IA32_REG_RDX, msr_value >> 32);
   441          gcpu_set_native_gp_reg(gcpu, IA32_REG_RAX, msr_value & LOW_BITS_32_MASK);
   442      }
   443      return VMEXIT_HANDLED;
   444  }
   445  
   446  
    447  // Write handler, called upon VMEXITs resulting from MSR write access
    448  //          : Reads the MSR value from guest EDX:EAX and calls the registered write handler
    449  // ARGUMENTS: GUEST_CPU_HANDLE gcpu
    450  // RETURNS  : VMEXIT_HANDLING_STATUS (always VMEXIT_HANDLED)
   451  VMEXIT_HANDLING_STATUS vmexit_msr_write(GUEST_CPU_HANDLE gcpu)
   452  {
   453      UINT64 msr_value;
   454  #ifdef JLMDEBUG1
   455      bprint("In vmexit_msr_write\n");
   456  #endif
   457      MSR_ID msr_id = (MSR_ID) gcpu_get_native_gp_reg(gcpu, IA32_REG_RCX);
   458  
   459  #ifdef JLMDEBUG1
   460      bprint("vmexit_msr_write\n");
   461  #endif
    462      /* hypervisor synthetic MSRs are not hardware MSRs; inject GP to guest */
   463      if( (msr_id >= HYPER_V_MSR_MIN) && (msr_id <= HYPER_V_MSR_MAX)) {
   464  #ifdef JLMDEBUG1
   465          bprint("Injecting GP to guest for msr %x\n", msr_id);
   466  #endif
   467          gcpu_inject_gp0(gcpu);
   468          return VMEXIT_HANDLED;
   469      }
   470      msr_value = (gcpu_get_native_gp_reg(gcpu, IA32_REG_RDX) << 32);
   471      msr_value |= gcpu_get_native_gp_reg(gcpu, IA32_REG_RAX) & LOW_BITS_32_MASK;
   472  
   473  #ifdef JLMDEBUG1
   474      bprint("Handling msr %x\n", msr_id);
   475  #endif
   476      msr_common_vmexit_handler(gcpu, WRITE_ACCESS, &msr_value);
   477  #ifdef JLMDEBUG1
   478      bprint("Handled msr %x\n", msr_id);
   479  #endif
   480      return VMEXIT_HANDLED;
   481  }
   482  
   483  
    484  // If an MSR handler is registered, call it; otherwise execute the default
    485  // MSR handler. If the MSR R/W instruction was executed successfully
    486  // from the Guest's point of view, the Guest IP is advanced by the instruction
    487  // length; otherwise an exception is injected into the Guest CPU.
    488  // ARGUMENTS: GUEST_CPU_HANDLE    gcpu
    489  //          : RW_ACCESS           access
    490  // RETURNS  : TRUE if instruction was executed, FALSE otherwise (fault occurred)
   491  BOOLEAN msr_common_vmexit_handler( GUEST_CPU_HANDLE gcpu,
   492                  RW_ACCESS access, UINT64 *msr_value)
   493  {
   494      MSR_ID msr_id = (MSR_ID) gcpu_get_native_gp_reg(gcpu, IA32_REG_RCX);
   495      GUEST_HANDLE guest = NULL;
   496      MSR_VMEXIT_CONTROL *p_msr_ctrl = NULL;
   497      MSR_VMEXIT_DESCRIPTOR *msr_descriptor = NULL;
   498      BOOLEAN instruction_was_executed = FALSE;
   499      MSR_ACCESS_HANDLER  msr_handler = NULL;
   500  
   501  #ifdef JLMDEBUG1
   502      bprint("msr_common_vmexit_handler\n");
   503  #endif
   504      guest = gcpu_guest_handle(gcpu);
   505      VMM_ASSERT(guest);
   506      p_msr_ctrl = guest_get_msr_control(guest);
   507      VMM_ASSERT(p_msr_ctrl);
   508      
   509      msr_descriptor = msr_descriptor_lookup(p_msr_ctrl->msr_list, msr_id);
   510  
   511      if (NULL != msr_descriptor) {
   512  #ifdef JLMDEBUG1
   513          bprint("non-null msr_descriptor %p\n", msr_descriptor);
   514  #endif
   515          // VMM_LOG(mask_uvmm, level_trace,"%s: msr_descriptor is NOT NULL.\n", __FUNCTION__);
   516          if (access & WRITE_ACCESS) {
   517  #ifdef JLMDEBUG1
   518              bprint("Write handler\n");
   519  #endif
   520              msr_handler = msr_descriptor->msr_write_handler;
   521           } else if (access & READ_ACCESS) {
   522  #ifdef JLMDEBUG1
   523              bprint("Read handler\n");
   524  #endif
   525              msr_handler = msr_descriptor->msr_read_handler;
   526           }
   527      }
   528  
   529      if (NULL == msr_handler) {
   530  #ifdef JLMDEBUG1
   531          bprint("Case 1: null msr_handler\n");
   532  #endif
   533                  // VMM_LOG(mask_uvmm, level_trace,"%s: msr_handler is NULL.\n", __FUNCTION__);
   534          instruction_was_executed =
   535              msr_vmcs_resident_default_handler(gcpu, msr_id, access, msr_value) ||
   536              msr_trial_access(gcpu, msr_id, access, msr_value);
   537      }
   538      else {
   539  #ifdef JLMDEBUG1
   540          bprint("Case 2: non-null msr_handler for msr %x\n", msr_id);
   541  #endif
   542                  // VMM_LOG(mask_uvmm, level_trace,"%s: msr_handler is NOT NULL.\n", __FUNCTION__);
   543          instruction_was_executed =
   544              msr_handler(gcpu, msr_id, msr_value, msr_descriptor->msr_context);
   545  #ifdef JLMDEBUG1
   546          bprint("Done with handling msr %x\n", msr_id);
   547  #endif
   548      }
   549      if (TRUE == instruction_was_executed) {
   550          gcpu_skip_guest_instruction(gcpu);
   551      }
   552  #ifdef JLMDEBUG1
   553      bprint("Instruction was executed: %d\n", instruction_was_executed);
   554  #endif
   555      return instruction_was_executed;
   556  }
   557  
   558  
   559  // Try to execute real MSR read/write
   560  // If exception was generated, inject it into guest
   561  // ARGUMENTS: GUEST_CPU_HANDLE    gcpu
   562  //          : MSR_ID              msr_id
   563  //          : RW_ACCESS           access
    564  // RETURNS  : TRUE if instruction was executed, FALSE otherwise (fault occurred)
   565  BOOLEAN msr_trial_access( GUEST_CPU_HANDLE gcpu, MSR_ID  msr_id,
   566                  RW_ACCESS access, UINT64 *msr_value)
   567  {
   568      BOOLEAN     msr_implemented;
   569      VECTOR_ID   fault_vector= 0;   // just to shut up the warning
   570      UINT32      error_code  = 0;   // just to shut up the warning
   571      VMCS_OBJECT *vmcs       = gcpu_get_vmcs(gcpu);
   572  
   573  #ifdef JLMDEBUG1
   574      bprint("msr_trial_access\n");
   575  #endif
   576      switch (access) {
   577        case READ_ACCESS:
   578          msr_implemented = hw_rdmsr_safe(msr_id, msr_value, &fault_vector, &error_code);
   579          break;
   580        case WRITE_ACCESS:
   581          msr_implemented = hw_wrmsr_safe(msr_id, *msr_value, &fault_vector, &error_code);
   582          break;
   583        default:
   584          VMM_ASSERT(0);  // should not be here
   585          return FALSE;
   586      }
   587  
   588      if (FALSE == msr_implemented) {
   589          // inject GP into guest
   590          VMENTER_EVENT  exception;
   591          UINT16 inst_length = (UINT16) vmcs_read(vmcs, VMCS_EXIT_INFO_INSTRUCTION_LENGTH);
   592  
   593          vmm_memset(&exception, 0, sizeof(exception));
   594          exception.interrupt_info.Bits.Valid = 1;
   595          exception.interrupt_info.Bits.Vector= fault_vector;
   596          exception.interrupt_info.Bits.InterruptType= VmEnterInterruptTypeHardwareException;
   597          exception.interrupt_info.Bits.DeliverCode = 1;
   598          exception.instruction_length = inst_length;
   599          exception.error_code = (ADDRESS) error_code;
   600          gcpu_inject_event(gcpu, &exception);
   601      }
   602      return msr_implemented;
   603  }
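// Rationale: RDMSR/WRMSR of an unimplemented MSR raises #GP(0) on bare metal,
// so the hw_rdmsr_safe()/hw_wrmsr_safe() wrappers execute the access in the
// host, catch any resulting fault, and hand back the vector and error code so
// the fault can be re-injected into the guest with native semantics.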
   604  
   605  
   606  BOOLEAN msr_vmcs_resident_default_handler( GUEST_CPU_HANDLE gcpu, MSR_ID msr_id,
   607              RW_ACCESS access, UINT64 *msr_value)
   608  {
   609      VMCS_OBJECT *vmcs = gcpu_get_vmcs(gcpu);
   610      VMCS_FIELD  vmcs_field_id = VMCS_FIELD_COUNT;    // invalid
   611      BOOLEAN found = FALSE;
   612      unsigned int i;
   613  
   614      // check if it is MSR which resides in Guest part of VMCS
   615      for (i = 0; i < NELEMENTS(vmcs_resident_guest_msrs); ++i) {
   616          if (vmcs_resident_guest_msrs[i].msr_id == msr_id) {
   617              VM_ENTRY_CONTROLS   vmenter_controls;
   618  
   619              if (IA32_MSR_DEBUGCTL == msr_id) {
   620                  vmenter_controls.Uint32 = (UINT32)vmcs_read(vmcs, VMCS_ENTER_CONTROL_VECTOR);
   621                  if (vmenter_controls.Bits.LoadDebugControls) {
   622                      found = TRUE;
   623                  }
   624              }
   625              else if (IA32_MSR_PERF_GLOBAL_CTRL == msr_id) {
   626                  vmenter_controls.Uint32 = (UINT32)vmcs_read(vmcs, VMCS_ENTER_CONTROL_VECTOR);
   627                  if (vmenter_controls.Bits.Load_IA32_PERF_GLOBAL_CTRL &&
   628                      vmcs_field_is_supported(vmcs_resident_guest_msrs[i].vmcs_field_id)) {
   629                      found = TRUE;
   630                  }
   631              }
   632              else {
   633                  found = TRUE;
   634              }
   635              break;
   636          }
   637      }
   638  
   639      if (found) {
   640          vmcs_field_id = vmcs_resident_guest_msrs[i].vmcs_field_id;
   641          switch (access) {
   642          case READ_ACCESS:
   643              *msr_value = vmcs_read(vmcs, vmcs_field_id);
   644              break;
   645          case WRITE_ACCESS:
   646              vmcs_write(vmcs, vmcs_field_id, *msr_value);
   647              break;
   648          default:
   649              VMM_DEADLOOP();  // should not be here
   650              break;
   651          }
   652      }
   653      return found;
   654  }
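// The table at the top of this file marks these MSRs as VMCS-resident:
// SYSENTER_CS/ESP/EIP, FS/GS base, DEBUGCTL and PERF_GLOBAL_CTRL live in the
// VMCS guest-state area, so guest accesses are served from VMCS fields rather
// than hardware MSRs. DEBUGCTL and PERF_GLOBAL_CTRL qualify only when the
// corresponding VM-entry load controls are in effect, as checked above.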
   655  
   656  
    657  // Inject a General Protection fault event into the GCPU
   658  // Used for both read and write accesses
   659  // ARGUMENTS: GUEST_CPU_HANDLE  gcpu
   660  //          : MSR_ID            msr_id - not used
   661  //          : UINT64           *value - not used
   662  // RETURNS  : FALSE, which means that instruction caused GP fault.
   663  #pragma warning( push )
    664  #pragma warning (disable : 4100)  // Suppress warnings about unreferenced formal parameter
   665  
   666  BOOLEAN msr_unsupported_access_handler( GUEST_CPU_HANDLE gcpu, MSR_ID msr_id UNUSED,
   667                      UINT64 *value  UNUSED, void  *context UNUSED)
   668  {
   669      REPORT_MSR_WRITE_ACCESS_DATA msr_write_access_data;
   670  
   671  #ifdef JLMDEBUG
   672      bprint("msr_unsupported_access_handler\n");
   673  #endif
   674      msr_write_access_data.msr_id = msr_id;
   675      // Using write access method for both read/write access here
   676      if (!report_uvmm_event(UVMM_EVENT_MSR_WRITE_ACCESS, 
   677                             (VMM_IDENTIFICATION_DATA)gcpu, 
   678                             (const GUEST_VCPU*)guest_vcpu(gcpu), 
   679                             &msr_write_access_data))
   680          return FALSE;
   681  
   682      // inject GP Fault into guest
   683      gcpu_inject_gp0(gcpu);
   684      return FALSE;
   685  }
   686  #pragma warning( pop )
   687  
   688  
   689  // Handle guest access to EFER. Update guest visible value.
   690  // GUEST_CPU_HANDLE  gcpu
   691  // MSR_ID            msr_id
   692  // UINT64           *msr_value
   693  // RETURNS  : TRUE, which means that instruction was executed.
   694  #pragma warning( push )
    695  #pragma warning (disable : 4100)  // Suppress warnings about unreferenced formal parameter
   696  static BOOLEAN msr_efer_update_is_gpf0(GUEST_CPU_HANDLE gcpu, UINT64 new_value) {
   697      IA32_EFER_S efer;
   698      efer.Uint64 = new_value;
   699  
   700  #ifdef JLMDEBUG1
   701      bprint("msr_efer_update_is_gpf0\n");
   702  #endif
   703      if (efer.Bits.LME) {
   704          EM64T_CR4 cr4;
   705          cr4.Uint64 = gcpu_get_guest_visible_control_reg_layered(gcpu, IA32_CTRL_CR4, VMCS_MERGED);
   706  
   707          if (!cr4.Bits.PAE) {
   708              return TRUE;
   709          }
   710      }
   711      return FALSE;
   712  }
   713  
   714  BOOLEAN msr_efer_write_handler( GUEST_CPU_HANDLE gcpu,
   715              MSR_ID msr_id, UINT64 *msr_value, void *context UNUSED)
   716  {
   717      EVENT_GCPU_GUEST_MSR_WRITE_DATA data;
   718      RAISE_EVENT_RETVAL event_retval;
   719      REPORT_MSR_WRITE_ACCESS_DATA msr_write_access_data;
   720  
   721  #ifdef JLMDEBUG
   722      bprint("msr_efer_write_handler, msr: %08x\n", msr_id);
   723  #endif
   724      VMM_ASSERT(IA32_MSR_EFER == msr_id);
   725      msr_write_access_data.msr_id = msr_id;
   726      if (!report_uvmm_event(UVMM_EVENT_MSR_WRITE_ACCESS, 
   727              (VMM_IDENTIFICATION_DATA)gcpu, (const GUEST_VCPU*)guest_vcpu(gcpu), 
   728              &msr_write_access_data))
   729          return FALSE;
   730  
   731     if (msr_efer_update_is_gpf0(gcpu, *msr_value)) {
   732          VMM_LOG(mask_uvmm, level_trace,
   733                  "%s: EFER update should have caused GPF0 in native mode\n", __FUNCTION__);
   734          VMM_LOG(mask_uvmm, level_trace,
   735                  "%s: Changing vmexit to GP is not implemented yet\n", __FUNCTION__);
   736          VMM_DEADLOOP();
   737      }
   738  
   739      gcpu_set_msr_reg(gcpu, IA32_VMM_MSR_EFER, *msr_value);
   740      vmm_memset(&data, 0, sizeof(data));
   741      data.new_guest_visible_value = *msr_value;
   742      data.msr_index = msr_id;
   743      event_retval = event_raise( EVENT_GCPU_AFTER_EFER_MSR_WRITE, gcpu, &data );
   744      VMM_ASSERT(event_retval != EVENT_NOT_HANDLED);
   745      return TRUE;
   746  }
   747  
   748  BOOLEAN msr_efer_read_handler( GUEST_CPU_HANDLE gcpu,
   749      MSR_ID msr_id UNUSED, UINT64 *msr_value, void  *context UNUSED)
   750  {
   751  #ifdef JLMDEBUG
   752      bprint("msr_efer_read_handler, msr: %08x\n", msr_id);
   753  #endif
   754  #ifdef USE_MTF_FOR_CR_MSR_AS_WELL
   755      //if( is_unrestricted_guest_supported() )
   756      {
   757          report_uvmm_event(UVMM_EVENT_MSR_READ_ACCESS, (VMM_IDENTIFICATION_DATA)gcpu, (const GUEST_VCPU*)guest_vcpu(gcpu), NULL);
   758          return FALSE;
   759      }
   760  #else
   761      *msr_value = gcpu_get_msr_reg(gcpu, IA32_VMM_MSR_EFER);
   762      return TRUE;
   763  #endif
   764  }
   765  
   766  #if 0 // unused
   767  BOOLEAN msr_pat_write_handler( GUEST_CPU_HANDLE gcpu, MSR_ID msr_id,
   768                  UINT64 *msr_value, void *context UNUSED)
   769  {
   770      REPORT_MSR_WRITE_ACCESS_DATA msr_write_access_data;
   771  
   772  #ifdef JLMDEBUG
   773      bprint("msr_pat_write_handler\n");
   774  #endif
   775      VMM_ASSERT(IA32_MSR_PAT== msr_id);
   776      msr_write_access_data.msr_id = msr_id;
   777      if (!report_uvmm_event(UVMM_EVENT_MSR_WRITE_ACCESS, 
   778                  (VMM_IDENTIFICATION_DATA)gcpu, (const GUEST_VCPU*)guest_vcpu(gcpu), 
   779                  &msr_write_access_data))
   780          return FALSE;
   781      gcpu_set_msr_reg(gcpu, IA32_VMM_MSR_PAT, *msr_value);
   782      return TRUE;
   783  }
   784  
   785  BOOLEAN msr_pat_read_handler( GUEST_CPU_HANDLE gcpu, MSR_ID  msr_id,
   786              UINT64  *msr_value, void *context UNUSED)
   787  {
   788  #ifdef JLMDEBUG
   789      bprint("msr_pat_read_handler\n");
   790  #endif
   791      VMM_ASSERT(IA32_MSR_PAT== msr_id);
   792      *msr_value = gcpu_get_msr_reg(gcpu, IA32_VMM_MSR_PAT);
   793      return TRUE;
   794  }
   795  #endif
   796  
   797  static BOOLEAN msr_mtrr_write_handler( GUEST_CPU_HANDLE  gcpu, MSR_ID  msr_id,
   798                  UINT64 *msr_value, void *context UNUSED)
   799  {
   800      EVENT_GCPU_GUEST_MSR_WRITE_DATA data;
   801      RAISE_EVENT_RETVAL event_retval;
   802      REPORT_MSR_WRITE_ACCESS_DATA msr_write_access_data;
   803  
   804  #ifdef JLMDEBUG1
   805      bprint("It's in msr_mtrr_write_handler\n");
   806  #endif
    807      VMM_ASSERT(msr_id != IA32_MTRRCAP_ADDR); // IA32_MTRRCAP is a read-only MTRR
   808      msr_write_access_data.msr_id = msr_id;
   809      if (!report_uvmm_event(UVMM_EVENT_MSR_WRITE_ACCESS, 
   810              (VMM_IDENTIFICATION_DATA)gcpu, (const GUEST_VCPU*)guest_vcpu(gcpu), 
   811              &msr_write_access_data))
   812          return FALSE;
   813  
   814      hw_write_msr(msr_id, *msr_value);
   815      mtrrs_abstraction_track_mtrr_update(msr_id, *msr_value);
   816      vmm_memset(&data, 0, sizeof(data));
   817      data.new_guest_visible_value = *msr_value;
   818      data.msr_index = msr_id;
   819      event_retval = event_raise( EVENT_GCPU_AFTER_MTRR_MSR_WRITE, gcpu, &data );
   820      VMM_ASSERT(event_retval != EVENT_NOT_HANDLED);
   821      return TRUE;
   822  }
   823  
   824  
    825  // Track Guest writes to Local APIC Base Register
   826  // GUEST_CPU_HANDLE  gcpu
   827  //          : MSR_ID            msr_id
   828  //          : UINT64           *msr_value
   829  // RETURNS  : TRUE, which means that instruction was executed.
   830  BOOLEAN msr_lapic_base_write_handler( GUEST_CPU_HANDLE gcpu, MSR_ID msr_id,
   831                  UINT64 *msr_value, void *context UNUSED)
   832  {
   833      REPORT_MSR_WRITE_ACCESS_DATA msr_write_access_data;
   834  
   835  #ifdef JLMDEBUG
   836      bprint("msr_lapic_base_write_handler %x\n", msr_id);
   837  #endif
   838      VMM_ASSERT(IA32_MSR_APIC_BASE == msr_id);
   839      msr_write_access_data.msr_id = msr_id;
   840      if (!report_uvmm_event(UVMM_EVENT_MSR_WRITE_ACCESS, 
   841              (VMM_IDENTIFICATION_DATA)gcpu, (const GUEST_VCPU*)guest_vcpu(gcpu), 
   842              &msr_write_access_data))
   843          return FALSE;
   844      if( !validate_APIC_BASE_change(*msr_value)) {
   845          gcpu_inject_gp0(gcpu);
   846          return FALSE;
   847      }
   848      hw_write_msr(IA32_MSR_APIC_BASE, *msr_value);
   849      local_apic_setup_changed();
   850      return TRUE;
   851  }
   852  
   853  
    854  // Handles MSR reads of the FEATURE_CONTROL MSR (0x3A).
    855  // Injects GP(0) into the guest; the MSR is presented as unavailable.
   856  // ARGUMENTS: GUEST_CPU_HANDLE  gcpu
   857  //          : MSR_ID            msr_id
   858  //          : UINT64           *msr_value
   859  // RETURNS  : TRUE, which means that instruction was executed.
   860  BOOLEAN msr_feature_control_read_handler( GUEST_CPU_HANDLE gcpu,
   861          MSR_ID  msr_id, UINT64  *msr_value UNUSED, void   *context UNUSED)
   862  {
   863  #ifdef JLMDEBUG1
   864      bprint("msr_feature_control_read_handler\n");
   865  #endif
   866      VMM_ASSERT(IA32_MSR_FEATURE_CONTROL == msr_id);
   867      // IA32 spec V2, 5.3,  GETSEC[SENTER]
   868      // IA32_FEATURE_CONTROL is only available on SMX or VMX enabled processors
    869  // otherwise, it is treated as reserved.
   870      VMM_LOG(mask_uvmm, level_trace,
   871              "%s: IA32_FEATURE_CONTROL is only available on SMX or VMX enabled processors.\n", 
   872              __FUNCTION__);
   873      gcpu_inject_gp0(gcpu);
   874      return TRUE;
   875  }
   876  
   877  
   878  // Handles writes to FEATURE_CONTROL MSR (0x3A). 
   879  // Induces GP(0) exception.
   880  // ARGUMENTS: GUEST_CPU_HANDLE  gcpu
   881  //          : MSR_ID            msr_id
   882  //          : UINT64           *msr_value
   883  // RETURNS  : TRUE, which means that instruction was executed.
   884  BOOLEAN msr_feature_control_write_handler( GUEST_CPU_HANDLE gcpu, MSR_ID msr_id,
   885          UINT64 *msr_value UNUSED, void *context UNUSED)
   886  {
   887  #ifdef JLMDEBUG1
   888      bprint("msr_feature_control_write_handler\n");
   889  #endif
   890      VMM_ASSERT(IA32_MSR_FEATURE_CONTROL == msr_id);
   891      // IA32 spec V2, 5.3,  GETSEC[SENTER]
   892      // IA32_FEATURE_CONTROL is only available on SMX or VMX enabled processors
    893  // otherwise, it is treated as reserved.
   894      VMM_LOG(mask_uvmm, level_trace,"%s: IA32_FEATURE_CONTROL is only available on SMX or VMX enabled processors.\n", __FUNCTION__);
   895      gcpu_inject_gp0(gcpu);
   896      return TRUE;
   897  }
   898  
   899  
   900  // Handles writes to MISC_ENABLE MSR (0x1A0).
   901  // Blocks writes to bits that can impact TMSL behavior
   902  // ARGUMENTS: GUEST_CPU_HANDLE  gcpu
   903  //          : MSR_ID            msr_id
   904  //          : UINT64           *msr_value
   905  // RETURNS  : TRUE, which means that instruction was executed.
   906  BOOLEAN msr_misc_enable_write_handler( GUEST_CPU_HANDLE gcpu, MSR_ID msr_id,
   907          UINT64 *msr_value, void *context UNUSED)
   908  {
   909      REPORT_MSR_WRITE_ACCESS_DATA msr_write_access_data;
   910      VMM_ASSERT(IA32_MSR_MISC_ENABLE == msr_id);
   911  
   912  #ifdef JLMDEBUG1
   913      bprint("msr_misc_enable_write_handler\n");
   914  #endif
   915      msr_write_access_data.msr_id = msr_id;
   916      if (!report_uvmm_event(UVMM_EVENT_MSR_WRITE_ACCESS, 
   917              (VMM_IDENTIFICATION_DATA)gcpu, (const GUEST_VCPU*)guest_vcpu(gcpu), 
   918              &msr_write_access_data))
   919          return FALSE;
   920      BIT_CLR64(*msr_value, 22);   // Limit CPUID MAXVAL
   921      hw_write_msr(IA32_MSR_MISC_ENABLE, *msr_value);
   922      return TRUE;
   923  }
   924  
   925  #pragma warning( pop )
   926  
   927  
   928  // Install VMEXIT handler which prevents access to MSR from the guest
   929  // ARGUMENTS: GUEST_HANDLE    guest
   930  //          : MSR_ID      msr_id
   931  // RETURNS  : VMM_OK if succeeded
   932  VMM_STATUS msr_guest_access_inhibit( GUEST_HANDLE guest, MSR_ID msr_id)
   933  {
   934      return msr_vmexit_handler_register( guest, msr_id, 
   935                 msr_unsupported_access_handler, READ_WRITE_ACCESS, NULL);
   936  }
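// Usage note: msr_vmexit_guest_setup() above applies this to every MSR in the
// IA32_MSR_VMX_FIRST..IA32_MSR_VMX_LAST range, so guest reads and writes of
// the VMX capability MSRs are reported and answered with GP(0) by
// msr_unsupported_access_handler().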
   937  
   938  #pragma warning( push )
    939  #pragma warning (disable : 4100)  // Suppress warnings about unreferenced formal parameter
   940  VMEXIT_HANDLING_STATUS  msr_failed_vmenter_loading_handler(GUEST_CPU_HANDLE gcpu USED_IN_DEBUG_ONLY) {
   941  #ifndef DEBUG
   942          EM64T_RFLAGS rflags;
   943          IA32_VMX_VMCS_GUEST_INTERRUPTIBILITY    interruptibility;
   944  #endif
   945      VMM_LOG(mask_uvmm, level_trace,"%s: VMENTER failed\n", __FUNCTION__);
   946  #ifdef DEBUG
   947      {
   948      VMCS_OBJECT* vmcs = vmcs_hierarchy_get_vmcs(gcpu_get_vmcs_hierarchy(gcpu), VMCS_MERGED);
   949      vmcs_print_vmenter_msr_load_list(vmcs);
   950      }
   951      VMM_DEADLOOP();
   952  #else
   953      vmm_deadloop_internal(VMEXIT_MSR_C, __LINE__, gcpu);
   954  
   955      // clear interrupt flag
   956      rflags.Uint64 = gcpu_get_gp_reg(gcpu, IA32_REG_RFLAGS);
   957      rflags.Bits.IFL = 0;
   958      gcpu_set_gp_reg(gcpu, IA32_REG_RFLAGS, rflags.Uint64);
   959  
   960      interruptibility.Uint32 = gcpu_get_interruptibility_state(gcpu);
   961      interruptibility.Bits.BlockNextInstruction = 0;
   962      gcpu_set_interruptibility_state(gcpu, interruptibility.Uint32);
   963      gcpu_inject_gp0(gcpu);
   964      gcpu_resume(gcpu);
   965  #endif
   966      return VMEXIT_NOT_HANDLED;
   967  }
   968  #pragma warning( pop )
   969  
   970  BOOLEAN vmexit_register_unregister_for_efer( GUEST_HANDLE guest, MSR_ID msr_id,
   971              RW_ACCESS access, BOOLEAN  reg_dereg)
   972  {
   973  #ifdef JLMDEBUG1
   974      bprint("vmexit_register_unregister_for_efer\n");
   975  #endif
   976      if( !is_unrestricted_guest_supported() )
   977          return FALSE;
   978      if ( (msr_id == IA32_MSR_EFER) && reg_dereg ) {
   979          if ( access == WRITE_ACCESS ) {
   980              msr_vmexit_handler_register( guest, IA32_MSR_EFER,
   981                                  msr_efer_write_handler, WRITE_ACCESS, NULL);
    982              return TRUE;
   983          }
   984          else {
   985              msr_vmexit_handler_register( guest, IA32_MSR_EFER,
   986                                  msr_efer_read_handler, READ_ACCESS, NULL);
   987              return TRUE;
   988          }
   989      }
   990      if ( (msr_id == IA32_MSR_EFER) && !reg_dereg ) {
   991          msr_vmexit_handler_unregister( guest, msr_id, access);
   992          return TRUE;
   993      }
   994      return FALSE;
   995  }