github.com/jlmucb/cloudproxy@v0.0.0-20170830161738-b5aa0b619bc4/cpvmm/vmm/vmexit/vmexit_cr_access.c

/*
 * Copyright (c) 2013 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *     http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "vmm_defs.h"
#include "guest_cpu.h"
#include "vmcs_api.h"
#include "vmm_dbg.h"
#include "em64t_defs.h"
#include "guest_cpu_vmenter_event.h"
#include "policy_manager.h"
#include "vmm_events_data.h"
#include "vmcs_hierarchy.h"
#include "page_walker.h"
#include "ept.h"
#include "unrestricted_guest.h"
#include "vmm_callback.h"
#include "file_codes.h"
#define VMM_DEADLOOP()          VMM_DEADLOOP_LOG(VMEXIT_CR_ACCESS_C)
#define VMM_ASSERT(__condition) VMM_ASSERT_LOG(VMEXIT_CR_ACCESS_C, __condition)
#ifdef JLMDEBUG
#include "jlmdebug.h"
#endif

#define CR0_TASK_SWITCH     8   // mask for CR0.TS (bit 3), the bit cleared by CLTS
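
// Write the guest-visible value of a control register. CR0 and CR4 are
// additionally shadowed at VMCS level 0, so for them the value is applied
// both at VMCS_LEVEL_0 and in the merged VMCS.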
#define GCPU_SET_GUEST_VISIBLE_CONTROL_TO_L0_M(__gcpu, __reg_id, __value) {     \
    if (IA32_CTRL_CR0 == (__reg_id) || IA32_CTRL_CR4 == (__reg_id))             \
        gcpu_set_guest_visible_control_reg_layered(__gcpu, __reg_id, __value, VMCS_LEVEL_0);\
    gcpu_set_guest_visible_control_reg_layered(__gcpu, __reg_id, __value, VMCS_MERGED);\
}

extern BOOLEAN is_cr4_osxsave_supported(void);
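// "After guest CR write" event raised for each control register;
// EVENTS_COUNT marks registers for which no write event is defined.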
static UVMM_EVENT lkup_write_event[IA32_CTRL_COUNT] = {
    EVENT_GCPU_AFTER_GUEST_CR0_WRITE,   // IA32_CTRL_CR0
    EVENTS_COUNT,                       // IA32_CTRL_CR2
    EVENT_GCPU_AFTER_GUEST_CR3_WRITE,   // IA32_CTRL_CR3
    EVENT_GCPU_AFTER_GUEST_CR4_WRITE,   // IA32_CTRL_CR4
    EVENTS_COUNT,                       // IA32_CTRL_CR8
};

#define IA32_REG_COUNT 0x10

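// Map the general-purpose-register field of a MOV-CR exit qualification
// (0 = RAX ... 15 = R15) onto the VMM's register identifiers.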
static VMM_IA32_GP_REGISTERS lkup_operand[IA32_REG_COUNT] = {
    IA32_REG_RAX,
    IA32_REG_RCX,
    IA32_REG_RDX,
    IA32_REG_RBX,
    IA32_REG_RSP,
    IA32_REG_RBP,
    IA32_REG_RSI,
    IA32_REG_RDI,
    IA32_REG_R8,
    IA32_REG_R9,
    IA32_REG_R10,
    IA32_REG_R11,
    IA32_REG_R12,
    IA32_REG_R13,
    IA32_REG_R14,
    IA32_REG_R15
};

#define IA32_CR_COUNT   0x9

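// Map the control-register number field of an exit qualification onto the
// VMM's CR identifiers; slots that cannot trigger a CR-access exit are
// marked UNSUPPORTED_CR.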
static VMM_IA32_CONTROL_REGISTERS lkup_cr[IA32_CR_COUNT] = {
    IA32_CTRL_CR0,
    UNSUPPORTED_CR,
    UNSUPPORTED_CR,
    IA32_CTRL_CR3,
    IA32_CTRL_CR4,
    UNSUPPORTED_CR,
    UNSUPPORTED_CR,
    UNSUPPORTED_CR,
    IA32_CTRL_CR8
};

#define CPUID_SMEP_SUPPORTED_BIT 0x7    // CPUID.(EAX=07H,ECX=0):EBX bit 7
#define CPUID_M_RAX_7 0x7               // CPUID leaf 7

/* Check whether SMEP is supported on this processor.
 * Returns 0 if SMEP is not supported,
 *         1 if SMEP is supported.
 */
BOOLEAN is_cr4_smep_supported(void)
{
    CPUID_PARAMS cpuid_params;

    /* Invoke CPUID with RAX = 7, sub-leaf RCX = 0 */
    cpuid_params.m_rax = CPUID_M_RAX_7;
    cpuid_params.m_rcx = 0;
    hw_cpuid(&cpuid_params);
    /* SMEP support is reported in EBX bit 7 */
    return (BOOLEAN) BIT_GET64(cpuid_params.m_rbx, CPUID_SMEP_SUPPORTED_BIT);
}

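// Check whether the guest-visible CR0/CR3/CR4/EFER state that has just been
// staged would raise #GP(0) on real hardware: illegal CR0 combinations,
// reserved or unsupported CR4 bits, PCIDE constraints, and (in 32-bit PAE
// mode) an invalid PDPT.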
static BOOLEAN vmexit_cr_access_is_gpf0(GUEST_CPU_HANDLE gcpu) {
    EM64T_CR0 cr0;
    UINT64    cr3;
    EM64T_CR4 cr4;
    IA32_EFER_S efer;

    VMM_ASSERT(gcpu != NULL);
    cr0.Uint64 = gcpu_get_guest_visible_control_reg(gcpu, IA32_CTRL_CR0);
    if ((cr0.Bits.PG && (!cr0.Bits.PE)) || (cr0.Bits.NW && (!cr0.Bits.CD))) {
        return TRUE;
    }
    cr4.Uint64 = gcpu_get_guest_visible_control_reg(gcpu, IA32_CTRL_CR4);
    if (cr4.Bits.Reserved_0 || cr4.Bits.Reserved_1 ||
        cr4.Bits.Reserved_2 || cr4.Bits.Reserved_3 ||
        cr4.Bits.VMXE || cr4.Bits.SMXE) {
        return TRUE;
    }
    if (cr4.Bits.OSXSAVE && !is_cr4_osxsave_supported()) {
        return TRUE;
    }
    if (cr4.Bits.SMEP && !is_cr4_smep_supported()) {
        return TRUE;
    }
    if (cr4.Bits.FSGSBASE && !is_fsgsbase_supported()) {
        return TRUE;
    }
    efer.Uint64 = gcpu_get_msr_reg(gcpu, IA32_VMM_MSR_EFER);
    if (efer.Bits.LME && (!cr4.Bits.PAE)) {
        return TRUE;
    }
    // #GP conditions due to the PCIDE feature.
    if (cr4.Bits.PCIDE) {
        // #GP if PCID is not supported by the hardware.
        if (!is_pcid_supported()) {
            return TRUE;
        }
        // CR4.PCIDE can be set only in IA-32e mode (IA32_EFER.LMA = 1).
        if (!efer.Bits.LMA) {
            return TRUE;
        }
        cr3 = gcpu_get_guest_visible_control_reg(gcpu, IA32_CTRL_CR3);
        // Software can change CR4.PCIDE from 0 to 1 only if CR3[11:0] = 000H.
        if (cr3 & 0x0FFF) {
            return TRUE;
        }
        // MOV to CR0 causes #GP if it would clear CR0.PG while CR4.PCIDE = 1.
        if (!cr0.Bits.PG) {
            return TRUE;
        }
    }
    if (cr0.Bits.PG && cr4.Bits.PAE && (!efer.Bits.LME)) {
        UINT8 pdpt[PW_NUM_OF_PDPT_ENTRIES_IN_32_BIT_MODE * PW_SIZE_OF_PAE_ENTRY];

        gcpu_get_32_bit_pdpt(gcpu, pdpt);
        if (!pw_is_pdpt_in_32_bit_pae_mode_valid(gcpu, pdpt)) {
            return TRUE;
        }
    }
    return FALSE;
}

static BOOLEAN cr_guest_update(GUEST_CPU_HANDLE gcpu,
                VMM_IA32_CONTROL_REGISTERS reg_id,
                ADDRESS bits_to_update, IA32_VMX_EXIT_QUALIFICATION qualification);
static BOOLEAN cr_mov(GUEST_CPU_HANDLE gcpu,
        IA32_VMX_EXIT_QUALIFICATION qualification);

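// Raise the "after guest CR write" event for reg_id, if one is defined.
// Reports whether a handler was registered and whether it handled the event.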
RAISE_EVENT_RETVAL cr_raise_write_events( GUEST_CPU_HANDLE gcpu,
                            VMM_IA32_CONTROL_REGISTERS reg_id, ADDRESS new_value )
{
    EVENT_GCPU_GUEST_CR_WRITE_DATA event_data = {0};
    UVMM_EVENT event;
    RAISE_EVENT_RETVAL result = EVENT_NO_HANDLERS_REGISTERED;

    if (reg_id >= IA32_CTRL_COUNT)
        return result;
    event = lkup_write_event[reg_id];
    if (event != (UVMM_EVENT)EVENTS_COUNT) {
        event_data.new_guest_visible_value = new_value;
        if (TRUE == event_raise(event, gcpu, &event_data)) {
            result = EVENT_HANDLED;
        } else {
            result = EVENT_NOT_HANDLED;
        }
    }
    return result;
}

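// Update only bits_to_update of a control register, as needed for CLTS
// (clears CR0.TS) and LMSW (loads CR0[15:0]). Returns FALSE if the access
// was consumed by a registered callback or converted into an injected
// #GP(0), TRUE if the guest instruction should be skipped.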
BOOLEAN cr_guest_update(GUEST_CPU_HANDLE gcpu, VMM_IA32_CONTROL_REGISTERS reg_id,
                ADDRESS bits_to_update, IA32_VMX_EXIT_QUALIFICATION qualification)
{
    UINT64 guest_cr;
    UINT64 old_visible_reg_value;
    UINT64 visible_guest_cr;
    RAISE_EVENT_RETVAL cr_update_event;
    ADDRESS value;
    REPORT_CR_DR_LOAD_ACCESS_DATA cr_access_data;

#ifdef JLMDEBUG
    bprint("cr_guest_update %d\n", reg_id);
#endif
    // LMSW (access type 3) supplies the new CR0[15:0]; CLTS writes zero bits.
    if (qualification.CrAccess.AccessType == 3)
        value = qualification.CrAccess.LmswData;
    else
        value = 0;
    cr_access_data.qualification = qualification.Uint64;
    if (report_uvmm_event(UVMM_EVENT_CR_ACCESS, (VMM_IDENTIFICATION_DATA)gcpu,
                          (const GUEST_VCPU*)guest_vcpu(gcpu),
                          (void *)&cr_access_data)) {
        return FALSE;
    }
    old_visible_reg_value = gcpu_get_guest_visible_control_reg_layered(gcpu,
                                            reg_id, VMCS_MERGED);
    visible_guest_cr = old_visible_reg_value;
    BITMAP_ASSIGN64(visible_guest_cr, bits_to_update, value);

    // update guest visible CR-X
    GCPU_SET_GUEST_VISIBLE_CONTROL_TO_L0_M(gcpu, reg_id, visible_guest_cr);
    if (vmexit_cr_access_is_gpf0(gcpu)) {
        // roll back; the CR access vmexit is converted into a #GP(0) exception
        GCPU_SET_GUEST_VISIBLE_CONTROL_TO_L0_M(gcpu, reg_id, old_visible_reg_value);
        VMM_LOG(mask_anonymous, level_trace,"%s: CR* access caused GPF0\n",
                __FUNCTION__);
        VMM_DEBUG_CODE(VMM_DEADLOOP());
        gcpu_inject_gp0(gcpu);
        return FALSE;
    }
    // update guest CR-X
    guest_cr = gcpu_get_control_reg_layered(gcpu, reg_id, VMCS_MERGED);
    BITMAP_ASSIGN64(guest_cr, bits_to_update, value);
    gcpu_set_control_reg_layered(gcpu, reg_id, guest_cr, VMCS_MERGED);
    cr_update_event = cr_raise_write_events(gcpu, reg_id, visible_guest_cr);
    if (cr_update_event == EVENT_NOT_HANDLED) {
#ifdef JLMDEBUG
        bprint("cr_guest_update event not handled\n");
        LOOP_FOREVER
#endif
    }
    return TRUE;
}

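// Handle MOV to CR: install a complete new register value. Returns FALSE if
// the write was converted into an injected #GP(0), TRUE if the guest
// instruction should be skipped.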
BOOLEAN cr_guest_write( GUEST_CPU_HANDLE gcpu, VMM_IA32_CONTROL_REGISTERS reg_id,
                        ADDRESS value)
{
    RAISE_EVENT_RETVAL cr_update_event;
    UINT64 old_visible_reg_value;
    const VIRTUAL_CPU_ID* vcpu_id = NULL;
    EPT_GUEST_STATE *ept_guest = NULL;
    EPT_GUEST_CPU_STATE *ept_guest_cpu = NULL;

#ifdef JLMDEBUG1
    bprint("cr_guest_write %d\n", reg_id);
#endif
    old_visible_reg_value = gcpu_get_guest_visible_control_reg_layered(gcpu,
                                        reg_id, VMCS_MERGED);
    GCPU_SET_GUEST_VISIBLE_CONTROL_TO_L0_M(gcpu, reg_id, value);
    if (vmexit_cr_access_is_gpf0(gcpu)) {
        // roll back; the CR access vmexit is converted into a #GP(0) exception
        GCPU_SET_GUEST_VISIBLE_CONTROL_TO_L0_M(gcpu, reg_id, old_visible_reg_value);
        VMM_LOG(mask_anonymous, level_trace,"%s: CR* access caused GPF0\n",
                __FUNCTION__);
        VMM_DEBUG_CODE(VMM_DEADLOOP());
        gcpu_inject_gp0(gcpu);
        return FALSE;
    }
    if (is_unrestricted_guest_supported()) {
        // keep the per-gcpu EPT view of CR0/CR4 in sync
        vcpu_id = guest_vcpu(gcpu);
        VMM_ASSERT(vcpu_id);
        ept_guest = ept_find_guest_state(vcpu_id->guest_id);
        VMM_ASSERT(ept_guest);
        ept_guest_cpu = ept_guest->gcpu_state[vcpu_id->guest_cpu_id];
        ept_guest_cpu->cr0 = gcpu_get_control_reg_layered(gcpu, IA32_CTRL_CR0,
                                            VMCS_MERGED);
        ept_guest_cpu->cr4 = gcpu_get_control_reg_layered(gcpu, IA32_CTRL_CR4,
                                            VMCS_MERGED);
    }
    gcpu_set_control_reg_layered(gcpu, reg_id, value, VMCS_MERGED);
    cr_update_event = cr_raise_write_events(gcpu, reg_id, value);
    if (cr_update_event == EVENT_NOT_HANDLED) {
#ifdef JLMDEBUG
        bprint("event not handled\n");
        LOOP_FOREVER
#endif
    }
    if ((reg_id == IA32_CTRL_CR4) && is_cr4_osxsave_supported()) {
        EM64T_CR4 cr4_mask;

        // propagate the guest's CR4.OSXSAVE setting into the host CR4 image
        cr4_mask.Uint64 = 0;
        cr4_mask.Bits.OSXSAVE = 1;
        vmcs_write(gcpu_get_vmcs(gcpu), VMCS_HOST_CR4,
                   (vmcs_read(gcpu_get_vmcs(gcpu), VMCS_HOST_CR4)
                    & ~cr4_mask.Uint64) | (value & cr4_mask.Uint64));
    }
    return TRUE;
}

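// Decode a MOV to/from CR exit: look up the control register and the
// general-purpose operand in the exit qualification and perform the move.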
BOOLEAN cr_mov( GUEST_CPU_HANDLE gcpu, IA32_VMX_EXIT_QUALIFICATION qualification)
{
    VMM_IA32_CONTROL_REGISTERS  cr_id;
    VMM_IA32_GP_REGISTERS operand;
    ADDRESS cr_value;
    BOOLEAN status = TRUE;
    REPORT_CR_DR_LOAD_ACCESS_DATA cr_access_data;

#ifdef JLMDEBUG1
    bprint("cr_mov\n");
#endif
    cr_access_data.qualification = qualification.Uint64;
    if (report_uvmm_event(UVMM_EVENT_CR_ACCESS, (VMM_IDENTIFICATION_DATA)gcpu,
                    (const GUEST_VCPU*)guest_vcpu(gcpu), (void *)&cr_access_data)) {
        return FALSE;
    }
    VMM_ASSERT(qualification.CrAccess.Number < NELEMENTS(lkup_cr));
    cr_id = lkup_cr[qualification.CrAccess.Number];
    VMM_ASSERT(UNSUPPORTED_CR != cr_id);
    VMM_ASSERT(qualification.CrAccess.MoveGpr < NELEMENTS(lkup_operand));
    operand = lkup_operand[qualification.CrAccess.MoveGpr];

    switch (qualification.CrAccess.AccessType) {
      case 0: // move to CR
        cr_value = gcpu_get_gp_reg(gcpu, operand);
        status = cr_guest_write(gcpu, cr_id, cr_value);
        break;
      case 1: // move from CR
        cr_value = gcpu_get_guest_visible_control_reg(gcpu, cr_id);
        gcpu_set_gp_reg(gcpu, operand, cr_value);
        break;
      default:
        VMM_DEADLOOP();
        break;
    }
    return status;
}

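// Top-level handler for CR-access VM exits. Dispatches on the access type in
// the exit qualification (0/1 = MOV to/from CR, 2 = CLTS, 3 = LMSW) and, on
// success, advances the guest instruction pointer.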
VMEXIT_HANDLING_STATUS vmexit_cr_access(GUEST_CPU_HANDLE gcpu)
{
    VMCS_OBJECT*                vmcs = gcpu_get_vmcs(gcpu);
    IA32_VMX_EXIT_QUALIFICATION qualification;
    BOOLEAN                     status = TRUE;

    qualification.Uint64 = vmcs_read(vmcs, VMCS_EXIT_INFO_QUALIFICATION);
    switch (qualification.CrAccess.AccessType) {
      case 0: // move to CR
      case 1: // move from CR
        status = cr_mov(gcpu, qualification);
        break;
      case 2: // CLTS affects only CR0.TS
        VMM_ASSERT(0 == qualification.CrAccess.Number);
        status = cr_guest_update(gcpu, IA32_CTRL_CR0, CR0_TASK_SWITCH, qualification);
        break;
      case 3: // LMSW loads CR0[15:0]
        VMM_ASSERT(0 == qualification.CrAccess.Number);
        status = cr_guest_update(gcpu, IA32_CTRL_CR0, 0xFFFF, qualification);
        break;
    }
    if (TRUE == status) {
        gcpu_skip_guest_instruction(gcpu);
    }
    return VMEXIT_HANDLED;
}

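// Extract the target control register from a raw CR-access exit
// qualification; returns UNSUPPORTED_CR if the CR number is out of range.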
VMM_IA32_CONTROL_REGISTERS vmexit_cr_access_get_cr_from_qualification(
                                        UINT64 qualification) {
    IA32_VMX_EXIT_QUALIFICATION qualification_tmp;

    qualification_tmp.Uint64 = qualification;
    if (qualification_tmp.CrAccess.Number >= IA32_CR_COUNT)
        return UNSUPPORTED_CR;
    return lkup_cr[qualification_tmp.CrAccess.Number];
}

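// Extract the general-purpose operand register from a raw MOV-CR exit
// qualification.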
VMM_IA32_GP_REGISTERS vmexit_cr_access_get_operand_from_qualification(UINT64 qualification) {
    IA32_VMX_EXIT_QUALIFICATION qualification_tmp;

    qualification_tmp.Uint64 = qualification;
    VMM_ASSERT(qualification_tmp.CrAccess.MoveGpr < IA32_REG_COUNT);
    return lkup_operand[qualification_tmp.CrAccess.MoveGpr];
}