github.com/jlmucb/cloudproxy@v0.0.0-20170830161738-b5aa0b619bc4/cpvmm/vmm/guest/guest_cpu/guest_cpu_control.c

     1  /*
     2   * Copyright (c) 2013 Intel Corporation
     3   *
     4   * Licensed under the Apache License, Version 2.0 (the "License");
     5   * you may not use this file except in compliance with the License.
     6   * You may obtain a copy of the License at
     7   *     http://www.apache.org/licenses/LICENSE-2.0
     8   * Unless required by applicable law or agreed to in writing, software
     9   * distributed under the License is distributed on an "AS IS" BASIS,
    10   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    11   * See the License for the specific language governing permissions and
    12   * limitations under the License.
    13   */
    14  
    15  #include "guest_cpu_internal.h"
    16  #include "vmcs_api.h"
    17  #include "vmcs_init.h"
    18  #include "heap.h"
    19  #include "vmx_vmcs.h"
    20  #include "hw_utils.h"
    21  #include "vmexit_msr.h"
    22  #include "vmexit_io.h"
    23  #include "vmcall.h"
    24  #include "vmm_dbg.h"
    25  #include "policy_manager.h"
    26  #include "vmm_api.h"
    27  #include "unrestricted_guest.h"
    28  #include "file_codes.h"
    29  #define VMM_DEADLOOP()     VMM_DEADLOOP_LOG(GUEST_CPU_CONTROL_C)
    30  #define VMM_ASSERT(__condition) VMM_ASSERT_LOG(GUEST_CPU_CONTROL_C, __condition)
    31  #ifdef JLMDEBUG
    32  #include "jlmdebug.h"
    33  #endif
    34  
    35  extern VMM_PAGING_POLICY g_pg_policy;
    36  extern void disable_vmcs_load_save_for_msr (MSR_ID msr_index);
    37  extern BOOLEAN is_cr4_osxsave_supported(void);
    38  
    39  //  Main implementation idea:
    40  //    Count requests for each VmExit control bit. Require VmExit if at least
    41  //    one request is outstanding.
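         //
         //    Illustrative example (hypothetical numbers, not from the original): if two
         //    add-ons each request a VmExit on the same control bit, its counter reaches 2
         //    and the bit stays set in the VMCS; when both later withdraw their requests
         //    the counter drops back to 0 and the bit is cleared, unless it is forced to 1
         //    by the minimal_1_settings computed below.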
    42  
    43  // global static vars that indicate host CPU support for extra controls
    44  static BOOLEAN g_init_done = FALSE;
    45  static BOOLEAN g_processor_ctrls2_supported = FALSE;
    46  
    47  
    48  typedef enum _EXCEPTIONS_POLICY_TYPE {
    49      EXCEPTIONS_POLICY_CATCH_NOTHING = 0,
    50      EXCEPTIONS_POLICY_CATCH_ALL,
    51  } EXCEPTIONS_POLICY_TYPE;
    52  
    53  
    54  // set bit for each fixed bit - either 0 or 1
    55  #define GET_FIXED_MASK( type, mask, func )                                      \
    56      {                                                                           \
    57          type fixed1, fixed0;                                                    \
    58          fixed1 = (func)( 0 );                                                   \
    59          fixed0 = (func)( (type)-1 );                                            \
    60          (mask) = fixed1 | ~fixed0;                                              \
    61      }
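         //
         // How GET_FIXED_MASK derives the mask (assuming func() coerces the requested
         // value into the hardware-allowed range):
         //   fixed1  = func(0)        -> bits that come back 1 even when 0 is requested,
         //                               i.e. bits fixed to 1
         //   ~fixed0 = ~func(all 1s)  -> bits that come back 0 even when 1 is requested,
         //                               i.e. bits fixed to 0
         //   mask = fixed1 | ~fixed0  -> one bit set for every fixed bit of either kind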
    62  
    63  // init with minimal value
    64  #define GET_MINIMAL_VALUE( value, func )  (value) = (func)( 0 )
    65  
    66  // return fixed0 values
    67  #define GET_FIXED0( func ) (func)( UINT32_ALL_ONES )
    68  
    69  #define MAY_BE_SET1( fixed, defaul, bit ) (!(fixed.Bits.bit) || defaul.Bits.bit)
    70  
    71  // get final field settings
    72  ////#define GET_FINAL_SETTINGS( gcpu, field, final_mask )                           
    73  ////    (((UINT64)(final_mask) | (gcpu)->vmexit_setup.field.enforce_1_settings)    
    74  ////                           & (gcpu)->vmexit_setup.field.enforce_0_settings)
    75  
    76  
    77  #define APPLY_ZEROES(__value, __zeroes) ((__value) & (__zeroes))
    78  #define APPLY_ONES(__value, __ones) ((__value) | (__ones))
    79  #define APPLY_ZEROES_AND_ONES(__value, __zeroes, __ones) \
    80      APPLY_ZEROES(APPLY_ONES(__value, __ones), __zeroes)
    81  
    82  
    83  #define GET_FINAL_SETTINGS( gcpu, field, final_mask )                           \
    84      (((UINT64)(final_mask) | (gcpu)->vmexit_setup.field.minimal_1_settings)     \
    85                             & (gcpu)->vmexit_setup.field.minimal_0_settings)
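         //
         // Worked reading of GET_FINAL_SETTINGS (a sketch, assuming the meaning of the two
         // minimal_* fields set up below): OR-ing in minimal_1_settings forces bits on even
         // if no add-on requested them, while AND-ing with minimal_0_settings clears any
         // bit that hardware or policy does not allow to be 1.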
    86  
    87  
    88  static void gcpu_exceptions_settings_enforce_on_hw(GUEST_CPU_HANDLE  gcpu, UINT32 zeroes, UINT32 ones);
    89  static void gcpu_exceptions_settings_restore_on_hw(GUEST_CPU_HANDLE  gcpu);
    90  static void gcpu_proc_ctrls_enforce_on_hw(GUEST_CPU_HANDLE gcpu, UINT32 zeroes, UINT32 ones);
    91  static void gcpu_proc_ctrls_restore_on_hw(GUEST_CPU_HANDLE gcpu);
    92  static void gcpu_cr0_mask_enforce_on_hw(GUEST_CPU_HANDLE gcpu, UINT64 zeroes, UINT64 ones);
    93  static void gcpu_set_enter_ctrls_for_addons( GUEST_CPU_HANDLE gcpu, UINT32 value, UINT32 bits_untouched );
    94  
    95  
    96  static void set_minimal_cr0_reg_mask( GCPU_VMEXIT_CONTROL_FIELD_COUNTERS* field )
    97  {
    98      UINT64 fixed;
    99  
   100      GET_FIXED_MASK( UINT64, fixed, vmcs_hw_make_compliant_cr0 );
   101      field->minimal_1_settings = (fixed | GCPU_CR0_VMM_CONTROLLED_BITS);
   102      if (global_policy_is_cache_dis_virtualized())
   103          field->minimal_1_settings |= CR0_CD;
   104      field->minimal_0_settings = UINT64_ALL_ONES;
   105  }
   106  
   107  static void set_minimal_cr4_reg_mask( GCPU_VMEXIT_CONTROL_FIELD_COUNTERS* field )
   108  {
   109      UINT64 fixed;
   110  
   111      GET_FIXED_MASK( UINT64, fixed, vmcs_hw_make_compliant_cr4 );
   112  
   113      if( is_unrestricted_guest_supported() )
   114          field->minimal_1_settings = fixed | CR4_SMXE;
   115      else
   116          field->minimal_1_settings = (fixed | GCPU_CR4_VMM_CONTROLLED_BITS);
   117      field->minimal_0_settings = UINT64_ALL_ONES;
   118      if (is_cr4_osxsave_supported())
   119          field->minimal_1_settings = field->minimal_1_settings | CR4_OSXSAVE;
   120  }
   121  
   122  static void set_minimal_pin_ctrls( GCPU_VMEXIT_CONTROL_FIELD_COUNTERS* field )
   123  {
   124      PIN_BASED_VM_EXECUTION_CONTROLS pin_ctrl, pin_ctrl_fixed;
   125  
   126      GET_FIXED_MASK( UINT32, pin_ctrl_fixed.Uint32,
   127                      vmcs_hw_make_compliant_pin_based_exec_ctrl );
   128      GET_MINIMAL_VALUE( pin_ctrl.Uint32,
   129                      vmcs_hw_make_compliant_pin_based_exec_ctrl );
   130      // do not exit on external interrupts
   131      VMM_ASSERT( pin_ctrl.Bits.ExternalInterrupt == 0 );
    132      // set up all NMIs to be processed by the VMM
    133      // the gcpu receives only virtual NMIs
   134      VMM_ASSERT( MAY_BE_SET1(pin_ctrl_fixed, pin_ctrl, Nmi ));
   135      pin_ctrl.Bits.Nmi = 1;
   136      VMM_ASSERT( MAY_BE_SET1(pin_ctrl_fixed, pin_ctrl, VirtualNmi ));
   137      pin_ctrl.Bits.VirtualNmi = 1;
   138      field->minimal_1_settings = pin_ctrl.Uint32;
   139      field->minimal_0_settings = GET_FIXED0( vmcs_hw_make_compliant_pin_based_exec_ctrl );
   140  }
   141  
   142  static void set_minimal_processor_ctrls( GCPU_VMEXIT_CONTROL_FIELD_COUNTERS* field )
   143  {
   144      PROCESSOR_BASED_VM_EXECUTION_CONTROLS proc_ctrl, proc_ctrl_fixed;
   145  
   146      GET_FIXED_MASK( UINT32, proc_ctrl_fixed.Uint32,
   147                      vmcs_hw_make_compliant_processor_based_exec_ctrl );
   148      GET_MINIMAL_VALUE( proc_ctrl.Uint32,
   149                      vmcs_hw_make_compliant_processor_based_exec_ctrl );
   150  
   151      // do not use TSC offsetting
   152      VMM_ASSERT( proc_ctrl.Bits.UseTscOffsetting == 0 );
   153      // do not exit on halt instruction
   154      VMM_ASSERT( proc_ctrl.Bits.Hlt == 0 );
   155      // do not exit on invalidate page
   156      VMM_ASSERT( proc_ctrl.Bits.Invlpg == 0 );
   157      // do not exit on mwait
   158      VMM_ASSERT( proc_ctrl.Bits.Mwait == 0 );
   159      // do not exit on rdpmc instruction
   160      VMM_ASSERT( proc_ctrl.Bits.Rdpmc == 0 );
   161      // do not exit on rdtsc instruction
   162      VMM_ASSERT( proc_ctrl.Bits.Rdtsc == 0 );
   163      // do not exit on CR8 access
   164      VMM_ASSERT( proc_ctrl.Bits.Cr8Load == 0 );
   165      VMM_ASSERT( proc_ctrl.Bits.Cr8Store == 0 );
    166      // do not use TPR shadow
   167      VMM_ASSERT( proc_ctrl.Bits.TprShadow == 0 );
   168      // do not exit on debug registers access
   169      VMM_ASSERT( proc_ctrl.Bits.MovDr == 0 );
   170      // do not exit on I/O ports access
   171      VMM_ASSERT( proc_ctrl.Bits.UnconditionalIo == 0 );
   172      VMM_ASSERT( proc_ctrl.Bits.ActivateIoBitmaps == 0 );
   173      // do not exit on monitor instruction
   174      VMM_ASSERT( proc_ctrl.Bits.Monitor == 0 );
   175      // do not exit on pause instruction
   176      VMM_ASSERT( proc_ctrl.Bits.Pause == 0 );
   177      VMM_LOG(mask_anonymous, level_trace,"%s:: %d \n", __FUNCTION__, g_pg_policy);
   178      if (g_pg_policy == POL_PG_EPT) {
   179          proc_ctrl.Bits.Cr3Load = 0;
   180          proc_ctrl.Bits.Cr3Store = 0;
   181      }
   182  
   183      // if processor_ctrls2 may be enabled, enable them immediately
   184      // to simplify processing
   185      VMM_ASSERT( g_processor_ctrls2_supported ==
   186          (0 != MAY_BE_SET1(proc_ctrl_fixed, proc_ctrl, SecondaryControls )));
   187      if (g_processor_ctrls2_supported) {
   188          proc_ctrl.Bits.SecondaryControls = 1;
   189      }
   190      field->minimal_1_settings = proc_ctrl.Uint32;
   191      field->minimal_0_settings = GET_FIXED0( vmcs_hw_make_compliant_processor_based_exec_ctrl );
   192  }
   193  
   194  static void set_minimal_processor_ctrls2( GCPU_VMEXIT_CONTROL_FIELD_COUNTERS* field )
   195  {
   196      PROCESSOR_BASED_VM_EXECUTION_CONTROLS2 proc_ctrl2, proc_ctrl2_fixed;
   197      (void)proc_ctrl2_fixed;
   198      if (!g_processor_ctrls2_supported) {
   199          return;
   200      }
   201      GET_FIXED_MASK( UINT32, proc_ctrl2_fixed.Uint32,
   202                      vmcs_hw_make_compliant_processor_based_exec_ctrl2 );
   203      GET_MINIMAL_VALUE( proc_ctrl2.Uint32,
   204                      vmcs_hw_make_compliant_processor_based_exec_ctrl2 );
   205  
    206      // enable rdtscp instruction if CPUID.80000001H.EDX[27] reports it is supported
    207      // if "Enable RDTSCP" is 0, execution of RDTSCP in non-root mode will trigger #UD
    208      // Notes:
    209      //   1. Currently, "RDTSC exiting" and "use TSC offsetting" are both zero, since
    210      //      the vmm doesn't virtualize the TSC
    211      //   2. The current setting makes RDTSCP operate normally, and no vmexits happen.
    212      //      Besides, the vmm doesn't use/modify IA32_TSC_AUX.
    213      //   3. If virtual TSC support and virtualization of IA32_TSC_AUX are added later,
    214      //      these settings must be changed accordingly.
   215      if(proc_ctrl2.Bits.EnableRDTSCP == 0) {
   216          // set EnableRDTSCP bit if rdtscp is supported        
   217          if(is_rdtscp_supported()) {
   218              proc_ctrl2.Bits.EnableRDTSCP = 1;
   219          }
   220      }
   221  
   222      //INVPCID. Behavior of the INVPCID instruction is determined first
   223      //by the setting of the “enable INVPCID” VM-execution control:
   224      //— If the “enable INVPCID” VM-execution control is 0, 
   225      //  INVPCID causes an invalid-opcode exception (#UD).
   226      //— If the “enable INVPCID” VM-execution control is 1, 
   227      //  treatment is based on the setting of the “INVLPG exiting” VM-execution control:
   228      //  1) If the “INVLPG exiting” VM-execution control is 0, INVPCID operates normally.
   229      //     (this setting is selected)
   230      //  2) If the “INVLPG exiting” VM-execution control is 1, INVPCID causes a VM exit.
   231      //
   232      if(proc_ctrl2.Bits.EnableINVPCID == 0) {
   233          // set EnableINVPCID bit if INVPCID is supported  
   234          if(is_invpcid_supported()){
   235              proc_ctrl2.Bits.EnableINVPCID = 1;
   236          }        
   237      }
   238      field->minimal_1_settings = proc_ctrl2.Uint32;
   239      field->minimal_0_settings = GET_FIXED0( vmcs_hw_make_compliant_processor_based_exec_ctrl2 );
   240  }
   241  
   242  
   243  static void set_minimal_exceptions_map( GCPU_VMEXIT_CONTROL_FIELD_COUNTERS* field )
   244  {
   245      IA32_VMCS_EXCEPTION_BITMAP exceptions;
   246  
   247      exceptions.Uint32 = 0;
   248      // Machine Check: let guest IDT handle the MCE unless vmm has special concern
   249      // exceptions.Bits.MC = 1;
   250      // Page Faults
    251      // exceptions.Bits.PF = 1;   not required for EPT; for VTLB/FPT it should be enabled explicitly
   252      field->minimal_1_settings = exceptions.Uint32;
   253      field->minimal_0_settings = UINT64_ALL_ONES;
   254  }
   255  
   256  static void set_minimal_exit_ctrls( GCPU_VMEXIT_CONTROL_FIELD_COUNTERS* field )
   257  {
   258      VM_EXIT_CONTROLS ctrl, ctrl_fixed;
   259  
   260      GET_FIXED_MASK( UINT32, ctrl_fixed.Uint32,
   261                      vmcs_hw_make_compliant_vm_exit_ctrl );
   262      GET_MINIMAL_VALUE( ctrl.Uint32,
   263                      vmcs_hw_make_compliant_vm_exit_ctrl );
   264      // do not acknowledge interrupts on exit
   265      VMM_ASSERT( ctrl.Bits.AcknowledgeInterruptOnExit == 0 );
   266      VMM_ASSERT( MAY_BE_SET1(ctrl_fixed, ctrl, SaveCr0AndCr4 ));
   267      ctrl.Bits.SaveCr0AndCr4 = 1;
   268      VMM_ASSERT( MAY_BE_SET1(ctrl_fixed, ctrl, SaveCr3 ));
   269      ctrl.Bits.SaveCr3 = 1;
   270      if ( MAY_BE_SET1(ctrl_fixed, ctrl, SaveDebugControls )) {
   271          ctrl.Bits.SaveDebugControls = 1;
   272      }
   273      VMM_ASSERT( MAY_BE_SET1(ctrl_fixed, ctrl, SaveSegmentRegisters ));
   274      ctrl.Bits.SaveSegmentRegisters = 1;
   275      VMM_ASSERT( MAY_BE_SET1(ctrl_fixed, ctrl, SaveEspEipEflags ));
   276      ctrl.Bits.SaveEspEipEflags = 1;
   277      VMM_ASSERT( MAY_BE_SET1(ctrl_fixed, ctrl, SavePendingDebugExceptions ));
   278      ctrl.Bits.SavePendingDebugExceptions = 1;
   279      VMM_ASSERT( MAY_BE_SET1(ctrl_fixed, ctrl, SaveInterruptibilityInformation ));
   280      ctrl.Bits.SaveInterruptibilityInformation = 1;
   281      VMM_ASSERT( MAY_BE_SET1(ctrl_fixed, ctrl, SaveActivityState ));
   282      ctrl.Bits.SaveActivityState = 1;
   283      VMM_ASSERT( MAY_BE_SET1(ctrl_fixed, ctrl, SaveWorkingVmcsPointer ));
   284      ctrl.Bits.SaveWorkingVmcsPointer = 1;
   285      VMM_ASSERT( MAY_BE_SET1(ctrl_fixed, ctrl, LoadCr0AndCr4 ));
   286      ctrl.Bits.LoadCr0AndCr4 = 1;
   287      VMM_ASSERT( MAY_BE_SET1(ctrl_fixed, ctrl, LoadCr3 ));
   288      ctrl.Bits.LoadCr3 = 1;
   289      VMM_ASSERT( MAY_BE_SET1(ctrl_fixed, ctrl, LoadSegmentRegisters ));
   290      ctrl.Bits.LoadSegmentRegisters = 1;
   291      VMM_ASSERT( MAY_BE_SET1(ctrl_fixed, ctrl, LoadEspEip ));
   292      ctrl.Bits.LoadEspEip = 1;
   295      VMM_ASSERT( MAY_BE_SET1(ctrl_fixed, ctrl, SaveSysEnterMsrs ));
   296      VMM_ASSERT( MAY_BE_SET1(ctrl_fixed, ctrl, LoadSysEnterMsrs ));
   297      if( MAY_BE_SET1(ctrl_fixed, ctrl, SaveEfer )) {
   298          ctrl.Bits.SaveEfer = 1;
   299      }
   300      if( MAY_BE_SET1(ctrl_fixed, ctrl, LoadEfer )) {
   301          ctrl.Bits.LoadEfer = 1;
   302      }
   303      if ( MAY_BE_SET1(ctrl_fixed, ctrl, Load_IA32_PERF_GLOBAL_CTRL )) {
   304          ctrl.Bits.Load_IA32_PERF_GLOBAL_CTRL = 1;
   305      }
   306      if ( MAY_BE_SET1(ctrl_fixed, ctrl, SavePat )) {
   307          ctrl.Bits.SavePat = 1;
   308      }
   309      if( MAY_BE_SET1(ctrl_fixed, ctrl, LoadPat )) {
   310          ctrl.Bits.LoadPat = 1;
   311      }
   312      field->minimal_1_settings = ctrl.Uint32;
   313      field->minimal_0_settings = GET_FIXED0( vmcs_hw_make_compliant_vm_exit_ctrl );
   314  }
   315  
   316  static void set_minimal_entry_ctrls( GCPU_VMEXIT_CONTROL_FIELD_COUNTERS* field )
   317  {
   318      VM_ENTRY_CONTROLS ctrl, ctrl_fixed;
   319  
   320      GET_FIXED_MASK( UINT32, ctrl_fixed.Uint32,
   321                      vmcs_hw_make_compliant_vm_entry_ctrl );
   322      GET_MINIMAL_VALUE( ctrl.Uint32,
   323                      vmcs_hw_make_compliant_vm_entry_ctrl );
   324  
   325      // we are out of SMM
   326      VMM_ASSERT( ctrl.Bits.EntryToSmm == 0 );
   327      VMM_ASSERT( ctrl.Bits.TearDownSmmMonitor == 0 );
   328      VMM_ASSERT( MAY_BE_SET1(ctrl_fixed, ctrl, LoadCr0AndCr4 ));
   329      ctrl.Bits.LoadCr0AndCr4 = 1;
   330      VMM_ASSERT( MAY_BE_SET1(ctrl_fixed, ctrl, LoadCr3 ));
   331      ctrl.Bits.LoadCr3 = 1;
   332      if( MAY_BE_SET1(ctrl_fixed, ctrl, LoadDebugControls )) {
   333          ctrl.Bits.LoadDebugControls = 1;
   334      }
   335      VMM_ASSERT( MAY_BE_SET1(ctrl_fixed, ctrl, LoadSegmentRegisters ));
   336      ctrl.Bits.LoadSegmentRegisters = 1;
   337      VMM_ASSERT( MAY_BE_SET1(ctrl_fixed, ctrl, LoadEspEipEflags ));
   338      ctrl.Bits.LoadEspEipEflags = 1;
   339      VMM_ASSERT( MAY_BE_SET1(ctrl_fixed, ctrl, LoadPendingDebugExceptions ));
   340      ctrl.Bits.LoadPendingDebugExceptions = 1;
   341      VMM_ASSERT( MAY_BE_SET1(ctrl_fixed, ctrl, LoadInterruptibilityInformation ));
   342      ctrl.Bits.LoadInterruptibilityInformation = 1;
   343      VMM_ASSERT( MAY_BE_SET1(ctrl_fixed, ctrl, LoadActivityState ));
   344      ctrl.Bits.LoadActivityState = 1;
   345      VMM_ASSERT( MAY_BE_SET1(ctrl_fixed, ctrl, LoadWorkingVmcsPointer ));
   346      ctrl.Bits.LoadWorkingVmcsPointer = 1;
   347      VMM_ASSERT( MAY_BE_SET1(ctrl_fixed, ctrl, LoadSysEnterMsrs ));
   348      if( MAY_BE_SET1(ctrl_fixed, ctrl, LoadEfer )) {
   349          ctrl.Bits.LoadEfer = 1;
   350      }
   351      if ( MAY_BE_SET1(ctrl_fixed, ctrl, LoadPat )) {
   352          ctrl.Bits.LoadPat = 1;
   353      }
   354      field->minimal_1_settings = ctrl.Uint32;
   355      field->minimal_0_settings = GET_FIXED0( vmcs_hw_make_compliant_vm_entry_ctrl );
   356  }
   357  
   358  static void init_minimal_controls( GUEST_CPU_HANDLE gcpu )
   359  {
   360      // perform init
   361      if (g_init_done == FALSE) {
   362          g_init_done = TRUE;
   363          g_processor_ctrls2_supported =
   364              vmcs_hw_get_vmx_constraints()->processor_based_exec_ctrl2_supported;
   365      }
   366      set_minimal_cr0_reg_mask    ( &(gcpu->vmexit_setup.cr0) );
   367      set_minimal_cr4_reg_mask    ( &(gcpu->vmexit_setup.cr4) );
   368      set_minimal_pin_ctrls       ( &(gcpu->vmexit_setup.pin_ctrls) );
   369      set_minimal_processor_ctrls ( &(gcpu->vmexit_setup.processor_ctrls) );
   370      set_minimal_processor_ctrls2( &(gcpu->vmexit_setup.processor_ctrls2) );
   371      set_minimal_exceptions_map  ( &(gcpu->vmexit_setup.exceptions_ctrls) );
   372      set_minimal_entry_ctrls     ( &(gcpu->vmexit_setup.vm_entry_ctrls) );
   373      set_minimal_exit_ctrls      ( &(gcpu->vmexit_setup.vm_exit_ctrls) );
   374  }
   375  
    376  //
    377  // Given a 64-bit mask and flags, for each 1-bit in the mask consult the corresponding flags bit.
    378  // If the flags bit is 1, increase the counter; else, decrease it.
    379  // Return a bit set with a 1-bit for each non-zero counter
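         // Usage sketch (hypothetical values, not from the original): for mask 0x18 and
         // flags 0x08, bit 3 has its counter incremented (and appears in the returned
         // bit_field), while bit 4 has its counter decremented (and is cleared from
         // bit_field once its counter reaches 0).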
   380  static UINT64 gcpu_update_control_counters( UINT64 flags, UINT64 mask,
   381                                       GCPU_VMEXIT_CONTROL_FIELD_COUNTERS* counters )
   382  {
   383      UINT32 idx;
   384  
   385      while ( mask ) {
   386          idx = (UINT32)-1;
   387          hw_scan_bit_forward64( &idx, mask );
   388          // BEFORE_VMLAUNCH. CRITICAL check that should not fail.
   389          VMM_ASSERT( idx < 64 );
   390          BIT_CLR64( mask, idx );
   391          if (1 == BIT_GET64( flags, idx )) {
   392              if (0 == counters->counters[idx]) {
   393                  BIT_SET64( counters->bit_field, idx );
   394              }
   395              VMM_ASSERT( counters->counters[idx] < 255 );
   396              ++(counters->counters[idx]);
   397          }
   398          else {
   399              VMM_ASSERT( counters->counters[idx] > 0 );
   400              --(counters->counters[idx]);
   401              if (0 == counters->counters[idx]) {
   402                  BIT_CLR64( counters->bit_field, idx );
   403              }
   404          }
   405      }
   406      return counters->bit_field;
   407  }
   408  
   409  INLINE UINT64 calculate_cr0_reg_mask(GUEST_CPU_HANDLE gcpu, UINT64 request, 
   410                                       UINT64 bitmask )
   411  {
   412      UINT64 final_mask;
   413  
   414      final_mask = gcpu_update_control_counters( request, bitmask, &(gcpu->vmexit_setup.cr0));
   415      return GET_FINAL_SETTINGS( gcpu, cr0, final_mask );
   416  }
   417  
   418  void gcpu_set_cr0_reg_mask_layered(GUEST_CPU_HANDLE gcpu, VMCS_LEVEL level, 
   419                                     UINT64 value )
   420  {
   421      VMCS_OBJECT* vmcs = gcpu_get_vmcs_layered(gcpu, level);
   422      VMM_ASSERT(vmcs);
   423  
   424      if (vmcs_read( vmcs, VMCS_CR0_MASK ) != value) {
   425          vmcs_write( vmcs, VMCS_CR0_MASK, value );
   426      }
   427  }
   428  
   429  UINT64 gcpu_get_cr0_reg_mask_layered( GUEST_CPU_HANDLE gcpu, VMCS_LEVEL level)
   430  {
   431      VMCS_OBJECT* vmcs = gcpu_get_vmcs_layered(gcpu, level);
   432  
   433      VMM_ASSERT(vmcs);
   434      return vmcs_read( vmcs, VMCS_CR0_MASK );
   435  }
   436  
   437  INLINE UINT64 calculate_cr4_reg_mask(GUEST_CPU_HANDLE gcpu, UINT64 request, 
   438                                       UINT64 bitmask )
   439  {
   440      UINT64 final_mask;
   441  
   442      final_mask = gcpu_update_control_counters( request, bitmask, 
   443                                                  &(gcpu->vmexit_setup.cr4));
   444      return GET_FINAL_SETTINGS( gcpu, cr4, final_mask );
   445  }
   446  
   447  void gcpu_set_cr4_reg_mask_layered(GUEST_CPU_HANDLE gcpu, VMCS_LEVEL level, 
   448                                     UINT64 value )
   449  {
   450      VMCS_OBJECT* vmcs = gcpu_get_vmcs_layered(gcpu, level);
   451      VMM_ASSERT(vmcs);
   452  
   453      if (vmcs_read( vmcs, VMCS_CR4_MASK ) != value ) {
   454          vmcs_write( vmcs, VMCS_CR4_MASK, value );
   455      }
   456  }
   457  
   458  UINT64 gcpu_get_cr4_reg_mask_layered( GUEST_CPU_HANDLE gcpu, VMCS_LEVEL level )
   459  {
   460      VMCS_OBJECT* vmcs = gcpu_get_vmcs_layered(gcpu, level);
   461      VMM_ASSERT(vmcs);
   462      return vmcs_read( vmcs, VMCS_CR4_MASK );
   463  }
   464  
   465  
   466  INLINE UINT32 calculate_pin_ctrls( GUEST_CPU_HANDLE gcpu, UINT32 request, UINT32 bitmask )
   467  {
   468      UINT32 final_mask;
   469  
   470      final_mask = (UINT32)gcpu_update_control_counters( request, bitmask,
   471                              &(gcpu->vmexit_setup.pin_ctrls));
   472      return (UINT32)GET_FINAL_SETTINGS( gcpu, pin_ctrls, final_mask );
   473  }
   474  
   475  void gcpu_set_pin_ctrls_layered( GUEST_CPU_HANDLE gcpu, VMCS_LEVEL level, UINT64 value )
   476  {
   477      VMCS_OBJECT* vmcs = gcpu_get_vmcs_layered(gcpu, level);
   478      VMM_ASSERT(vmcs);
   479  
   480      if (vmcs_read( vmcs, VMCS_CONTROL_VECTOR_PIN_EVENTS ) != value) {
   481          vmcs_write( vmcs, VMCS_CONTROL_VECTOR_PIN_EVENTS, value );
   482      }
   483  }
   484  
   485  UINT64 gcpu_get_pin_ctrls_layered( GUEST_CPU_HANDLE gcpu, VMCS_LEVEL level )
   486  {
   487      VMCS_OBJECT* vmcs = gcpu_get_vmcs_layered(gcpu, level);
   488      VMM_ASSERT(vmcs);
   489      return vmcs_read( vmcs, VMCS_CONTROL_VECTOR_PIN_EVENTS );
   490  }
   491  
   492  static UINT32 calculate_processor_ctrls( GUEST_CPU_HANDLE gcpu, UINT32 request, UINT32 bitmask )
   493  {
   494      UINT32 final_mask;
   495  
   496      final_mask = (UINT32)gcpu_update_control_counters( request,
   497                              bitmask, &(gcpu->vmexit_setup.processor_ctrls));
   498      return (UINT32)GET_FINAL_SETTINGS( gcpu, processor_ctrls, final_mask );
   499  }
   500  
   501  void gcpu_set_processor_ctrls_layered( GUEST_CPU_HANDLE gcpu, VMCS_LEVEL level, UINT64 value )
   502  {
   503      VMCS_OBJECT* vmcs = gcpu_get_vmcs_layered(gcpu, level);
   504      UINT64 proc_control_temp;
   505      VMM_ASSERT(vmcs);
   506  
   507      proc_control_temp = vmcs_read( vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS );
   508      if ( proc_control_temp != value ) {
   509          vmcs_write(vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS, 
   510                     (value & ~0x8000000) | (proc_control_temp & 0x8000000));
   511      }
   512  }
   513  
   514  UINT64 gcpu_get_processor_ctrls_layered( GUEST_CPU_HANDLE gcpu, VMCS_LEVEL level )
   515  {
   516      VMCS_OBJECT* vmcs = gcpu_get_vmcs_layered(gcpu, level);
   517      VMM_ASSERT(vmcs);
   518  
   519      return vmcs_read( vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS );
   520  }
   521  
   522  static
   523  UINT32 calculate_processor_ctrls2( GUEST_CPU_HANDLE gcpu, UINT32 request, UINT32 bitmask )
   524  {
   525      UINT32 final_mask;
   526  
   527      VMM_ASSERT( g_processor_ctrls2_supported == TRUE );
   528  
   529      final_mask = (UINT32)gcpu_update_control_counters( request,
   530                              bitmask, &(gcpu->vmexit_setup.processor_ctrls2));
   531      return (UINT32)GET_FINAL_SETTINGS( gcpu, processor_ctrls2, final_mask );
   532  }
   533  
   534  void gcpu_set_processor_ctrls2_layered( GUEST_CPU_HANDLE gcpu, VMCS_LEVEL level, UINT64 value )
   535  {
   536      VMCS_OBJECT* vmcs = gcpu_get_vmcs_layered(gcpu, level);
   537      // BEFORE_VMLAUNCH. CRITICAL check that should not fail.
   538      VMM_ASSERT(vmcs);
   539      // BEFORE_VMLAUNCH. CRITICAL check that should not fail.
   540      VMM_ASSERT( g_processor_ctrls2_supported == TRUE );
   541      if (vmcs_read( vmcs, VMCS_CONTROL2_VECTOR_PROCESSOR_EVENTS ) != value ) {
   542          vmcs_write( vmcs, VMCS_CONTROL2_VECTOR_PROCESSOR_EVENTS, value );
   543      }
   544  }
   545  
   546  UINT64 gcpu_get_processor_ctrls2_layered( GUEST_CPU_HANDLE gcpu, VMCS_LEVEL level )
   547  {
   548      VMCS_OBJECT* vmcs = gcpu_get_vmcs_layered(gcpu, level);
   549      VMM_ASSERT(vmcs);
   550      VMM_ASSERT( g_processor_ctrls2_supported == TRUE );
   551      return vmcs_read( vmcs, VMCS_CONTROL2_VECTOR_PROCESSOR_EVENTS );
   552  }
   553  
   554  INLINE UINT32 calculate_exceptions_map( GUEST_CPU_HANDLE gcpu, UINT32 request, 
   555                              UINT32 bitmask, EXCEPTIONS_POLICY_TYPE* pf_policy)
   556  {
   557      IA32_VMCS_EXCEPTION_BITMAP exceptions;
   558  
   559      VMM_ASSERT( pf_policy );
   560      exceptions.Uint32 = (UINT32)gcpu_update_control_counters( request,
   561                                  bitmask, &(gcpu->vmexit_setup.exceptions_ctrls));
   562      *pf_policy = (exceptions.Bits.PF) ?
   563              EXCEPTIONS_POLICY_CATCH_ALL : EXCEPTIONS_POLICY_CATCH_NOTHING;
   564      return (UINT32)GET_FINAL_SETTINGS( gcpu, exceptions_ctrls, exceptions.Uint32 );
   565  }
   566  
   567  void gcpu_set_exceptions_map_layered( GUEST_CPU_HANDLE gcpu, VMCS_LEVEL level, 
   568                              UINT64 value)
   569  {
   570      VMCS_OBJECT* vmcs = gcpu_get_vmcs_layered(gcpu, level);
   571      VMM_ASSERT(vmcs);
   572  
   573      if (vmcs_read( vmcs, VMCS_EXCEPTION_BITMAP ) != value ) {
   574          vmcs_write( vmcs, VMCS_EXCEPTION_BITMAP, value );
   575      }
   576  }
   577  
   578  void gcpu_get_pf_error_code_mask_and_match_layered(GUEST_CPU_HANDLE gcpu, VMCS_LEVEL level, 
   579                          UINT32* pf_mask, UINT32* pf_match)
   580  {
   581      VMCS_OBJECT* vmcs = gcpu_get_vmcs_layered(gcpu, level);
   582      VMM_ASSERT(vmcs);
   583  
   584      *pf_mask = (UINT32)vmcs_read(vmcs, VMCS_PAGE_FAULT_ERROR_CODE_MASK);
   585      *pf_match = (UINT32)vmcs_read(vmcs, VMCS_PAGE_FAULT_ERROR_CODE_MATCH);
   586  }
   587  
   588  void gcpu_set_pf_error_code_mask_and_match_layered(GUEST_CPU_HANDLE gcpu, 
   589                          VMCS_LEVEL level, UINT32 pf_mask, UINT32 pf_match)
   590  {
   591      VMCS_OBJECT* vmcs = gcpu_get_vmcs_layered(gcpu, level);
   592      VMM_ASSERT(vmcs);
   593  
   594      vmcs_write(vmcs, VMCS_PAGE_FAULT_ERROR_CODE_MASK, pf_mask);
   595      vmcs_write(vmcs, VMCS_PAGE_FAULT_ERROR_CODE_MATCH, pf_match);
   596  }
   597  
   598  UINT64 gcpu_get_exceptions_map_layered( GUEST_CPU_HANDLE gcpu, VMCS_LEVEL level )
   599  {
   600      VMCS_OBJECT* vmcs = gcpu_get_vmcs_layered(gcpu, level);
   601      VMM_ASSERT(vmcs);
   602  
   603      return vmcs_read( vmcs, VMCS_EXCEPTION_BITMAP );
   604  }
   605  INLINE
   606  UINT32 calculate_exit_ctrls( GUEST_CPU_HANDLE gcpu, UINT32 request, UINT32 bitmask )
   607  {
   608      UINT32 final_mask;
   609  
   610      final_mask = (UINT32)gcpu_update_control_counters( request,
   611                              bitmask, &(gcpu->vmexit_setup.vm_exit_ctrls));
   612      return (UINT32)GET_FINAL_SETTINGS( gcpu, vm_exit_ctrls, final_mask );
   613  }
   614  
   615  void gcpu_set_exit_ctrls_layered( GUEST_CPU_HANDLE gcpu, VMCS_LEVEL level, UINT32 value )
   616  {
   617      VMCS_OBJECT* vmcs = gcpu_get_vmcs_layered(gcpu, level);
   618      VMM_ASSERT(vmcs);
   619  
   620      if (vmcs_read( vmcs, VMCS_EXIT_CONTROL_VECTOR ) != value ) {
   621          vmcs_write( vmcs, VMCS_EXIT_CONTROL_VECTOR, value );
   622      }
   623  }
   624  
   625  UINT32 gcpu_get_exit_ctrls_layered( GUEST_CPU_HANDLE gcpu, VMCS_LEVEL level )
   626  {
   627      VMCS_OBJECT* vmcs = gcpu_get_vmcs_layered(gcpu, level);
   628      VMM_ASSERT(vmcs);
   629  
   630      return (UINT32) vmcs_read( vmcs, VMCS_EXIT_CONTROL_VECTOR );
   631  }
   632  
   633  INLINE UINT32 calculate_enter_ctrls( GUEST_CPU_HANDLE gcpu, UINT32 request, UINT32 bitmask )
   634  {
   635      UINT32 final_mask;
   636  
   637      final_mask = (UINT32)gcpu_update_control_counters( request,
   638                              bitmask, &(gcpu->vmexit_setup.vm_entry_ctrls));
   639      return (UINT32)GET_FINAL_SETTINGS( gcpu, vm_entry_ctrls, final_mask );
   640  }
   641  
   642  void gcpu_set_enter_ctrls_layered( GUEST_CPU_HANDLE gcpu, VMCS_LEVEL level, UINT32 value )
   643  {
   644      VMCS_OBJECT* vmcs = gcpu_get_vmcs_layered(gcpu, level);
   645      VMM_ASSERT(vmcs);
   646  
   647      if (vmcs_read( vmcs, VMCS_ENTER_CONTROL_VECTOR ) != value ) {
   648          vmcs_write( vmcs, VMCS_ENTER_CONTROL_VECTOR, value );
   649      }
   650  }
   651  
   652  static void gcpu_set_enter_ctrls_for_addons( GUEST_CPU_HANDLE gcpu, UINT32 value, UINT32 bits_untouched )
   653  {
   654      VMCS_OBJECT* vmcs = gcpu_get_vmcs_layered(gcpu, VMCS_LEVEL_0);
   655      VMM_ASSERT(vmcs);
   656      vmcs_update(vmcs, VMCS_ENTER_CONTROL_VECTOR, value, ~bits_untouched);
   657  }
   658  
   659  
   660  UINT32 gcpu_get_enter_ctrls_layered( GUEST_CPU_HANDLE gcpu, VMCS_LEVEL level )
   661  {
   662      VMCS_OBJECT* vmcs = gcpu_get_vmcs_layered(gcpu, level);
   663      VMM_ASSERT(vmcs);
   664      return  (UINT32)vmcs_read( vmcs, VMCS_ENTER_CONTROL_VECTOR );
   665  }
   666  
   667  static void request_vmexit_on_cr0(GUEST_CPU_HANDLE gcpu, UINT64  bit_request,
   668                             UINT64 bit_mask )
   669  
   670  {
   671      UINT64              cr0_mask;
   672      VMCS_OBJECT*        vmcs;
   673      UINT64              cr0_value, cr0_read_shadow_value;
   674  
   675      cr0_mask = calculate_cr0_reg_mask( gcpu, bit_request, bit_mask );
   676      gcpu_set_cr0_reg_mask_layered( gcpu, VMCS_LEVEL_0, cr0_mask );
   677      vmcs= gcpu_get_vmcs(gcpu);
   678      cr0_value  = vmcs_read(vmcs, VMCS_GUEST_CR0);
   679      cr0_read_shadow_value  = vmcs_read(vmcs, VMCS_CR0_READ_SHADOW);
    680      //   Clear the mask bits that have been set in the cr0 minimal_1_settings,
    681      //   since these bits are controlled by the host.
   682      cr0_mask = cr0_mask & ~(gcpu)->vmexit_setup.cr0.minimal_1_settings;
    683      //1. Keep the original shadow bits corresponding to the zero bits in the
    684      //   cr0_mask.
    685      //2. Update the shadow bits from the cr0 value corresponding to the
    686      //   set bits in the cr0_mask.
   687      vmcs_write(vmcs, VMCS_CR0_READ_SHADOW, (cr0_read_shadow_value & ~cr0_mask)
   688                      |(cr0_value & cr0_mask));
   689  }
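         // Background (Intel SDM semantics): bits set in VMCS_CR0_MASK are host-owned;
         // guest reads of those CR0 bits return the VMCS_CR0_READ_SHADOW values, and guest
         // writes that would change them from the shadow cause a VM exit. The shadow update
         // above therefore refreshes the guest-visible value only for mask bits that are
         // not already forced by the cr0 minimal_1_settings.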
   690  
   691  static void request_vmexit_on_cr4(GUEST_CPU_HANDLE gcpu, UINT64 bit_request, UINT64 bit_mask )
   692  {
   693      UINT64              cr4_mask;
   694  
   695      VMCS_OBJECT* vmcs;
   696      UINT64 cr4_value, cr4_read_shadow_value;
   697      cr4_mask = calculate_cr4_reg_mask( gcpu, bit_request, bit_mask );
   698      gcpu_set_cr4_reg_mask_layered( gcpu, VMCS_LEVEL_0, cr4_mask );
   699      vmcs= gcpu_get_vmcs(gcpu);
   700      cr4_value  = vmcs_read(vmcs, VMCS_GUEST_CR4);
   701      cr4_read_shadow_value  = vmcs_read(vmcs, VMCS_CR4_READ_SHADOW);
    702      //   Clear the mask bits that have been set in the cr4 minimal_1_settings,
    703      //   since these bits are controlled by the host.
   704      cr4_mask = cr4_mask & ~(gcpu)->vmexit_setup.cr4.minimal_1_settings;
   705  
    706      //1. Keep the original shadow bits corresponding to the zero bits in the
    707      //   cr4_mask.
    708      //2. Update the shadow bits from the cr4 value corresponding to the
    709      //   set bits in the cr4_mask.
   710      vmcs_write(vmcs, VMCS_CR4_READ_SHADOW, (cr4_read_shadow_value & ~cr4_mask)
   711                      |(cr4_value & cr4_mask));
   712  }
   713  
   714  static void update_pfs_setup( GUEST_CPU_HANDLE gcpu, EXCEPTIONS_POLICY_TYPE policy )
   715  {
   716      IA32_VMCS_EXCEPTION_BITMAP exceptions;
   717      // setup page faults
   718      
   719      exceptions.Uint32 = (UINT32)gcpu_get_exceptions_map_layered( gcpu, VMCS_LEVEL_0);
   720      switch (policy) {
   721          case EXCEPTIONS_POLICY_CATCH_NOTHING:
   722              // do not exit on Page Faults at all
   723              gcpu_set_pf_error_code_mask_and_match_layered(gcpu, VMCS_LEVEL_0, 0, ((exceptions.Bits.PF) ? ((UINT32)-1) : 0));
   724              break;
   725          case EXCEPTIONS_POLICY_CATCH_ALL:
   726              // do exit on all Page Faults
   727              gcpu_set_pf_error_code_mask_and_match_layered(gcpu, VMCS_LEVEL_0, 0, ((exceptions.Bits.PF) ? 0 : ((UINT32)-1)));
   728              break;
   729          default:
   730              VMM_LOG(mask_anonymous, level_trace,"update_pfs_setup: Unknown policy type: %d\n", policy);
   731              VMM_ASSERT( FALSE );
   732      }
   733  }
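         // Background (Intel SDM semantics): when the PF bit in the exception bitmap is 1,
         // a page fault exits only if (error_code & PFEC_MASK) == PFEC_MATCH; when the PF
         // bit is 0, it exits only if they differ. With the mask written as 0 above, the
         // masked error code is always 0, so the match value ((UINT32)-1 or 0) is chosen so
         // that the comparison never or always selects a VM exit, as the policy requires.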
   734  
   735  static void request_vmexit_on_exceptions(GUEST_CPU_HANDLE gcpu, UINT32 bit_request, UINT32 bit_mask )
   736  {
   737      UINT32                  except_map;
   738      EXCEPTIONS_POLICY_TYPE  pf_policy;
   739  
   740      except_map = calculate_exceptions_map(  gcpu, bit_request, bit_mask, &pf_policy );
   741      gcpu_set_exceptions_map_layered( gcpu, VMCS_LEVEL_0, except_map);
   742      update_pfs_setup(gcpu, pf_policy);
   743  }
   744  
   745  static void request_vmexit_on_pin_ctrls( GUEST_CPU_HANDLE gcpu,
   746                                    UINT32 bit_request, UINT32 bit_mask )
   747  {
   748      UINT32              pin_ctrls;
   749  
   750      pin_ctrls = calculate_pin_ctrls( gcpu, bit_request, bit_mask );
   751      gcpu_set_pin_ctrls_layered( gcpu, VMCS_LEVEL_0, pin_ctrls );
   752  }
   753  
   754  static void request_vmexit_on_proc_ctrls(GUEST_CPU_HANDLE gcpu, UINT32 bit_request, UINT32 bit_mask )
   755  {
   756      UINT32              proc_ctrls;
   757  
   758      proc_ctrls = calculate_processor_ctrls( gcpu, bit_request, bit_mask );
   759      gcpu_set_processor_ctrls_layered( gcpu, VMCS_LEVEL_0, proc_ctrls );
   760  }
   761  
   762  static void request_vmexit_on_proc_ctrls2(GUEST_CPU_HANDLE gcpu,
   763                                    UINT32 bit_request, UINT32 bit_mask )
   764  {
   765      UINT32              proc_ctrls2;
   766  
   767      if (g_processor_ctrls2_supported) {
   768          proc_ctrls2 = calculate_processor_ctrls2( gcpu, bit_request, bit_mask );
   769          gcpu_set_processor_ctrls2_layered( gcpu, VMCS_LEVEL_0, proc_ctrls2 );
   770      }
   771  }
   772  
   773  static void request_vmexit_on_vm_enter_ctrls(GUEST_CPU_HANDLE gcpu,
   774                                    UINT32 bit_request, UINT32 bit_mask )
   775  {
   776      UINT32            vm_enter_ctrls;
   777      VM_ENTRY_CONTROLS dont_touch;
   778  
   779      // Do not change IA32e Guest mode here. It is changed as part of EFER!!!!!
   780      dont_touch.Uint32 = 0;
   781      dont_touch.Bits.Ia32eModeGuest = 1;
   782      vm_enter_ctrls = calculate_enter_ctrls( gcpu, bit_request, bit_mask );
   783      gcpu_set_enter_ctrls_for_addons( gcpu, vm_enter_ctrls, dont_touch.Uint32);
   784  }
   785  
   786  static void request_vmexit_on_vm_exit_ctrls(GUEST_CPU_HANDLE gcpu,
   787                                    UINT32 bit_request, UINT32 bit_mask )
   788  {
   789      UINT32              vm_exit_ctrls;
   790  
   791      vm_exit_ctrls = calculate_exit_ctrls( gcpu, bit_request, bit_mask );
   792      gcpu_set_exit_ctrls_layered( gcpu, VMCS_LEVEL_0, vm_exit_ctrls );
   793  }
   794  
   795  static void gcpu_apply_ctrols2( GUEST_CPU_HANDLE gcpu )
   796  {
   797      request_vmexit_on_proc_ctrls2( gcpu, 0, 0 );
   798  }
   799  
   800  static void gcpu_apply_all( GUEST_CPU_HANDLE gcpu )
   801  {
   802      request_vmexit_on_pin_ctrls( gcpu, 0, 0 );
   803      request_vmexit_on_proc_ctrls( gcpu, 0, 0 );
   804      request_vmexit_on_proc_ctrls2( gcpu, 0, 0 );
   805      request_vmexit_on_exceptions( gcpu, 0, 0 );
   806      request_vmexit_on_vm_exit_ctrls( gcpu, 0, 0 );
   807      request_vmexit_on_vm_enter_ctrls( gcpu, 0, 0 );
   808      request_vmexit_on_cr0( gcpu, 0, 0 );
   809      request_vmexit_on_cr4( gcpu, 0, 0 );
   810  }
   811  
   812  // Setup minimal controls for Guest CPU
   813  static void gcpu_minimal_controls( GUEST_CPU_HANDLE gcpu )
   814  {
   815      VMCS_OBJECT* vmcs = gcpu_get_vmcs(gcpu);
   816      UINT32       idx;
   817      const VMCS_HW_CONSTRAINTS* vmx_constraints = vmcs_hw_get_vmx_constraints();
   818  
   819      VMM_ASSERT( vmcs );
   820      init_minimal_controls( gcpu );
   821      gcpu_apply_all( gcpu );
   822  
    823      // Disable CR3 Target Values by setting the count to 0 and all the values to 0xFFFFFFFF
   825      vmcs_write( vmcs, VMCS_CR3_TARGET_COUNT, 0);
   826      for (idx = 0; idx < vmx_constraints->number_of_cr3_target_values; ++idx) {
   827          vmcs_write(vmcs, (VMCS_FIELD)VMCS_CR3_TARGET_VALUE(idx), UINT64_ALL_ONES);
   828      }
   829      // Set additional required fields
   830      vmcs_write(vmcs, VMCS_GUEST_WORKING_VMCS_PTR, UINT64_ALL_ONES );
   831      vmcs_write(vmcs, VMCS_GUEST_SYSENTER_CS, hw_read_msr(IA32_MSR_SYSENTER_CS));
   832      vmcs_write(vmcs, VMCS_GUEST_SYSENTER_ESP, hw_read_msr(IA32_MSR_SYSENTER_ESP));
   833      vmcs_write(vmcs, VMCS_GUEST_SYSENTER_EIP, hw_read_msr(IA32_MSR_SYSENTER_EIP));
   834      vmcs_write(vmcs, VMCS_GUEST_IA32_PERF_GLOBAL_CTRL, hw_read_msr(IA32_MSR_PERF_GLOBAL_CTRL));
   835  }
   836  
   837  
   838  // Apply default policy to gcpu
   839  void guest_cpu_control_setup( GUEST_CPU_HANDLE gcpu )
   840  {
   841      VMM_ASSERT( gcpu );
   842      lock_initialize( &(gcpu->vmexit_setup.lock) );
   843      gcpu_minimal_controls( gcpu );
   844      msr_vmexit_activate(gcpu);
   845      io_vmexit_activate(gcpu);
   846  }
   847  
   848  void gcpu_temp_exceptions_setup( GUEST_CPU_HANDLE gcpu, GCPU_TEMP_EXCEPTIONS_SETUP action )
   849  {
   850      // TODO: Rewrite!!!
   851      // TODO: THIS WILL NOT WORK
   852      VMM_ASSERT( FALSE );
   853  
   854      switch (action) {
   855      case GCPU_TEMP_EXIT_ON_INTR_UNBLOCK:
   856          {
   857              PROCESSOR_BASED_VM_EXECUTION_CONTROLS proc_ctrl;
   858  
   859              proc_ctrl.Uint32 = 0;
   860              proc_ctrl.Bits.VirtualInterrupt = 1;
   861  	    (void)proc_ctrl;
   862              request_vmexit_on_proc_ctrls( gcpu, 0, 0);
   863          }
   864          break;
   865  
   866      case GCPU_TEMP_NO_EXIT_ON_INTR_UNBLOCK:
   867          {
   868              PROCESSOR_BASED_VM_EXECUTION_CONTROLS proc_ctrl;
   869  
   870              proc_ctrl.Uint32 = 0;
   871              proc_ctrl.Bits.VirtualInterrupt = 1;
   872  	    (void)proc_ctrl;
   873  //            gcpu->vmexit_setup.processor_ctrls.enforce_1_settings &= ~(UINT64)proc_ctrl.Uint32;
   874  //            gcpu->vmexit_setup.processor_ctrls.enforce_0_settings |= proc_ctrl.Uint32;
   875              request_vmexit_on_proc_ctrls( gcpu, 0, 0);
   876          }
   877          break;
   878  
   879      default:
    880          VMM_LOG(mask_anonymous, level_trace,"Unknown GCPU_TEMP_EXCEPTIONS_SETUP action: %d\n", action);
   881          VMM_DEADLOOP();
   882      }
   883  }
   884  
   885  void gcpu_control_setup_only( GUEST_CPU_HANDLE gcpu, const VMEXIT_CONTROL* request )
   886  {
   887      VMM_ASSERT( gcpu );
   888      VMM_ASSERT( request );
   889  
   890      lock_acquire( &(gcpu->vmexit_setup.lock) );
   891      if (request->cr0.bit_mask != 0) {
   892          gcpu_update_control_counters( request->cr0.bit_request, request->cr0.bit_mask,
   893                                        &(gcpu->vmexit_setup.cr0) );
   894      }
   895      if (request->cr4.bit_mask != 0) {
   896          gcpu_update_control_counters( request->cr4.bit_request, request->cr4.bit_mask,
   897                                        &(gcpu->vmexit_setup.cr4) );
   898      }
   899      if (request->exceptions.bit_mask != 0) {
   900          gcpu_update_control_counters( request->exceptions.bit_request,
   901                                        request->exceptions.bit_mask,
   902                                        &(gcpu->vmexit_setup.exceptions_ctrls) );
   903      }
   904      if (request->pin_ctrls.bit_mask != 0) {
   905          gcpu_update_control_counters( request->pin_ctrls.bit_request,
   906                                        request->pin_ctrls.bit_mask,
   907                                        &(gcpu->vmexit_setup.pin_ctrls) );
   908      }
   909      if (request->proc_ctrls.bit_mask != 0) {
   910          gcpu_update_control_counters( request->proc_ctrls.bit_request,
   911                                        request->proc_ctrls.bit_mask,
   912                                        &(gcpu->vmexit_setup.processor_ctrls) );
   913      }
   914      if (request->proc_ctrls2.bit_mask != 0) {
   915          VMM_ASSERT( g_processor_ctrls2_supported == TRUE );
   916  
   917          gcpu_update_control_counters( request->proc_ctrls2.bit_request,
   918                                        request->proc_ctrls2.bit_mask,
   919                                        &(gcpu->vmexit_setup.processor_ctrls2) );
   920      }
   921      if (request->vm_enter_ctrls.bit_mask != 0) {
   922          gcpu_update_control_counters( request->vm_enter_ctrls.bit_request,
   923                                        request->vm_enter_ctrls.bit_mask,
   924                                        &(gcpu->vmexit_setup.vm_entry_ctrls) );
   925      }
   926      if (request->vm_exit_ctrls.bit_mask != 0) {
   927          gcpu_update_control_counters( request->vm_exit_ctrls.bit_request,
   928                                        request->vm_exit_ctrls.bit_mask,
   929                                        &(gcpu->vmexit_setup.vm_exit_ctrls) );
   930      }
   931      lock_release( &(gcpu->vmexit_setup.lock) );
   932  }
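         // Usage sketch (hypothetical caller, not from the original): to request VM exits
         // on guest writes to CR0.CD, a caller could set
         //     ctl.cr0.bit_request = CR0_CD;  ctl.cr0.bit_mask = CR0_CD;
         // in a VMEXIT_CONTROL, pass it to gcpu_control_setup_only(), and then call
         // gcpu_control_apply_only() to push the recomputed settings into the VMCS.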
   933  
   934  void gcpu_control_apply_only( GUEST_CPU_HANDLE gcpu )
   935  {
   936      lock_acquire( &(gcpu->vmexit_setup.lock) );
   937      gcpu_apply_all( gcpu );
   938      lock_release( &(gcpu->vmexit_setup.lock) );
   939  }
   940  
   941  void gcpu_control2_apply_only( GUEST_CPU_HANDLE gcpu )
   942  {
   943      lock_acquire( &(gcpu->vmexit_setup.lock) );
   944      gcpu_apply_ctrols2( gcpu );
   945      lock_release( &(gcpu->vmexit_setup.lock) );
   946  }
   947  
   948  BOOLEAN gcpu_cr3_virtualized( GUEST_CPU_HANDLE gcpu )
   949  {
   950      PROCESSOR_BASED_VM_EXECUTION_CONTROLS proc_ctrl;
   951  
   952      proc_ctrl.Uint32 = (UINT32)(gcpu->vmexit_setup.processor_ctrls.bit_field);
   953      return (proc_ctrl.Bits.Cr3Store && proc_ctrl.Bits.Cr3Load);
   954  }
   955  
   956  
   957  /*
   958   *   Enforce settings on hardware VMCS only
   959   *   these changes are not reflected in vmcs#0
   960   */
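         // (These helpers write the merged/hardware VMCS directly; the request counters and
         //  vmcs#0 are left untouched, so the *_restore_on_hw() helpers below can recompute
         //  the normal settings from the counters' bit_field values.)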
   961  void gcpu_enforce_settings_on_hardware(GUEST_CPU_HANDLE  gcpu, GCPU_TEMP_EXCEPTIONS_SETUP  action)
   962  {
   963      switch (action) {
   964      case GCPU_TEMP_EXCEPTIONS_EXIT_ON_ALL:
   965          // enforce all exceptions vmexit
   966          gcpu_exceptions_settings_enforce_on_hw(gcpu, UINT32_ALL_ONES, UINT32_ALL_ONES);
   967          break;
   968  
   969      case GCPU_TEMP_EXIT_ON_PF_AND_CR3:
   970          {
   971              PROCESSOR_BASED_VM_EXECUTION_CONTROLS proc_ctrl;
   972              IA32_VMCS_EXCEPTION_BITMAP            exceptions;
   973  
   974              // enforce all PF vmexits
   975              exceptions.Uint32 = 0;
   976              exceptions.Bits.PF = 1;
   977              gcpu_exceptions_settings_enforce_on_hw(gcpu, UINT32_ALL_ONES, exceptions.Uint32);
   978  
   979              // enforce CR3 access vmexit
   980              proc_ctrl.Uint32  = 0;
   981              proc_ctrl.Bits.Cr3Load = 1;
   982              proc_ctrl.Bits.Cr3Store = 1;
   983              gcpu_proc_ctrls_enforce_on_hw(gcpu, UINT32_ALL_ONES, proc_ctrl.Uint32);
   984          }
   985          break;
   986  
   987      case GCPU_TEMP_EXCEPTIONS_RESTORE_ALL:
   988          // reset to normal exceptions vmexit
   989          gcpu_exceptions_settings_restore_on_hw(gcpu);
   990          break;
   991  
   992      case GCPU_TEMP_RESTORE_PF_AND_CR3:
   993          // reset to normal exceptions vmexit
   994          gcpu_exceptions_settings_restore_on_hw(gcpu);
   995          // reset to normal CR3 vmexits
   996          gcpu_proc_ctrls_restore_on_hw(gcpu);
   997          break;
   998  
   999      case GCPU_TEMP_CR0_NO_EXIT_ON_WP:
  1000          // do not vmexit when guest changes CR0.WP bit
  1001          gcpu_cr0_mask_enforce_on_hw(gcpu,
  1002                  BITMAP_GET64(UINT64_ALL_ONES, ~CR0_WP), // clr CR0_WP bit only
   1003                  0);                                     // no set requirements
  1004          break;
  1005  
  1006      case GCPU_TEMP_CR0_RESTORE_WP:
  1007          // do vmexit when guest changes CR0.WP bit
  1008          gcpu_cr0_mask_enforce_on_hw(gcpu,
  1009                  UINT64_ALL_ONES,                        // no clr requirements
  1010                  CR0_WP);                                // set CR0_WP bit only
  1011          break;
  1012  
  1013      default:
   1014          VMM_LOG(mask_anonymous, level_trace,"Unknown GCPU_TEMP_EXCEPTIONS_SETUP action: %d\n", action);
  1015          // BEFORE_VMLAUNCH. This case should not happen.
  1016          VMM_DEADLOOP();
  1017      }
  1018  }
  1019  
  1020  static void gcpu_exceptions_settings_enforce_on_hw(GUEST_CPU_HANDLE gcpu, 
  1021                      UINT32 zeroes, UINT32 ones)
  1022  {
  1023      IA32_VMCS_EXCEPTION_BITMAP exceptions;
  1024  
  1025      exceptions.Uint32 = (UINT32)gcpu_get_exceptions_map_layered( gcpu, VMCS_MERGED);
  1026      exceptions.Uint32 = APPLY_ZEROES_AND_ONES(exceptions.Uint32, zeroes, ones);
  1027      exceptions.Uint32 = (UINT32)GET_FINAL_SETTINGS(gcpu, exceptions_ctrls, exceptions.Uint32);
  1028      gcpu_set_exceptions_map_layered( gcpu, VMCS_MERGED, exceptions.Uint32);
  1029      update_pfs_setup(gcpu, exceptions.Bits.PF ? EXCEPTIONS_POLICY_CATCH_ALL : EXCEPTIONS_POLICY_CATCH_NOTHING);
  1030  }
  1031  
  1032  static void gcpu_exceptions_settings_restore_on_hw(GUEST_CPU_HANDLE  gcpu)
  1033  {
  1034      if ( ! gcpu_is_vmcs_layered(gcpu)) {
  1035          IA32_VMCS_EXCEPTION_BITMAP exceptions;
  1036          exceptions.Uint32 = (UINT32)gcpu->vmexit_setup.exceptions_ctrls.bit_field;
  1037          exceptions.Uint32 = (UINT32)GET_FINAL_SETTINGS(gcpu, exceptions_ctrls, exceptions.Uint32);
  1038          gcpu_set_exceptions_map_layered(gcpu, VMCS_MERGED, exceptions.Uint32);
  1039          update_pfs_setup(gcpu, exceptions.Bits.PF ? EXCEPTIONS_POLICY_CATCH_ALL : EXCEPTIONS_POLICY_CATCH_NOTHING);
  1040      }
  1041  }
  1042  
  1043  static void gcpu_proc_ctrls_enforce_on_hw(GUEST_CPU_HANDLE gcpu, UINT32 zeroes, UINT32 ones)
  1044  {
  1045      UINT32 proc_ctrls = (UINT32)gcpu_get_processor_ctrls_layered(gcpu, VMCS_MERGED);
  1046      proc_ctrls = APPLY_ZEROES_AND_ONES(proc_ctrls, zeroes, ones);
  1047      proc_ctrls = (UINT32)GET_FINAL_SETTINGS(gcpu, processor_ctrls, proc_ctrls);
  1048      gcpu_set_processor_ctrls_layered(gcpu, VMCS_MERGED, proc_ctrls);
  1049  }
  1050  
  1051  static void gcpu_proc_ctrls_restore_on_hw(GUEST_CPU_HANDLE   gcpu)
  1052  {
  1053      if ( ! gcpu_is_vmcs_layered(gcpu)) {
  1054          UINT32 proc_ctrls = (UINT32)gcpu->vmexit_setup.processor_ctrls.bit_field;
  1055          proc_ctrls = (UINT32)GET_FINAL_SETTINGS(gcpu, processor_ctrls, proc_ctrls);
  1056          gcpu_set_processor_ctrls_layered(gcpu, VMCS_MERGED, proc_ctrls);
  1057      }
  1058  }
  1059  
  1060  static void gcpu_cr0_mask_enforce_on_hw(GUEST_CPU_HANDLE gcpu, UINT64 zeroes, UINT64 ones)
  1061  {
  1062      UINT64 cr0_mask = gcpu_get_cr0_reg_mask_layered(gcpu, VMCS_MERGED);
  1063      cr0_mask = APPLY_ZEROES_AND_ONES(cr0_mask, zeroes, ones);
  1064      cr0_mask = GET_FINAL_SETTINGS(gcpu, cr0, cr0_mask);
  1065      gcpu_set_cr0_reg_mask_layered(gcpu, VMCS_MERGED, cr0_mask);
  1066  }
  1067  
  1068  extern UINT64 ept_get_eptp(GUEST_CPU_HANDLE gcpu);
  1069  extern BOOLEAN ept_set_eptp(GUEST_CPU_HANDLE gcpu, UINT64 ept_root_table_hpa, UINT32 gaw);
  1070  extern GUEST_CPU_HANDLE scheduler_get_current_gcpu_for_guest( GUEST_ID guest_id );
  1071  
  1072  BOOLEAN vmm_get_vmcs_control_state(GUEST_CPU_HANDLE gcpu, 
  1073                       VMM_CONTROL_STATE ControlStateId, VMM_CONTROLS* value)
  1074  {
  1075      VMCS_OBJECT* vmcs;
  1076      VMCS_FIELD vmcs_field_id;
  1077  
  1078  #ifdef JLMDEBUG
  1079      bprint("vmm_get_vmcs_control_state\n");
  1080      LOOP_FOREVER
  1081  #endif
  1082      VMM_ASSERT(gcpu);
  1083      vmcs = gcpu_get_vmcs(gcpu);
  1084      VMM_ASSERT(vmcs);
  1085      if(!value || (UINT32)ControlStateId > (UINT32)NUM_OF_VMM_CONTROL_STATE - 1)
  1086          return FALSE;
  1087  
  1088      // VMCS_FIELD and VMM_CONTROL_STATE are not identically mapped.
  1089      if(ControlStateId < VMM_CR3_TARGET_VALUE_0){
  1090          vmcs_field_id = (VMCS_FIELD)ControlStateId;
  1091      } else {
  1092          vmcs_field_id = (VMCS_FIELD)(VMCS_CR3_TARGET_VALUE_0 + (ControlStateId - VMM_CR3_TARGET_VALUE_0));
  1093      }
  1094  
  1095      switch (vmcs_field_id) {
  1096          case VMCS_CONTROL_VECTOR_PIN_EVENTS:
  1097                  value->value = gcpu_get_pin_ctrls_layered(gcpu, VMCS_MERGED);
  1098                  break;
  1099          case VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS:
  1100                  value->value = gcpu_get_processor_ctrls_layered(gcpu, VMCS_MERGED);
  1101                  break;
  1102          case VMCS_CONTROL2_VECTOR_PROCESSOR_EVENTS:
  1103                  value->value = gcpu_get_processor_ctrls2_layered(gcpu, VMCS_MERGED);
  1104                  break;
  1105          case VMCS_EXCEPTION_BITMAP:
  1106                  value->value = gcpu_get_exceptions_map_layered(gcpu, VMCS_MERGED);
  1107                  break;
  1108          case VMCS_PAGE_FAULT_ERROR_CODE_MASK:
  1109          case VMCS_PAGE_FAULT_ERROR_CODE_MATCH:
  1110                  gcpu_get_pf_error_code_mask_and_match_layered(gcpu, VMCS_MERGED, (UINT32*)&(value->mask_value.mask), (UINT32*)&(value->mask_value.value));
  1111                  break;
  1112          case VMCS_CR0_MASK:
  1113                  value->value = gcpu_get_cr0_reg_mask_layered(gcpu, VMCS_MERGED);
  1114                  break;
  1115          case VMCS_CR4_MASK:
  1116                  value->value = gcpu_get_cr4_reg_mask_layered(gcpu, VMCS_MERGED);
  1117                  break;
  1118          case VMCS_EXIT_CONTROL_VECTOR:
  1119                  value->value = gcpu_get_exit_ctrls_layered(gcpu,VMCS_MERGED);
  1120                  break;
  1121          case VMCS_EPTP_ADDRESS:
  1122                  value->value = ept_get_eptp(gcpu);
  1123                  break;
  1124  
  1125          default:
  1126                  value->value = vmcs_read(vmcs, vmcs_field_id);
  1127                  break;
  1128          }
  1129      return TRUE;
  1130  }
  1131  
  1132  BOOLEAN vmm_set_vmcs_control_state(GUEST_CPU_HANDLE gcpu, 
  1133                                     VMM_CONTROL_STATE ControlStateId, 
  1134                                     VMM_CONTROLS* value)
  1135  {
  1136      VMCS_OBJECT* vmcs;
  1137      VMCS_FIELD vmcs_field_id;
  1138  #ifdef INCLUDE_UNUSED_CODE
  1139      UINT64 cr3_count = 0;
  1140  #endif
  1141  
  1142  #ifdef JLMDEBUG
  1143      bprint("vmm_set_vmcs_control_state\n");
  1144      LOOP_FOREVER
  1145  #endif
  1146      VMM_ASSERT(gcpu);
  1147      vmcs = gcpu_get_vmcs(gcpu);
  1148      VMM_ASSERT(vmcs);
  1149      if(!value || (UINT32)ControlStateId > (UINT32)NUM_OF_VMM_CONTROL_STATE - 1)
  1150          return FALSE;
  1151      // VMCS_FIELD and VMM_CONTROL_STATE are not identically mapped.
  1152      if(ControlStateId < VMM_CR3_TARGET_VALUE_0){
  1153              vmcs_field_id = (VMCS_FIELD)ControlStateId;
  1154      }
  1155      else {
  1156          vmcs_field_id = (VMCS_FIELD)(VMCS_CR3_TARGET_VALUE_0 + (ControlStateId - VMM_CR3_TARGET_VALUE_0));
  1157      }
  1158  
  1159      switch (vmcs_field_id){
  1160          case VMCS_CONTROL_VECTOR_PIN_EVENTS:
  1161              request_vmexit_on_pin_ctrls(gcpu, (UINT32)(value->mask_value.value), (UINT32)(value->mask_value.mask));
  1162              break;
  1163          case VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS:
  1164              if(value->mask_value.mask)
  1165                  request_vmexit_on_proc_ctrls(gcpu, (UINT32)(value->mask_value.value),
  1166                                               (UINT32)(value->mask_value.mask));
  1167              else
  1168                  vmcs_write(vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS, 
  1169                             value->mask_value.value);
  1170              break;
  1171          case VMCS_CONTROL2_VECTOR_PROCESSOR_EVENTS:
  1172              request_vmexit_on_proc_ctrls2(gcpu, (UINT32)(value->mask_value.value), 
  1173                                            (UINT32)(value->mask_value.mask));
  1174              break;
  1175          case VMCS_EXCEPTION_BITMAP:
  1176              request_vmexit_on_exceptions(gcpu, (UINT32)(value->mask_value.value), 
  1177                                           (UINT32)(value->mask_value.mask));
  1178              break;
  1179          case VMCS_CR0_MASK:
  1180              if(value->mask_value.mask  || 
  1181                          ((!value->mask_value.mask) && (!value->mask_value.value)))
  1182                  request_vmexit_on_cr0(gcpu, (UINT32)(value->mask_value.value), 
  1183                                        (UINT32)(value->mask_value.mask));
  1184              else
  1185                  vmcs_write(vmcs, VMCS_CR0_MASK, value->mask_value.value);
  1186              break;
  1187          case VMCS_CR4_MASK:
  1188              if(value->mask_value.mask  || 
  1189                      ((!value->mask_value.mask) && (!value->mask_value.value)))
  1190                  request_vmexit_on_cr4(gcpu, (UINT32)(value->mask_value.value), 
  1191                                        (UINT32)(value->mask_value.mask));
  1192              else
  1193                  vmcs_write(vmcs, VMCS_CR4_MASK, value->mask_value.value);
  1194              break;
  1195          case VMCS_EXIT_CONTROL_VECTOR:
  1196              gcpu_set_exit_ctrls_layered(gcpu,VMCS_MERGED, (UINT32)(value->value));
  1197              break;
  1198          case VMCS_MSR_BITMAP_ADDRESS:
  1199              vmcs_write(vmcs, VMCS_MSR_BITMAP_ADDRESS, value->value);
  1200              break;
  1201          case VMCS_EPTP_INDEX:
  1202              vmcs_write(vmcs, VMCS_EPTP_INDEX, value->value);
  1203              break;
  1204          case VMCS_EPTP_ADDRESS:
  1205              return ept_set_eptp(gcpu, value->ept_value.ept_root_table_hpa, 
  1206                                  (UINT32)(value->ept_value.gaw));
  1207  #ifdef INCLUDE_UNUSED_CODE
  1208          case VMCS_CR3_TARGET_COUNT:
  1209              vmcs_write(vmcs, VMCS_CR3_TARGET_COUNT, value->cr3.cr3_count);
  1210              break;
  1211          case VMCS_CR3_TARGET_VALUE_0:
  1212              cr3_count = vmcs_read(vmcs, VMCS_CR3_TARGET_COUNT);
  1213              if(!cr3_count)
  1214                  return FALSE;
  1215              vmcs_write(vmcs, VMCS_CR3_TARGET_VALUE_0, value->cr3.cr3_value[0]);
  1216              break;
  1217          case VMCS_CR3_TARGET_VALUE_1:
  1218              cr3_count = vmcs_read(vmcs, VMCS_CR3_TARGET_COUNT);
  1219              if(cr3_count < 2)
  1220                  return FALSE;
  1221              vmcs_write(vmcs, VMCS_CR3_TARGET_VALUE_1, value->cr3.cr3_value[1]);
  1222              break;
  1223          case VMCS_CR3_TARGET_VALUE_2:
  1224              cr3_count = vmcs_read(vmcs, VMCS_CR3_TARGET_COUNT);
  1225              if(cr3_count < 3)
  1226                  return FALSE;
  1227              vmcs_write(vmcs, VMCS_CR3_TARGET_VALUE_2, value->cr3.cr3_value[2]);
  1228              break;
  1229          case VMCS_CR3_TARGET_VALUE_3:
  1230              cr3_count = vmcs_read(vmcs, VMCS_CR3_TARGET_COUNT);
  1231              if(cr3_count < 4)
  1232                  return FALSE;
  1233              vmcs_write(vmcs, VMCS_CR3_TARGET_VALUE_3, value->cr3.cr3_value[3]);
  1234              break;
  1235  #endif
  1236          case VMCS_PAGE_FAULT_ERROR_CODE_MATCH:
  1237              //add new func in vmm
  1238              //TBD
  1239              return FALSE;
  1240          case VMCS_VPID:
  1241          case VMCS_PAGE_FAULT_ERROR_CODE_MASK:
  1242          case VMCS_ENTER_CONTROL_VECTOR:
  1243          case VMCS_ENTER_INTERRUPT_INFO:
  1244          case VMCS_ENTER_EXCEPTION_ERROR_CODE:
  1245          case VMCS_ENTER_INSTRUCTION_LENGTH:
  1246              // Not supported. Will support later if required. TBD.
  1247              return FALSE;
  1248          default:
  1249              // Not supported or read-only.
  1250              return FALSE;
  1251          }
  1252      return TRUE;
  1253  }