github.com/jlmucb/cloudproxy@v0.0.0-20170830161738-b5aa0b619bc4/cpvmm/vmm/guest/guest_cpu/guest_cpu.c

/*
 * Copyright (c) 2013 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *     http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "file_codes.h"
#define VMM_DEADLOOP()          VMM_DEADLOOP_LOG(GUEST_CPU_C)
#define VMM_ASSERT(__condition) VMM_ASSERT_LOG(GUEST_CPU_C, __condition)
#include "guest_cpu_internal.h"
#include "guest_internal.h"
#include "heap.h"
#include "array_iterators.h"
#include "gpm_api.h"
#include "scheduler.h"
#include "vmx_ctrl_msrs.h"
#include "host_memory_manager_api.h"
#include "vmcs_init.h"
#include "cli.h"
#include "pat_manager.h"
#include "page_walker.h"
#include "vmm_startup.h"
#include "memory_allocator.h"
#include "host_cpu.h"
#include "vmx_timer.h"
#include "unrestricted_guest.h"
#ifdef JLMDEBUG
#include "jlmdebug.h"
#endif

#pragma warning( disable: 4100 )

// Guest CPU
// A guest CPU may be in 2 different modes:
//    16-bit mode - runs under the emulator
//    any other mode - runs natively
static GUEST_CPU_HANDLE g_gcpus = NULL; // list of all guest cpus

// this is a shortcut pointer for assembler code
GUEST_CPU_SAVE_AREA** g_guest_regs_save_area = NULL;
static UINT32         g_host_cpu_count = 0;

CLI_CODE( static void gcpu_install_show_service(void);)

// Global gcpu iterator
typedef GUEST_CPU_HANDLE GLOBAL_GUEST_CPU_ITERATOR;

INLINE GUEST_CPU_HANDLE global_gcpu_first( GLOBAL_GUEST_CPU_ITERATOR* ctx )
{
    *ctx = g_gcpus;
    return g_gcpus;
}

INLINE GUEST_CPU_HANDLE global_gcpu_next( GLOBAL_GUEST_CPU_ITERATOR* ctx )
{
    GUEST_CPU_HANDLE gcpu;
    if(ctx == NULL || *ctx == NULL) {
        return NULL;
    }
    gcpu = *ctx;
    *ctx = gcpu->next_gcpu;
    return gcpu->next_gcpu;
}
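
// Typical iteration over the global gcpu list (the same pattern is used by
// gcpu_allocate() and gcpu_state() below); both helpers return NULL on an
// empty or exhausted list, so the loop terminates naturally:
//     GLOBAL_GUEST_CPU_ITERATOR ctx;
//     GUEST_CPU_HANDLE gcpu;
//     for (gcpu = global_gcpu_first(&ctx); gcpu; gcpu = global_gcpu_next(&ctx)) {
//         // visit gcpu
//     }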

// cache debug registers
// only DR0-DR3 and DR6 are cached here (DR4/DR5 are reserved); DR7 is
// maintained in the VMCS
void cache_debug_registers( const GUEST_CPU* gcpu )
{
    // cast away const: only the mutable cache (flags + save area) is updated
    GUEST_CPU* vgcpu = (GUEST_CPU*)gcpu;

    if (GET_DEBUG_REGS_CACHED_FLAG( vgcpu )) {
        return;
    }
    SET_DEBUG_REGS_CACHED_FLAG(vgcpu);
    vgcpu->save_area.debug.reg[IA32_REG_DR0] = hw_read_dr(0);
    vgcpu->save_area.debug.reg[IA32_REG_DR1] = hw_read_dr(1);
    vgcpu->save_area.debug.reg[IA32_REG_DR2] = hw_read_dr(2);
    vgcpu->save_area.debug.reg[IA32_REG_DR3] = hw_read_dr(3);
    // dr4 and dr5 are reserved
    vgcpu->save_area.debug.reg[IA32_REG_DR6] = hw_read_dr(6);
}

#ifdef INCLUDE_UNUSED_CODE
void restore_hw_debug_registers( GUEST_CPU* gcpu )
{
    // modified without cached is possible for initial start
    CLR_DEBUG_REGS_MODIFIED_FLAG(gcpu);
    if (! GET_DEBUG_REGS_CACHED_FLAG( gcpu )) {
        return;
    }
    hw_write_dr(0, gcpu->save_area.debug.reg[IA32_REG_DR0]);
    hw_write_dr(1, gcpu->save_area.debug.reg[IA32_REG_DR1]);
    hw_write_dr(2, gcpu->save_area.debug.reg[IA32_REG_DR2]);
    hw_write_dr(3, gcpu->save_area.debug.reg[IA32_REG_DR3]);
    // dr4 and dr5 are reserved
    // hw_write_dr(6, gcpu->save_area.debug.reg[IA32_REG_DR6]);  Read Only $VT$
}
#endif

// cache fx state
// note that the FX state also includes the MMX registers, which are not
// valid at this point because they hold VMM rather than guest values
void cache_fx_state( const GUEST_CPU* gcpu )
{
    // cast away const: only the mutable cache (flags + save area) is updated
    GUEST_CPU* vgcpu = (GUEST_CPU*)gcpu;

    if (GET_FX_STATE_CACHED_FLAG( vgcpu )) {
        return;
    }
    SET_FX_STATE_CACHED_FLAG(vgcpu);
    hw_fxsave(vgcpu->save_area.fxsave_area);
}
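
// (architectural note: FXSAVE stores a 512-byte image of the x87/MMX/SSE
// state into a 16-byte-aligned area; the flag check above ensures the
// snapshot is taken only once until the cached flag is cleared)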

#ifdef INCLUDE_UNUSED_CODE
void restore_fx_state( GUEST_CPU* gcpu )
{
    // modified without cached is possible for initial start
    CLR_FX_STATE_MODIFIED_FLAG(gcpu);
    if (! GET_FX_STATE_CACHED_FLAG( gcpu )) {
        return;
    }
    hw_fxrestore( gcpu->save_area.fxsave_area );
}
#endif


// perform minimal init of vmcs
// assumes that all uninitialized fields default to 0, except those that
// are required to be 1 according to
// Intel(R) 64 and IA-32 Architectures volume 3B,
// Section 22.3.1 "Checks on the Guest State Area"
static void setup_default_state( GUEST_CPU_HANDLE gcpu )
{
#ifdef JLMDEBUG
    bprint("setup_default_state starting\n");
#endif
    VMCS_OBJECT* vmcs = gcpu_get_vmcs(gcpu);
    VMM_ASSERT(vmcs);
    // init control fields
    guest_cpu_control_setup( gcpu );
    // set control registers to any supported value
    gcpu_set_control_reg( gcpu, IA32_CTRL_CR0, 0);
    gcpu_set_control_reg( gcpu, IA32_CTRL_CR4, 0);
    gcpu_set_control_reg( gcpu, IA32_CTRL_CR8, 0);

    // set all segment selectors except TR and CS to unusable state
    // CS: Accessed Code NotSystem NonConforming Present 32bit bit-granularity
    gcpu_set_segment_reg(gcpu, IA32_SEG_CS, 0, 0, 0, 0x99 );
    gcpu_set_segment_reg(gcpu, IA32_SEG_DS, 0, 0, 0, EM64T_SEGMENT_IS_UNUSABLE_ATTRUBUTE_VALUE);
    gcpu_set_segment_reg(gcpu, IA32_SEG_SS, 0, 0, 0, EM64T_SEGMENT_IS_UNUSABLE_ATTRUBUTE_VALUE);
    gcpu_set_segment_reg(gcpu, IA32_SEG_ES, 0, 0, 0, EM64T_SEGMENT_IS_UNUSABLE_ATTRUBUTE_VALUE);
    gcpu_set_segment_reg(gcpu, IA32_SEG_FS, 0, 0, 0, EM64T_SEGMENT_IS_UNUSABLE_ATTRUBUTE_VALUE);
    gcpu_set_segment_reg(gcpu, IA32_SEG_GS, 0, 0, 0, EM64T_SEGMENT_IS_UNUSABLE_ATTRUBUTE_VALUE);
    gcpu_set_segment_reg(gcpu, IA32_SEG_LDTR, 0, 0, 0, EM64T_SEGMENT_IS_UNUSABLE_ATTRUBUTE_VALUE );
    // TR: 32bit busy TSS System Present bit-granularity
    gcpu_set_segment_reg(gcpu, IA32_SEG_TR,   0, 0, 0, 0x8B);
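    // Decoding of the two attribute bytes used above (a note, assuming the
    // standard IA-32 access-rights layout in the low byte):
    //   0x99 = P=1 DPL=0 S=1 Type=1001b -> accessed, execute-only code segment
    //   0x8B = P=1 DPL=0 S=0 Type=1011b -> busy 32-bit TSS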
    // FLAGS: reserved bit 1 must be 1, all other bits must be 0
    gcpu_set_gp_reg( gcpu, IA32_REG_RFLAGS, 0x2);
    vmcs_init_all_msr_lists(vmcs);
    host_cpu_init_vmexit_store_and_vmenter_load_msr_lists_according_to_vmexit_load_list(gcpu);
    gcpu_set_msr_reg(gcpu, IA32_VMM_MSR_EFER, 0);
    gcpu_set_msr_reg(gcpu, IA32_VMM_MSR_PAT, hw_read_msr(IA32_MSR_PAT));
    VMM_ASSERT(vmcs_read(vmcs, VMCS_EXIT_MSR_STORE_ADDRESS) == vmcs_read(vmcs, VMCS_ENTER_MSR_LOAD_ADDRESS));

    // by default put guest CPU into the Wait-for-SIPI state
    VMM_ASSERT( vmcs_hw_get_vmx_constraints()->vm_entry_in_wait_for_sipi_state_supported );
    gcpu_set_activity_state( gcpu, Ia32VmxVmcsGuestSleepStateWaitForSipi );
    vmcs_write( vmcs, VMCS_ENTER_INTERRUPT_INFO, 0 );
    vmcs_write( vmcs, VMCS_ENTER_EXCEPTION_ERROR_CODE, 0 );
#ifdef ENABLE_PREEMPTION_TIMER
    vmx_timer_create(gcpu);
#endif
    vmcs_set_launch_required( vmcs );
}


void gcpu_manager_init(UINT16 host_cpu_count)
{
#ifdef JLMDEBUG
    bprint("gcpu_manager_init %d\n", host_cpu_count);
#endif
    VMM_ASSERT( host_cpu_count );
    g_host_cpu_count = host_cpu_count;
    g_guest_regs_save_area =
            vmm_memory_alloc(sizeof(GUEST_CPU_SAVE_AREA*)*host_cpu_count);
    VMM_ASSERT(g_guest_regs_save_area);
    // init subcomponents
    vmcs_hw_init();
    vmcs_manager_init();
    CLI_CODE( gcpu_install_show_service();)
}
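
// (g_guest_regs_save_area now holds one GUEST_CPU_SAVE_AREA* slot per host
// CPU; per the note at its definition it is a shortcut for assembler code,
// presumably indexed by host CPU id)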

GUEST_CPU_HANDLE gcpu_allocate(VIRTUAL_CPU_ID vcpu, GUEST_HANDLE guest)
{
    GUEST_CPU_HANDLE          gcpu = NULL;
    GLOBAL_GUEST_CPU_ITERATOR ctx;
    VMM_STATUS                status;

#ifdef JLMDEBUG1
    bprint("gcpu_allocate, g_cpu: 0x%016x\n", g_gcpus);
#endif
    // ensure that this vcpu has not been allocated yet
    for (gcpu = global_gcpu_first(&ctx); gcpu; gcpu = global_gcpu_next(&ctx)) {
        if ((gcpu->vcpu.guest_id == vcpu.guest_id) &&
            (gcpu->vcpu.guest_cpu_id == vcpu.guest_cpu_id)) {
            VMM_LOG(mask_anonymous,level_trace,
                     "The CPU %d for the Guest %d was already allocated.\n",
                     vcpu.guest_cpu_id, vcpu.guest_id);
            VMM_ASSERT(FALSE);
            return gcpu;
        }
    }
    // allocate next gcpu
    gcpu = (GUEST_CPU_HANDLE) vmm_memory_alloc(sizeof(GUEST_CPU));
    VMM_ASSERT(gcpu);
    vmm_zeromem(gcpu, sizeof(GUEST_CPU));
    gcpu->next_gcpu = g_gcpus;
    g_gcpus = gcpu;
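    // note: new gcpus are pushed at the head of the list, so the global
    // iterator visits the most recently allocated guest CPU first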
    gcpu->vcpu = vcpu;
    gcpu->last_guest_level = GUEST_LEVEL_1_SIMPLE;
    gcpu->next_guest_level = GUEST_LEVEL_1_SIMPLE;
    gcpu->state_flags = 0;
    gcpu->caching_flags = 0;
    status = vmcs_hierarchy_create(&gcpu->vmcs_hierarchy, gcpu);
    if (VMM_OK != status) {
#ifdef JLMDEBUG
        bprint("gcpu_allocate: vmcs_hierarchy_create failed\n");
#endif
    }
    gcpu->emulator_handle = 0;
    gcpu->guest_handle = guest;
    gcpu->active_gpm = NULL;
    SET_MODE_NATIVE(gcpu);
    SET_IMPORTANT_EVENT_OCCURED_FLAG(gcpu);
    SET_CACHED_ACTIVITY_STATE(gcpu, Ia32VmxVmcsGuestSleepStateActive);
#ifdef JLMDEBUG1
    bprint("about to call setup_default_state\n");
#endif
    setup_default_state(gcpu);
    gcpu->resume_func = gcpu_perform_split_merge; // default "resume" function
#ifdef FAST_VIEW_SWITCH
    gcpu->fvs_cpu_desc.vmentry_eptp = 0;
    gcpu->fvs_cpu_desc.enabled = FALSE;
#endif
    return gcpu;
}

// Get Guest CPU state by VIRTUAL_CPU_ID
// Return NULL if no such guest cpu
GUEST_CPU_HANDLE gcpu_state( const VIRTUAL_CPU_ID* vcpu )
{
    GUEST_CPU_HANDLE gcpu = NULL;
    GLOBAL_GUEST_CPU_ITERATOR ctx;

    for (gcpu = global_gcpu_first(&ctx); gcpu; gcpu = global_gcpu_next(&ctx)) {
        if ((gcpu->vcpu.guest_id == vcpu->guest_id) &&
                (gcpu->vcpu.guest_cpu_id == vcpu->guest_cpu_id)) {  // found guest cpu
            return gcpu;
        }
    }
    return NULL;
}

// get VMCS object to work directly
VMCS_OBJECT* gcpu_get_vmcs( GUEST_CPU_HANDLE  gcpu )
{
    if(gcpu == NULL) {
        return NULL;
    }
    return vmcs_hierarchy_get_vmcs(&gcpu->vmcs_hierarchy, VMCS_MERGED);
}
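
// (note: VMCS_MERGED is the level the hardware actually launches; in the
// non-layered GUEST_LEVEL_1_SIMPLE case it is the same object as
// VMCS_LEVEL_0, see the assertion in gcpu_change_level0_vmexit_msr_load_list
// at the end of this file)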

VMCS_HIERARCHY * gcpu_get_vmcs_hierarchy( GUEST_CPU_HANDLE  gcpu )
{
    if(gcpu == NULL) {
        return NULL;
    }
    return &gcpu->vmcs_hierarchy;
}

VMCS_OBJECT* gcpu_get_vmcs_layered( GUEST_CPU_HANDLE  gcpu, VMCS_LEVEL level)
{
    if(gcpu == NULL) {
        return NULL;
    }
    return vmcs_hierarchy_get_vmcs(&gcpu->vmcs_hierarchy, level);
}


BOOLEAN gcpu_is_vmcs_layered( GUEST_CPU_HANDLE  gcpu)
{
    VMM_ASSERT(gcpu);

    return vmcs_hierarchy_is_layered(&gcpu->vmcs_hierarchy);
}

#ifdef INCLUDE_UNUSED_CODE
BOOLEAN gcpu_is_merge_required(GUEST_CPU_HANDLE gcpu)
{
    return gcpu->merge_required;
}
#endif

#ifdef INCLUDE_UNUSED_CODE
void gcpu_configure_merge_required(GUEST_CPU_HANDLE gcpu, BOOLEAN required)
{
    gcpu->merge_required = (UINT8) required;
}
#endif

BOOLEAN gcpu_uses_host_page_tables(GUEST_CPU_HANDLE gcpu)
{
    return gcpu->use_host_page_tables;
}

void gcpu_do_use_host_page_tables(GUEST_CPU_HANDLE gcpu, BOOLEAN use)
{
    gcpu->use_host_page_tables = (UINT8) use;
}

// Get VIRTUAL_CPU_ID by Guest CPU
const VIRTUAL_CPU_ID* guest_vcpu(const GUEST_CPU_HANDLE gcpu)
{
    if(gcpu == NULL) {
        return NULL;
    }
    return &gcpu->vcpu;
}

// Get Guest Handle by Guest CPU
GUEST_HANDLE gcpu_guest_handle( const GUEST_CPU_HANDLE gcpu )
{
    if(gcpu == NULL) {
        return NULL;
    }
    return gcpu->guest_handle;
}

#ifdef ENABLE_EMULATOR
// Emulator-related
EMULATOR_HANDLE gcpu_emulator_handle( GUEST_CPU_HANDLE gcpu )
{
    if(gcpu == NULL) {
        return NULL;
    }
    if (gcpu->emulator_handle == NULL) {
        gcpu->emulator_handle = emul_create_handle( gcpu );
        VMM_ASSERT(gcpu->emulator_handle);
        emul_intialize(gcpu->emulator_handle);
    }
    return gcpu->emulator_handle;
}


BOOLEAN gcpu_process_interrupt(VECTOR_ID vector_id)
{
    BOOLEAN recognized = emulator_is_running_as_guest();

    if (recognized) {
        // call emulator handler
        GUEST_CPU_HANDLE gcpu = scheduler_current_gcpu();
        VMM_ASSERT(gcpu && IS_MODE_EMULATOR(gcpu));
        VMM_ASSERT(gcpu->emulator_handle);
        emulator_interrupt_handler(gcpu->emulator_handle, vector_id);
    }
    return recognized;
}
#else
BOOLEAN gcpu_process_interrupt(VECTOR_ID vector_id)
{
    (void)vector_id;
    return FALSE;
}
#endif


// Initialize guest CPU
// Should be called only if initial GCPU state is not Wait-For-Sipi
void gcpu_initialize(GUEST_CPU_HANDLE gcpu,
                      const VMM_GUEST_CPU_STARTUP_STATE* initial_state)
{
    UINT32 idx;

#ifdef JLMDEBUG1
    bprint("gcpu_initialize\n");
#endif
    VMM_ASSERT( gcpu );
    if (!initial_state) {
        return;
    }
    if (initial_state->size_of_this_struct != sizeof(VMM_GUEST_CPU_STARTUP_STATE)) {
        // wrong state
        VMM_LOG(mask_anonymous, level_trace,"gcpu_initialize() called with unknown structure\n");
        VMM_DEADLOOP();
        return;
    }
    if (initial_state->version_of_this_struct != VMM_GUEST_CPU_STARTUP_STATE_VERSION) {
        // wrong version
        VMM_LOG(mask_anonymous, level_trace,
          "gcpu_initialize() called with an incompatible VMM_GUEST_CPU_STARTUP_STATE "
          "structure: given version: %d expected version: %d\n",
          initial_state->version_of_this_struct, VMM_GUEST_CPU_STARTUP_STATE_VERSION );
        VMM_DEADLOOP();
        return;
    }
    //    vmcs_set_launch_required( gcpu->vmcs );
    vmcs_set_launch_required(gcpu_get_vmcs(gcpu));
    // init gp registers
    for (idx = IA32_REG_RAX; idx < IA32_REG_GP_COUNT; ++idx) {
        gcpu_set_gp_reg(gcpu, (VMM_IA32_GP_REGISTERS)idx,
                        initial_state->gp.reg[idx]);
    }
    // init xmm registers
    for (idx = IA32_REG_XMM0; idx < IA32_REG_XMM_COUNT; ++idx) {
        gcpu_set_xmm_reg(gcpu, (VMM_IA32_XMM_REGISTERS)idx,
                         initial_state->xmm.reg[idx]);
    }
    // init segment registers
    for (idx = IA32_SEG_CS; idx < IA32_SEG_COUNT; ++idx) {
        gcpu_set_segment_reg(gcpu, (VMM_IA32_SEGMENT_REGISTERS)idx,
                initial_state->seg.segment[idx].selector,
                initial_state->seg.segment[idx].base,
                initial_state->seg.segment[idx].limit,
                initial_state->seg.segment[idx].attributes);
    }
    // init control registers
    for (idx = IA32_CTRL_CR0; idx < IA32_CTRL_COUNT; ++idx) {
        gcpu_set_control_reg(gcpu, (VMM_IA32_CONTROL_REGISTERS)idx,
                                   initial_state->control.cr[idx]);
        gcpu_set_guest_visible_control_reg( gcpu, (VMM_IA32_CONTROL_REGISTERS)idx,
                                   initial_state->control.cr[idx]);
    }
    gcpu_set_gdt_reg(gcpu, initial_state->control.gdtr.base,
                     initial_state->control.gdtr.limit);
    gcpu_set_idt_reg(gcpu, initial_state->control.idtr.base,
                     initial_state->control.idtr.limit);
    // init selected model-specific registers
    gcpu_set_msr_reg(gcpu, IA32_VMM_MSR_DEBUGCTL, initial_state->msr.msr_debugctl);
    gcpu_set_msr_reg(gcpu, IA32_VMM_MSR_EFER, initial_state->msr.msr_efer);
    gcpu_set_msr_reg(gcpu, IA32_VMM_MSR_PAT,  initial_state->msr.msr_pat );
    gcpu_set_msr_reg(gcpu, IA32_VMM_MSR_SYSENTER_ESP,
                     initial_state->msr.msr_sysenter_esp );
    gcpu_set_msr_reg(gcpu, IA32_VMM_MSR_SYSENTER_EIP, initial_state->msr.msr_sysenter_eip );
    gcpu_set_msr_reg(gcpu, IA32_VMM_MSR_SYSENTER_CS,
                     initial_state->msr.msr_sysenter_cs );
    gcpu_set_msr_reg(gcpu, IA32_VMM_MSR_SMBASE, initial_state->msr.smbase);
    gcpu_set_pending_debug_exceptions(gcpu, initial_state->msr.pending_exceptions);
    gcpu_set_interruptibility_state(gcpu, initial_state->msr.interruptibility_state);

    // set the cached activity state to the same value so that no
    // state-change events are triggered
    gcpu_set_activity_state(gcpu,
            (IA32_VMX_VMCS_GUEST_SLEEP_STATE)initial_state->msr.activity_state);
    gcpu_set_vmenter_control(gcpu);
#ifdef JLMDEBUG1
    bprint("back at gcpu_initialize\n");
#endif
    // set state in vmenter control fields
    cache_fx_state(gcpu);
    cache_debug_registers(gcpu);
    SET_MODE_NATIVE(gcpu);
    SET_ALL_MODIFIED(gcpu);
#ifdef JLMDEBUG
    bprint("done with gcpu_initialize\n");
#endif
}
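
// Typical lifecycle (a sketch based on this file's own functions):
//     gcpu = gcpu_allocate(vcpu, guest);     // defaults to Wait-for-SIPI
//     gcpu_initialize(gcpu, &startup_state); // only for non-Wait-for-SIPI CPUs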


BOOLEAN gcpu_gva_to_gpa(GUEST_CPU_HANDLE gcpu, GVA gva, GPA* gpa)
{
    UINT64 gpa_tmp;
    UINT64 pfec_tmp;
    BOOLEAN res;
    EM64T_CR0 visible_cr0;
    visible_cr0.Uint64 = gcpu_get_guest_visible_control_reg(gcpu, IA32_CTRL_CR0);

    // GVA = GPA in non-paged mode
    if(is_unrestricted_guest_supported() && !visible_cr0.Bits.PG) {
        *gpa = gva;
        return TRUE;
    }
    if (IS_FLAT_PT_INSTALLED(gcpu)) {
        *gpa = gva;
        return TRUE;
    }
    else {
        res = pw_perform_page_walk(gcpu, gva, FALSE, FALSE, FALSE, FALSE, &gpa_tmp, &pfec_tmp);
        if (res == PW_RETVAL_SUCCESS) {
            *gpa = gpa_tmp;
            return TRUE;
        }
    }
    return FALSE;
}

BOOLEAN gcpu_gva_to_hva(GUEST_CPU_HANDLE gcpu, GVA gva, HVA* hva)
{
    GUEST_HANDLE guest_handle;
    GPM_HANDLE gpm_handle;
    UINT64 gpa;
    UINT64 hva_tmp;

    if (!gcpu_gva_to_gpa(gcpu, gva, &gpa)) {
        VMM_LOG(mask_uvmm, level_error,"%s: Failed to convert gva=%P to gpa\n", __FUNCTION__, gva);
        return FALSE;
    }
    guest_handle = gcpu_guest_handle(gcpu);
    gpm_handle = gcpu_get_current_gpm(guest_handle);
    if (!gpm_gpa_to_hva(gpm_handle, gpa, &hva_tmp)) {
        VMM_LOG(mask_uvmm, level_error,"%s: Failed to convert gpa=%P to hva\n", __FUNCTION__, gpa);
        return FALSE;
    }
    *hva = hva_tmp;
    return TRUE;
}
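
// Usage sketch (hypothetical caller): translating a guest pointer through the
// full GVA -> GPA -> HVA chain before the VMM touches it. gcpu_get_gp_reg is
// assumed from the guest_cpu API; the choice of RBX is arbitrary:
//     HVA hva;
//     UINT64 guest_ptr = gcpu_get_gp_reg(gcpu, IA32_REG_RBX);
//     if (gcpu_gva_to_hva(gcpu, guest_ptr, &hva)) {
//         // hva is now safe to dereference inside the VMM
//     }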

#ifdef INCLUDE_UNUSED_CODE
void gcpu_assign_resume_func(GUEST_CPU_HANDLE gcpu, GCPU_RESUME_FUNC resume_func) {
    gcpu->resume_func = resume_func;
}

void gcpu_install_vmexit_func(GUEST_CPU_HANDLE gcpu, GCPU_VMEXIT_FUNC vmexit_func)
{
    gcpu->vmexit_func = vmexit_func;
}
#endif

GUEST_CPU_HANDLE gcpu_call_vmexit_function(GUEST_CPU_HANDLE gcpu, UINT32 reason)
{
    if (gcpu->vmexit_func)
        return gcpu->vmexit_func(gcpu, reason);
    else
        return NULL;
}


#define PRINT_GP_REG(__gcpu, __reg) CLI_PRINT("\t%13s (addr=%P): %P\n", #__reg, &(__gcpu->save_area.gp.reg[__reg]), __gcpu->save_area.gp.reg[__reg]);

CLI_CODE(

int gcpu_show_gp_registers(unsigned argc, char *args[])
{
    GUEST_ID guest_id;
    GUEST_CPU_HANDLE gcpu;

    if (argc < 2)
        return -1;
    guest_id = (GUEST_ID) CLI_ATOL(args[1]);
    gcpu = scheduler_get_current_gcpu_for_guest(guest_id);
    if (NULL == gcpu)
        return -1;
    CLI_PRINT("=============================================\n");
    PRINT_GP_REG(gcpu, IA32_REG_RAX);
    PRINT_GP_REG(gcpu, IA32_REG_RBX);
    PRINT_GP_REG(gcpu, IA32_REG_RCX);
    PRINT_GP_REG(gcpu, IA32_REG_RDX);
    PRINT_GP_REG(gcpu, IA32_REG_RDI);
    PRINT_GP_REG(gcpu, IA32_REG_RSI);
    PRINT_GP_REG(gcpu, IA32_REG_RBP);
    PRINT_GP_REG(gcpu, IA32_REG_R8);
    PRINT_GP_REG(gcpu, IA32_REG_R9);
    PRINT_GP_REG(gcpu, IA32_REG_R10);
    PRINT_GP_REG(gcpu, IA32_REG_R11);
    PRINT_GP_REG(gcpu, IA32_REG_R12);
    PRINT_GP_REG(gcpu, IA32_REG_R13);
    PRINT_GP_REG(gcpu, IA32_REG_R14);
    PRINT_GP_REG(gcpu, IA32_REG_R15);  // was missing; IA32_REG_R15 assumed present in the GP register enum
    CLI_PRINT("\n");
    PRINT_GP_REG(gcpu, CR2_SAVE_AREA);
    PRINT_GP_REG(gcpu, CR3_SAVE_AREA);
    PRINT_GP_REG(gcpu, CR8_SAVE_AREA);
    //CLI_PRINT("\t%13s (addr=%P): %P\n", "EFER", &(gcpu->save_area.auto_swap_msrs.efer.MsrData), gcpu->save_area.auto_swap_msrs.efer.MsrData);
    //CLI_PRINT("\n");
    CLI_PRINT("\t%s : %P\n", "Guest visible CR0", gcpu_get_guest_visible_control_reg(gcpu, IA32_CTRL_CR0));
    CLI_PRINT("\t%s : %P\n", "Guest visible CR4", gcpu_get_guest_visible_control_reg(gcpu, IA32_CTRL_CR4));
    CLI_PRINT("=============================================\n");

    return 0;
}

) // End Of CLI_CODE

#ifdef ENABLE_EMULATOR
int gcpu_show_emulator_state(unsigned argc, char *args[])
{
    GUEST_ID guest_id;
    GUEST_CPU_HANDLE gcpu;

    if (argc < 2)
        return -1;
    guest_id = (GUEST_ID) CLI_ATOL(args[1]);
    gcpu = scheduler_get_current_gcpu_for_guest(guest_id);
    if (NULL == gcpu)
        return -1;
    if (FALSE == emul_state_show(gcpu->emulator_handle))
        return -1;
    return 0;
}

CLI_CODE(
void gcpu_install_show_service(void)
{
    CLI_AddCommand(gcpu_show_emulator_state,
        "debug emulator show",
        "Print Emulator Architectural State", "<guest_id>",
        CLI_ACCESS_LEVEL_SYSTEM);

    CLI_AddCommand(gcpu_show_gp_registers,
        "debug guest show registers",
        "Print Guest CPU General Purpose Registers on current CPU", "<guest_id>",
        CLI_ACCESS_LEVEL_USER);
}
) // End Of CLI_CODE
#else
CLI_CODE(
void gcpu_install_show_service(void)
{
    CLI_AddCommand(gcpu_show_gp_registers,
        "debug guest show registers",
        "Print Guest CPU General Purpose Registers on current CPU", "<guest_id>",
        CLI_ACCESS_LEVEL_USER);
}
) // End Of CLI_CODE
#endif

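// Point the level-0 VMCS VM-exit MSR-load list at the given array. When the
// level-0 VMCS is the one the hardware launches (the non-layered case), the
// VMCS field must hold a host physical address, hence the HVA->HPA conversion
// below; when layering, the HVA is stored instead (presumably resolved in
// software during the VMCS merge).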
void gcpu_change_level0_vmexit_msr_load_list(GUEST_CPU_HANDLE gcpu,
        IA32_VMX_MSR_ENTRY* msr_list, UINT32 msr_list_count) {
    UINT64 addr = 0;
    VMCS_OBJECT* level0_vmcs = vmcs_hierarchy_get_vmcs(gcpu_get_vmcs_hierarchy(gcpu), VMCS_LEVEL_0);

    if (gcpu_get_guest_level(gcpu) == GUEST_LEVEL_1_SIMPLE) {
        VMM_ASSERT(vmcs_hierarchy_get_vmcs(gcpu_get_vmcs_hierarchy(gcpu), VMCS_MERGED) == level0_vmcs);
        if ((msr_list_count != 0) && (!hmm_hva_to_hpa((HVA)msr_list, &addr))) {
            VMM_LOG(mask_anonymous, level_trace,"%s: Failed to convert HVA to HPA\n", __FUNCTION__);
            // BEFORE_VMLAUNCH
            VMM_DEADLOOP();
        }
    }
    else {
        // when layering, the HVA is stored
        addr = (UINT64)msr_list;
    }
    vmcs_write(level0_vmcs, VMCS_EXIT_MSR_LOAD_ADDRESS, addr);
    vmcs_write(level0_vmcs, VMCS_EXIT_MSR_LOAD_COUNT, msr_list_count);
}