github.com/jlmucb/cloudproxy@v0.0.0-20170830161738-b5aa0b619bc4/cpvmm/vmm/host/host_cpu.c (about)

/*
 * Copyright (c) 2013 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *     http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "host_cpu.h"
#include "guest_cpu.h"
#include "heap.h"
#include "vmx_trace.h"
#include "vmcs_api.h"
#include "vmcs_init.h"
#include "libc.h"
#include "gpm_api.h"
#include "host_memory_manager_api.h"
#include "hw_utils.h"
#include "vmx_asm.h"
#include "vmm_stack_api.h"
#include "scheduler.h"
#include "em64t_defs.h"
#include "file_codes.h"
#ifdef JLMDEBUG
#include "jlmdebug.h"
#endif

#ifndef VMM_DEADLOOP
#define VMM_DEADLOOP()          VMM_DEADLOOP_LOG(HOST_CPU_C)
#endif

#ifndef VMM_ASSERT
#define VMM_ASSERT(__condition) VMM_ASSERT_LOG(HOST_CPU_C, __condition)
#endif


// Host CPU model for VMCS

//          types

#pragma PACK_ON

// Minimum size of allocated MSR list
#define MIN_SIZE_OF_MSR_LIST  4


//#define USE_SYSENTER_STACK

#ifdef USE_SYSENTER_STACK
    #define SYSENTER_STACK_SIZE 16
#endif

// main save area
typedef struct _HOST_CPU_SAVE_AREA {
    HVA     vmxon_region_hva;
    HPA     vmxon_region_hpa;

    UINT16  state_flags;
    UINT8   padding0[6];

    IA32_VMX_MSR_ENTRY*   vmexit_msr_load_list;
    UINT32  vmexit_msr_load_count;
    UINT32  max_vmexit_msr_load_count;
    GUEST_CPU_HANDLE last_vmexit_gcpu;

    UINT64 host_dr7;
    // must be aligned on 16-byte boundary
    //ALIGN16 UINT8       fxsave_area[512];
#ifdef USE_SYSENTER_STACK
    ALIGN16(ADDRESS, sysenter_stack[SYSENTER_STACK_SIZE]);
#endif
} PACKED HOST_CPU_SAVE_AREA;

#pragma PACK_OFF

typedef enum _HOST_CPU_FLAGS {
    HCPU_VMX_IS_ON_FLAG = 0, // VMXON was executed
} HOST_CPU_FLAGS;

#define SET_VMX_IS_ON_FLAG( hcpu )  BIT_SET( (hcpu)->state_flags, HCPU_VMX_IS_ON_FLAG)
#define CLR_VMX_IS_ON_FLAG( hcpu )  BIT_CLR( (hcpu)->state_flags, HCPU_VMX_IS_ON_FLAG)
#define GET_VMX_IS_ON_FLAG( hcpu )  BIT_GET( (hcpu)->state_flags, HCPU_VMX_IS_ON_FLAG)

//          globals
static HOST_CPU_SAVE_AREA*   g_host_cpus = NULL;
static UINT16                g_max_host_cpus = 0;

#ifdef USE_SYSENTER_STACK
//          internal funcs
static void sysenter_func( void )
{
    VMM_LOG(mask_anonymous, level_trace,"sysenter_func CALLED!!!!!!");
    VMM_DEADLOOP();
}
#endif


extern BOOLEAN is_cr4_osxsave_supported(void);

void host_cpu_manager_init( UINT16 max_host_cpus )
{
    VMM_ASSERT( max_host_cpus );
    g_max_host_cpus = max_host_cpus;
    g_host_cpus = vmm_memory_alloc( sizeof( HOST_CPU_SAVE_AREA ) * max_host_cpus );
    VMM_ASSERT( g_host_cpus );
}
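
/*
 * Usage sketch (illustrative, not taken from this file): the boot CPU is
 * expected to call host_cpu_manager_init() once, before any per-CPU setup,
 * with the CPU count discovered at startup:
 *
 *     host_cpu_manager_init(num_of_cpus);   // allocate the save-area array
 *     ...
 *     host_cpu_init();                      // later, once on each host CPU
 *
 * num_of_cpus is a hypothetical name here; host_cpu_init() below
 * re-initializes the MSR-list fields before they are used.
 */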

static void host_cpu_add_msr_to_vmexit_load_list(CPU_ID cpu, UINT32 msr_index, UINT64 msr_value)
{
    HOST_CPU_SAVE_AREA*  hcpu = &g_host_cpus[cpu];
    BOOLEAN              update_gcpus = FALSE;
    IA32_VMX_MSR_ENTRY*  new_msr_ptr = NULL;
    UINT32               i = 0;

    // Check if the MSR is already in the list.
    if (hcpu->vmexit_msr_load_list != NULL) {
        for (i = 0, new_msr_ptr = hcpu->vmexit_msr_load_list;
             i < hcpu->vmexit_msr_load_count; i++, new_msr_ptr++)
            if (new_msr_ptr->MsrIndex == msr_index)
                break;
    }
    else
        i = hcpu->vmexit_msr_load_count;

    if (i >= hcpu->vmexit_msr_load_count) {
        if (hcpu->vmexit_msr_load_list == NULL || hcpu->vmexit_msr_load_count >= hcpu->max_vmexit_msr_load_count) {
            // The list is full or not yet allocated; expand it.
            UINT32 new_max_count = MAX(hcpu->max_vmexit_msr_load_count * 2, MIN_SIZE_OF_MSR_LIST);
            UINT32 new_size = sizeof(IA32_VMX_MSR_ENTRY) * new_max_count;
            IA32_VMX_MSR_ENTRY*  new_list = vmm_malloc_aligned(new_size, sizeof(IA32_VMX_MSR_ENTRY));
            UINT32 j;    // copy index (renamed from a shadowed 'i')

            if (new_list == NULL) {
                VMM_LOG(mask_anonymous, level_trace,
                        "%s: Memory allocation failed\n", __FUNCTION__);
                // BEFORE_VMLAUNCH. MALLOC should not fail.
                VMM_DEADLOOP();
            }

            // Copy the old list.
            for (j = 0; j < hcpu->vmexit_msr_load_count; j++) {
                new_list[j] = hcpu->vmexit_msr_load_list[j];
            }

            // Free the old list.
            if (hcpu->vmexit_msr_load_list != NULL)
                vmm_mfree(hcpu->vmexit_msr_load_list);

            // Assign the new one.
            hcpu->vmexit_msr_load_list = new_list;
            hcpu->max_vmexit_msr_load_count = new_max_count;

            update_gcpus = TRUE;
        }
        new_msr_ptr = &hcpu->vmexit_msr_load_list[hcpu->vmexit_msr_load_count++];
    }

    VMM_ASSERT(new_msr_ptr);
    new_msr_ptr->MsrIndex = msr_index;
    new_msr_ptr->Reserved = 0;
    new_msr_ptr->MsrData = msr_value;

    if (update_gcpus) {
        // The list moved in memory: repoint every guest CPU that runs on
        // this host CPU at the new list address and count.
        SCHEDULER_GCPU_ITERATOR iter;
        GUEST_CPU_HANDLE gcpu;

        gcpu = scheduler_same_host_cpu_gcpu_first(&iter, cpu);
        while (gcpu != NULL) {
            gcpu_change_level0_vmexit_msr_load_list(gcpu, hcpu->vmexit_msr_load_list, hcpu->vmexit_msr_load_count);
            gcpu = scheduler_same_host_cpu_gcpu_next(&iter);
        }
    }
}
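
/*
 * Growth policy, worked through (illustrative): the list doubles with a
 * floor of MIN_SIZE_OF_MSR_LIST, so max_vmexit_msr_load_count evolves
 * 0 -> 4 -> 8 -> 16 ... as entries are appended. Assuming
 * sizeof(IA32_VMX_MSR_ENTRY) == 16 (32-bit index, 32-bit reserved field,
 * 64-bit data value, matching the VMX MSR-list entry layout), the first
 * allocation is 64 bytes. Every reallocation moves the list, which is why
 * update_gcpus must rewire each guest CPU's level-0 VMExit MSR-load list.
 */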

#ifdef INCLUDE_UNUSED_CODE
void host_cpu_add_msr_to_level0_autoswap(CPU_ID cpu, UINT32 msr_index) {
    SCHEDULER_GCPU_ITERATOR iter;
    GUEST_CPU_HANDLE gcpu;
    UINT64 msr_value = hw_read_msr(msr_index);

    gcpu = scheduler_same_host_cpu_gcpu_first(&iter, cpu);
    while (gcpu != NULL) {
        VMCS_OBJECT* vmcs = vmcs_hierarchy_get_vmcs(gcpu_get_vmcs_hierarchy( gcpu ), VMCS_LEVEL_0);
        vmcs_add_msr_to_vmexit_store_and_vmenter_load_lists(vmcs, msr_index, msr_value);
        gcpu = scheduler_same_host_cpu_gcpu_next(&iter);
    }
    host_cpu_add_msr_to_vmexit_load_list(cpu, msr_index, msr_value);
}


void host_cpu_delete_msr_from_vmexit_load_list(CPU_ID cpu, UINT32 msr_index)
{
    HOST_CPU_SAVE_AREA*  hcpu = &g_host_cpus[cpu];
    BOOLEAN              update_gcpus = FALSE;
    IA32_VMX_MSR_ENTRY*  msr_ptr = NULL;
    UINT32               i = 0, j = 0;
    UINT32               msrs_to_copy;

    // Check if the MSR is in the list.
    if (hcpu->vmexit_msr_load_list != NULL && hcpu->vmexit_msr_load_count != 0) {
        for (i = 0, msr_ptr = hcpu->vmexit_msr_load_list;
             i < hcpu->vmexit_msr_load_count; i++, msr_ptr++) {
            if (msr_ptr->MsrIndex == msr_index) {
                // New list size.
                hcpu->vmexit_msr_load_count--;
                // Shift the rest of the list up by one entry.
                for (j = 0, msrs_to_copy = hcpu->vmexit_msr_load_count - i;
                     j < msrs_to_copy; j++) {
                    msr_ptr[j] = msr_ptr[j + 1];
                }
                update_gcpus = TRUE;
                break;
            }
        }
    }
    if (update_gcpus) {
        SCHEDULER_GCPU_ITERATOR  iter;
        GUEST_CPU_HANDLE         gcpu;

        gcpu = scheduler_same_host_cpu_gcpu_first(&iter, cpu);

        while (gcpu != NULL) {
            gcpu_change_level0_vmexit_msr_load_list(gcpu, hcpu->vmexit_msr_load_list,
                                                    hcpu->vmexit_msr_load_count);
            gcpu = scheduler_same_host_cpu_gcpu_next(&iter);
        }
    }
}


void host_cpu_delete_msr_from_level0_autoswap(CPU_ID cpu, UINT32 msr_index)
{
    SCHEDULER_GCPU_ITERATOR iter;
    GUEST_CPU_HANDLE gcpu;

    gcpu = scheduler_same_host_cpu_gcpu_first(&iter, cpu);

    while (gcpu != NULL) {
        VMCS_OBJECT* vmcs = vmcs_hierarchy_get_vmcs(gcpu_get_vmcs_hierarchy( gcpu ), VMCS_LEVEL_0);
        vmcs_delete_msr_from_vmexit_store_and_vmenter_load_lists(vmcs, msr_index);
        gcpu = scheduler_same_host_cpu_gcpu_next(&iter);
    }
    host_cpu_delete_msr_from_vmexit_load_list(cpu, msr_index);
}
#endif

void host_cpu_init_vmexit_store_and_vmenter_load_msr_lists_according_to_vmexit_load_list(
            GUEST_CPU_HANDLE gcpu) {
    CPU_ID cpu = hw_cpu_id();
    VMCS_OBJECT* vmcs = gcpu_get_vmcs(gcpu);
    UINT32 i;
    VMM_ASSERT(vmcs);
    vmcs_clear_vmexit_store_list(vmcs);
    vmcs_clear_vmenter_load_list(vmcs);

    //    VMM_ASSERT(g_host_cpus[cpu].vmexit_msr_load_count > 0);
    VMM_ASSERT(g_host_cpus);
    for (i = 0; i < g_host_cpus[cpu].vmexit_msr_load_count; i++) {
        vmcs_add_msr_to_vmexit_store_and_vmenter_load_lists(vmcs,
                        g_host_cpus[cpu].vmexit_msr_load_list[i].MsrIndex,
                        g_host_cpus[cpu].vmexit_msr_load_list[i].MsrData);
    }
}
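
/*
 * Resulting autoswap symmetry (my reading of the code, not text from this
 * file): for every MSR on the host CPU's VMExit-load list, the guest VMCS
 * gets a matching VMExit-store entry (capture the guest value on exit) and
 * VMEnter-load entry (restore it on entry), so guest and host values of
 * the same MSR are swapped automatically on each transition:
 *
 *     VMEXIT:  guest MSR -> vmexit store list;  host MSR <- vmexit load list
 *     VMENTER: guest MSR <- vmenter load list
 */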


// Initialize current host cpu
void host_cpu_init( void )
{
#ifdef JLMDEBUG
    bprint("In host_cpu_init\n");
#endif
#ifdef USE_SYSENTER_STACK
    CPU_ID              cpu_id = hw_cpu_id();
    HOST_CPU_SAVE_AREA* hcpu = &(g_host_cpus[cpu_id]);
#endif

#ifdef USE_SYSENTER_STACK
    hw_write_msr(IA32_MSR_SYSENTER_CS, hw_read_cs());
    hw_write_msr(IA32_MSR_SYSENTER_EIP, (ADDRESS)sysenter_func);
    hw_write_msr(IA32_MSR_SYSENTER_ESP, (ADDRESS)(hcpu->sysenter_stack + SYSENTER_STACK_SIZE - 5));
#else
// JLM(FIX)
#ifdef JLMDEBUG
    bprint("Not using SYSENTER_STACK. CS = %d\n", IA32_MSR_SYSENTER_CS);
#endif
#if 0    // SYSENTER support?
    hw_write_msr(IA32_MSR_SYSENTER_CS, 0);
#ifdef JLMDEBUG
    bprint("First msr write\n");
#endif
    hw_write_msr(IA32_MSR_SYSENTER_EIP, 0);
    hw_write_msr(IA32_MSR_SYSENTER_ESP, 0);
#endif // ends if 0
#endif // ends USE_SYSENTER_STACK

    {
        CPU_ID              cpu = hw_cpu_id();
        HOST_CPU_SAVE_AREA* host_cpu = &(g_host_cpus[cpu]);

        host_cpu->vmexit_msr_load_list = NULL;
        host_cpu->vmexit_msr_load_count = 0;
        host_cpu->max_vmexit_msr_load_count = 0;

        if(vmcs_hw_get_vmx_constraints()->may1_vm_exit_ctrl.Bits.SaveDebugControls != 1) {
            host_cpu_add_msr_to_vmexit_load_list(cpu, IA32_MSR_DEBUGCTL, hw_read_msr(IA32_MSR_DEBUGCTL));
        }
        if(vmcs_hw_get_vmx_constraints()->may1_vm_exit_ctrl.Bits.SaveSysEnterMsrs != 1) {
            host_cpu_add_msr_to_vmexit_load_list(cpu, IA32_MSR_SYSENTER_ESP, hw_read_msr(IA32_MSR_SYSENTER_ESP));
            host_cpu_add_msr_to_vmexit_load_list(cpu, IA32_MSR_SYSENTER_EIP, hw_read_msr(IA32_MSR_SYSENTER_EIP));
            host_cpu_add_msr_to_vmexit_load_list(cpu, IA32_MSR_SYSENTER_CS, hw_read_msr(IA32_MSR_SYSENTER_CS));
        }
        if(vmcs_hw_get_vmx_constraints()->may1_vm_exit_ctrl.Bits.SaveEfer != 1) {
            host_cpu_add_msr_to_vmexit_load_list(cpu, IA32_MSR_EFER, hw_read_msr(IA32_MSR_EFER));
        }
        if(vmcs_hw_get_vmx_constraints()->may1_vm_exit_ctrl.Bits.SavePat != 1) {
            host_cpu_add_msr_to_vmexit_load_list(cpu, IA32_MSR_PAT, hw_read_msr(IA32_MSR_PAT));
        }
        if(vmcs_hw_get_vmx_constraints()->may1_vm_exit_ctrl.Bits.Load_IA32_PERF_GLOBAL_CTRL != 1) {
            host_cpu_add_msr_to_vmexit_load_list(cpu, IA32_MSR_PERF_GLOBAL_CTRL, hw_read_msr(IA32_MSR_PERF_GLOBAL_CTRL));
        }
    }
}
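
/*
 * Rationale (my summary, hedged): the "may1" VMExit-control constraints
 * report which host state the hardware can manage natively across VMEXIT.
 * Any MSR the hardware cannot handle itself (DEBUGCTL, the SYSENTER trio,
 * EFER, PAT, PERF_GLOBAL_CTRL) is captured here once with its current host
 * value and placed on the manual VMExit MSR-load list, so the CPU restores
 * it on every exit instead.
 */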

// Init the VMCS host-state area for the target cpu. May be executed on any other CPU
void host_cpu_vmcs_init( GUEST_CPU_HANDLE gcpu)
{
    HPA                     host_msr_load_addr = 0;
    VMCS_OBJECT*            vmcs;
    EM64T_GDTR              gdtr;
    EM64T_IDT_DESCRIPTOR    idtr;
    HVA                     gcpu_stack;
    CPU_ID                  cpu;
    VM_EXIT_CONTROLS        exit_controls;
    ADDRESS                 base;
    UINT32                  limit;
    UINT32                  attributes;
    VMEXIT_CONTROL          vmexit_control;
    BOOLEAN                 success;
    VMM_STATUS              status;

    VMM_ASSERT( gcpu );
    exit_controls.Uint32 = 0;
    vmm_memset(&vmexit_control, 0, sizeof(vmexit_control));
    cpu = hw_cpu_id();
    VMM_ASSERT( cpu < g_max_host_cpus );
    vmcs = vmcs_hierarchy_get_vmcs(gcpu_get_vmcs_hierarchy( gcpu ), VMCS_LEVEL_0);
    VMM_ASSERT( vmcs );

    //  Control Registers
    vmcs_write(vmcs, VMCS_HOST_CR0, vmcs_hw_make_compliant_cr0(hw_read_cr0()));
    vmcs_write(vmcs, VMCS_HOST_CR3, hw_read_cr3());
    if(is_cr4_osxsave_supported()){
        EM64T_CR4 cr4_mask;
        cr4_mask.Uint64 = 0;
        cr4_mask.Bits.OSXSAVE = 1;
        vmcs_write(vmcs, VMCS_HOST_CR4, vmcs_hw_make_compliant_cr4(hw_read_cr4()|
            (vmcs_read(vmcs,VMCS_GUEST_CR4) & cr4_mask.Uint64)));
    }
    else {
        vmcs_write(vmcs, VMCS_HOST_CR4, vmcs_hw_make_compliant_cr4(hw_read_cr4()));
    }

    // RIP, RSP
    vmcs_write(vmcs, VMCS_HOST_RIP, (UINT64)vmexit_func);
    success = vmm_stack_get_stack_pointer_for_cpu(cpu, &gcpu_stack);
    VMM_ASSERT(success == TRUE);
    vmcs_write(vmcs, VMCS_HOST_RSP, gcpu_stack);

    //  Base-address fields for FS, GS, TR, GDTR, and IDTR (64 bits each).
    hw_sgdt(&gdtr);
    vmcs_write( vmcs, VMCS_HOST_GDTR_BASE, gdtr.base );

    hw_sidt(&idtr);
    vmcs_write( vmcs, VMCS_HOST_IDTR_BASE, idtr.base );

    //  FS (Selector + Base)
    status = hw_gdt_parse_entry((UINT8 *) gdtr.base, hw_read_fs(), &base, &limit, &attributes);
    VMM_ASSERT(status == VMM_OK);
    vmcs_write(vmcs, VMCS_HOST_FS_SELECTOR, hw_read_fs());
    vmcs_write(vmcs, VMCS_HOST_FS_BASE, base);

    // GS (Selector + Base)
    status = hw_gdt_parse_entry((UINT8 *) gdtr.base, hw_read_gs(), &base, &limit, &attributes);
    VMM_ASSERT(status == VMM_OK);
    vmcs_write(vmcs, VMCS_HOST_GS_SELECTOR, hw_read_gs());
    vmcs_write(vmcs, VMCS_HOST_GS_BASE, base);

    // TR (Selector + Base)
    status = hw_gdt_parse_entry((UINT8 *) gdtr.base, hw_read_tr(), &base,
                                &limit, &attributes);
    VMM_ASSERT(status == VMM_OK);
    vmcs_write(vmcs, VMCS_HOST_TR_SELECTOR, hw_read_tr());
    vmcs_write(vmcs, VMCS_HOST_TR_BASE, base);

    // Selector fields (16 bits each) for CS, SS, DS, ES, FS, GS, and TR
    vmcs_write(vmcs, VMCS_HOST_CS_SELECTOR, hw_read_cs());
    vmcs_write(vmcs, VMCS_HOST_SS_SELECTOR, hw_read_ss());
    vmcs_write(vmcs, VMCS_HOST_DS_SELECTOR, hw_read_ds());
    vmcs_write(vmcs, VMCS_HOST_ES_SELECTOR, hw_read_es());

    // MSRs
    if(vmcs_hw_get_vmx_constraints()->may1_vm_exit_ctrl.Bits.LoadSysEnterMsrs == 1) {
        vmcs_write(vmcs, VMCS_HOST_SYSENTER_CS, hw_read_msr(IA32_MSR_SYSENTER_CS));
        vmcs_write(vmcs, VMCS_HOST_SYSENTER_ESP, hw_read_msr(IA32_MSR_SYSENTER_ESP));
        vmcs_write(vmcs, VMCS_HOST_SYSENTER_EIP, hw_read_msr(IA32_MSR_SYSENTER_EIP));
    }

    if(vmcs_hw_get_vmx_constraints()->may1_vm_exit_ctrl.Bits.LoadEfer == 1) {
        vmcs_write(vmcs, VMCS_HOST_EFER, hw_read_msr(IA32_MSR_EFER));
    }

    if(vmcs_hw_get_vmx_constraints()->may1_vm_exit_ctrl.Bits.LoadPat == 1) {
        vmcs_write(vmcs, VMCS_HOST_PAT, hw_read_msr(IA32_MSR_PAT));
    }

    exit_controls.Bits.Ia32eModeHost = 1;
    vmexit_control.vm_exit_ctrls.bit_request = exit_controls.Uint32;
    vmexit_control.vm_exit_ctrls.bit_mask = exit_controls.Uint32;
    gcpu_control_setup( gcpu, &vmexit_control );

    VMM_ASSERT(g_host_cpus);
    if (gcpu_get_guest_level(gcpu) == GUEST_LEVEL_1_SIMPLE) {
        VMM_ASSERT(vmcs_hierarchy_get_vmcs(gcpu_get_vmcs_hierarchy(gcpu), VMCS_MERGED) == vmcs);
        if ((g_host_cpus[cpu].vmexit_msr_load_count != 0) && (!hmm_hva_to_hpa((HVA)g_host_cpus[cpu].vmexit_msr_load_list, &host_msr_load_addr))) {
            VMM_LOG(mask_anonymous, level_trace,"%s:(%d):ASSERT: HVA to HPA conversion failed\n", __FUNCTION__, __LINE__);
            VMM_DEADLOOP();
        }
    }
    else {
        // Address remains an HVA
        host_msr_load_addr = (UINT64)g_host_cpus[cpu].vmexit_msr_load_list;
    }

    // Assign the VMExit MSR-load list
    vmcs_assign_vmexit_msr_load_list(vmcs, host_msr_load_addr, g_host_cpus[cpu].vmexit_msr_load_count);
}
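
/*
 * Call-order sketch (illustrative, pieced together from this file): for
 * each host CPU, the expected sequence before the first VMENTER is roughly
 *
 *     host_cpu_init();                   // reset and fill per-CPU MSR list
 *     host_cpu_vmcs_init(gcpu);          // write host state into level-0 VMCS
 *     host_cpu_set_vmxon_region(hva, hpa, cpu);
 *     // ... VMXON is executed elsewhere, then:
 *     host_cpu_set_vmx_state(TRUE);
 */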


// Set/Get VMXON Region pointer for the current CPU
void host_cpu_set_vmxon_region(HVA hva, HPA hpa, CPU_ID my_cpu_id)
{
    HOST_CPU_SAVE_AREA* hcpu = NULL;

    VMM_ASSERT(g_host_cpus);
    VMM_ASSERT(my_cpu_id < g_max_host_cpus);
    hcpu = &(g_host_cpus[my_cpu_id]);
    hcpu->vmxon_region_hva = hva;
    hcpu->vmxon_region_hpa = hpa;
}

HVA  host_cpu_get_vmxon_region(HPA* hpa)
{
    CPU_ID              my_cpu_id = hw_cpu_id();
    HOST_CPU_SAVE_AREA* hcpu = NULL;

    VMM_ASSERT(g_host_cpus);
    VMM_ASSERT(my_cpu_id < g_max_host_cpus);
    VMM_ASSERT(hpa);
    hcpu = &(g_host_cpus[my_cpu_id]);
    *hpa = hcpu->vmxon_region_hpa;
    return hcpu->vmxon_region_hva;
}
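
/*
 * Usage sketch (illustrative; variable names are hypothetical): the setter
 * records where the VMXON region lives in both address spaces, and a later
 * caller retrieves the physical address that the VMXON instruction needs:
 *
 *     HPA vmxon_hpa;
 *     HVA vmxon_hva = host_cpu_get_vmxon_region(&vmxon_hpa);
 *     // vmxon_hpa goes to the hardware; vmxon_hva is for CPU access
 */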

void host_cpu_set_vmx_state(BOOLEAN value)
{
    CPU_ID              my_cpu_id = hw_cpu_id();
    HOST_CPU_SAVE_AREA* hcpu = NULL;

    VMM_ASSERT( g_host_cpus );
    VMM_ASSERT( my_cpu_id < g_max_host_cpus );

    hcpu = &(g_host_cpus[my_cpu_id]);
    if (value) {
        SET_VMX_IS_ON_FLAG( hcpu );
    }
    else {
        CLR_VMX_IS_ON_FLAG( hcpu );
    }
}

BOOLEAN host_cpu_get_vmx_state( void )
{
    CPU_ID              my_cpu_id = hw_cpu_id();
    HOST_CPU_SAVE_AREA* hcpu = NULL;

    VMM_ASSERT( g_host_cpus );
    VMM_ASSERT( my_cpu_id < g_max_host_cpus );

    hcpu = &(g_host_cpus[my_cpu_id]);

    return GET_VMX_IS_ON_FLAG( hcpu ) ? TRUE : FALSE;
}
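
/*
 * Example guard (hypothetical caller, not from this file): code that must
 * not issue VMX instructions before VMXON can check the per-CPU flag:
 *
 *     if (!host_cpu_get_vmx_state()) {
 *         return;  // VMXON not yet executed on this CPU
 *     }
 */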

void host_cpu_enable_usage_of_xmm_regs( void )
{
    EM64T_CR4                 cr4;

    // Allow access to XMM registers (the compiler assumes this for 64-bit code)
    cr4.Uint64 = hw_read_cr4();
    cr4.Bits.OSFXSR = 1;
    hw_write_cr4( cr4.Uint64 );
}
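
/*
 * Note (background fact, not stated in this file): CR4.OSFXSR is bit 9 of
 * CR4; setting it enables FXSAVE/FXRSTOR and unmasks SSE instructions, so
 * compiler-generated XMM register use in host code does not fault with #UD.
 */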

void host_cpu_store_vmexit_gcpu(CPU_ID cpu_id, GUEST_CPU_HANDLE gcpu)
{
    if (cpu_id < g_max_host_cpus) {
        g_host_cpus[cpu_id].last_vmexit_gcpu = gcpu;

        VMM_DEBUG_CODE( vmm_trace(gcpu, "\n");)
    }
}

GUEST_CPU_HANDLE host_cpu_get_vmexit_gcpu(CPU_ID cpu_id)
{
    GUEST_CPU_HANDLE gcpu = NULL;

    if (cpu_id < g_max_host_cpus) {
        gcpu = g_host_cpus[cpu_id].last_vmexit_gcpu;
    }
    return gcpu;
}
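
/*
 * Pairing sketch (my reading): the VMEXIT handler records which guest CPU
 * exited on this host CPU, and later code (debug or trace paths, say) can
 * retrieve it without holding a gcpu handle:
 *
 *     host_cpu_store_vmexit_gcpu(hw_cpu_id(), gcpu);   // in the exit path
 *     ...
 *     GUEST_CPU_HANDLE last = host_cpu_get_vmexit_gcpu(hw_cpu_id());
 */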

/*
 *  Purpose: On VMEXIT, DR7 is always overwritten with 0x400. This prevents
 *    setting hardware breakpoints in host-running code across
 *    VMEXIT/VMENTER transitions. The two functions below keep the DR7
 *    value set by an external debugger in the CPU context and reapply it
 *    to hardware upon VMEXIT.
 */

void host_cpu_save_dr7(CPU_ID cpu_id)
{
    VMM_ASSERT(g_host_cpus);
    if (cpu_id < g_max_host_cpus) {
        g_host_cpus[cpu_id].host_dr7 = hw_read_dr(7);
    }
}

void host_cpu_restore_dr7(CPU_ID cpu_id)
{
    if (cpu_id < g_max_host_cpus) {
        if (0 != g_host_cpus[cpu_id].host_dr7) {
            hw_write_dr(7, g_host_cpus[cpu_id].host_dr7);
        }
    }
}
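
/*
 * Usage sketch (hypothetical placement): call host_cpu_save_dr7() before
 * entering the guest and host_cpu_restore_dr7() in the VMEXIT path, since
 * the hardware loads DR7 with 0x400 on every VM exit:
 *
 *     host_cpu_save_dr7(hw_cpu_id());      // before VMENTER
 *     // ... guest runs, VMEXIT occurs, DR7 == 0x400 ...
 *     host_cpu_restore_dr7(hw_cpu_id());   // debugger's DR7 is back
 */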