/*
 * Copyright (c) 2013 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *     http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * fvs.c - Fast View Switch (FVS) support: per-guest EPTP lists and the
 * VMFUNC leaf-0 (EPTP switching) plumbing that lets a guest switch EPT
 * views without a VM exit, plus the VMCALL fallback path when VMFUNC
 * is not used.
 */

#ifdef FAST_VIEW_SWITCH
#include "file_codes.h"
#define VMM_DEADLOOP()          VMM_DEADLOOP_LOG(FVS_C)
#define VMM_ASSERT(__condition) VMM_ASSERT_LOG(FVS_C, __condition)
#include "vmcs_init.h"
#include "ept.h"
#include "ept_hw_layer.h"
#include "host_memory_manager_api.h"
#include "scheduler.h"
#include "vmx_asm.h"
#include "ipc.h"
#include "vmx_ctrl_msrs.h"
/* NOTE: forward slashes, not backslashes -- backslash include paths are
 * non-portable (fail under GCC/Clang and on non-Windows hosts). */
#include "../../guest/guest_internal.h"
#include "../../guest/guest_cpu/guest_cpu_internal.h"
#include "isr.h"
#include "guest_cpu_vmenter_event.h"
#include "fvs.h"
#include "vmm_callback.h"
#include "common_types.h"
#include "profiling.h"
#ifdef JLMDEBUG
#include "jlmdebug.h"
#endif

static void fvs_init_eptp_switching(GUEST_DESCRIPTOR *guest);
static HPA  fvs_get_eptp_list_paddress(GUEST_CPU_HANDLE gcpu);
static void fvs_enable_eptp_switching(CPU_ID from UNUSED, void* arg);
static void fvs_disable_eptp_switching(CPU_ID from UNUSED, void* arg);
extern UINT32 vmexit_reason(void);
extern BOOLEAN vmcs_sw_shadow_disable[];

/*
 * Allocate and initialize the guest's FVS descriptor: one EPTP-list page
 * per host CPU (physical + virtual address arrays sized by
 * number_of_host_processors).
 */
void fvs_initialize(GUEST_HANDLE guest, UINT32 number_of_host_processors)
{
    guest->fvs_desc = (FVS_DESCRIPTOR *) vmm_malloc(sizeof(FVS_DESCRIPTOR));
    VMM_ASSERT(guest->fvs_desc);

    guest->fvs_desc->num_of_cpus = number_of_host_processors;
    guest->fvs_desc->dummy_eptp_address = 0;
    guest->fvs_desc->eptp_list_paddress = vmm_malloc(sizeof(HPA) * number_of_host_processors);
    guest->fvs_desc->eptp_list_vaddress = vmm_malloc(sizeof(HVA) * number_of_host_processors);
    /* Check both array allocations before they are dereferenced in
     * fvs_init_eptp_switching() (original code only checked fvs_desc). */
    VMM_ASSERT(guest->fvs_desc->eptp_list_paddress);
    VMM_ASSERT(guest->fvs_desc->eptp_list_vaddress);
    VMM_LOG(mask_anonymous, level_trace,
            "fvs desc allocated...=0x%016lX\n", guest->fvs_desc);
    fvs_init_eptp_switching(guest);
}

/*
 * Allocate and zero one 4KB EPTP-list page per CPU and record both its
 * host-virtual and host-physical address. Deadloops if the HVA->HPA
 * translation fails (no recovery possible at init time).
 */
static void fvs_init_eptp_switching(GUEST_DESCRIPTOR *guest)
{
    UINT32 i;

    for (i = 0; i < guest->fvs_desc->num_of_cpus; i++) {
        guest->fvs_desc->eptp_list_vaddress[i] = (HVA)vmm_page_alloc(1);
        /* Assert BEFORE touching the page: the original memset ran first
         * and would dereference NULL on allocation failure. */
        VMM_ASSERT(guest->fvs_desc->eptp_list_vaddress[i]);
        vmm_memset((UINT64 *)guest->fvs_desc->eptp_list_vaddress[i], 0, PAGE_4KB_SIZE);
        if ( !hmm_hva_to_hpa(guest->fvs_desc->eptp_list_vaddress[i],
                             &guest->fvs_desc->eptp_list_paddress[i]) ) {
            VMM_LOG(mask_anonymous, level_error,
                    "%s:(%d):ASSERT: HVA to HPA conversion failed\n",
                    __FUNCTION__, __LINE__);
            VMM_DEADLOOP();
        }
        VMM_LOG(mask_anonymous, level_trace,
                "eptp list allocated...vaddr=0x%016lX paddr=0x%016lX\n",
                guest->fvs_desc->eptp_list_vaddress[i],
                guest->fvs_desc->eptp_list_paddress[i]);
    }
}

/* TRUE when the hardware reports EPTP-switching (VMFUNC leaf 0) support. */
BOOLEAN fvs_is_eptp_switching_supported(void)
{
    const VMCS_HW_CONSTRAINTS *hw_constraints = vmcs_hw_get_vmx_constraints();

    return (hw_constraints->eptp_switching_supported);
}

/* Request the "enable VMFUNC" secondary processor-based execution control
 * for this gcpu (takes effect via gcpu_control2_setup). */
void fvs_guest_vmfunc_enable(GUEST_CPU_HANDLE gcpu)
{
    PROCESSOR_BASED_VM_EXECUTION_CONTROLS2 ctrls2;
    VMEXIT_CONTROL request;

    ctrls2.Uint32 = 0;
    vmm_zeromem(&request, sizeof(request));
    ctrls2.Bits.Vmfunc = 1;
    request.proc_ctrls2.bit_mask = ctrls2.Uint32;
    request.proc_ctrls2.bit_request = UINT64_ALL_ONES;
    gcpu_control2_setup( gcpu, &request );
}

/* Physical address of the EPTP-list page belonging to this gcpu's CPU. */
static HPA fvs_get_eptp_list_paddress(GUEST_CPU_HANDLE gcpu)
{
    GUEST_HANDLE guest = gcpu_guest_handle(gcpu);
    const VIRTUAL_CPU_ID *vcpuid = guest_vcpu(gcpu);

    VMM_ASSERT(guest);
    VMM_ASSERT(guest->fvs_desc);
    VMM_ASSERT(vcpuid);

    return(guest->fvs_desc->eptp_list_paddress[vcpuid->guest_cpu_id]);
}

/*
 * Write an EPTP (root HPA + memory type + GAW encoding) at `index` into
 * every CPU's EPTP-list page. Returns FALSE for an out-of-range index or
 * an unsupported guest address width.
 */
BOOLEAN fvs_add_entry_to_eptp_list(GUEST_HANDLE guest,
                    HPA ept_root_hpa, UINT32 gaw, UINT64 index)
{
    UINT64 *hva = NULL;
    EPTP eptp;
    UINT32 ept_gaw = 0, i;

    VMM_ASSERT(guest->fvs_desc);
    if ( index >= MAX_EPTP_ENTRIES ) {
        return FALSE;
    }
    ept_gaw = ept_hw_get_guest_address_width(gaw);
    if (ept_gaw == (UINT32) -1) {
        return FALSE;
    }
    eptp.Uint64 = ept_root_hpa;
    eptp.Bits.ETMT = ept_hw_get_ept_memory_type();
    eptp.Bits.GAW = ept_hw_get_guest_address_width_encoding(ept_gaw);
    eptp.Bits.Reserved = 0;
    VMM_LOG(mask_anonymous, level_trace,
            "adding eptp entry eptp=0x%016lX index=%d\n", eptp.Uint64, index);
    for (i = 0; i < guest->fvs_desc->num_of_cpus; i++) {
        hva = (UINT64 *)guest->fvs_desc->eptp_list_vaddress[i];
        *(hva + index) = eptp.Uint64;
    }
    return TRUE;
}

/* Zero the entry at `index` in every CPU's EPTP-list page.
 * Returns FALSE for an out-of-range index. */
BOOLEAN fvs_delete_entry_from_eptp_list(GUEST_HANDLE guest, UINT64 index)
{
    UINT64 *hva = NULL;
    UINT32 i;

    VMM_ASSERT(guest->fvs_desc);

    if ( index >= MAX_EPTP_ENTRIES ) {
        return FALSE;
    }
    VMM_LOG(mask_anonymous, level_trace,
            "deleting eptp entry at index=%d\n", index);
    for (i = 0; i < guest->fvs_desc->num_of_cpus; i++) {
        hva = (UINT64 *)guest->fvs_desc->eptp_list_vaddress[i];
        *(hva + index) = 0;
    }
    return TRUE;
}

/* Clear the EPTP-switching bit in the VMFUNC control field of this
 * gcpu's VMCS, i.e. start with EPTP switching disabled. */
void fvs_vmfunc_vmcs_init(GUEST_CPU_HANDLE gcpu)
{
    UINT64 value;
    VMCS_OBJECT* vmcs = gcpu_get_vmcs(gcpu);

    value = vmcs_read(vmcs, VMCS_VMFUNC_CONTROL);
    VMM_LOG(mask_anonymous, level_trace,
            "HW Vmfunc ctrl read value = 0x%016lX\n", value);
    BIT_CLR(value, EPTP_SWITCHING_BIT);
    VMM_LOG(mask_anonymous, level_trace,
            "HW Vmfunc ctrl bitclr value = 0x%016lX\n", value);
    vmcs_write(vmcs, VMCS_VMFUNC_CONTROL, value);
    VMM_LOG(mask_anonymous, level_trace,
            "EPTP switching disabled...0x%016lX\n", value);
}

#pragma warning( push )
#pragma warning (disable : 4100)  // disable non-referenced formal parameters

/*
 * IPC handler: enable EPTP switching on the current CPU's gcpu for
 * `arg` (a GUEST_HANDLE). Sets the VMFUNC EPTP-switching bit and points
 * VMCS_VMFUNC_EPTP_LIST_ADDRESS at this CPU's list page.
 * NOTE(review): fvs_cpu_desc.enabled is set TRUE even when hardware
 * support is absent -- preserved from the original; confirm intended.
 */
static void fvs_enable_eptp_switching(CPU_ID from UNUSED, void* arg)
{
    UINT64 value = 0;
    GUEST_HANDLE guest = (GUEST_HANDLE) arg;
    GUEST_CPU_HANDLE gcpu =
              scheduler_get_current_gcpu_for_guest(guest_get_id(guest));
    VMCS_OBJECT* vmcs = gcpu_get_vmcs(gcpu);

    if ( fvs_is_eptp_switching_supported() ) {
        value = vmcs_read(vmcs, VMCS_VMFUNC_CONTROL);
        BIT_SET(value, EPTP_SWITCHING_BIT);
        vmcs_write(vmcs, VMCS_VMFUNC_CONTROL, value);
        vmcs_write(vmcs, VMCS_VMFUNC_EPTP_LIST_ADDRESS,
                   fvs_get_eptp_list_paddress(gcpu));
    }
    gcpu->fvs_cpu_desc.enabled = TRUE;
    VMM_LOG(mask_anonymous, level_trace,
            "EPTP switching enabled by IB-agent...0x%016lX\n", value);
}

/* IPC handler: mirror of fvs_enable_eptp_switching -- clears the VMFUNC
 * EPTP-switching bit and the list address on the current CPU's gcpu. */
static void fvs_disable_eptp_switching(CPU_ID from UNUSED, void* arg)
{
    UINT64 value = 0;
    GUEST_HANDLE guest = (GUEST_HANDLE) arg;
    GUEST_CPU_HANDLE gcpu =
              scheduler_get_current_gcpu_for_guest(guest_get_id(guest));
    VMCS_OBJECT* vmcs = gcpu_get_vmcs(gcpu);

    if ( fvs_is_eptp_switching_supported() ) {
        value = vmcs_read(vmcs, VMCS_VMFUNC_CONTROL);
        BIT_CLR(value, EPTP_SWITCHING_BIT);
        vmcs_write(vmcs, VMCS_VMFUNC_CONTROL, value);
        vmcs_write(vmcs, VMCS_VMFUNC_EPTP_LIST_ADDRESS, 0);
    }
    gcpu->fvs_cpu_desc.enabled = FALSE;
    VMM_LOG(mask_anonymous, level_trace,
            "EPTP switching disabled by IB-agent...0x%016lX\n", value);
}
#pragma warning( pop )

/* Enable fast view switch on this CPU, then broadcast the enable to all
 * other CPUs via synchronous IPC. */
void fvs_enable_fvs(GUEST_CPU_HANDLE gcpu)
{
    GUEST_HANDLE guest = gcpu_guest_handle(gcpu);
    const VIRTUAL_CPU_ID *vcpuid = guest_vcpu(gcpu);
    UINT16 gcpu_id = 0;
    IPC_DESTINATION ipc_dest;

    // paranoid check, matching fvs_disable_fvs. If assertion fails,
    // possible memory corruption.
    VMM_ASSERT(guest);
    VMM_ASSERT(vcpuid);
    VMM_ASSERT(guest->fvs_desc);
    gcpu_id = vcpuid->guest_cpu_id;
    fvs_enable_eptp_switching(gcpu_id, guest);
    vmm_zeromem(&ipc_dest, sizeof(ipc_dest));
    ipc_dest.addr_shorthand = IPI_DST_ALL_EXCLUDING_SELF;
    ipc_execute_handler_sync(ipc_dest, fvs_enable_eptp_switching, guest);
    VMM_LOG(mask_anonymous, level_trace,"Fast view switch enabled...\n");
}

/* Disable fast view switch on this CPU, then broadcast the disable to
 * all other CPUs via synchronous IPC. */
void fvs_disable_fvs(GUEST_CPU_HANDLE gcpu)
{
    GUEST_HANDLE guest = gcpu_guest_handle(gcpu);
    const VIRTUAL_CPU_ID *vcpuid = guest_vcpu(gcpu);
    UINT16 gcpu_id = 0;
    IPC_DESTINATION ipc_dest;

    //paranoid check. If assertion fails, possible memory corruption.
    VMM_ASSERT(guest);
    VMM_ASSERT(vcpuid);
    VMM_ASSERT(guest->fvs_desc);
    gcpu_id = vcpuid->guest_cpu_id;
    fvs_disable_eptp_switching(gcpu_id, guest);
    vmm_zeromem(&ipc_dest, sizeof(ipc_dest));
    ipc_dest.addr_shorthand = IPI_DST_ALL_EXCLUDING_SELF;
    ipc_execute_handler_sync(ipc_dest, fvs_disable_eptp_switching, guest);
    VMM_LOG(mask_anonymous, level_trace, "Fast view switch disabled...\n");
}

/* TRUE if fast view switch was enabled on this gcpu. */
BOOLEAN fvs_is_fvs_enabled(GUEST_CPU_HANDLE gcpu)
{
    return (gcpu->fvs_cpu_desc.enabled);
}

/* Read the EPTP entry at `index` from this CPU's list page; 0 when the
 * index is out of range. */
UINT64 fvs_get_eptp_entry(GUEST_CPU_HANDLE gcpu, UINT64 index)
{
    GUEST_HANDLE guest = gcpu_guest_handle(gcpu);
    const VIRTUAL_CPU_ID *vcpuid = guest_vcpu(gcpu);
    UINT64 *hva = NULL;

    VMM_ASSERT(guest);
    VMM_ASSERT(guest->fvs_desc);
    VMM_ASSERT(vcpuid);
    hva = (UINT64 *)guest->fvs_desc->eptp_list_vaddress[vcpuid->guest_cpu_id];

    if ( index < MAX_EPTP_ENTRIES ) {
        return(*(hva + index));
    } else {
        return(0);
    }
}

/* Per-CPU array of EPTP-list page physical addresses for this guest. */
HPA *fvs_get_all_eptp_list_paddress(GUEST_CPU_HANDLE gcpu)
{
    GUEST_HANDLE guest = gcpu_guest_handle(gcpu);

    VMM_ASSERT(guest);
    VMM_ASSERT(guest->fvs_desc);
    return guest->fvs_desc->eptp_list_paddress;
}

/* Record the EPTP that will be active on the next VM entry. */
void fvs_save_resumed_eptp(GUEST_CPU_HANDLE gcpu)
{
    VMCS_OBJECT *vmcs = gcpu_get_vmcs(gcpu);

    gcpu->fvs_cpu_desc.vmentry_eptp = vmcs_read(vmcs, VMCS_EPTP_ADDRESS);
}

/*
 * VMCALL-based fast-view-switch path. On a FAST_VIEW_SWITCH_LEAF vmcall,
 * look up the requested view (RCX) in the EPTP list; if valid and
 * approved by the uvmm callback, write it as the active EPTP and skip
 * the guest instruction; otherwise report an invalid switch. Resumes the
 * guest directly via vmentry_func(FALSE).
 */
void fvs_vmexit_handler(GUEST_CPU_HANDLE gcpu)
{
    UINT64 r_eax, r_ecx, leptp;
    const VIRTUAL_CPU_ID* vcpu_id;
    REPORT_SET_ACTIVE_EPTP_DATA set_active_eptp_data;
    REPORT_FAST_VIEW_SWITCH_DATA fast_view_switch_data;
    VMCS_OBJECT *vmcs;

    if (vmexit_reason() != Ia32VmxExitBasicReasonVmcallInstruction)
        return;
    VMM_ASSERT(gcpu);
    r_eax = gcpu_get_native_gp_reg(gcpu, IA32_REG_RAX);
    /* Check whether we drop because of fast view switch */
    if (r_eax != FAST_VIEW_SWITCH_LEAF)
        return;
    TMSL_PROFILING_API_ENTRY(TMSL_X_VMCALL_FVS, PROF_API_CALLER_IB);
    r_ecx = gcpu_get_native_gp_reg(gcpu, IA32_REG_RCX);
    vcpu_id = guest_vcpu( gcpu );
    /* Check whether view is valid */
    leptp = fvs_get_eptp_entry(gcpu, r_ecx);
    set_active_eptp_data.eptp_list_index = r_ecx;
    set_active_eptp_data.update_hw = FALSE;
    /* vcpu_id reused below instead of re-calling guest_vcpu() (the
     * original fetched it and then left the local unused). */
    if (leptp &&
        report_uvmm_event(UVMM_EVENT_SET_ACTIVE_EPTP,
                          (VMM_IDENTIFICATION_DATA)gcpu,
                          (const GUEST_VCPU*)vcpu_id,
                          &set_active_eptp_data)) {
        VMM_LOG(mask_anonymous, level_trace,
                "Switch ept called %d\n", r_ecx);
        vmcs = gcpu_get_vmcs(gcpu);
        vmcs_write( vmcs, VMCS_EPTP_ADDRESS, leptp);
        gcpu_skip_guest_instruction(gcpu);
        nmi_window_update_before_vmresume(vmcs);
    }
    else {
        /* View is invalid report to handler */
        VMM_LOG(mask_anonymous, level_trace,
                "%s: view id=%d.Invalid view id requested.\n",
                __FUNCTION__,r_ecx);

        fast_view_switch_data.reg = r_ecx;
        report_uvmm_event(UVMM_EVENT_INVALID_FAST_VIEW_SWITCH,
                          (VMM_IDENTIFICATION_DATA)gcpu,
                          (const GUEST_VCPU*)vcpu_id,
                          (void *)&fast_view_switch_data);
        nmi_window_update_before_vmresume(gcpu_get_vmcs(gcpu));
    }
    TMSL_PROFILING_API_EXIT(TMSL_X_VMCALL_FVS, PROF_API_CALLER_IB);
    vmentry_func( FALSE );
}

#endif

#ifdef INCLUDE_UNUSED_CODE
/* Debug helper: dump all TOTAL_NUM_VIEWS entries of this CPU's EPTP list. */
static
void fvs_print_eptp_list(GUEST_CPU_HANDLE gcpu)
{
    GUEST_HANDLE guest = gcpu_guest_handle(gcpu);
    const VIRTUAL_CPU_ID *vcpuid = guest_vcpu(gcpu);
    UINT64 *hva;
    UINT64 index;

    VMM_ASSERT(vcpuid);
    hva = (UINT64 *)guest->fvs_desc->eptp_list_vaddress[vcpuid->guest_cpu_id];
    VMM_LOG(mask_anonymous, level_print_always,"\n");
    for(index=0;index<TOTAL_NUM_VIEWS;index++) {
        VMM_LOG(mask_anonymous,
                level_print_always,"entry at index %d = 0x%016lX\n",
                index, *(hva + index));
    }
    VMM_LOG(mask_anonymous, level_print_always,"\n");
}
#endif