github.com/jlmucb/cloudproxy@v0.0.0-20170830161738-b5aa0b619bc4/cpvmm/vmm/vmexit/vmexit_analysis.c

/*
 * Copyright (c) 2013 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *     http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "file_codes.h"
#define VMM_DEADLOOP()          VMM_DEADLOOP_LOG(VMEXIT_ANALYSIS_C)
#define VMM_ASSERT(__condition) VMM_ASSERT_LOG(VMEXIT_ANALYSIS_C, __condition)
#include <vmm_defs.h>
#include <vmm_dbg.h>
#include <vmcs_api.h>
#include <vmx_vmcs.h>
#include <vmx_ctrl_msrs.h>
#include <vmm_objects.h>
#include <isr.h>
#include <vmm_arch_defs.h>
#include <vmexit_cr_access.h>
#include <guest_cpu.h>
#include <guest.h>
#include <gpm_api.h>
#include <host_memory_manager_api.h>
#include <vmexit_analysis.h>
#include "vmm_callback.h"
#ifdef JLMDEBUG
#include "jlmdebug.h"
#endif


#pragma warning (disable : 4100)

typedef BOOLEAN (*VMEXIT_IS_CONTROL_REQUESTED_FUNC)(GUEST_CPU_HANDLE, VMCS_OBJECT*, VMCS_OBJECT*);

static BOOLEAN vmexit_analysis_true_func(GUEST_CPU_HANDLE gcpu UNUSED,
                VMCS_OBJECT* vmexit_vmcs UNUSED, VMCS_OBJECT* control_vmcs UNUSED) {
    return TRUE;
}

static BOOLEAN vmexit_analysis_false_func(GUEST_CPU_HANDLE gcpu UNUSED,
                VMCS_OBJECT* vmexit_vmcs UNUSED, VMCS_OBJECT* control_vmcs UNUSED) {
    return FALSE;
}

static BOOLEAN vmexit_analysis_interrupt_window_exiting(GUEST_CPU_HANDLE gcpu UNUSED,
                VMCS_OBJECT* vmexit_vmcs UNUSED, VMCS_OBJECT* control_vmcs) {
    PROCESSOR_BASED_VM_EXECUTION_CONTROLS level1_ctrls;

    level1_ctrls.Uint32 = (UINT32)vmcs_read(control_vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS);
    return (level1_ctrls.Bits.VirtualInterrupt == 1);
}

static BOOLEAN vmexit_analysis_nmi_window_exiting(GUEST_CPU_HANDLE gcpu UNUSED,
                VMCS_OBJECT* vmexit_vmcs UNUSED, VMCS_OBJECT* control_vmcs) {
    PROCESSOR_BASED_VM_EXECUTION_CONTROLS level1_ctrls;

    level1_ctrls.Uint32 = (UINT32)vmcs_read(control_vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS);
    return (level1_ctrls.Bits.NmiWindow == 1);
}

static BOOLEAN vmexit_analysis_hlt_inst_exiting(GUEST_CPU_HANDLE gcpu UNUSED,
                VMCS_OBJECT* vmexit_vmcs UNUSED, VMCS_OBJECT* control_vmcs) {
    PROCESSOR_BASED_VM_EXECUTION_CONTROLS level1_ctrls;

    level1_ctrls.Uint32 = (UINT32)vmcs_read(control_vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS);
    return (level1_ctrls.Bits.Hlt == 1);
}

static BOOLEAN vmexit_analysis_invlpg_inst_exiting(GUEST_CPU_HANDLE gcpu UNUSED,
                VMCS_OBJECT* vmexit_vmcs UNUSED, VMCS_OBJECT* control_vmcs) {
    PROCESSOR_BASED_VM_EXECUTION_CONTROLS level1_ctrls;

    level1_ctrls.Uint32 = (UINT32)vmcs_read(control_vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS);
    return (level1_ctrls.Bits.Invlpg == 1);
}

static BOOLEAN vmexit_analysis_rdpmc_inst_exiting(GUEST_CPU_HANDLE gcpu UNUSED,
                VMCS_OBJECT* vmexit_vmcs UNUSED, VMCS_OBJECT* control_vmcs) {
    PROCESSOR_BASED_VM_EXECUTION_CONTROLS level1_ctrls;

    level1_ctrls.Uint32 = (UINT32)vmcs_read(control_vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS);
    return (level1_ctrls.Bits.Rdpmc == 1);
}
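/*
 * A hedged sketch: the single-bit predicates above and below all follow the
 * same pattern, namely read the level-1 processor-based VM-execution
 * controls and test one bit.  The helper below only illustrates that shared
 * shape; it is not part of the build, and the helper name and its mask-based
 * interface are assumptions made for illustration.
 */
#if 0   /* illustrative sketch, not compiled */
static BOOLEAN vmexit_analysis_proc_ctrl_bit_is_set(VMCS_OBJECT* control_vmcs,
                UINT32 control_bit_mask) {
    // Read the level-1 processor-based VM-execution controls and test the
    // requested control bit (e.g., HLT exiting, INVLPG exiting, ...).
    UINT32 level1_ctrls = (UINT32)vmcs_read(control_vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS);
    return ((level1_ctrls & control_bit_mask) != 0);
}
#endif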
static BOOLEAN vmexit_analysis_rdtsc_inst_exiting(GUEST_CPU_HANDLE gcpu UNUSED,
                VMCS_OBJECT* vmexit_vmcs UNUSED, VMCS_OBJECT* control_vmcs) {
    PROCESSOR_BASED_VM_EXECUTION_CONTROLS level1_ctrls;

    level1_ctrls.Uint32 = (UINT32)vmcs_read(control_vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS);
    return (level1_ctrls.Bits.Rdtsc == 1);
}

static BOOLEAN vmexit_analysis_dr_access_exiting(GUEST_CPU_HANDLE gcpu UNUSED,
                VMCS_OBJECT* vmexit_vmcs UNUSED, VMCS_OBJECT* control_vmcs) {
    PROCESSOR_BASED_VM_EXECUTION_CONTROLS level1_ctrls;

    level1_ctrls.Uint32 = (UINT32)vmcs_read(control_vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS);
    return (level1_ctrls.Bits.MovDr == 1);
}

static BOOLEAN vmexit_analysis_mwait_inst_exiting(GUEST_CPU_HANDLE gcpu UNUSED,
                VMCS_OBJECT* vmexit_vmcs UNUSED, VMCS_OBJECT* control_vmcs) {
    PROCESSOR_BASED_VM_EXECUTION_CONTROLS level1_ctrls;

    level1_ctrls.Uint32 = (UINT32)vmcs_read(control_vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS);
    return (level1_ctrls.Bits.Mwait == 1);
}

static BOOLEAN vmexit_analysis_monitor_inst_exiting(GUEST_CPU_HANDLE gcpu UNUSED,
                VMCS_OBJECT* vmexit_vmcs UNUSED, VMCS_OBJECT* control_vmcs) {
    PROCESSOR_BASED_VM_EXECUTION_CONTROLS level1_ctrls;

    level1_ctrls.Uint32 = (UINT32)vmcs_read(control_vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS);
    return (level1_ctrls.Bits.Monitor == 1);
}

static BOOLEAN vmexit_analysis_pause_inst_exiting(GUEST_CPU_HANDLE gcpu UNUSED,
                VMCS_OBJECT* vmexit_vmcs UNUSED, VMCS_OBJECT* control_vmcs) {
    PROCESSOR_BASED_VM_EXECUTION_CONTROLS level1_ctrls;

    level1_ctrls.Uint32 = (UINT32)vmcs_read(control_vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS);
    return (level1_ctrls.Bits.Pause == 1);
}

static BOOLEAN vmexit_analysis_softinterrupt_exception_nmi_exiting(GUEST_CPU_HANDLE gcpu UNUSED,
                VMCS_OBJECT* vmexit_vmcs, VMCS_OBJECT* control_vmcs) {
    IA32_VMX_VMCS_VM_EXIT_INFO_INTERRUPT_INFO interrupt_info;
    UINT32 vector;

    interrupt_info.Uint32 = (UINT32)vmcs_read(vmexit_vmcs, VMCS_EXIT_INFO_EXCEPTION_INFO);
    vector = (UINT32)interrupt_info.Bits.Vector;

    if (vector == IA32_EXCEPTION_VECTOR_PAGE_FAULT) {
        IA32_VMCS_EXCEPTION_BITMAP level1_exceptions;
        UINT32 pfec = (UINT32)vmcs_read(vmexit_vmcs, VMCS_EXIT_INFO_EXCEPTION_ERROR_CODE);
        UINT32 level1_pfec_mask = (UINT32)vmcs_read(vmexit_vmcs, VMCS_PAGE_FAULT_ERROR_CODE_MASK);
        UINT32 level1_pfec_match = (UINT32)vmcs_read(vmexit_vmcs, VMCS_PAGE_FAULT_ERROR_CODE_MATCH);

        VMM_ASSERT(interrupt_info.Bits.InterruptType != VmExitInterruptTypeExternalInterrupt);
        VMM_ASSERT(interrupt_info.Bits.InterruptType != VmExitInterruptTypeNmi);

        level1_exceptions.Uint32 = (UINT32)vmcs_read(control_vmcs, VMCS_EXCEPTION_BITMAP);
        if (level1_exceptions.Bits.PF == 1) {
            return ((pfec & level1_pfec_mask) == level1_pfec_match);
        }
        else {
            return ((pfec & level1_pfec_mask) != level1_pfec_match);
        }
    }
    else if (interrupt_info.Bits.InterruptType == VmExitInterruptTypeNmi) {
        PIN_BASED_VM_EXECUTION_CONTROLS level1_pin_ctrls;

        VMM_ASSERT(vector == IA32_EXCEPTION_VECTOR_NMI);
        level1_pin_ctrls.Uint32 = (UINT32)vmcs_read(vmexit_vmcs, VMCS_CONTROL_VECTOR_PIN_EVENTS);
        return (level1_pin_ctrls.Bits.Nmi == 1);
    }
    else {
        UINT32 level1_exceptions = (UINT32)vmcs_read(control_vmcs, VMCS_EXCEPTION_BITMAP);

        return ((level1_exceptions & (1 << vector)) != 0);
    }
}
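/*
 * Page-fault filtering background for the function above: with exception
 * bitmap bit 14 (#PF) set, a page fault causes a VM exit only when
 * (PFEC & PAGE_FAULT_ERROR_CODE_MASK) == PAGE_FAULT_ERROR_CODE_MATCH;
 * with bit 14 clear, only when the masked PFEC does NOT equal the match
 * value.  Worked example: mask = 0x1 (present bit), match = 0x0 with
 * bit 14 set means only not-present faults (PFEC bit 0 == 0) are of
 * interest to level 1.
 */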
static BOOLEAN vmexit_analysis_hardware_interrupt_exiting(GUEST_CPU_HANDLE gcpu UNUSED,
                VMCS_OBJECT* vmexit_vmcs, VMCS_OBJECT* control_vmcs UNUSED) {
    PIN_BASED_VM_EXECUTION_CONTROLS level1_pin_ctrls;

    level1_pin_ctrls.Uint32 = (UINT32)vmcs_read(vmexit_vmcs, VMCS_CONTROL_VECTOR_PIN_EVENTS);
    return (level1_pin_ctrls.Bits.ExternalInterrupt == 1);
}

static BOOLEAN vmexit_analysis_is_cr3_in_target_list(VMCS_OBJECT* vmcs, UINT64 cr3_value) {
    UINT32 cr3_target_count = (UINT32)vmcs_read(vmcs, VMCS_CR3_TARGET_COUNT);
    UINT32 i;

    VMM_ASSERT(cr3_target_count <= 4);
    for (i = 0; i < cr3_target_count; i++) {
        UINT64 value = vmcs_read(vmcs, (VMCS_FIELD)(VMCS_CR3_TARGET_VALUE_0 + i));

        if (value == cr3_value) {
            return TRUE;
        }
    }
    return FALSE;
}

static BOOLEAN vmexit_analysis_is_exit_on_cr_update(VMCS_OBJECT* vmcs, UINT64 new_value,
                VMCS_FIELD shadow_field, VMCS_FIELD mask_field) {
    UINT64 shadow = vmcs_read(vmcs, shadow_field);
    UINT64 mask = vmcs_read(vmcs, mask_field);
    BOOLEAN result;

    result = ((shadow & mask) != (new_value & mask));

    return result;
}

static BOOLEAN vmexit_analysis_cr_access_exiting(GUEST_CPU_HANDLE gcpu,
                VMCS_OBJECT* vmexit_vmcs, VMCS_OBJECT* control_vmcs) {
    IA32_VMX_EXIT_QUALIFICATION qualification;

    qualification.Uint64 = vmcs_read(vmexit_vmcs, VMCS_EXIT_INFO_QUALIFICATION);

    switch (qualification.CrAccess.AccessType) {
    case 0: // move to CR
        {
            VMM_IA32_CONTROL_REGISTERS cr_id = vmexit_cr_access_get_cr_from_qualification(qualification.Uint64);
            VMM_IA32_GP_REGISTERS operand = vmexit_cr_access_get_operand_from_qualification(qualification.Uint64);
            UINT64 new_value = gcpu_get_gp_reg(gcpu, operand);

            if (cr_id == IA32_CTRL_CR3) {
                // return TRUE in case the value is not in the target list
                return (vmexit_analysis_is_cr3_in_target_list(control_vmcs, new_value) == FALSE);
            }
            else if (cr_id == IA32_CTRL_CR0) {
                return vmexit_analysis_is_exit_on_cr_update(control_vmcs, new_value, VMCS_CR0_READ_SHADOW, VMCS_CR0_MASK);
            }
            else if (cr_id == IA32_CTRL_CR4) {
                return vmexit_analysis_is_exit_on_cr_update(control_vmcs, new_value, VMCS_CR4_READ_SHADOW, VMCS_CR4_MASK);
            }
            else {
                PROCESSOR_BASED_VM_EXECUTION_CONTROLS ctrls;

                VMM_ASSERT(cr_id == IA32_CTRL_CR8);
                ctrls.Uint32 = (UINT32)vmcs_read(control_vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS);

                if (ctrls.Bits.Cr8Load) {
                    return TRUE;
                }

                if (ctrls.Bits.TprShadow) {
                    // TODO: currently TPR shadow is not supported
                    VMM_LOG(mask_anonymous, level_trace, "%s: Currently TPR shadow is not supported\n", __FUNCTION__);
                    VMM_DEADLOOP();
                }
                return FALSE;
            }
            break;
        }
    case 1: // move from CR
        {
            VMM_IA32_CONTROL_REGISTERS cr_id = vmexit_cr_access_get_cr_from_qualification(qualification.Uint64);

            if (cr_id == IA32_CTRL_CR3) {
                return TRUE;
            }
            else {
                PROCESSOR_BASED_VM_EXECUTION_CONTROLS ctrls;

                VMM_ASSERT(cr_id == IA32_CTRL_CR8);
                ctrls.Uint32 = (UINT32)vmcs_read(control_vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS);
                if (ctrls.Bits.Cr8Store) {
                    return TRUE;
                }
                if (ctrls.Bits.TprShadow) {
                    // TODO: currently TPR shadow is not supported
                    VMM_LOG(mask_anonymous, level_trace, "%s: Currently TPR shadow is not supported\n", __FUNCTION__);
                    VMM_DEADLOOP();
                }
                return FALSE;
            }
            break;
        }
    case 2: // CLTS
        {
            EM64T_CR0 cr0_shadow;
            EM64T_CR0 cr0_mask;

            VMM_ASSERT(0 == qualification.CrAccess.Number);
            cr0_shadow.Uint64 = vmcs_read(control_vmcs, VMCS_CR0_READ_SHADOW);
            cr0_mask.Uint64 = vmcs_read(control_vmcs, VMCS_CR0_MASK);
            return ((cr0_mask.Bits.TS == 1) && (cr0_shadow.Bits.TS != 0));
            break;
        }
    case 3: // LMSW
        {
            EM64T_CR0 cr0_shadow;
            EM64T_CR0 cr0_mask;
            UINT32 mask_tmp;

            VMM_ASSERT(0 == qualification.CrAccess.Number);
            cr0_shadow.Uint64 = vmcs_read(control_vmcs, VMCS_CR0_READ_SHADOW);
            cr0_mask.Uint64 = vmcs_read(control_vmcs, VMCS_CR0_MASK);
            mask_tmp = (UINT32)(cr0_mask.Uint64 & 0xffff);
            return ((mask_tmp != 0) &&
                    ((cr0_shadow.Uint64 & mask_tmp) != (qualification.CrAccess.LmswData & mask_tmp)));
            break;
        }
    }

    // should not reach here
    VMM_DEADLOOP();
    return FALSE;
}
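/*
 * CR-access decoding used above, for reference: the exit qualification's
 * access-type field selects MOV-to-CR (0), MOV-from-CR (1), CLTS (2) or
 * LMSW (3).  A MOV to CR0/CR4 counts as requested by level 1 when a bit
 * owned through the guest/host mask would change relative to the read
 * shadow, i.e. (shadow & mask) != (new_value & mask); a MOV to CR3 is
 * requested unless the new value appears in the level-1 CR3-target list;
 * CR8 accesses depend on the CR8-load/CR8-store exiting controls.
 */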
static void* vmexit_analysis_retrieve_ptr_to_additional_memory(IN VMCS_OBJECT* vmcs,
                IN VMCS_FIELD field, IN BOOLEAN convert_gpa_to_hpa) {
    UINT64 bitmap_pa = vmcs_read(vmcs, field);
    UINT64 bitmap_hpa;
    UINT64 bitmap_hva;
    MAM_ATTRIBUTES attrs;

    if (convert_gpa_to_hpa) {
        GUEST_CPU_HANDLE gcpu = vmcs_get_owner(vmcs);
        GUEST_HANDLE guest = gcpu_guest_handle(gcpu);
        GPM_HANDLE gpm = gcpu_get_current_gpm(guest);
        if (!gpm_gpa_to_hpa(gpm, bitmap_pa, &bitmap_hpa, &attrs)) {
            VMM_DEADLOOP();
        }
    }
    else {
        bitmap_hpa = bitmap_pa;
    }
    if (!hmm_hpa_to_hva(bitmap_hpa, &bitmap_hva)) {
        VMM_DEADLOOP();
    }
    return (void*)bitmap_hva;
}

static BOOLEAN vmexit_analysis_is_bit_set_in_bitmap(void* bitmap, UINT32 bit_pos) {
    UINT32 byte = bit_pos >> 3;
    UINT32 pos_in_byte = bit_pos & 0x7;
    UINT8* bitmap_tmp = (UINT8*)bitmap;

    return ((bitmap_tmp[byte] & (1 << pos_in_byte)) != 0);
}

static BOOLEAN vmexit_analysis_io_exiting(GUEST_CPU_HANDLE gcpu UNUSED,
                VMCS_OBJECT* vmexit_vmcs, VMCS_OBJECT* control_vmcs) {
    PROCESSOR_BASED_VM_EXECUTION_CONTROLS ctrls;
    IA32_VMX_EXIT_QUALIFICATION qualification;
    UINT32 port;
    UINT32 size = 0;
    VMCS_LEVEL control_vmcs_level;

    ctrls.Uint32 = (UINT32)vmcs_read(control_vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS);

    if (ctrls.Bits.ActivateIoBitmaps == 0) {
        return (ctrls.Bits.UnconditionalIo == 1);
    }

    qualification.Uint64 = vmcs_read(vmexit_vmcs, VMCS_EXIT_INFO_QUALIFICATION);
    port = qualification.IoInstruction.PortNumber;
    switch (qualification.IoInstruction.Size) {
    case 0:
        size = 1;
        break;
    case 1:
        size = 2;
        break;
    case 3:
        size = 4;
        break;
    default:
        VMM_DEADLOOP();
    }

    if ((port + size) > 0xffff) {
        // wrap around
        return TRUE;
    }

    control_vmcs_level = vmcs_get_level(control_vmcs);
    if (port <= 0x7fff) {
        // I/O bitmap A covers ports 0x0000-0x7fff
        void* bitmap = vmexit_analysis_retrieve_ptr_to_additional_memory(control_vmcs, VMCS_IO_BITMAP_ADDRESS_A, (control_vmcs_level == VMCS_LEVEL_1));
        return vmexit_analysis_is_bit_set_in_bitmap(bitmap, port);
    }
    else {
        // I/O bitmap B covers ports 0x8000-0xffff
        void* bitmap = vmexit_analysis_retrieve_ptr_to_additional_memory(control_vmcs, VMCS_IO_BITMAP_ADDRESS_B, (control_vmcs_level == VMCS_LEVEL_1));
        UINT32 bit_pos = port & 0x7fff;
        return vmexit_analysis_is_bit_set_in_bitmap(bitmap, bit_pos);
    }
}
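/*
 * I/O bitmap layout assumed above: bitmap A holds one bit per port for
 * ports 0x0000-0x7fff, bitmap B one bit per port for 0x8000-0xffff, and a
 * set bit requests an exit.  Worked example: port 0x3f8 (COM1) maps to
 * bitmap A, byte 0x7f, bit 0.  Note that for a multi-byte access the check
 * above consults only the bit of the first port (besides the wrap-around
 * case), whereas the hardware also exits if any other accessed byte's bit
 * is set.
 */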
// IAH: annotate this function to see whether we get into it and, if so, where we go from there
static BOOLEAN vmexit_analysis_msr_access_exiting(GUEST_CPU_HANDLE gcpu,
                VMCS_OBJECT* control_vmcs, BOOLEAN is_rdmsr) {
    MSR_ID msr_id;
    HVA bitmap_hva;
    UINT32 bitmap_pos;
    void* bitmap;
    VMCS_LEVEL control_vmcs_level;
    PROCESSOR_BASED_VM_EXECUTION_CONTROLS ctrls;

#ifdef JLMDEBUG
    bprint("About to read the proc controls MSR\n");
    LOOP_FOREVER
#endif
    ctrls.Uint32 = (UINT32)vmcs_read(control_vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS);
    if (ctrls.Bits.UseMsrBitmaps == 0) {
        return TRUE;
    }
    msr_id = (MSR_ID)gcpu_get_native_gp_reg(gcpu, IA32_REG_RCX);

    if ((msr_id > 0x1fff) && (msr_id < 0xc0000000)) {
        return TRUE;
    }
    if (msr_id > 0xc0001fff) {
        return TRUE;
    }
    control_vmcs_level = vmcs_get_level(control_vmcs);
    bitmap_hva = (HVA)vmexit_analysis_retrieve_ptr_to_additional_memory(control_vmcs, VMCS_MSR_BITMAP_ADDRESS, (control_vmcs_level == VMCS_LEVEL_1));
    bitmap_pos = msr_id & 0x1fff;
    if (is_rdmsr) {
        if (msr_id <= 0x1fff) {
            bitmap = (void*)bitmap_hva;
        }
        else {
            VMM_ASSERT(msr_id >= 0xc0000000);
            VMM_ASSERT(msr_id <= 0xc0001fff);
            bitmap = (void*)(bitmap_hva + (1 KILOBYTE));
        }
    }
    else {
        if (msr_id <= 0x1fff) {
            bitmap = (void*)(bitmap_hva + (2 KILOBYTES));
        }
        else {
            VMM_ASSERT(msr_id >= 0xc0000000);
            VMM_ASSERT(msr_id <= 0xc0001fff);
            bitmap = (void*)(bitmap_hva + (3 KILOBYTES));
        }
    }
    return vmexit_analysis_is_bit_set_in_bitmap(bitmap, bitmap_pos);
}

static BOOLEAN vmexit_analysis_rdmsr_exiting(GUEST_CPU_HANDLE gcpu,
                VMCS_OBJECT* vmexit_vmcs UNUSED, VMCS_OBJECT* control_vmcs) {
    return vmexit_analysis_msr_access_exiting(gcpu, control_vmcs, TRUE);
}

static BOOLEAN vmexit_analysis_wrmsr_exiting(GUEST_CPU_HANDLE gcpu,
                VMCS_OBJECT* vmexit_vmcs UNUSED, VMCS_OBJECT* control_vmcs) {
    return vmexit_analysis_msr_access_exiting(gcpu, control_vmcs, FALSE);
}
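/*
 * MSR bitmap layout assumed above (one 4 KB page): read bitmap for MSRs
 * 0x00000000-0x00001fff at offset 0, read bitmap for 0xc0000000-0xc0001fff
 * at offset 1 KB, and the corresponding write bitmaps at offsets 2 KB and
 * 3 KB.  MSRs outside both ranges always exit, as does every RDMSR/WRMSR
 * when "use MSR bitmaps" is clear.  Worked example: RDMSR of IA32_EFER
 * (0xc0000080) is looked up at offset 1 KB, bit position 0x80, i.e.
 * byte 0x10, bit 0.
 */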
static BOOLEAN vmexit_analysis_timer_exiting(GUEST_CPU_HANDLE gcpu,
                VMCS_OBJECT* vmexit_vmcs UNUSED, VMCS_OBJECT* control_vmcs)
{
    // VMEXIT-request Analysis Algorithm:
    //   if Save-Value == 0                  VMEXIT-requested = TRUE;
    //   else if (counter <= other-counter)  VMEXIT-requested = TRUE;
    //   else                                VMEXIT-requested = FALSE;
    PIN_BASED_VM_EXECUTION_CONTROLS pin_exec;
    PIN_BASED_VM_EXECUTION_CONTROLS peer_pin_exec;
    BOOLEAN vmexit_requested = FALSE;
    VMCS_OBJECT *peer_control_vmcs;
    VM_EXIT_CONTROLS vmexit_ctrls;
    UINT32 counter_value;
    UINT32 peer_counter_value;

    pin_exec.Uint32 = (UINT32)vmcs_read(control_vmcs, VMCS_CONTROL_VECTOR_PIN_EVENTS);
    if (1 == pin_exec.Bits.VmxTimer) {
        // find the other VMCS
        if (VMCS_LEVEL_0 == vmcs_get_level(control_vmcs))
            peer_control_vmcs = gcpu_get_vmcs_layered(gcpu, VMCS_LEVEL_1);
        else if (VMCS_LEVEL_1 == vmcs_get_level(control_vmcs))
            peer_control_vmcs = gcpu_get_vmcs_layered(gcpu, VMCS_LEVEL_0);
        else {
            VMM_ASSERT(0);
            return TRUE;
        }

        peer_pin_exec.Uint32 = (UINT32)vmcs_read(peer_control_vmcs, VMCS_CONTROL_VECTOR_PIN_EVENTS);
        if (0 == peer_pin_exec.Bits.VmxTimer) {
            // the peer VMCS did not request the timer, so the request came from
            // the current-level VMCS; no need to check further
            vmexit_requested = TRUE;
        }
        else {
            // here both layers requested a VMEXIT
            vmexit_ctrls.Uint32 = (UINT32)vmcs_read(control_vmcs, VMCS_EXIT_CONTROL_VECTOR);
            if (vmexit_ctrls.Bits.SaveVmxTimer) {
                counter_value = (UINT32)vmcs_read(control_vmcs, VMCS_PREEMPTION_TIMER);
                peer_counter_value = (UINT32)vmcs_read(peer_control_vmcs, VMCS_PREEMPTION_TIMER);
                if (counter_value <= peer_counter_value) {
                    vmexit_requested = TRUE;
                }
            }
            else {
                // BUGBUG: Dima insists on handling this case in a more precise way
                VMM_ASSERT(0);
                vmexit_requested = TRUE;
            }
        }
    }
    return vmexit_requested;
}
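/*
 * Dispatch table: indexed by the basic exit reason, each entry answers
 * whether the level-1 VMCS requested the control behind the current exit.
 * Reasons that always concern level 1 (e.g. triple fault, CPUID, VMCALL)
 * map to vmexit_analysis_true_func; reasons the level-1 guest cannot have
 * requested in this VMM map to vmexit_analysis_false_func.
 */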
VMEXIT_IS_CONTROL_REQUESTED_FUNC vmexit_is_control_requested_func[Ia32VmxExitBasicReasonCount] = {
    /*  0 Ia32VmxExitBasicReasonSoftwareInterruptExceptionNmi */ vmexit_analysis_softinterrupt_exception_nmi_exiting,
    /*  1 Ia32VmxExitBasicReasonHardwareInterrupt             */ vmexit_analysis_hardware_interrupt_exiting,
    /*  2 Ia32VmxExitBasicReasonTripleFault                   */ vmexit_analysis_true_func,
    /*  3 Ia32VmxExitBasicReasonInitEvent                     */ vmexit_analysis_true_func,
    /*  4 Ia32VmxExitBasicReasonSipiEvent                     */ vmexit_analysis_true_func,
    /*  5 Ia32VmxExitBasicReasonSmiIoEvent                    */ vmexit_analysis_true_func,
    /*  6 Ia32VmxExitBasicReasonSmiOtherEvent                 */ vmexit_analysis_true_func,
    /*  7 Ia32VmxExitBasicReasonPendingInterrupt              */ vmexit_analysis_interrupt_window_exiting,
    /*  8 Ia32VmxExitNmiWindow                                */ vmexit_analysis_nmi_window_exiting,
    /*  9 Ia32VmxExitBasicReasonTaskSwitch                    */ vmexit_analysis_true_func,
    /* 10 Ia32VmxExitBasicReasonCpuidInstruction              */ vmexit_analysis_true_func,
    /* 11 Ia32VmxExitBasicReasonGetsecInstruction             */ vmexit_analysis_true_func,
    /* 12 Ia32VmxExitBasicReasonHltInstruction                */ vmexit_analysis_hlt_inst_exiting,
    /* 13 Ia32VmxExitBasicReasonInvdInstruction               */ vmexit_analysis_true_func,
    /* 14 Ia32VmxExitBasicReasonInvlpgInstruction             */ vmexit_analysis_invlpg_inst_exiting,
    /* 15 Ia32VmxExitBasicReasonRdpmcInstruction              */ vmexit_analysis_rdpmc_inst_exiting,
    /* 16 Ia32VmxExitBasicReasonRdtscInstruction              */ vmexit_analysis_rdtsc_inst_exiting,
    /* 17 Ia32VmxExitBasicReasonRsmInstruction                */ vmexit_analysis_true_func,
    /* 18 Ia32VmxExitBasicReasonVmcallInstruction             */ vmexit_analysis_true_func,
    /* 19 Ia32VmxExitBasicReasonVmclearInstruction            */ vmexit_analysis_true_func,
    /* 20 Ia32VmxExitBasicReasonVmlaunchInstruction           */ vmexit_analysis_true_func,
    /* 21 Ia32VmxExitBasicReasonVmptrldInstruction            */ vmexit_analysis_true_func,
    /* 22 Ia32VmxExitBasicReasonVmptrstInstruction            */ vmexit_analysis_true_func,
    /* 23 Ia32VmxExitBasicReasonVmreadInstruction             */ vmexit_analysis_true_func,
    /* 24 Ia32VmxExitBasicReasonVmresumeInstruction           */ vmexit_analysis_true_func,
    /* 25 Ia32VmxExitBasicReasonVmwriteInstruction            */ vmexit_analysis_true_func,
    /* 26 Ia32VmxExitBasicReasonVmxoffInstruction             */ vmexit_analysis_true_func,
    /* 27 Ia32VmxExitBasicReasonVmxonInstruction              */ vmexit_analysis_true_func,
    /* 28 Ia32VmxExitBasicReasonCrAccess                      */ vmexit_analysis_cr_access_exiting,
    /* 29 Ia32VmxExitBasicReasonDrAccess                      */ vmexit_analysis_dr_access_exiting,
    /* 30 Ia32VmxExitBasicReasonIoInstruction                 */ vmexit_analysis_io_exiting,
    /* 31 Ia32VmxExitBasicReasonMsrRead                       */ vmexit_analysis_rdmsr_exiting,
    /* 32 Ia32VmxExitBasicReasonMsrWrite                      */ vmexit_analysis_wrmsr_exiting,
    /* 33 Ia32VmxExitBasicReasonFailedVmEnterGuestState       */ vmexit_analysis_true_func,
    /* 34 Ia32VmxExitBasicReasonFailedVmEnterMsrLoading       */ vmexit_analysis_true_func,
    /* 35 Ia32VmxExitBasicReasonFailedVmExit                  */ vmexit_analysis_false_func,
    /* 36 Ia32VmxExitBasicReasonMwaitInstruction              */ vmexit_analysis_mwait_inst_exiting,
    /* 37 Ia32VmxExitBasicReasonMonitorTrapFlag               */ vmexit_analysis_false_func,
    /* 38 Ia32VmxExitBasicReasonInvalidVmexitReason38         */ vmexit_analysis_false_func,
    /* 39 Ia32VmxExitBasicReasonMonitor                       */ vmexit_analysis_monitor_inst_exiting,
    /* 40 Ia32VmxExitBasicReasonPause                         */ vmexit_analysis_pause_inst_exiting,
    /* 41 Ia32VmxExitBasicReasonFailureDueMachineCheck        */ vmexit_analysis_true_func,
    /* 42 Ia32VmxExitBasicReasonInvalidVmexitReason42         */ vmexit_analysis_false_func,
    /* 43 Ia32VmxExitBasicReasonTprBelowThreshold             */ vmexit_analysis_false_func,
    /* 44 Ia32VmxExitBasicReasonApicAccess                    */ vmexit_analysis_false_func,
    /* 45 Ia32VmxExitBasicReasonInvalidVmexitReason45         */ vmexit_analysis_false_func,
    /* 46 Ia32VmxExitBasicReasonGdtrIdtrAccess                */ vmexit_analysis_false_func,
    /* 47 Ia32VmxExitBasicReasonLdtrTrAccess                  */ vmexit_analysis_false_func,
    /* 48 Ia32VmxExitBasicReasonEptViolation                  */ vmexit_analysis_false_func,
    /* 49 Ia32VmxExitBasicReasonEptMisconfiguration           */ vmexit_analysis_false_func,
    /* 50 Ia32VmxExitBasicReasonInveptInstruction             */ vmexit_analysis_false_func,
    /* 51 Ia32VmxExitBasicReasonRdtscpInstruction             */ vmexit_analysis_false_func,
    /* 52 Ia32VmxExitBasicReasonPreemptionTimerExpired        */ vmexit_analysis_timer_exiting,
    /* 53 Ia32VmxExitBasicReasonInvvpidInstruction            */ vmexit_analysis_false_func,
    /* 54 Ia32VmxExitBasicReasonInvalidVmexitReason54         */ vmexit_analysis_false_func,
    /* 55 Ia32VmxExitBasicReasonXsetbvInstruction             */ vmexit_analysis_true_func
};

BOOLEAN vmexit_analysis_was_control_requested(GUEST_CPU_HANDLE gcpu,
                VMCS_OBJECT* vmexit_vmcs, VMCS_OBJECT* control_vmcs,
                IA32_VMX_EXIT_BASIC_REASON exit_reason) {
    if (exit_reason >= Ia32VmxExitBasicReasonCount) {
        return FALSE;
    }
    VMM_ASSERT(vmexit_vmcs != NULL);
    VMM_ASSERT(control_vmcs != NULL);
    return vmexit_is_control_requested_func[exit_reason](gcpu, vmexit_vmcs, control_vmcs);
}
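/*
 * Hedged usage sketch (not compiled): a layered-VMX exit router might call
 * vmexit_analysis_was_control_requested to decide whether an exit taken by
 * level 0 should be reflected to the level-1 guest.  The helper name, the
 * routing policy, and the choice of VMCS_LEVEL_0 as the source of the exit
 * information are illustrative assumptions, not part of this module.
 */
#if 0   /* illustrative sketch, not compiled */
static BOOLEAN should_reflect_exit_to_level1(GUEST_CPU_HANDLE gcpu,
                IA32_VMX_EXIT_BASIC_REASON reason) {
    // Exit information as recorded for the exit that just occurred
    VMCS_OBJECT* exit_info_vmcs = gcpu_get_vmcs_layered(gcpu, VMCS_LEVEL_0);
    // Controls programmed by the level-1 (guest) hypervisor
    VMCS_OBJECT* level1_ctrl_vmcs = gcpu_get_vmcs_layered(gcpu, VMCS_LEVEL_1);

    // Reflect the exit only if level 1 asked for the control that fired
    return vmexit_analysis_was_control_requested(gcpu, exit_info_vmcs,
                                                 level1_ctrl_vmcs, reason);
}
#endif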