github.com/jlmucb/cloudproxy@v0.0.0-20170830161738-b5aa0b619bc4/cpvmm/vmm/vmexit/vmexit_io.c (about) 1 /* 2 * Copyright (c) 2013 Intel Corporation 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * http://www.apache.org/licenses/LICENSE-2.0 8 * Unless required by applicable law or agreed to in writing, software 9 * distributed under the License is distributed on an "AS IS" BASIS, 10 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 * See the License for the specific language governing permissions and 12 * limitations under the License. 13 */ 14 15 #include "file_codes.h" 16 #define VMM_DEADLOOP() VMM_DEADLOOP_LOG(VMEXIT_IO_C) 17 #define VMM_ASSERT(__condition) VMM_ASSERT_LOG(VMEXIT_IO_C, __condition) 18 #include "vmm_defs.h" 19 #include "vmm_dbg.h" 20 #include "heap.h" 21 #include "lock.h" 22 #include "hw_utils.h" 23 #include "guest.h" 24 #include "guest_cpu.h" 25 #include "vmexit.h" 26 #include "vmcs_api.h" 27 #include "vmx_ctrl_msrs.h" 28 #include "host_memory_manager_api.h" 29 #include "gpm_api.h" 30 #include "vmexit_io.h" 31 #include "memory_allocator.h" 32 #include "address.h" 33 #include "guest_cpu_vmenter_event.h" 34 #ifdef JLMDEBUG 35 #include "jlmdebug.h" 36 #endif 37 38 39 #define IO_VMEXIT_MAX_COUNT 64 40 41 typedef struct { 42 IO_PORT_ID io_port; // in fact only 16 bits are meaningful 43 UINT16 pad; 44 RW_ACCESS io_access; 45 //IO_PORT_OWNER io_owner; //TODO: resolve owner conflict issues. 46 IO_ACCESS_HANDLER io_handler; //TODO: will use io_tmsl_handler & io_uvmm_handler. 47 void* io_handler_context; 48 } IO_VMEXIT_DESCRIPTOR; 49 50 51 typedef struct { 52 GUEST_ID guest_id; 53 char padding[6]; 54 UINT8 *io_bitmap; 55 IO_VMEXIT_DESCRIPTOR io_descriptors[IO_VMEXIT_MAX_COUNT]; 56 LIST_ELEMENT list[1]; 57 } GUEST_IO_VMEXIT_CONTROL; 58 59 typedef struct { 60 LIST_ELEMENT guest_io_vmexit_controls[1]; 61 } IO_VMEXIT_GLOBAL_STATE; 62 63 64 static IO_VMEXIT_GLOBAL_STATE io_vmexit_global_state; 65 66 67 static VMEXIT_HANDLING_STATUS io_vmexit_handler(GUEST_CPU_HANDLE gcpu); 68 static IO_VMEXIT_DESCRIPTOR * io_port_lookup(GUEST_ID guest_id, IO_PORT_ID port_id); 69 static IO_VMEXIT_DESCRIPTOR * io_free_port_lookup(GUEST_ID guest_id); 70 static void io_blocking_read_handler( 71 GUEST_CPU_HANDLE gcpu, 72 IO_PORT_ID port_id, 73 unsigned port_size, 74 void *p_value 75 ); 76 static void io_blocking_write_handler( 77 GUEST_CPU_HANDLE gcpu, 78 IO_PORT_ID port_id, 79 unsigned port_size, 80 void *p_value 81 ); 82 static BOOLEAN io_blocking_handler( 83 GUEST_CPU_HANDLE gcpu, 84 IO_PORT_ID port_id, 85 unsigned port_size, 86 RW_ACCESS access, 87 BOOLEAN string_intr, // ins/outs 88 BOOLEAN rep_prefix, // rep 89 UINT32 rep_count, 90 void *p_value, 91 void *context UNUSED 92 ); 93 void io_transparent_read_handler( 94 GUEST_CPU_HANDLE gcpu, 95 IO_PORT_ID port_id, 96 unsigned port_size, // 1, 2, 4 97 void *p_value 98 ); 99 void io_transparent_write_handler( 100 GUEST_CPU_HANDLE gcpu, 101 IO_PORT_ID port_id, 102 unsigned port_size, // 1, 2, 4 103 void *p_value 104 ); 105 static GUEST_IO_VMEXIT_CONTROL* io_vmexit_find_guest_io_control( 106 GUEST_ID guest_id 107 ); 108 109 110 111 // FUNCTION : io_vmexit_setup() 112 // PURPOSE : Allocate and initialize IO VMEXITs related data structures, 113 // : common for all guests 114 // ARGUMENTS: GUEST_ID num_of_guests 115 // RETURNS : void 116 void io_vmexit_initialize(void) 117 { 118 vmm_memset( &io_vmexit_global_state, 0, sizeof(io_vmexit_global_state) ); 119 list_init(io_vmexit_global_state.guest_io_vmexit_controls); 120 } 121 122 123 // FUNCTION : io_vmexit_guest_setup() 124 // PURPOSE : Allocate and initialize IO VMEXITs related data structures for 125 // : specific guest 126 // ARGUMENTS: GUEST_ID guest_id 127 // RETURNS : void 128 void io_vmexit_guest_initialize(GUEST_ID guest_id) 129 { 130 GUEST_IO_VMEXIT_CONTROL *io_ctrl; 131 VMM_LOG(mask_anonymous, level_trace,"io_vmexit_guest_initialize start\r\n"); 132 133 io_ctrl = (GUEST_IO_VMEXIT_CONTROL *) vmm_malloc(sizeof(GUEST_IO_VMEXIT_CONTROL)); 134 // BEFORE_VMLAUNCH. MALLOC should not fail. 135 VMM_ASSERT(io_ctrl); 136 137 io_ctrl->guest_id = guest_id; 138 io_ctrl->io_bitmap = vmm_memory_alloc(2 * PAGE_4KB_SIZE); 139 140 // BEFORE_VMLAUNCH 141 VMM_ASSERT(io_ctrl->io_bitmap); 142 143 list_add(io_vmexit_global_state.guest_io_vmexit_controls, io_ctrl->list); 144 145 VMM_LOG(mask_anonymous, level_trace,"io_vmexit_guest_initialize end\r\n"); 146 147 ///// TTTTT vmm_memset(io_ctrl->io_bitmap, 0xFF, 2 * PAGE_4KB_SIZE); 148 vmexit_install_handler(guest_id, io_vmexit_handler, Ia32VmxExitBasicReasonIoInstruction); 149 } 150 151 152 // FUNCTION : io_vmexit_activate() 153 // PURPOSE : enables in HW IO VMEXITs for specific guest on given CPU 154 // ARGUMENTS: GUEST_CPU_HANDLE gcpu 155 // RETURNS : void 156 void io_vmexit_activate(GUEST_CPU_HANDLE gcpu) 157 { 158 PROCESSOR_BASED_VM_EXECUTION_CONTROLS exec_controls; 159 VMCS_OBJECT *vmcs = gcpu_get_vmcs(gcpu); 160 GUEST_HANDLE guest = gcpu_guest_handle(gcpu); 161 GUEST_ID guest_id = guest_get_id(guest); 162 GUEST_IO_VMEXIT_CONTROL *io_ctrl = NULL; 163 HPA hpa[2]; 164 int i; 165 VMEXIT_CONTROL vmexit_request; 166 167 VMM_LOG(mask_anonymous, level_trace,"io_vmexit_activate start\r\n"); 168 io_ctrl = io_vmexit_find_guest_io_control(guest_id); 169 170 VMM_ASSERT(io_ctrl); 171 172 vmm_memset(&exec_controls, 0, sizeof(exec_controls)); 173 vmm_memset(&vmexit_request, 0, sizeof(vmexit_request)); 174 175 if (NULL == io_ctrl->io_bitmap) { 176 VMM_LOG(mask_anonymous, level_trace,"IO bitmap for guest %d is not allocated\n", guest_id); 177 VMM_DEADLOOP(); 178 return; 179 } 180 181 // first load bitmap addresses, and if OK, enable bitmap based IO VMEXITs 182 for (i = 0; i < 2; ++i) { 183 if (FALSE == hmm_hva_to_hpa((HVA) &io_ctrl->io_bitmap[PAGE_4KB_SIZE * i], &hpa[i])) { 184 VMM_LOG(mask_anonymous, level_trace,"IO bitmap page for guest %d is invalid\n", guest_id); 185 VMM_DEADLOOP(); 186 return; 187 } 188 vmcs_write(vmcs, (VMCS_FIELD)(i + VMCS_IO_BITMAP_ADDRESS_A), hpa[i]); 189 VMM_LOG(mask_anonymous, level_trace,"IO bitmap page %c : VA=%P PA=%P\n", 'A'+i, &io_ctrl->io_bitmap[PAGE_4KB_SIZE * i], hpa[i]); 190 } 191 192 exec_controls.Bits.ActivateIoBitmaps = 1; 193 194 vmexit_request.proc_ctrls.bit_request = UINT64_ALL_ONES; 195 vmexit_request.proc_ctrls.bit_mask = exec_controls.Uint32; 196 gcpu_control_setup( gcpu, &vmexit_request ); 197 } 198 199 200 // FUNCTION : io_port_lookup() 201 // PURPOSE : Look for descriptor for specified port 202 // ARGUMENTS: GUEST_ID guest_id 203 // : UINT16 port_id 204 // RETURNS : Pointer to the descriptor, NULL if not found 205 IO_VMEXIT_DESCRIPTOR * io_port_lookup( 206 GUEST_ID guest_id, 207 IO_PORT_ID port_id) 208 { 209 GUEST_IO_VMEXIT_CONTROL *io_ctrl = NULL; 210 unsigned i; 211 212 io_ctrl = io_vmexit_find_guest_io_control(guest_id); 213 if(NULL == io_ctrl) { 214 return NULL; 215 } 216 217 for (i = 0; i < NELEMENTS(io_ctrl->io_descriptors); ++i) { 218 if (io_ctrl->io_descriptors[i].io_port == port_id 219 && io_ctrl->io_descriptors[i].io_handler != NULL) { 220 return &io_ctrl->io_descriptors[i]; 221 } 222 } 223 return NULL; 224 } 225 226 227 // FUNCTION : io_free_port_lookup() 228 // PURPOSE : Look for unallocated descriptor 229 // ARGUMENTS: GUEST_ID guest_id 230 // RETURNS : Pointer to the descriptor, NULL if not found 231 IO_VMEXIT_DESCRIPTOR * io_free_port_lookup(GUEST_ID guest_id) 232 { 233 GUEST_IO_VMEXIT_CONTROL *io_ctrl = NULL; 234 unsigned i; 235 236 io_ctrl = io_vmexit_find_guest_io_control(guest_id); 237 if(NULL == io_ctrl) { 238 return NULL; 239 } 240 241 for (i = 0; i < NELEMENTS(io_ctrl->io_descriptors); ++i) { 242 if (NULL == io_ctrl->io_descriptors[i].io_handler) { 243 return &io_ctrl->io_descriptors[i]; 244 } 245 } 246 return NULL; 247 } 248 249 250 #pragma warning( push ) 251 #pragma warning (disable : 4100) // Supress warnings about unreferenced formal parameter 252 253 void io_blocking_read_handler( 254 GUEST_CPU_HANDLE gcpu UNUSED, 255 IO_PORT_ID port_id UNUSED, 256 unsigned port_size, // 1, 2, 4 257 void *p_value) 258 { 259 switch (port_size) 260 { 261 case 1: 262 case 2: 263 case 4: 264 vmm_memset(p_value, 0xFF, port_size); 265 break; 266 default: 267 VMM_LOG(mask_anonymous, level_trace,"Invalid IO port size(%d)\n", port_size); 268 VMM_DEADLOOP(); 269 break; 270 } 271 } 272 273 void io_blocking_write_handler( 274 GUEST_CPU_HANDLE gcpu UNUSED, 275 IO_PORT_ID port_id UNUSED, 276 unsigned port_size UNUSED, // 1, 2, 4 277 void *p_value UNUSED) 278 { 279 } 280 281 282 // FUNCTION : io_blocking_handler() 283 // PURPOSE : Used as default handler when no IO handler is registered, 284 // : but port configured as caused VMEXIT. 285 // ARGUMENTS: GUEST_CPU_HANDLE gcpu, 286 // : IO_PORT_ID port_id, 287 // : unsigned port_size, 288 // : RW_ACCESS access, 289 // : void *p_value 290 BOOLEAN io_blocking_handler( GUEST_CPU_HANDLE gcpu, IO_PORT_ID port_id, 291 unsigned port_size, RW_ACCESS access, BOOLEAN string_intr, 292 BOOLEAN rep_prefix, UINT32 rep_count, void *p_value, 293 void *context) 294 { 295 (void)string_intr; 296 (void)rep_prefix; 297 (void)rep_count; 298 (void)context; 299 switch (access) { 300 case WRITE_ACCESS: 301 io_blocking_write_handler(gcpu, port_id, port_size, p_value); 302 break; 303 case READ_ACCESS: 304 io_blocking_read_handler(gcpu, port_id, port_size, p_value); 305 break; 306 default: 307 VMM_LOG(mask_anonymous, level_trace,"Invalid IO access(%d)\n", access); 308 VMM_DEADLOOP(); 309 break; 310 } 311 312 return TRUE; 313 } 314 315 316 void io_transparent_read_handler( 317 GUEST_CPU_HANDLE gcpu UNUSED, 318 IO_PORT_ID port_id, 319 unsigned port_size, // 1, 2, 4 320 void *p_value) 321 { 322 switch (port_size) { 323 case 1: 324 *(UINT8 *) p_value = hw_read_port_8(port_id); 325 break; 326 case 2: 327 *(UINT16 *) p_value = hw_read_port_16(port_id); 328 break; 329 case 4: 330 *(UINT32 *) p_value = hw_read_port_32(port_id); 331 break; 332 default: 333 VMM_LOG(mask_anonymous, level_trace,"Invalid IO port size(%d)\n", port_size); 334 VMM_DEADLOOP(); 335 break; 336 } 337 } 338 339 void io_transparent_write_handler( 340 GUEST_CPU_HANDLE gcpu UNUSED, 341 IO_PORT_ID port_id, 342 unsigned port_size, // 1, 2, 4 343 void *p_value) 344 { 345 switch (port_size) { 346 case 1: 347 hw_write_port_8(port_id, *(UINT8 *) p_value); 348 break; 349 case 2: 350 hw_write_port_16(port_id, *(UINT16 *) p_value); 351 break; 352 case 4: 353 hw_write_port_32(port_id, *(UINT32 *) p_value); 354 break; 355 default: 356 VMM_LOG(mask_anonymous, level_trace,"Invalid IO port size(%d)\n", port_size); 357 VMM_DEADLOOP(); 358 break; 359 } 360 } 361 362 void io_vmexit_transparent_handler( GUEST_CPU_HANDLE gcpu, 363 UINT16 port_id, unsigned port_size, // 1, 2, 4 364 RW_ACCESS access, void *p_value, void *context UNUSED) 365 { 366 switch (access) { 367 case WRITE_ACCESS: 368 io_transparent_write_handler(gcpu, port_id, port_size, p_value); 369 break; 370 case READ_ACCESS: 371 io_transparent_read_handler(gcpu, port_id, port_size, p_value); 372 break; 373 default: 374 VMM_LOG(mask_anonymous, level_trace,"Invalid IO access(%d)\n", access); 375 VMM_DEADLOOP(); 376 break; 377 } 378 } 379 380 381 #pragma warning( pop ) 382 383 // FUNCTION : io_vmexit_handler_register() 384 // PURPOSE : Register/update IO handler for spec port/guest pair. 385 // ARGUMENTS: GUEST_ID guest_id 386 // : IO_PORT_ID port_id 387 // : IO_ACCESS_HANDLER handler 388 // RETURNS : status 389 VMM_STATUS io_vmexit_handler_register( GUEST_ID guest_id, IO_PORT_ID port_id, 390 IO_ACCESS_HANDLER handler, void* context) 391 { 392 IO_VMEXIT_DESCRIPTOR *p_desc = io_port_lookup(guest_id, port_id); 393 VMM_STATUS status; 394 GUEST_IO_VMEXIT_CONTROL *io_ctrl = NULL; 395 396 io_ctrl = io_vmexit_find_guest_io_control(guest_id); 397 398 VMM_ASSERT(io_ctrl); 399 VMM_ASSERT(handler); 400 401 if (NULL != p_desc) { 402 VMM_LOG(mask_anonymous, level_trace,"IO Handler for Guest(%d) Port(%d) is already regitered. Update...\n", 403 guest_id, port_id); 404 } 405 else { 406 p_desc = io_free_port_lookup(guest_id); 407 } 408 409 if (NULL != p_desc) { 410 BITARRAY_SET(io_ctrl->io_bitmap, port_id); 411 p_desc->io_port = port_id; 412 p_desc->io_handler = handler; 413 p_desc->io_handler_context = context; 414 status = VMM_OK; 415 } 416 else { 417 // if reach the MAX number (IO_VMEXIT_MAX_COUNT) of ports, 418 // return ERROR, but not deadloop. 419 status = VMM_ERROR; 420 VMM_LOG(mask_anonymous, level_trace,"Not enough space to register IO handler\n"); 421 } 422 return status; 423 } 424 425 426 // FUNCTION : io_vmexit_handler_unregister() 427 // PURPOSE : Unregister IO handler for spec port/guest pair. 428 // ARGUMENTS: GUEST_ID guest_id 429 // : IO_PORT_ID port_id 430 // RETURNS : status 431 VMM_STATUS io_vmexit_handler_unregister( 432 GUEST_ID guest_id, 433 IO_PORT_ID port_id) 434 { 435 IO_VMEXIT_DESCRIPTOR *p_desc = io_port_lookup(guest_id, port_id); 436 VMM_STATUS status; 437 GUEST_IO_VMEXIT_CONTROL *io_ctrl = NULL; 438 439 io_ctrl = io_vmexit_find_guest_io_control(guest_id); 440 441 VMM_ASSERT(io_ctrl); 442 443 if (NULL != p_desc) { 444 BITARRAY_CLR(io_ctrl->io_bitmap, port_id); 445 p_desc->io_handler = NULL; 446 p_desc->io_handler_context = NULL; 447 status = VMM_OK; 448 } 449 else { 450 // if not registered before, still return SUCCESS! 451 status = VMM_OK; 452 VMM_LOG(mask_anonymous, level_trace,"IO Handler for Guest(%d) Port(%d) is not regitered\n", 453 guest_id, port_id); 454 } 455 456 return status; 457 } 458 459 // 460 // VM exits caused by execution of the INS and OUTS instructions 461 // have priority over the following faults: 462 // —-1. A #GP fault due to the relevant segment (ES for INS; DS for 463 // OUTS unless overridden by an instruction prefix) being unusable; 464 // --2. A #GP fault due to an offset (ESI, EDI) beyond the limit of 465 // the relevant segment, for 64bit, check non-canonical form; 466 // --3. An #AC exception (unaligned memory referenced when CR0.AM=1, 467 // EFLAGS.AC=1, and CPL=3). 468 // Hence, if those fault/exception above happens,inject back to guest. 469 static BOOLEAN io_access_native_fault( GUEST_CPU_HANDLE gcpu, 470 IA32_VMX_EXIT_QUALIFICATION *qualification) 471 { 472 VMCS_OBJECT *vmcs = gcpu_get_vmcs(gcpu); 473 IA32_VMX_VMCS_VM_EXIT_INFO_INSTRUCTION_INFO ios_instr_info; 474 BOOLEAN status = FALSE; 475 VMM_SEGMENT_ATTRIBUTES seg_ar = {0}; 476 VM_ENTRY_CONTROLS vmentry_control; 477 BOOLEAN is_64bit = FALSE; 478 UINT64 cs_selector = 0; 479 EM64T_CR0 guest_cr0; 480 EM64T_RFLAGS guest_rflags ; 481 482 VMM_ASSERT(qualification); 483 VMM_ASSERT(vmcs); 484 485 // only handle ins/outs string io instructions. 486 VMM_ASSERT(qualification->IoInstruction.String == 1); 487 488 ios_instr_info.Uint32 = (UINT32)vmcs_read(vmcs, VMCS_EXIT_INFO_INSTRUCTION_INFO); 489 vmentry_control.Uint32 = (UINT32)vmcs_read(vmcs, VMCS_ENTER_CONTROL_VECTOR); 490 491 if (1 == vmentry_control.Bits.Ia32eModeGuest){ 492 is_64bit = TRUE; 493 } 494 495 // 1) check the 1st/2nd condidtion.-- #GP 496 if(qualification->IoInstruction.Direction){ 497 498 UINT64 Rdi = gcpu_get_native_gp_reg(gcpu, IA32_REG_RDI); 499 500 // for INS -- check ES segement usable? 501 seg_ar.attr32 = (UINT32) vmcs_read(vmcs, VMCS_GUEST_ES_AR); 502 if(seg_ar.bits.null_bit == 1){ 503 // ES unusable, inject #GP 504 VMM_LOG(mask_anonymous, level_trace, 505 "INS - ES segment is un-usable, inject #GP\n"); 506 507 gcpu_inject_gp0(gcpu); 508 return TRUE; 509 } 510 511 if(is_64bit){ 512 // must 64bit address size. 513 if(FALSE == addr_is_canonical(Rdi)){ 514 // address is not canonical , inject #GP 515 VMM_LOG(mask_anonymous, level_trace, 516 "INS - address %P is not canonical, inject #GP\n", Rdi); 517 gcpu_inject_gp0(gcpu); 518 return TRUE; 519 } 520 } 521 else{ 522 // 523 //TODO: OFFSET/rdi check against segment limit. 524 //Assume this case doesn't happen for 32bit Win7 OS, do nothing. 525 //Need to develop case to test it 526 } 527 } 528 else{ 529 UINT64 Rsi = gcpu_get_native_gp_reg(gcpu, IA32_REG_RSI); 530 531 // for OUTS -- segment can be overridden, so check instr info 532 switch(ios_instr_info.InsOutsInstruction.SegReg){ 533 case 0: // ES 534 seg_ar.attr32 = (UINT32)vmcs_read(vmcs, VMCS_GUEST_ES_AR); 535 break; 536 case 1: // CS 537 seg_ar.attr32 = (UINT32)vmcs_read(vmcs, VMCS_GUEST_CS_AR); 538 break; 539 case 2: // SS 540 seg_ar.attr32 = (UINT32)vmcs_read(vmcs, VMCS_GUEST_SS_AR); 541 break; 542 case 3: // DS 543 seg_ar.attr32 = (UINT32)vmcs_read(vmcs, VMCS_GUEST_DS_AR); 544 break; 545 case 4: // FS 546 seg_ar.attr32 = (UINT32)vmcs_read(vmcs, VMCS_GUEST_FS_AR); 547 break; 548 case 5: // GS 549 seg_ar.attr32 = (UINT32)vmcs_read(vmcs, VMCS_GUEST_GS_AR); 550 break; 551 default: 552 // impossible 553 VMM_ASSERT(0); 554 break; 555 } 556 if(seg_ar.bits.null_bit == 1){ 557 // xS segment unusable, inject #GP 558 559 VMM_LOG(mask_anonymous, level_trace, 560 "OUTS - the relevant segment is un-usable, inject #GP\n"); 561 562 gcpu_inject_gp0(gcpu); 563 return TRUE; 564 } 565 if(is_64bit){ 566 // must 64bit address size. 567 //VMM_ASSERT(ios_instr_info.InsOutsInstruction.AddrSize == 2); 568 569 if(FALSE == addr_is_canonical(Rsi)){ 570 // address is not canonical , inject #GP 571 VMM_LOG(mask_anonymous, level_trace, 572 "INS - address %P is not canonical, inject #GP\n", 573 Rsi); 574 575 gcpu_inject_gp0(gcpu); 576 return TRUE; 577 } 578 } 579 else{ 580 //TODO: OFFSET/rsi check against segment limit. 581 //Assume this case doesn't happen for 32bit OS, do nothing. 582 //Need to develop case to test it 583 } 584 } 585 586 // 2) check the 3rd condidtion.-- #AC 587 cs_selector = vmcs_read(vmcs, VMCS_GUEST_CS_SELECTOR); 588 if(BITMAP_GET(cs_selector, CS_SELECTOR_CPL_BIT) == 3){ 589 // ring3 level. 590 guest_cr0.Uint64 = gcpu_get_guest_visible_control_reg(gcpu, IA32_CTRL_CR0); 591 if(guest_cr0.Bits.AM){ 592 // CR0.AM = 1 593 guest_rflags.Uint64 = vmcs_read(vmcs, VMCS_GUEST_RFLAGS); 594 if(guest_rflags.Bits.AC){ 595 // rflag.ac = 1 596 597 // TODO:check address (rdi/rsi) alignment based on 598 // ios_instr_info.InsOutsInstruction.AddrSize. 599 // if not word/dword/qword aligned, then inject #AC to guest 600 601 // Assume this case won't happen unless the IO port is allowed 602 // to access in ring3 level by setting I/O Permission Bit Map 603 // in TSS data structure.(actually, this is a hacking behavior) 604 605 // so, catch this case with deadloop. 606 VMM_DEADLOOP(); 607 } 608 } 609 610 } 611 return status; 612 613 } 614 615 616 VMEXIT_HANDLING_STATUS io_vmexit_handler(GUEST_CPU_HANDLE gcpu) 617 { 618 GUEST_HANDLE guest_handle = gcpu_guest_handle(gcpu); 619 GUEST_ID guest_id = guest_get_id(guest_handle); 620 VMCS_OBJECT *vmcs = gcpu_get_vmcs(gcpu); 621 UINT64 qualification = vmcs_read(vmcs, VMCS_EXIT_INFO_QUALIFICATION); 622 IA32_VMX_EXIT_QUALIFICATION *p_qualification = (IA32_VMX_EXIT_QUALIFICATION *) &qualification; 623 IO_PORT_ID port_id = (0 == p_qualification->IoInstruction.OpEncoding) ? 624 (UINT16) gcpu_get_native_gp_reg(gcpu, IA32_REG_RDX) : 625 (UINT16) p_qualification->IoInstruction.PortNumber; 626 IO_VMEXIT_DESCRIPTOR *p_desc = io_port_lookup(guest_id, port_id); 627 unsigned port_size = (unsigned) p_qualification->IoInstruction.Size + 1; 628 RW_ACCESS access = p_qualification->IoInstruction.Direction ? READ_ACCESS : WRITE_ACCESS; 629 IO_ACCESS_HANDLER handler = ((NULL == p_desc) ? io_blocking_handler : p_desc->io_handler); 630 void* context = ((NULL == p_desc) ? NULL : p_desc->io_handler_context); 631 BOOLEAN string_io = ( p_qualification->IoInstruction.String ? TRUE : FALSE); 632 BOOLEAN rep_prefix = ( p_qualification->IoInstruction.Rep ? TRUE : FALSE); 633 UINT32 rep_count = ( rep_prefix ? (UINT32) gcpu_get_native_gp_reg(gcpu, IA32_REG_RCX) : 0); 634 635 UINT64 io_value = 0; 636 637 IA32_VMX_VMCS_VM_EXIT_INFO_INSTRUCTION_INFO ios_instr_info; 638 639 640 if (FALSE == string_io){ 641 // ordinary IN/OUT instruction 642 // data is stored in guest RAX register, not need to 643 // pass it to Handler here. 644 io_value = 0; 645 } 646 else{ 647 // for string INS/OUTS instruction 648 // The linear address gva is the base address of 649 // relevant segment plus (E)DI (for INS) or (E)SI 650 // (for OUTS). It is valid only when the relevant 651 // segment is usable. Otherwise, it is undefined. 652 UINT64 gva = vmcs_read(vmcs, VMCS_EXIT_INFO_GUEST_LINEAR_ADDRESS); 653 HVA dumy_hva = 0; 654 655 // if a native fault/exception happens, then let OS handle them. 656 // and don't report invalid io-access event to Handler in order 657 // to avoid unexpected behaviors. 658 if(io_access_native_fault(gcpu, p_qualification) == TRUE){ 659 return VMEXIT_HANDLED; 660 } 661 if( FALSE == gcpu_gva_to_hva(gcpu, gva, &dumy_hva)){ 662 VMM_LOG(mask_anonymous, level_trace,"Guest(%d) Virtual Address %P Is Not Mapped\n", guest_id, gva); 663 // catch this failure to avoid further errors: 664 // for INS/OUTS instruction, if gva is invalid, which one will happen first? 665 // 1) native OS #PF; or 2) An IO VM exit 666 // if the testcase can reach here, then fix it. 667 VMM_DEADLOOP(); 668 } 669 670 ios_instr_info.Uint32 = (UINT32)vmcs_read(vmcs, VMCS_EXIT_INFO_INSTRUCTION_INFO); 671 switch(ios_instr_info.InsOutsInstruction.AddrSize){ 672 case 0: // 16-bit 673 gva &= (UINT64)0x0FFFF; 674 break; 675 case 1: // 32-bit 676 gva &= (UINT64)0x0FFFFFFFF; 677 break; 678 case 2: // 64-bit 679 break; 680 default: 681 // not h/w supported 682 VMM_DEADLOOP(); 683 } 684 // GVA address 685 io_value = (GVA)gva; 686 } 687 688 // call to the handler 689 if( TRUE == handler( gcpu, port_id, port_size, access, string_io, 690 rep_prefix, rep_count, (void *)io_value, context)){ 691 gcpu_skip_guest_instruction(gcpu); 692 } 693 return VMEXIT_HANDLED; 694 } 695 696 697 // FUNCTION : io_vmexit_block_port() 698 // PURPOSE : Enable VMEXIT on port without installing handler. 699 // : Blocking_handler will be used for such cases. 700 // ARGUMENTS: GUEST_ID guest_id 701 // : IO_PORT_ID port_from 702 // : IO_PORT_ID port_to 703 // RETURNS : void 704 void io_vmexit_block_port( 705 GUEST_ID guest_id, 706 IO_PORT_ID port_from, 707 IO_PORT_ID port_to) 708 { 709 unsigned i; 710 GUEST_IO_VMEXIT_CONTROL *io_ctrl = NULL; 711 712 io_ctrl = io_vmexit_find_guest_io_control(guest_id); 713 VMM_ASSERT(io_ctrl); 714 // unregister handler in case it was installed before 715 for (i = port_from; i <= port_to; ++i) { 716 io_vmexit_handler_unregister(guest_id, (IO_PORT_ID)i); 717 BITARRAY_SET(io_ctrl->io_bitmap, i); 718 } 719 } 720 721 static GUEST_IO_VMEXIT_CONTROL* io_vmexit_find_guest_io_control(GUEST_ID guest_id) 722 { 723 LIST_ELEMENT *iter = NULL; 724 GUEST_IO_VMEXIT_CONTROL *io_ctrl = NULL; 725 726 LIST_FOR_EACH(io_vmexit_global_state.guest_io_vmexit_controls, iter) { 727 io_ctrl = LIST_ENTRY(iter, GUEST_IO_VMEXIT_CONTROL, list); 728 if(io_ctrl->guest_id == guest_id) { 729 return io_ctrl; 730 } 731 } 732 return NULL; 733 } 734