github.com/jlmucb/cloudproxy@v0.0.0-20170830161738-b5aa0b619bc4/cpvmm/vmm/host/hw/local_apic.c

/*
 * Copyright (c) 2013 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *     http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "local_apic.h"
#include "em64t_defs.h"
#include "hw_utils.h"
#include "vmm_dbg.h"
#include "host_memory_manager_api.h"
#include "memory_allocator.h"
#include "file_codes.h"
#define VMM_DEADLOOP()          VMM_DEADLOOP_LOG(LOCAL_APIC_C)
#define VMM_ASSERT(__condition) VMM_ASSERT_LOG(LOCAL_APIC_C, __condition)
#ifdef JLMDEBUG
#include "jlmdebug.h"
#endif

#pragma warning( disable : 4100 )   // unreferenced formal parameter
#define STRINGIFY(x) #x


// Local Macros and Types

typedef struct _LOCAL_APIC_PER_CPU_DATA {
    ADDRESS          lapic_base_address_hpa;
    ADDRESS          lapic_base_address_hva;

    LOCAL_APIC_MODE  lapic_mode;
    CPU_ID           lapic_cpu_id;
    UINT8            pad[2];

    void (*lapic_read_reg) (const struct _LOCAL_APIC_PER_CPU_DATA* data,
                            LOCAL_APIC_REG_ID reg_id, void* p_data, unsigned bytes);
    void (*lapic_write_reg)(const struct _LOCAL_APIC_PER_CPU_DATA* data,
                            LOCAL_APIC_REG_ID reg_id, void* p_data, unsigned bytes);

} LOCAL_APIC_PER_CPU_DATA;

// array per hw cpu
static LOCAL_APIC_PER_CPU_DATA* lapic_cpu_data = NULL;

#define IA32_APIC_BASE_MSR_BSP              0x100
#define IA32_APIC_BASE_MSR_X2APIC_ENABLE    0x400
#define IA32_APIC_BASE_MSR_GLOBAL_ENABLE    0x800
#define IA32_APIC_BASE_MSR_PHY_ADDRESS      0xFFFFFF000

// SW enable/disable flag - bit 8 in the Spurious Vector Register
#define IA32_APIC_SW_ENABLE_BIT_IDX 8

#define ACCESS_RO   READ_ACCESS
#define ACCESS_WO   WRITE_ACCESS
#define ACCESS_RW   READ_WRITE_ACCESS

#define MODE_NO     0
#define MODE_MMIO   1
#define MODE_MSR    2
#define MODE_BOTH   (MODE_MMIO | MODE_MSR)


typedef struct _LOCAL_APIC_REGISTER {
    UINT32  offset;
    UINT8   access;
    UINT8   modes;
    UINT16  x2_size;
    char   *name;
} LOCAL_APIC_REGISTER;

#define LOCAL_APIC_REG_MSR(__reg_id)  (LOCAL_APIC_REG_MSR_BASE + (__reg_id))
#define LOCAL_APIC_REG_ADDRESS(lapic_data, __reg_id) \
    ((lapic_data)->lapic_base_address_hva + ((__reg_id) << 4))

#define GET_OTHER_LAPIC(__cpu_id)  (lapic_cpu_data + (__cpu_id))
#define GET_CPU_LAPIC()            GET_OTHER_LAPIC(hw_cpu_id())
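/*
 * Worked example (illustrative; assumes LOCAL_APIC_REG_MSR_BASE in local_apic.h
 * is 0x800, the architectural base of the x2APIC MSR range): the same register
 * id selects either a 16-byte-aligned MMIO slot or the corresponding MSR,
 * depending on the discovered mode. For the ID register, reg_id = 0x02:
 *   xAPIC : LOCAL_APIC_REG_ADDRESS(data, 0x02) = hva + (0x02 << 4) = hva + 0x20
 *   x2APIC: LOCAL_APIC_REG_MSR(0x02)           = 0x800 + 0x02      = MSR 0x802
 */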
/*
 * Forward Declarations for Local Functions
 */
#ifdef INCLUDE_UNUSED_CODE
static void lapic_mode_disable(void);
static void lapic_mode_enable_from_disabled(void);
static void lapic_mode_enable_from_x2(void);
static void lapic_mode_x2_from_disabled(void);
static void lapic_mode_x2_from_enabled(void);
#endif
static void lapic_read_reg_msr(const LOCAL_APIC_PER_CPU_DATA* data,
                LOCAL_APIC_REG_ID reg_id, void *p_data, unsigned bytes);
static void lapic_write_reg_msr(const LOCAL_APIC_PER_CPU_DATA* data,
                LOCAL_APIC_REG_ID reg_id, void *p_data, unsigned bytes);
static LOCAL_APIC_MODE local_apic_discover_mode(void);
static void lapic_read_reg_mmio(const LOCAL_APIC_PER_CPU_DATA* data,
                LOCAL_APIC_REG_ID reg_id, void *p_data, unsigned not_used);
static void lapic_write_reg_mmio(const LOCAL_APIC_PER_CPU_DATA* data,
                LOCAL_APIC_REG_ID reg_id, void *p_data, unsigned not_used);
static void lapic_fill_current_mode( LOCAL_APIC_PER_CPU_DATA* data );
#ifdef INCLUDE_UNUSED_CODE
// find highest set bit in 256bit reg (8 sequential regs 32bit each).
// Return UINT32_ALL_ONES if no 1s found.
static UINT32 find_highest_bit_in_256bit_reg( LOCAL_APIC_PER_CPU_DATA* data,
                LOCAL_APIC_REG_ID reg_id );
#endif
static BOOLEAN
local_apic_ipi_verify_params(LOCAL_APIC_IPI_DESTINATION_SHORTHAND dst_shorthand,
                             LOCAL_APIC_IPI_DELIVERY_MODE delivery_mode,
                             UINT8 vector,
                             LOCAL_APIC_IPI_LEVEL level,
                             LOCAL_APIC_IPI_TRIGGER_MODE trigger_mode);


#ifdef INCLUDE_UNUSED_CODE

static const LOCAL_APIC_REGISTER lapic_registers[] =
{
    { 0x00, NO_ACCESS, MODE_NO,   0, NULL },
    { 0x01, NO_ACCESS, MODE_NO,   0, NULL },
    { 0x02, ACCESS_RO, MODE_BOTH, 4, "ID" },
    { 0x03, ACCESS_RO, MODE_BOTH, 4, "Version" },
    { 0x04, NO_ACCESS, MODE_NO,   0, NULL },
    { 0x05, NO_ACCESS, MODE_NO,   0, NULL },
    { 0x06, NO_ACCESS, MODE_NO,   0, NULL },
    { 0x07, NO_ACCESS, MODE_NO,   0, NULL },
    { 0x08, ACCESS_RW, MODE_BOTH, 4, "TPR" },
    { 0x09, NO_ACCESS, MODE_NO,   0, NULL },
    { 0x0A, ACCESS_RO, MODE_BOTH, 4, "PPR" },
    { 0x0B, ACCESS_WO, MODE_BOTH, 4, "EOI" },  // GP fault on non-zero write
    { 0x0C, NO_ACCESS, MODE_NO,   0, NULL },
    { 0x0D, ACCESS_RO, MODE_BOTH, 4, "Logical Destination" },
    { 0x0E, ACCESS_RW, MODE_MMIO, 4, "Destination Format" },
    { 0x0F, ACCESS_RW, MODE_BOTH, 4, "Spurious interrupt Vector" },
    { 0x10, ACCESS_RO, MODE_BOTH, 4, "ISR[31..00]" },
    { 0x11, ACCESS_RO, MODE_BOTH, 4, "ISR[63..32]" },
    { 0x12, ACCESS_RO, MODE_BOTH, 4, "ISR[95..64]" },
    { 0x13, ACCESS_RO, MODE_BOTH, 4, "ISR[127..96]" },
    { 0x14, ACCESS_RO, MODE_BOTH, 4, "ISR[159..128]" },
    { 0x15, ACCESS_RO, MODE_BOTH, 4, "ISR[191..160]" },
    { 0x16, ACCESS_RO, MODE_BOTH, 4, "ISR[223..192]" },
    { 0x17, ACCESS_RO, MODE_BOTH, 4, "ISR[255..224]" },
    { 0x18, ACCESS_RO, MODE_BOTH, 4, "TMR[31..00]" },
    { 0x19, ACCESS_RO, MODE_BOTH, 4, "TMR[63..32]" },
    { 0x1A, ACCESS_RO, MODE_BOTH, 4, "TMR[95..64]" },
    { 0x1B, ACCESS_RO, MODE_BOTH, 4, "TMR[127..96]" },
    { 0x1C, ACCESS_RO, MODE_BOTH, 4, "TMR[159..128]" },
    { 0x1D, ACCESS_RO, MODE_BOTH, 4, "TMR[191..160]" },
    { 0x1E, ACCESS_RO, MODE_BOTH, 4, "TMR[223..192]" },
    { 0x1F, ACCESS_RO, MODE_BOTH, 4, "TMR[255..224]" },
    { 0x20, ACCESS_RO, MODE_BOTH, 4, "IRR[31..00]" },
    { 0x21, ACCESS_RO, MODE_BOTH, 4, "IRR[63..32]" },
    { 0x22, ACCESS_RO, MODE_BOTH, 4, "IRR[95..64]" },
    { 0x23, ACCESS_RO, MODE_BOTH, 4, "IRR[127..96]" },
    { 0x24, ACCESS_RO, MODE_BOTH, 4, "IRR[159..128]" },
    { 0x25, ACCESS_RO, MODE_BOTH, 4, "IRR[191..160]" },
    { 0x26, ACCESS_RO, MODE_BOTH, 4, "IRR[223..192]" },
    { 0x27, ACCESS_RO, MODE_BOTH, 4, "IRR[255..224]" },
    { 0x28, ACCESS_RW, MODE_BOTH, 4, "Error Status" },  // GP fault on non-zero write
    { 0x29, NO_ACCESS, MODE_NO,   0, NULL },
    { 0x2A, NO_ACCESS, MODE_NO,   0, NULL },
    { 0x2B, NO_ACCESS, MODE_NO,   0, NULL },
    { 0x2C, NO_ACCESS, MODE_NO,   0, NULL },
    { 0x2D, NO_ACCESS, MODE_NO,   0, NULL },
    { 0x2E, NO_ACCESS, MODE_NO,   0, NULL },
    { 0x2F, NO_ACCESS, MODE_NO,   0, NULL },
    { 0x30, ACCESS_RW, MODE_BOTH, 8, "Interrupt Command" },  // 64-bit when accessed via MSR
    { 0x31, ACCESS_RW, MODE_MMIO, 4, "Interrupt Command High" },
    { 0x32, ACCESS_RW, MODE_BOTH, 4, "LVT Timer" },
    { 0x33, ACCESS_RW, MODE_BOTH, 4, "LVT Thermal Sensor" },
    { 0x34, ACCESS_RW, MODE_BOTH, 4, "LVT Perf Monitoring" },
    { 0x35, ACCESS_RW, MODE_BOTH, 4, "LVT LINT0" },
    { 0x36, ACCESS_RW, MODE_BOTH, 4, "LVT LINT1" },
    { 0x37, ACCESS_RW, MODE_BOTH, 4, "LVT Error" },
    { 0x38, ACCESS_RW, MODE_BOTH, 4, "Initial counter" },
    { 0x39, ACCESS_RO, MODE_BOTH, 4, "Current counter" },
    { 0x3A, NO_ACCESS, MODE_NO,   0, NULL },
    { 0x3B, NO_ACCESS, MODE_NO,   0, NULL },
    { 0x3C, NO_ACCESS, MODE_NO,   0, NULL },
    { 0x3D, NO_ACCESS, MODE_NO,   0, NULL },
    { 0x3E, ACCESS_RW, MODE_BOTH, 4, "Divide Configuration" },
    { 0x3F, NO_ACCESS, MODE_NO,   0, NULL },
    { 0x40, ACCESS_WO, MODE_MSR,  4, "Self IPI" }
};

void (*lapic_mode_switch_transitions[3][3])(void) =
{
    { NULL,               lapic_mode_enable_from_disabled, lapic_mode_x2_from_disabled },
    { lapic_mode_disable, NULL,                            lapic_mode_x2_from_enabled  },
    { lapic_mode_disable, lapic_mode_enable_from_x2,       NULL                        }
};
#endif


/*
 * FUNCTION : local_apic_is_x2apic_supported()
 * PURPOSE  : Checks if x2APIC mode is supported by the CPU
 * ARGUMENTS: void
 * RETURNS  : TRUE if supported, FALSE otherwise
 */
BOOLEAN local_apic_is_x2apic_supported(void)
{
    CPUID_PARAMS cpuid_params;
    cpuid_params.m_rax = 1;
    hw_cpuid(&cpuid_params);
    return BIT_GET64(cpuid_params.m_rcx, CPUID_X2APIC_SUPPORTED_BIT) != 0;
}

/*
 * FUNCTION : local_apic_discover_mode()
 * PURPOSE  : Checks the Local APIC's current mode
 * ARGUMENTS: void
 * RETURNS  : LOCAL_APIC_MODE mode discovered
 */
LOCAL_APIC_MODE local_apic_discover_mode(void)
{
    UINT64 value = hw_read_msr(IA32_MSR_APIC_BASE);
    LOCAL_APIC_MODE mode;

    if (0 != BITMAP_GET(value, IA32_APIC_BASE_MSR_X2APIC_ENABLE))
        mode = LOCAL_APIC_X2_ENABLED;
    else if (0 != BITMAP_GET(value, IA32_APIC_BASE_MSR_GLOBAL_ENABLE))
        mode = LOCAL_APIC_ENABLED;
    else
        mode = LOCAL_APIC_DISABLED;
    return mode;
}

#ifdef INCLUDE_UNUSED_CODE
void lapic_mode_disable(void)
{
    UINT64 value = hw_read_msr(IA32_MSR_APIC_BASE);
    BITMAP_CLR(value, IA32_APIC_BASE_MSR_X2APIC_ENABLE | IA32_APIC_BASE_MSR_GLOBAL_ENABLE);
    hw_write_msr(IA32_MSR_APIC_BASE, value);
}

void lapic_mode_enable_from_disabled(void)
{
    UINT64 value = hw_read_msr(IA32_MSR_APIC_BASE);
    BITMAP_SET(value, IA32_APIC_BASE_MSR_GLOBAL_ENABLE);
    hw_write_msr(IA32_MSR_APIC_BASE, value);
}

void lapic_mode_enable_from_x2(void)
{
    UINT64 value = hw_read_msr(IA32_MSR_APIC_BASE);
    // disable x2APIC and xAPIC
    BITMAP_CLR(value, IA32_APIC_BASE_MSR_X2APIC_ENABLE | IA32_APIC_BASE_MSR_GLOBAL_ENABLE);
    hw_write_msr(IA32_MSR_APIC_BASE, value);
    // re-enable xAPIC
    BITMAP_SET(value, IA32_APIC_BASE_MSR_GLOBAL_ENABLE);
    hw_write_msr(IA32_MSR_APIC_BASE, value);
}

void lapic_mode_x2_from_disabled(void)
{
    UINT64 value = hw_read_msr(IA32_MSR_APIC_BASE);

    BITMAP_SET(value, IA32_APIC_BASE_MSR_GLOBAL_ENABLE);
    hw_write_msr(IA32_MSR_APIC_BASE, value);

    BITMAP_SET(value, IA32_APIC_BASE_MSR_X2APIC_ENABLE);
    hw_write_msr(IA32_MSR_APIC_BASE, value);
}

void lapic_mode_x2_from_enabled(void)
{
    UINT64 value = hw_read_msr(IA32_MSR_APIC_BASE);
    BITMAP_SET(value, IA32_APIC_BASE_MSR_X2APIC_ENABLE);
    hw_write_msr(IA32_MSR_APIC_BASE, value);
}

/*
 * FUNCTION : local_apic_set_mode()
 * PURPOSE  : Set one of the 3 possible modes
 * ARGUMENTS: LOCAL_APIC_MODE mode - mode to set
 * RETURNS  : LOCAL_APIC_NOERROR if OK, error code otherwise
 */
LOCAL_APIC_ERRNO local_apic_set_mode(LOCAL_APIC_MODE new_mode)
{
    LOCAL_APIC_ERRNO error = LOCAL_APIC_NOERROR;
    LOCAL_APIC_PER_CPU_DATA* data = GET_CPU_LAPIC();
    LOCAL_APIC_MODE old_mode = data->lapic_mode;

    switch (new_mode) {
    case LOCAL_APIC_DISABLED:
    case LOCAL_APIC_ENABLED:
        if (NULL != lapic_mode_switch_transitions[old_mode][new_mode])
            lapic_mode_switch_transitions[old_mode][new_mode]();
        break;

    case LOCAL_APIC_X2_ENABLED:
        if (NULL != lapic_mode_switch_transitions[old_mode][new_mode]) {
            if (TRUE == local_apic_is_x2apic_supported())
                lapic_mode_switch_transitions[old_mode][new_mode]();
            else
                error = LOCAL_APIC_X2_NOT_SUPPORTED;
        }
        break;

    default:
        VMM_ASSERT(0);
        break;
    }

    lapic_fill_current_mode( data );

    return error;
}
#endif

static void lapic_fill_current_mode( LOCAL_APIC_PER_CPU_DATA* lapic_data )
{
    lapic_data->lapic_mode = local_apic_discover_mode();

    switch (lapic_data->lapic_mode) {
    case LOCAL_APIC_X2_ENABLED:
        lapic_data->lapic_read_reg  = lapic_read_reg_msr;
        lapic_data->lapic_write_reg = lapic_write_reg_msr;
        break;

    case LOCAL_APIC_ENABLED:
        // SW-disabled is HW-enabled also
        lapic_data->lapic_read_reg  = lapic_read_reg_mmio;
        lapic_data->lapic_write_reg = lapic_write_reg_mmio;
        break;

    case LOCAL_APIC_DISABLED:
    default:
        VMM_LOG(mask_anonymous, level_trace,
                "Setting Local APIC into HW-disabled state on CPU#%d\n", hw_cpu_id());
        // BEFORE_VMLAUNCH. This case should not occur.
        VMM_ASSERT( FALSE );
    }
}

void lapic_read_reg_mmio(const LOCAL_APIC_PER_CPU_DATA* data,
                LOCAL_APIC_REG_ID reg_id, void *p_data, unsigned not_used UNUSED)
{
    *(UINT32 *) p_data = *(volatile UINT32 *) LOCAL_APIC_REG_ADDRESS(data, reg_id);
}

void lapic_write_reg_mmio(const LOCAL_APIC_PER_CPU_DATA* data,
                LOCAL_APIC_REG_ID reg_id, void *p_data, unsigned not_used UNUSED)
{
    *(volatile UINT32 *) LOCAL_APIC_REG_ADDRESS(data, reg_id) = *(UINT32 *) p_data;
}

void lapic_read_reg_msr(const LOCAL_APIC_PER_CPU_DATA* data UNUSED,
                LOCAL_APIC_REG_ID reg_id, void *p_data, unsigned bytes)
{
    UINT64 value;

    value = hw_read_msr(LOCAL_APIC_REG_MSR(reg_id));
    if (4 == bytes) {
        *(UINT32 *) p_data = (UINT32) value;
    }
    else {
        *(UINT64 *) p_data = value;
    }
}

void lapic_write_reg_msr(const LOCAL_APIC_PER_CPU_DATA* data UNUSED,
                LOCAL_APIC_REG_ID reg_id, void *p_data, unsigned bytes)
{
    if (4 == bytes) {
        hw_write_msr(LOCAL_APIC_REG_MSR(reg_id), *(UINT32 *) p_data);
    }
    else {
        hw_write_msr(LOCAL_APIC_REG_MSR(reg_id), *(UINT64 *) p_data);
    }
}

#ifdef INCLUDE_UNUSED_CODE
LOCAL_APIC_ERRNO local_apic_access(
    LOCAL_APIC_REG_ID reg_id,
    RW_ACCESS rw_access,
    void *data,
    INT32 bytes_to_deliver,
    INT32 *p_bytes_delivered )
{
    LOCAL_APIC_PER_CPU_DATA *lapic_data = GET_CPU_LAPIC();

    VMM_ASSERT(bytes_to_deliver > 0);
    VMM_ASSERT(WRITE_ACCESS == rw_access || READ_ACCESS == rw_access);

    if (NULL != p_bytes_delivered)
        *p_bytes_delivered = 0;

    // validate arguments
    if (reg_id >= NELEMENTS(lapic_registers)) {
        return LOCAL_APIC_INVALID_REGISTER_ERROR;
    }

    if (0 == (lapic_registers[reg_id].access & ACCESS_RW)) {
        return LOCAL_APIC_RESERVED_REGISTER_ERROR;
    }

    if (0 == (lapic_registers[reg_id].access & rw_access)) {
        return LOCAL_APIC_INVALID_RW_ACCESS_ERROR;
    }

    switch (lapic_data->lapic_mode) {
    case LOCAL_APIC_ENABLED:
        if (0 == BITMAP_GET(lapic_registers[reg_id].modes, MODE_MMIO)) {
            return LOCAL_APIC_REGISTER_MMIO_ACCESS_DISABLED_ERROR;
        }

        if (bytes_to_deliver < (INT32)sizeof(UINT32)) {
            return LOCAL_APIC_REGISTER_ACCESS_LENGTH_ERROR;
        }
        bytes_to_deliver = sizeof(UINT32);
        break;

    case LOCAL_APIC_X2_ENABLED:
        if (0 == BITMAP_GET(lapic_registers[reg_id].modes, MODE_MSR)) {
            return LOCAL_APIC_REGISTER_MSR_ACCESS_DISABLED_ERROR;
        }

        if (bytes_to_deliver < lapic_registers[reg_id].x2_size) {
            return LOCAL_APIC_REGISTER_ACCESS_LENGTH_ERROR;
        }
        bytes_to_deliver = lapic_registers[reg_id].x2_size;
        break;

    default:
        return LOCAL_APIC_ACCESS_WHILE_DISABLED_ERROR;
    }

    switch (rw_access) {
    case READ_ACCESS:
        lapic_data->lapic_read_reg(lapic_data, reg_id, data, bytes_to_deliver);
        break;

    case WRITE_ACCESS:
        lapic_data->lapic_write_reg(lapic_data, reg_id, data, bytes_to_deliver);
        break;

    default:
        return LOCAL_APIC_INVALID_RW_ACCESS_ERROR;
    }

    if (NULL != p_bytes_delivered)
        *p_bytes_delivered = bytes_to_deliver;

    return LOCAL_APIC_NOERROR;
}
#endif
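/*
 * For reference (Intel SDM, Vol. 3A), the IA32_APIC_BASE MSR layout relied on below:
 *   bit  8               - BSP flag              (IA32_APIC_BASE_MSR_BSP)
 *   bit 10               - x2APIC enable         (IA32_APIC_BASE_MSR_X2APIC_ENABLE)
 *   bit 11               - APIC global enable    (IA32_APIC_BASE_MSR_GLOBAL_ENABLE)
 *   bits MAXPHYADDR-1:12 - APIC base physical address
 * All other bits are reserved.
 */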
BOOLEAN validate_APIC_BASE_change(UINT64 msr_value)
{
    LOCAL_APIC_PER_CPU_DATA *lapic_data = GET_CPU_LAPIC();
    UINT64 physical_address_size_mask =
        ~((((UINT64)1) << ((UINT8)hw_read_address_size())) - 1);
    UINT64 bit_9_mask = (UINT64)1 << 9;
    UINT64 last_byte_mask = 0xff;
    UINT64 reserved_bits_mask;

    if (local_apic_is_x2apic_supported())
        reserved_bits_mask = bit_9_mask | last_byte_mask | ~((((UINT64)1) << 36) - 1)
                           | IA32_APIC_BASE_MSR_BSP;
    else
        reserved_bits_mask = physical_address_size_mask | bit_9_mask | last_byte_mask
                           | IA32_APIC_BASE_MSR_X2APIC_ENABLE | IA32_APIC_BASE_MSR_BSP;

    // If reserved bits are being changed, return FALSE so the caller will inject #GP.
    if ((hw_read_msr(IA32_MSR_APIC_BASE) & reserved_bits_mask) != (msr_value & reserved_bits_mask))
        return FALSE;

    // If the current mode is xAPIC, the legal target modes are xAPIC, x2APIC and the
    // disabled state. Reject any change to the disabled state, since uVMM relies on
    // xAPIC or x2APIC.

    // If the current mode is x2APIC, the legal target modes are x2APIC and the
    // disabled state. Reject any change to the disabled state for the same reason.
    if (lapic_data->lapic_mode == LOCAL_APIC_X2_ENABLED) {
        if (!(BITMAP_GET(msr_value, IA32_APIC_BASE_MSR_X2APIC_ENABLE)) ||
            !(BITMAP_GET(msr_value, IA32_APIC_BASE_MSR_GLOBAL_ENABLE))) {
            // VMM_DEADLOOP();
            return FALSE;  // inject #GP instead of deadloop -- recommended by the validation team
        }
    }
    else {
        if (lapic_data->lapic_mode != LOCAL_APIC_ENABLED) {
            // VMM_DEADLOOP();
            return FALSE;  // inject #GP instead of deadloop -- recommended by the validation team
        }

        if (!(BITMAP_GET(msr_value, IA32_APIC_BASE_MSR_GLOBAL_ENABLE))) {
            // VMM_DEADLOOP();
            return FALSE;  // inject #GP instead of deadloop -- recommended by the validation team
        }
    }

    return TRUE;
}
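/*
 * Worked example (illustrative): with MAXPHYADDR = 36 and x2APIC not supported,
 *   physical_address_size_mask = ~((1ULL << 36) - 1) = 0xFFFFFFF000000000
 *   reserved_bits_mask         = 0xFFFFFFF000000000 | 0x200 | 0xFF | 0x400 | 0x100
 *                              = 0xFFFFFFF0000007FF
 * so only bit 11 (global enable) and the base address bits 35:12 may change.
 * When x2APIC is supported the mask becomes 0xFFFFFFF0000003FF (bit 10 becomes
 * writable, and the address width is hard-coded to 36 bits above).
 */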
void local_apic_setup_changed(void)
{
    LOCAL_APIC_PER_CPU_DATA *lapic_data = GET_CPU_LAPIC();
    BOOLEAN result;

    lapic_data->lapic_base_address_hpa = hw_read_msr(IA32_MSR_APIC_BASE);
    lapic_data->lapic_base_address_hpa =
        ALIGN_BACKWARD(lapic_data->lapic_base_address_hpa, PAGE_4KB_SIZE);

    lapic_fill_current_mode( lapic_data );
    if (lapic_data->lapic_mode != LOCAL_APIC_X2_ENABLED) {
        result = hmm_map_uc_physical_page( lapic_data->lapic_base_address_hpa,
                    TRUE /* writable */, FALSE /* not_executable */,
                    FALSE /* do synch with other CPUs to avoid loop back */,
                    &(lapic_data->lapic_base_address_hva));
        // BEFORE_VMLAUNCH. Critical check, keep it.
        VMM_ASSERT(result);
    }

    VMM_LOG(mask_anonymous, level_trace, "CPU#%d: local apic base = %p\r\n",
            hw_cpu_id(), lapic_data->lapic_base_address_hpa);

    // We do not unmap the previous mapping, so old pages will remain mapped uncacheable.
}

#ifdef INCLUDE_UNUSED_CODE
ADDRESS lapic_base_address_hpa(void)
{
    return GET_CPU_LAPIC()->lapic_base_address_hpa;
}

ADDRESS lapic_base_address_hva(void)
{
    return GET_CPU_LAPIC()->lapic_base_address_hva;
}
#endif


// Update the cached lapic cpu id.
// (Must be called after S3 resume or whenever the Local APIC host base is changed, per CPU.)
BOOLEAN update_lapic_cpu_id(void)
{
    LOCAL_APIC_PER_CPU_DATA *lapic_data = GET_CPU_LAPIC();

    // BEFORE_VMLAUNCH. Critical check, keep it.
    VMM_ASSERT(lapic_data);

    lapic_data->lapic_cpu_id = local_apic_get_current_id();

    return TRUE;
}


BOOLEAN local_apic_cpu_init(void)
{
    local_apic_setup_changed();
    update_lapic_cpu_id();
    return TRUE;
}

BOOLEAN local_apic_init( UINT16 num_of_cpus )
{
    UINT32 chunk_size = num_of_cpus * sizeof( LOCAL_APIC_PER_CPU_DATA );

    if (lapic_cpu_data == NULL) {
        lapic_cpu_data = vmm_page_alloc( PAGE_ROUNDUP( chunk_size ));
        VMM_ASSERT( lapic_cpu_data != NULL );
        vmm_memset( lapic_cpu_data, 0, chunk_size );
    }

    return TRUE;
}

/*
 * Specific IPI support
 */

static void local_apic_wait_for_ipi_delivery( LOCAL_APIC_PER_CPU_DATA* lapic_data )
{
    LOCAL_APIC_INTERRUPT_COMMAND_REGISTER_LOW icr_low;

    // the delivery status bit does not exist in x2APIC mode
    if (lapic_data->lapic_mode != LOCAL_APIC_X2_ENABLED) {
        while (TRUE) {
            lapic_data->lapic_read_reg( lapic_data,
                    LOCAL_APIC_INTERRUPT_COMMAND_REG,
                    &icr_low.uint32, sizeof(icr_low.uint32) );

            if (IPI_DELIVERY_STATUS_IDLE == icr_low.bits.delivery_status) {
                break;
            }
        }
    }
}

BOOLEAN local_apic_ipi_verify_params(LOCAL_APIC_IPI_DESTINATION_SHORTHAND dst_shorthand,
                LOCAL_APIC_IPI_DELIVERY_MODE delivery_mode,
                UINT8 vector, LOCAL_APIC_IPI_LEVEL level,
                LOCAL_APIC_IPI_TRIGGER_MODE trigger_mode)
{
    BOOLEAN success = TRUE;

    if (dst_shorthand == IPI_DST_SELF &&
        (delivery_mode == IPI_DELIVERY_MODE_LOWEST_PRIORITY ||
         delivery_mode == IPI_DELIVERY_MODE_NMI ||
         delivery_mode == IPI_DELIVERY_MODE_INIT ||
         delivery_mode == IPI_DELIVERY_MODE_SMI ||
         delivery_mode == IPI_DELIVERY_MODE_START_UP)) {
        success = FALSE;
        VMM_LOG(mask_anonymous, level_trace,
                "IPI params verification failed: dst_shorthand == IPI_DST_SELF && delivery_mode==" STRINGIFY(delivery_mode) "\r\n");
    }

    if (dst_shorthand == IPI_DST_ALL_INCLUDING_SELF &&
        (delivery_mode == IPI_DELIVERY_MODE_LOWEST_PRIORITY ||
         delivery_mode == IPI_DELIVERY_MODE_NMI ||
         delivery_mode == IPI_DELIVERY_MODE_INIT ||
         delivery_mode == IPI_DELIVERY_MODE_SMI ||
         delivery_mode == IPI_DELIVERY_MODE_START_UP)) {
        success = FALSE;
        VMM_LOG(mask_anonymous, level_trace,
                "IPI params verification failed: dst_shorthand == IPI_DST_ALL_INCLUDING_SELF && delivery_mode==" STRINGIFY(delivery_mode) "\r\n");
    }

    if (trigger_mode == IPI_DELIVERY_TRIGGER_MODE_LEVEL &&
        (delivery_mode == IPI_DELIVERY_MODE_SMI ||
         delivery_mode == IPI_DELIVERY_MODE_START_UP)) {
        success = FALSE;
        VMM_LOG(mask_anonymous, level_trace,
                "IPI params verification failed: trigger_mode == IPI_DELIVERY_TRIGGER_MODE_LEVEL && delivery_mode==" STRINGIFY(delivery_mode) "\r\n");
    }

    if ((delivery_mode == IPI_DELIVERY_MODE_SMI || delivery_mode == IPI_DELIVERY_MODE_INIT)
        && vector != 0) {
        success = FALSE;
        VMM_LOG(mask_anonymous, level_trace,
                "IPI params verification failed: delivery_mode == " STRINGIFY(delivery_mode) ", vector must be zero\r\n");
    }

    // INIT level de-assert
    if (delivery_mode == IPI_DELIVERY_MODE_INIT && level == IPI_DELIVERY_LEVEL_DEASSERT &&
        trigger_mode == IPI_DELIVERY_TRIGGER_MODE_LEVEL &&
        dst_shorthand != IPI_DST_ALL_INCLUDING_SELF) {
        success = FALSE;
        VMM_LOG(mask_anonymous, level_trace,
                "IPI params verification failed: init level deassert ipi - destination must be IPI_DST_ALL_INCLUDING_SELF\r\n");
    }

    // level must be ASSERT for all IPIs other than INIT level de-assert
    if ((delivery_mode != IPI_DELIVERY_MODE_INIT || trigger_mode != IPI_DELIVERY_TRIGGER_MODE_LEVEL)
        && level == IPI_DELIVERY_LEVEL_DEASSERT) {
        success = FALSE;
        VMM_LOG(mask_anonymous, level_trace,
                "IPI params verification failed: level must be ASSERT for all ipis except init level deassert ipi\r\n");
    }

    return success;
}

BOOLEAN
local_apic_send_ipi(LOCAL_APIC_IPI_DESTINATION_SHORTHAND dst_shorthand,
                    UINT8 dst,
                    LOCAL_APIC_IPI_DESTINATION_MODE dst_mode,
                    LOCAL_APIC_IPI_DELIVERY_MODE delivery_mode,
                    UINT8 vector,
                    LOCAL_APIC_IPI_LEVEL level,
                    LOCAL_APIC_IPI_TRIGGER_MODE trigger_mode)
{
    LOCAL_APIC_INTERRUPT_COMMAND_REGISTER icr;
    UINT32 icr_high_save;
    LOCAL_APIC_PER_CPU_DATA* lapic_data = GET_CPU_LAPIC();
    BOOLEAN params_valid = FALSE;

    params_valid = local_apic_ipi_verify_params(dst_shorthand, delivery_mode,
                                                vector, level, trigger_mode);
    if (!params_valid) {
        return FALSE;
    }

    // wait for any IPI in progress to finish
    local_apic_wait_for_ipi_delivery(lapic_data);

    icr.hi_dword.uint32 = 0;

    if (IPI_DST_NO_SHORTHAND == dst_shorthand) {
        LOCAL_APIC_PER_CPU_DATA *dst_lapic_data = GET_OTHER_LAPIC(dst);
        icr.hi_dword.bits.destination = dst_lapic_data->lapic_cpu_id;
    }
    else if (IPI_DST_SELF == dst_shorthand) {
        icr.hi_dword.bits.destination = lapic_data->lapic_cpu_id;
    }
    else {
        icr.hi_dword.bits.destination = dst;
    }

    if (lapic_data->lapic_mode == LOCAL_APIC_X2_ENABLED)
        icr.hi_dword.uint32 = (UINT32)icr.hi_dword.bits.destination;

    icr.lo_dword.uint32 = 0;
    icr.lo_dword.bits.destination_shorthand = dst_shorthand;
    icr.lo_dword.bits.destination_mode = dst_mode;
    icr.lo_dword.bits.delivery_mode = delivery_mode;
    icr.lo_dword.bits.vector = vector;
    icr.lo_dword.bits.level = level;
    icr.lo_dword.bits.trigger_mode = trigger_mode;

    if (LOCAL_APIC_X2_ENABLED == lapic_data->lapic_mode) {
        // x2APIC: the whole 64-bit ICR is written in a single MSR access
        lapic_data->lapic_write_reg( lapic_data,
                LOCAL_APIC_INTERRUPT_COMMAND_REG, &icr, sizeof(icr));

        // wait for IPI in progress to finish
        local_apic_wait_for_ipi_delivery(lapic_data);
    }
    else {
        // save the previous uint32: if the guest is switched in the middle of IPI setup,
        // we need to restore the guest IPI destination uint32
        lapic_data->lapic_read_reg(lapic_data, LOCAL_APIC_INTERRUPT_COMMAND_HI_REG,
                &icr_high_save, sizeof(icr_high_save));

        // write new destination
        lapic_data->lapic_write_reg(lapic_data, LOCAL_APIC_INTERRUPT_COMMAND_HI_REG,
                &icr.hi_dword.uint32, sizeof(icr.hi_dword.uint32));

        // send IPI
        lapic_data->lapic_write_reg(lapic_data, LOCAL_APIC_INTERRUPT_COMMAND_REG,
                &icr.lo_dword.uint32, sizeof(icr.lo_dword.uint32));

        // wait for IPI in progress to finish
        local_apic_wait_for_ipi_delivery(lapic_data);

        // restore guest IPI destination
        lapic_data->lapic_write_reg( lapic_data, LOCAL_APIC_INTERRUPT_COMMAND_HI_REG,
                &icr_high_save, sizeof(icr_high_save));
    }

    return TRUE;
}

UINT8 local_apic_get_current_id( void )
{
    LOCAL_APIC_PER_CPU_DATA* lapic_data = GET_CPU_LAPIC();
    UINT32 local_apic_id = 0;

    lapic_data->lapic_read_reg( lapic_data, LOCAL_APIC_ID_REG,
            &local_apic_id, sizeof(local_apic_id));

    if (lapic_data->lapic_mode != LOCAL_APIC_X2_ENABLED)
        return (UINT8)(local_apic_id >> LOCAL_APIC_ID_LOW_RESERVED_BITS_COUNT);
    else
        return (UINT8)(local_apic_id);
}
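/*
 * Worked example (illustrative; assumes LOCAL_APIC_ID_LOW_RESERVED_BITS_COUNT is 24,
 * matching the xAPIC layout where the APIC ID occupies bits 31:24 of the ID register):
 * a raw xAPIC ID register value of 0x03000000 yields APIC ID 3 after the shift above,
 * while in x2APIC mode the MSR already holds the full 32-bit ID, truncated to UINT8 here.
 */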
#ifdef INCLUDE_UNUSED_CODE
void local_apic_send_init_to_self( void )
{
    local_apic_send_ipi( IPI_DST_NO_SHORTHAND, (UINT8) hw_cpu_id(),
                         IPI_DESTINATION_MODE_PHYSICAL,
                         IPI_DELIVERY_MODE_INIT,
                         0,
                         IPI_DELIVERY_LEVEL_ASSERT,
                         IPI_DELIVERY_TRIGGER_MODE_EDGE );

    VMM_LOG(mask_anonymous, level_trace,
            "local_apic_send_init_to_self: local_apic_send_ipi(INIT) returned!!!\n");
    VMM_DEADLOOP();
}
#endif

void local_apic_send_init( CPU_ID dst )
{
    local_apic_send_ipi( IPI_DST_NO_SHORTHAND, (UINT8) dst,
                         IPI_DESTINATION_MODE_PHYSICAL,
                         IPI_DELIVERY_MODE_INIT,
                         0,
                         IPI_DELIVERY_LEVEL_ASSERT,
                         IPI_DELIVERY_TRIGGER_MODE_EDGE );
}

#ifdef DEBUG

LOCAL_APIC_MODE local_apic_get_mode(void)
{
    return GET_CPU_LAPIC()->lapic_mode;
}

BOOLEAN local_apic_is_sw_enabled(void)
{
    LOCAL_APIC_PER_CPU_DATA* lapic_data = GET_CPU_LAPIC();
    UINT32 spurious_vector_reg_value = 0;

    if (LOCAL_APIC_DISABLED == lapic_data->lapic_mode) {
        return FALSE;
    }

    // now read the spurious vector register
    lapic_data->lapic_read_reg( lapic_data,
            LOCAL_APIC_SPURIOUS_INTR_VECTOR_REG,
            &spurious_vector_reg_value,
            sizeof(spurious_vector_reg_value));
    return BIT_GET(spurious_vector_reg_value, IA32_APIC_SW_ENABLE_BIT_IDX) ? TRUE : FALSE;
}

// Find the highest set bit in a 256-bit register (8 sequential 32-bit registers).
// Return UINT32_ALL_ONES if no bit is set.
static UINT32 find_highest_bit_in_reg(LOCAL_APIC_PER_CPU_DATA* lapic_data,
                LOCAL_APIC_REG_ID reg_id, UINT32 reg_size_32bit_units )
{
    UINT32 subreg_idx;
    UINT32 subreg_value;
    UINT32 bit_idx;

    for (subreg_idx = reg_size_32bit_units; subreg_idx > 0; --subreg_idx) {
        lapic_data->lapic_read_reg( lapic_data, reg_id + subreg_idx - 1,
                &subreg_value, sizeof(subreg_value));

        if (0 == subreg_value) {
            continue;
        }

        // find the highest set bit within this 32-bit chunk
        hw_scan_bit_backward( &bit_idx, subreg_value );

        return ((subreg_idx - 1) * sizeof(subreg_value) * 8 + bit_idx);
    }

    // if we are here - not found
    return UINT32_ALL_ONES;
}

// Find the maximum Interrupt Request Register priority.
// IRR priority is the upper 4-bit priority class of the highest vector whose IRR bit is set.
static UINT32 local_apic_get_irr_priority( LOCAL_APIC_PER_CPU_DATA* lapic_data )
{
    UINT32 irr_max_vector = find_highest_bit_in_reg( lapic_data,
                LOCAL_APIC_INTERRUPT_REQUEST_REG, 8 );

    return (irr_max_vector == UINT32_ALL_ONES) ? 0 : ((irr_max_vector >> 4) & 0xF);
}

// The Processor Priority Register is a read-only register set to the highest priority
// class between the ISR priority (priority class of the highest in-service vector) and the TPR:
// PPR = MAX( ISR, TPR )
static UINT32 local_apic_get_processor_priority( LOCAL_APIC_PER_CPU_DATA* lapic_data )
{
    UINT32 ppr_value;

    lapic_data->lapic_read_reg( lapic_data, LOCAL_APIC_PROCESSOR_PRIORITY_REG,
            &ppr_value, sizeof(ppr_value));
    return ((ppr_value >> 4) & 0xF);
}
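/*
 * Worked example (illustrative): a priority class is the upper 4 bits of a vector.
 * If the highest pending IRR vector is 0x61 (class 6) and the PPR reports class 5,
 * then 6 > 5 and local_apic_is_ready_interrupt_exist() below returns TRUE; with a
 * PPR class of 6 or higher the pending interrupt remains blocked.
 */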
// Test for ready-to-be-accepted fixed interrupts.
// A fixed interrupt is ready to be accepted if the Local APIC will inject it as soon as
// software enables interrupts (assuming no NMI is in service and no other
// execution-based interrupt blocking is active).
// A fixed interrupt is ready to be accepted if
//     IRR_Priority > Processor_Priority
BOOLEAN local_apic_is_ready_interrupt_exist(void)
{
    LOCAL_APIC_PER_CPU_DATA* lapic_data = GET_CPU_LAPIC();

    VMM_ASSERT( local_apic_is_sw_enabled() == TRUE );
    return local_apic_get_irr_priority(lapic_data) > local_apic_get_processor_priority(lapic_data);
}
#endif
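
/*
 * Illustrative usage sketch (not part of the original file; the call order is inferred
 * from the APIs above and the guard macro is hypothetical, so this never compiles in):
 */
#ifdef LOCAL_APIC_USAGE_SKETCH
static void local_apic_usage_sketch(UINT16 num_of_cpus, CPU_ID target_cpu)
{
    // once, before per-CPU setup: allocate the per-CPU lapic_cpu_data array
    local_apic_init(num_of_cpus);

    // on every CPU: map the xAPIC page (or select MSR access in x2APIC mode)
    // and cache this CPU's local APIC ID
    local_apic_cpu_init();

    // afterwards IPIs can be issued, e.g. an INIT IPI to another processor
    local_apic_send_init(target_cpu);
}
#endif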