/*
 * Copyright (c) 2013 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "file_codes.h"
#define VMM_DEADLOOP() VMM_DEADLOOP_LOG(GUEST_C)
#define VMM_ASSERT(__condition) VMM_ASSERT_LOG(GUEST_C, __condition)
#include "guest_internal.h"
#include "guest_cpu_internal.h"
#include "guest_cpu.h"
#include "vmcall.h"
#include "gpm_api.h"
#include "heap.h"
#include "vmexit.h"
#include "vmm_dbg.h"
#include "memory_allocator.h"
#include "vmm_events_data.h"
#include "guest_pci_configuration.h"
#include "ipc.h"
#include "host_memory_manager_api.h"
#include "memory_address_mapper_api.h"
#include "scheduler.h"
#include "host_cpu.h"
#include <pat_manager.h>
#include "ept.h"
#ifdef JLMDEBUG
#include "jlmdebug.h"
#endif

// Explicit guest magic numbers must be below this value; guests registered
// with ANONYMOUS_MAGIC_NUMBER get MIN_ANONYMOUS_GUEST_ID + guest id instead.
#define MIN_ANONYMOUS_GUEST_ID 30000

extern void vmm_acpi_pm_initialize(GUEST_ID guest_id);


// Guest Manager
#ifdef ENABLE_MULTI_GUEST_SUPPORT
static void raise_gcpu_add_event(CPU_ID from, void* arg);
#endif

// Number of guests registered so far; new guest ids are assigned from this.
static UINT32 guests_count = 0;
// Upper bound on gcpus per guest, fixed at guest_manager_init() time.
static UINT32 max_gcpus_count_per_guest = 0;
// Host CPU count as reported to guest_manager_init().
static UINT32 num_host_cpus = 0;
// Head of the singly-linked list of all registered guests (newest first).
static GUEST_DESCRIPTOR * guests = NULL;


// Initialize the guest manager.
// max_cpus_per_guest - maximum gcpus any single guest may have (must be > 0)
// host_cpu_count     - number of physical CPUs on the platform
void guest_manager_init(UINT16 max_cpus_per_guest, UINT16 host_cpu_count)
{
#ifdef DEBUG
    bprint("guest_manager_init %d\n", host_cpu_count);
#endif
    VMM_ASSERT(max_cpus_per_guest);
    max_gcpus_count_per_guest = max_cpus_per_guest;
    num_host_cpus = host_cpu_count;
    guests = NULL;
    // init subcomponents
    gcpu_manager_init(host_cpu_count);
    // create VMEXIT-related data
    // vmexit_initialize();
}


// Get Guest by guest ID
// Return NULL if no such guest
GUEST_HANDLE guest_handle(GUEST_ID guest_id)
{
    GUEST_DESCRIPTOR *guest;

#ifdef JLMDEBUG1
    bprint("guest_handle(%d). guests: 0x%016lx\n", guest_id, guests);
#endif
    // ids are assigned sequentially from 0, so an id >= guests_count
    // cannot belong to any registered guest - skip the list walk
    if(guest_id >= guests_count) {
        return NULL;
    }
    for(guest = guests; guest != NULL; guest= guest->next_guest) {
        if(guest->id == guest_id) {
#ifdef JLMDEBUG1
            bprint("guest_id: %d\n", guest->id);
#endif
            return guest;
        }
    }
    return NULL;
}

// Get Guest ID by guest handle
GUEST_ID guest_get_id( GUEST_HANDLE guest )
{
    VMM_ASSERT( guest );
    return guest->id;
}

// Register new guest
// For primary guest physical_memory_size must be 0
// cpu_affinity - each 1 bit corresponds to host CPU_ID that should run
//                GUEST_CPU on behalf of this guest. Number of bits should
//                correspond to the number of registered guest CPUs for this
//                guest. -1 means run on all available CPUs
// Return NULL on error
GUEST_HANDLE guest_register(UINT32 magic_number, UINT32 physical_memory_size,
                    UINT32 cpu_affinity, const VMM_POLICY *guest_policy)
{
    GUEST_DESCRIPTOR* guest;

#ifdef JLMDEBUG1
    bprint("guest_register: magic_number %u, \n", magic_number);
#endif
    // NOTE(review): vmm_malloc failure is only caught by VMM_ASSERT; also
    // fields not set below (e.g. physical_memory_base, which
    // guest_set_primary() asserts to be 0) rely on vmm_malloc returning
    // zeroed memory - confirm.
    guest = (GUEST_DESCRIPTOR *) vmm_malloc(sizeof(GUEST_DESCRIPTOR));
    VMM_ASSERT(guest);

    guest->id = (GUEST_ID)guests_count;
    ++guests_count;
    if(magic_number == ANONYMOUS_MAGIC_NUMBER) {
        // synthesize a unique magic number above the reserved range
        guest->magic_number = MIN_ANONYMOUS_GUEST_ID + guest->id;
    }
    else {
        VMM_ASSERT(magic_number < MIN_ANONYMOUS_GUEST_ID);
        guest->magic_number = magic_number;
    }
    guest->physical_memory_size = physical_memory_size;
    guest->cpu_affinity = cpu_affinity;
    guest->cpus_array = (GUEST_CPU_HANDLE *)
        vmm_malloc(sizeof(GUEST_CPU_HANDLE)*max_gcpus_count_per_guest);
    guest->cpu_count = 0;
    guest->flags = 0;
    guest->saved_image = NULL;
    guest->saved_image_size = 0;
    guest->startup_gpm = gpm_create_mapping();
#ifdef JLMDEBUG
    bprint("gpm_create_mapping() returned %p\n", guest->startup_gpm);
#endif
    VMM_ASSERT(guest->startup_gpm != GPM_INVALID_HANDLE);
    if (guest_policy == NULL)
        get_global_policy(&guest->guest_policy);
    else
        copy_policy(&guest->guest_policy, guest_policy);
    list_init(guest->cpuid_filter_list); // prepare list for CPUID filters
    // NOTE(review): assumes msr_control storage is embedded in
    // GUEST_DESCRIPTOR (not a dangling pointer) - confirm against the struct.
    list_init(guest->msr_control->msr_list); // prepare list for MSR handlers
    // vmexit_guest_initialize(guest->id);
    // push the new guest onto the head of the global list
    guest->next_guest = guests;
    guests = guest;
#ifdef JLMDEBUG
    bprint("returning from guest register\n");
#endif
    return guest;
}


// Get total number of guests
UINT16 guest_count( void )
{
    return (UINT16)guests_count;
}

// Get guest magic number
UINT32 guest_magic_number( const GUEST_HANDLE guest )
{
    VMM_ASSERT( guest );
    return guest->magic_number;
}

// Get Guest by guest magic number
// Return NULL if no such guest
GUEST_HANDLE guest_handle_by_magic_number(UINT32 magic_number)
{
    GUEST_DESCRIPTOR *guest;

#ifdef JLMDEBUG1
    bprint("guest_handle_by_magic_number(%d). guests: 0x%016lx\n",
           magic_number, guests);
#endif
    for(guest = guests; guest != NULL; guest = guest->next_guest) {
        if(guest->magic_number == magic_number) {
            return guest;
        }
    }
    return NULL;
}

#ifdef INCLUDE_UNUSED_CODE
// Get guest physical memory size. For primary guest returns 0.
UINT32 guest_physical_memory_size( const GUEST_HANDLE guest )
{
    VMM_ASSERT( guest );
    return guest->physical_memory_size;
}

// Get guest physical memory base. For primary guest returns 0.
UINT64 guest_physical_memory_base( const GUEST_HANDLE guest )
{
    VMM_ASSERT( guest );
    return guest->physical_memory_base;
}

void set_guest_physical_memory_base( const GUEST_HANDLE guest, UINT64 base )
{
    VMM_ASSERT( guest );
    VMM_ASSERT( GET_GUEST_IS_PRIMARY_FLAG(guest) == 0 );
    guest->physical_memory_base = base;
}
#endif

#ifdef ENABLE_MULTI_GUEST_SUPPORT
// Get guest cpu affinity.
UINT32 guest_cpu_affinity( const GUEST_HANDLE guest )
{
    VMM_ASSERT( guest );
    return guest->cpu_affinity;
}
#endif

#ifdef INCLUDE_UNUSED_CODE
// Set guest cpu affinity.
223 void guest_set_cpu_affinity( const GUEST_HANDLE guest, UINT32 cpu_affinity ) 224 { 225 VMM_ASSERT( guest ); 226 guest->cpu_affinity = cpu_affinity; 227 } 228 #endif 229 230 // Get guest POLICY 231 const VMM_POLICY *guest_policy( const GUEST_HANDLE guest ) 232 { 233 VMM_ASSERT(guest); 234 return &guest->guest_policy; 235 } 236 237 #ifdef INCLUDE_UNUSED_CODE 238 // Set guest POLICY 239 void guest_set_policy( const GUEST_HANDLE guest, const VMM_POLICY *new_policy) 240 { 241 VMM_ASSERT(guest); 242 VMM_ASSERT(new_policy); 243 copy_policy(&guest->guest_policy, new_policy); 244 } 245 #endif 246 247 // Guest properties. 248 // Default for all properties - FALSE 249 void guest_set_primary( GUEST_HANDLE guest ) 250 { 251 VMM_ASSERT(guest); 252 VMM_ASSERT(guest->physical_memory_size == 0); 253 VMM_ASSERT(guest->physical_memory_base == 0); 254 VMM_ASSERT(guest->saved_image == NULL); 255 VMM_ASSERT(guest->saved_image_size == 0); 256 guest->flags|= GUEST_IS_PRIMARY_FLAG; 257 } 258 259 BOOLEAN guest_is_primary(const GUEST_HANDLE guest) 260 { 261 VMM_ASSERT( guest ); 262 return (GET_GUEST_IS_PRIMARY_FLAG(guest) != 0); 263 } 264 265 GUEST_ID guest_get_primary_guest_id(void) 266 { 267 GUEST_DESCRIPTOR *guest; 268 269 for(guest = guests; guest != NULL; guest = guest->next_guest) { 270 if(0 != GET_GUEST_IS_PRIMARY_FLAG( guest )) { 271 return guest->id; 272 } 273 } 274 return INVALID_GUEST_ID; 275 } 276 277 void guest_set_real_BIOS_access_enabled(GUEST_HANDLE guest) 278 { 279 VMM_ASSERT( guest ); 280 guest->flags|= GUEST_BIOS_ACCESS_ENABLED_FLAG; 281 #ifdef ENABLE_EMULATOR 282 vmcall_register( guest_get_id(guest), VMCALL_EMULATOR_TERMINATE, 283 gcpu_return_to_native_execution, TRUE); // special case 284 #endif 285 } 286 287 #ifdef INCLUDE_UNUSED_CODE 288 BOOLEAN guest_is_real_BIOS_access_enabled( const GUEST_HANDLE guest ) 289 { 290 VMM_ASSERT( guest ); 291 return (GET_GUEST_BIOS_ACCESS_ENABLED_FLAG(guest) != 0); 292 } 293 #endif 294 295 void guest_set_nmi_owner(GUEST_HANDLE guest) 
296 { 297 #ifdef JLMDEBUG 298 bprint("guest_set_nmi_owner %p\n", guest); 299 #endif 300 VMM_ASSERT(guest); 301 guest->flags|= GUEST_IS_NMI_OWNER_FLAG; 302 } 303 304 BOOLEAN guest_is_nmi_owner(const GUEST_HANDLE guest ) 305 { 306 VMM_ASSERT( guest ); 307 return (GET_GUEST_IS_NMI_OWNER_FLAG(guest) != 0); 308 } 309 310 void guest_set_acpi_owner(GUEST_HANDLE guest ) 311 { 312 VMM_ASSERT( guest ); 313 #ifdef ENABLE_PM_S3 314 guest->flags|= GUEST_IS_ACPI_OWNER_FLAG; 315 vmm_acpi_pm_initialize(guest->id); // for ACPI owner only 316 #endif 317 } 318 319 #ifdef INCLUDE_UNUSED_CODE 320 BOOLEAN guest_is_acpi_owner(const GUEST_HANDLE guest ) 321 { 322 VMM_ASSERT( guest ); 323 return (GET_GUEST_IS_ACPI_OWNER_FLAG(guest) != 0); 324 } 325 #endif 326 327 void guest_set_default_device_owner(GUEST_HANDLE guest ) 328 { 329 VMM_ASSERT( guest ); 330 guest->flags|= GUEST_IS_DEFAULT_DEVICE_OWNER_FLAG; 331 } 332 333 #ifdef INCLUDE_UNUSED_CODE 334 BOOLEAN guest_is_default_device_owner(const GUEST_HANDLE guest ) 335 { 336 VMM_ASSERT( guest ); 337 return (GET_GUEST_IS_DEFAULT_DEVICE_OWNER_FLAG(guest) != 0); 338 } 339 #endif 340 341 GUEST_ID guest_get_default_device_owner_guest_id(void) 342 { 343 GUEST_DESCRIPTOR *guest; 344 345 for(guest = guests; guest != NULL; guest = guest->next_guest) { 346 if(0 != GET_GUEST_IS_DEFAULT_DEVICE_OWNER_FLAG( guest )) { 347 return guest->id; 348 } 349 } 350 return INVALID_GUEST_ID; 351 } 352 353 // Get startup guest physical memory descriptor 354 GPM_HANDLE guest_get_startup_gpm(GUEST_HANDLE guest) 355 { 356 VMM_ASSERT(guest); 357 return guest->startup_gpm; 358 } 359 360 // Get guest physical memory descriptor 361 GPM_HANDLE gcpu_get_current_gpm(GUEST_HANDLE guest) 362 { 363 GUEST_CPU_HANDLE gcpu; 364 365 VMM_ASSERT(guest); 366 gcpu = scheduler_get_current_gcpu_for_guest(guest_get_id(guest)); 367 VMM_ASSERT(gcpu); 368 return gcpu->active_gpm; 369 } 370 371 void gcpu_set_current_gpm(GUEST_CPU_HANDLE gcpu, GPM_HANDLE gpm) 372 { 373 VMM_ASSERT(gcpu); 374 
gcpu->active_gpm = gpm; 375 } 376 377 // Guest executable image 378 // Should not be called for primary guest 379 void guest_set_executable_image( GUEST_HANDLE guest, const UINT8* image_address, 380 UINT32 image_size, UINT32 image_load_GPA, BOOLEAN image_is_compressed ) 381 { 382 VMM_ASSERT( guest ); 383 VMM_ASSERT( GET_GUEST_IS_PRIMARY_FLAG(guest) == 0 ); 384 guest->saved_image = image_address; 385 guest->saved_image_size = image_size; 386 guest->image_load_GPA = image_load_GPA; 387 if (image_is_compressed) { 388 guest->flags|= GUEST_SAVED_IMAGE_IS_COMPRESSED_FLAG; 389 } 390 } 391 392 #ifdef INCLUDE_UNUSED_CODE 393 // Load guest executable image into the guest memory 394 // Should not be called for primary guest 395 void guest_load_executable_image( GUEST_HANDLE guest ) 396 { 397 VMM_ASSERT( guest ); 398 VMM_ASSERT( GET_GUEST_IS_PRIMARY_FLAG(guest) == 0 ); 399 guest = 0; 400 VMM_LOG(mask_anonymous, level_trace, 401 "guest::guest_load_executable_image() is not implemented yet\n"); 402 VMM_DEADLOOP(); 403 VMM_BREAKPOINT(); 404 } 405 #endif 406 407 // Add new CPU to the guest 408 // Return the newly created CPU 409 GUEST_CPU_HANDLE guest_add_cpu( GUEST_HANDLE guest ) 410 { 411 VIRTUAL_CPU_ID vcpu; 412 GUEST_CPU_HANDLE gcpu; 413 414 #ifdef JLMDEBUG1 415 bprint("guest_add_cpu\n"); 416 #endif 417 VMM_ASSERT( guest ); 418 VMM_ASSERT( guest->cpu_count < max_gcpus_count_per_guest ); 419 vcpu.guest_id = guest->id; 420 vcpu.guest_cpu_id = guest->cpu_count ; 421 ++(guest->cpu_count); 422 gcpu = gcpu_allocate(vcpu, guest); 423 guest->cpus_array[vcpu.guest_cpu_id] = gcpu; 424 return gcpu; 425 } 426 427 // Get guest CPU count 428 UINT16 guest_gcpu_count( const GUEST_HANDLE guest ) 429 { 430 VMM_ASSERT( guest ); 431 return guest->cpu_count; 432 } 433 434 // enumerate guest cpus 435 // Return NULL on enumeration end 436 GUEST_CPU_HANDLE guest_gcpu_first( const GUEST_HANDLE guest, GUEST_GCPU_ECONTEXT* context ) 437 { 438 const GUEST_CPU_HANDLE* p_gcpu; 439 440 VMM_ASSERT( guest ); 
441 p_gcpu = ARRAY_ITERATOR_FIRST( GUEST_CPU_HANDLE, guest->cpus_array, 442 guest->cpu_count, context ); 443 return p_gcpu ? *p_gcpu : NULL; 444 } 445 446 GUEST_CPU_HANDLE guest_gcpu_next( GUEST_GCPU_ECONTEXT* context ) 447 { 448 GUEST_CPU_HANDLE* p_gcpu; 449 450 p_gcpu = ARRAY_ITERATOR_NEXT( GUEST_CPU_HANDLE, context ); 451 return p_gcpu ? *p_gcpu : NULL; 452 } 453 454 // enumerate guests 455 // Return NULL on enumeration end 456 GUEST_HANDLE guest_first( GUEST_ECONTEXT* context ) 457 { 458 GUEST_DESCRIPTOR *guest = NULL; 459 460 VMM_ASSERT(context); 461 guest = guests; 462 *context = guest; 463 return guest; 464 } 465 466 GUEST_HANDLE guest_next( GUEST_ECONTEXT* context ) 467 { 468 GUEST_DESCRIPTOR *guest = NULL; 469 470 VMM_ASSERT(context); 471 guest = (GUEST_DESCRIPTOR *) *context; 472 473 if (guest != NULL) { 474 guest = guest->next_guest; 475 *context = guest; 476 } 477 return guest; 478 } 479 480 LIST_ELEMENT * guest_get_cpuid_list(GUEST_HANDLE guest) 481 { 482 return guest->cpuid_filter_list; 483 } 484 485 MSR_VMEXIT_CONTROL * guest_get_msr_control(GUEST_HANDLE guest) 486 { 487 return guest->msr_control; 488 } 489 490 // assumption - all CPUs are running 491 void guest_begin_physical_memory_modifications( GUEST_HANDLE guest ) 492 { 493 EVENT_GPM_MODIFICATION_DATA gpm_modification_data; 494 GUEST_CPU_HANDLE gcpu; 495 496 VMM_ASSERT( guest ); 497 gpm_modification_data.guest_id = guest->id; 498 gcpu = scheduler_get_current_gcpu_for_guest(guest_get_id(guest)); 499 VMM_ASSERT(gcpu); 500 event_raise(EVENT_BEGIN_GPM_MODIFICATION_BEFORE_CPUS_STOPPED, gcpu, &gpm_modification_data); 501 stop_all_cpus(); 502 //event_raise(EVENT_BEGIN_GPM_MODIFICATION_AFTER_CPUS_STOPPED, gcpu, &gpm_modification_data); 503 } 504 505 #pragma warning( push ) 506 #pragma warning (disable : 4100) // disable non-referenced formal parameters 507 508 static void guest_notify_gcpu_about_gpm_change( CPU_ID from UNUSED, void* arg ) 509 { 510 CPU_ID guest_id = (CPU_ID)(size_t)arg; 511 
GUEST_CPU_HANDLE gcpu; 512 513 gcpu = scheduler_get_current_gcpu_for_guest(guest_id); 514 if (!gcpu) { 515 // no gcpu for the current guest on the current host cpu 516 return; 517 } 518 gcpu_physical_memory_modified( gcpu ); 519 } 520 521 #pragma warning( pop ) 522 523 #ifdef INCLUDE_UNUSED_CODE 524 // assumption - all CPUs stopped 525 void guest_abort_physical_memory_modifications( GUEST_HANDLE guest ) 526 { 527 GUEST_CPU_HANDLE gcpu; 528 529 VMM_ASSERT( guest ); 530 gcpu = scheduler_get_current_gcpu_for_guest(guest_get_id(guest)); 531 VMM_ASSERT(gcpu); 532 start_all_cpus(NULL, NULL); 533 event_raise(EVENT_END_GPM_MODIFICATION_AFTER_CPUS_RESUMED, gcpu, NULL); 534 } 535 #endif 536 537 538 // assumption - all CPUs stopped 539 void guest_end_physical_memory_perm_update( GUEST_HANDLE guest ) 540 { 541 EVENT_GPM_MODIFICATION_DATA gpm_modification_data; 542 GUEST_CPU_HANDLE gcpu; 543 544 VMM_ASSERT( guest ); 545 546 // prepare to raise events 547 gpm_modification_data.guest_id = guest->id; 548 gpm_modification_data.operation = VMM_MEM_OP_UPDATE; 549 gcpu = scheduler_get_current_gcpu_for_guest(guest_get_id(guest)); 550 VMM_ASSERT(gcpu); 551 event_raise(EVENT_END_GPM_MODIFICATION_BEFORE_CPUS_RESUMED, gcpu, &gpm_modification_data); 552 start_all_cpus(NULL, NULL); 553 event_raise(EVENT_END_GPM_MODIFICATION_AFTER_CPUS_RESUMED, gcpu, &gpm_modification_data); 554 } 555 556 // assumption - all CPUs stopped 557 void guest_end_physical_memory_modifications( GUEST_HANDLE guest ) 558 { 559 EVENT_GPM_MODIFICATION_DATA gpm_modification_data; 560 IPC_DESTINATION ipc_dest; 561 GUEST_CPU_HANDLE gcpu; 562 563 VMM_ASSERT( guest ); 564 // notify gcpu of the guest running on the current host cpu 565 guest_notify_gcpu_about_gpm_change( guest->id, (void*)(size_t)guest->id ); 566 // notify all other gcpu of the guest 567 ipc_dest.addr_shorthand = IPI_DST_ALL_EXCLUDING_SELF; 568 ipc_dest.addr = 0; 569 ipc_execute_handler(ipc_dest, guest_notify_gcpu_about_gpm_change, (void*)(size_t)guest->id); 
570 // prepare to raise events 571 gpm_modification_data.guest_id = guest->id; 572 gpm_modification_data.operation = VMM_MEM_OP_RECREATE; 573 gcpu = scheduler_get_current_gcpu_for_guest(guest_get_id(guest)); 574 VMM_ASSERT(gcpu); 575 event_raise(EVENT_END_GPM_MODIFICATION_BEFORE_CPUS_RESUMED, gcpu, &gpm_modification_data); 576 start_all_cpus(NULL, NULL); 577 event_raise(EVENT_END_GPM_MODIFICATION_AFTER_CPUS_RESUMED, gcpu, &gpm_modification_data); 578 } 579 580 #ifdef INCLUDE_UNUSED_CODE 581 // assumption - all CPUs are running 582 void guest_begin_physical_memory_perm_switch( GUEST_HANDLE guest ) 583 { 584 EVENT_GPM_MODIFICATION_DATA gpm_modification_data; 585 GUEST_CPU_HANDLE gcpu; 586 587 VMM_ASSERT( guest ); 588 gpm_modification_data.guest_id = guest->id; 589 gcpu = scheduler_get_current_gcpu_for_guest(guest_get_id(guest)); 590 VMM_ASSERT(gcpu); 591 event_raise(EVENT_BEGIN_GPM_MODIFICATION_BEFORE_CPUS_STOPPED, gcpu, &gpm_modification_data); 592 } 593 594 // assumption - all CPUs stopped 595 void guest_end_physical_memory_perm_switch( GUEST_HANDLE guest ) 596 { 597 EVENT_GPM_MODIFICATION_DATA gpm_modification_data; 598 GUEST_CPU_HANDLE gcpu; 599 600 VMM_ASSERT( guest ); 601 // prepare to raise events 602 gpm_modification_data.guest_id = guest->id; 603 gpm_modification_data.operation = VMM_MEM_OP_SWITCH; 604 gcpu = scheduler_get_current_gcpu_for_guest(guest_get_id(guest)); 605 VMM_ASSERT(gcpu); 606 event_raise(EVENT_END_GPM_MODIFICATION_BEFORE_CPUS_RESUMED, gcpu, &gpm_modification_data); 607 event_raise(EVENT_END_GPM_MODIFICATION_AFTER_CPUS_RESUMED, gcpu, &gpm_modification_data); 608 } 609 #endif 610 611 #ifdef ENABLE_MULTI_GUEST_SUPPORT 612 GUEST_HANDLE guest_dynamic_create(BOOLEAN stop_and_notify, const VMM_POLICY *guest_policy) 613 { 614 GUEST_HANDLE guest = NULL; 615 GUEST_ID guest_id = INVALID_GUEST_ID; 616 EVENT_GUEST_CREATE_DATA guest_create_event_data; 617 618 if (TRUE == stop_and_notify) { 619 stop_all_cpus(); 620 } 621 // create guest 622 guest = 
guest_register(ANONYMOUS_MAGIC_NUMBER, 0, 623 (UINT32) -1 /* cpu affinity */, guest_policy); 624 if (! guest) { 625 VMM_LOG(mask_anonymous, level_trace,"Cannot create guest with the following params: \n" 626 "\t\tguest_magic_number = %#x\n" 627 "\t\tphysical_memory_size = %#x\n" 628 "\t\tcpu_affinity = %#x\n", 629 guest_magic_number(guest), 0, guest_cpu_affinity(guest) ); 630 return NULL; 631 } 632 guest_id = guest_get_id(guest); 633 vmexit_guest_initialize(guest_id); 634 gpci_guest_initialize(guest_id); 635 ipc_guest_initialize(guest_id); 636 event_manager_guest_initialize(guest_id); 637 guest_register_vmcall_services(guest); 638 VMM_LOG(mask_anonymous, level_trace,"Created new guest #%d\r\n", guest_id); 639 if (TRUE == stop_and_notify) { 640 vmm_zeromem(&guest_create_event_data, sizeof(guest_create_event_data)); 641 guest_create_event_data.guest_id = guest_id; 642 event_raise(EVENT_GUEST_CREATE, NULL, &guest_create_event_data); 643 start_all_cpus(NULL, NULL); 644 } 645 return guest; 646 } 647 648 BOOLEAN guest_dynamic_assign_memory(GUEST_HANDLE src_guest, GUEST_HANDLE dst_guest, 649 GPM_HANDLE memory_map) 650 { 651 GPM_HANDLE src_gpm = NULL, dst_gpm = NULL; 652 GPM_RANGES_ITERATOR gpm_iter = GPM_INVALID_RANGES_ITERATOR; 653 GPA gpa = 0, src_gpa = 0; 654 UINT64 size = 0; 655 BOOLEAN status = FALSE; 656 HPA hpa = 0; 657 UINT64 i; 658 MAM_ATTRIBUTES attrs; 659 660 VMM_ASSERT(dst_guest); 661 VMM_ASSERT(memory_map); 662 dst_gpm = gcpu_get_current_gpm(dst_guest); 663 gpm_iter = gpm_get_ranges_iterator(dst_gpm); 664 // check that target gpm is empty 665 VMM_ASSERT(GPM_INVALID_RANGES_ITERATOR == gpm_iter); 666 if(GPM_INVALID_RANGES_ITERATOR != gpm_iter) { 667 return FALSE; 668 } 669 if(src_guest != NULL) { 670 guest_begin_physical_memory_modifications( src_guest ); 671 gpm_iter = gpm_get_ranges_iterator(memory_map); 672 673 while(GPM_INVALID_RANGES_ITERATOR != gpm_iter) { 674 gpm_iter = gpm_get_range_details_from_iterator(memory_map, 675 gpm_iter, &gpa, &size); 676 
status = gpm_gpa_to_hpa(memory_map, gpa, &hpa, &attrs); 677 VMM_ASSERT(status); 678 src_gpm = gcpu_get_current_gpm(src_guest); 679 for(i = hpa; i < hpa + size; i += PAGE_4KB_SIZE) { 680 status = gpm_hpa_to_gpa(src_gpm, hpa, &src_gpa); 681 VMM_ASSERT(status); 682 gpm_remove_mapping(src_gpm, src_gpa, PAGE_4KB_SIZE); 683 } 684 } 685 guest_end_physical_memory_modifications( src_guest ); 686 } 687 status = gpm_copy(src_gpm, dst_gpm, FALSE, mam_no_attributes); 688 VMM_ASSERT(status); 689 return TRUE; 690 } 691 692 GUEST_CPU_HANDLE guest_dynamic_add_cpu(GUEST_HANDLE guest, 693 const VMM_GUEST_CPU_STARTUP_STATE* gcpu_startup, 694 CPU_ID host_cpu, BOOLEAN ready_to_run, 695 BOOLEAN stop_and_notify) 696 { 697 GUEST_CPU_HANDLE gcpu; 698 const VIRTUAL_CPU_ID* vcpu = NULL; 699 700 if (TRUE == stop_and_notify) { 701 guest_before_dynamic_add_cpu(); 702 } 703 704 // DK: Do not need this if IPC will work with WaitForSIPI 705 // check that host_cpu is active 706 // gcpu = scheduler_get_current_gcpu_on_host_cpu(host_cpu); 707 // if(gcpu != NULL && gcpu_is_wait_for_sipi(gcpu)) { 708 // start_all_cpus(NULL, NULL); 709 // return NULL; 710 // } 711 gcpu = guest_add_cpu(guest); 712 VMM_ASSERT( gcpu ); 713 // find init data 714 vcpu = guest_vcpu( gcpu ); 715 // register with scheduler 716 scheduler_register_gcpu(gcpu, host_cpu, ready_to_run); 717 if (gcpu_startup != NULL) { 718 VMM_LOG(mask_anonymous, level_trace, 719 "Setting up initial state for the newly created Guest CPU\n"); 720 gcpu_initialize(gcpu, gcpu_startup); 721 } 722 else { 723 VMM_LOG(mask_anonymous, level_trace,"Newly created Guest CPU was initialized with the Wait-For-SIPI state\n"); 724 } 725 host_cpu_vmcs_init(gcpu); 726 if (TRUE == stop_and_notify) { 727 guest_after_dynamic_add_cpu( gcpu ); 728 } 729 return gcpu; 730 } 731 #endif 732 733 #ifdef INCLUDE_UNUSED_CODE 734 GUEST_CPU_HANDLE guest_dynamic_add_cpu_default(GUEST_HANDLE guest) 735 { 736 return guest_dynamic_add_cpu(guest, NULL, hw_cpu_id(), TRUE, TRUE); 737 } 738 
#endif

#pragma warning( push )
#pragma warning (disable : 4100) // disable non-referenced formal parameters

#ifdef ENABLE_MULTI_GUEST_SUPPORT
// Raise EVENT_GCPU_ADD for the given gcpu, but only on the host cpu that the
// scheduler assigned to it; on every other cpu this is a no-op.  Used both as
// a start_all_cpus() callback and called directly.  'from' is unused.
static
void raise_gcpu_add_event(CPU_ID from UNUSED, void* arg)
{
    CPU_ID this_cpu_id = hw_cpu_id();
    GUEST_CPU_HANDLE gcpu = (GUEST_CPU_HANDLE) arg;

    VMM_LOG(mask_anonymous, level_trace,"cpu#%d raise gcpu add event gcpu %p\n",
            this_cpu_id, gcpu);
    if(this_cpu_id == scheduler_get_host_cpu_id(gcpu)) {
        event_raise(EVENT_GCPU_ADD, gcpu, NULL);
    }
}

// Quiesce all cpus before a gcpu is dynamically added.
void guest_before_dynamic_add_cpu( void )
{
    stop_all_cpus();
}

// Resume all cpus after a dynamic gcpu add.  On success (gcpu != NULL) the
// resumed cpus run raise_gcpu_add_event() and the event is also raised
// locally; on failure only the resume is performed.
void guest_after_dynamic_add_cpu( GUEST_CPU_HANDLE gcpu )
{
    CPU_ID cpu_id = hw_cpu_id();

    if (gcpu) {
        // created ok
        // NOTE(review): guest_dynamic_add_cpu() already called
        // host_cpu_vmcs_init(gcpu) before invoking this function - this
        // second call looks redundant; confirm it is idempotent.
        host_cpu_vmcs_init( gcpu );
        VMM_LOG(mask_anonymous, level_trace,"CPU#%d: Notify all on added gcpu: %p host_cpu: %d\r\n", cpu_id, gcpu, scheduler_get_host_cpu_id(gcpu));
        start_all_cpus(raise_gcpu_add_event, gcpu);
        VMM_LOG(mask_anonymous, level_trace,"CPU#%d: raise local gcpu add\r\n", cpu_id);
        raise_gcpu_add_event(cpu_id, gcpu);
    }
    else {
        // creation failed
        start_all_cpus( NULL, NULL );
    }
}
#endif

#pragma warning( pop )

// utils
#ifdef INCLUDE_UNUSED_CODE
// Translate a guest-virtual pointer to a host-virtual pointer and validate
// that it points at a vmcall parameter struct of the expected kind.
// Returns FALSE (without filling *host_ptr) if the translation fails, the
// struct's leading VMCALL_ID field does not match, or the struct would cross
// a 4KB page boundary (the gva->hva translation is valid for one page only).
BOOLEAN vmm_get_struct_host_ptr(GUEST_CPU_HANDLE gcpu,
                void* guest_ptr, VMCALL_ID expected_vmcall_id,
                UINT32 size_of_struct, void** host_ptr) {
    UINT64 gva = (UINT64)guest_ptr;
    UINT64 hva;
    void* host_ptr_tmp;

    if (!gcpu_gva_to_hva(gcpu, gva, &hva)) {
        VMM_LOG(mask_anonymous, level_trace,
                "%s: Invalid Parameter Struct Address %P\n", __FUNCTION__, gva);
        return FALSE;
    }
    host_ptr_tmp = (void*)hva;
    // the first field of every vmcall parameter struct is its VMCALL_ID
    if (*((VMCALL_ID*)host_ptr_tmp) != expected_vmcall_id) {
        VMM_LOG(mask_anonymous, level_trace,
                "%s: Invalid first field (vmcall_id) of the struct: %d instead of %d\n",
                __FUNCTION__, *((VMCALL_ID*)host_ptr_tmp), expected_vmcall_id);
        return FALSE;
    }
    if (ALIGN_BACKWARD(gva, PAGE_4KB_SIZE) != ALIGN_BACKWARD(gva+size_of_struct, PAGE_4KB_SIZE)) {
        VMM_LOG(mask_anonymous, level_trace,"%s: Parameters Struct crosses the page boundary. gva = %P, size_of_struct = 0x%x\n", __FUNCTION__, gva, size_of_struct);
        return FALSE;
    }
    *host_ptr = host_ptr_tmp;
    return TRUE;
}

#pragma warning( push )
#pragma warning (disable : 4100) // disable non-referenced formal parameters

// VMCALL handler: report to the guest driver that the vmm is running by
// writing a version into the guest-supplied parameter struct.
static VMM_STATUS is_uvmm_running(GUEST_CPU_HANDLE gcpu, ADDRESS *arg1,
                ADDRESS *arg2 UNUSED, ADDRESS *arg3 UNUSED) {
    void** is_vmm_running_params_guest_ptr = (void**)arg1;
    VMM_IS_UVMM_RUNNING_PARAMS* is_vmm_running_params;

    if (!vmm_get_struct_host_ptr(gcpu, *is_vmm_running_params_guest_ptr,
                VMCALL_IS_UVMM_RUNNING,
                sizeof(VMM_IS_UVMM_RUNNING_PARAMS),
                (void**)&is_vmm_running_params)) {
        VMM_LOG(mask_anonymous, level_trace,"%s: Error - could not retrieve pointer to parameters\n", __FUNCTION__);
        VMM_DEADLOOP();
        return VMM_ERROR;
    }
    VMM_LOG(mask_anonymous, level_trace,"%s: Notifying driver that uVMM is running\n", __FUNCTION__);
    is_vmm_running_params->version = 0;
    return VMM_OK;
}

// VMCALL handler: print a guest-supplied debug message to the vmm log.
static VMM_STATUS print_debug_message_service(GUEST_CPU_HANDLE gcpu, ADDRESS *arg1,
                ADDRESS *arg2 UNUSED, ADDRESS *arg3 UNUSED)
{
    void** print_debug_message_params_guest_ptr = (void**)arg1;
    VMM_PRINT_DEBUG_MESSAGE_PARAMS* print_debug_message_params;

    if (!vmm_get_struct_host_ptr(gcpu, *print_debug_message_params_guest_ptr,
                VMCALL_PRINT_DEBUG_MESSAGE,
                sizeof(VMM_PRINT_DEBUG_MESSAGE_PARAMS),
                (void**)&print_debug_message_params)) {
        VMM_LOG(mask_anonymous, level_trace,
                "%s: Error - could not retrieve pointer to parameters\n", __FUNCTION__);
        return VMM_ERROR;
    }
    VMM_LOG(mask_anonymous, level_trace,"%s\n", print_debug_message_params->message);
    return VMM_OK;
}

#pragma warning( pop )
#endif

#ifdef DEBUG
extern void vmm_io_emulator_register( GUEST_ID guest_id );
// Register the debug vmcall services (I/O emulator) for the guest.
// NOTE(review): compiled only under DEBUG, yet callers elsewhere in this
// file invoke it unconditionally - confirm the build configurations agree.
void guest_register_vmcall_services(GUEST_HANDLE guest)
{
    GUEST_ID guest_id = guest_get_id(guest);
    vmm_io_emulator_register(guest_id);
}
#endif