github.com/jlmucb/cloudproxy@v0.0.0-20170830161738-b5aa0b619bc4/cpvmm/vmm/utils/event_mgr.c

/*
 * Copyright (c) 2013 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *     http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */


#include "file_codes.h"
#define VMM_DEADLOOP() VMM_DEADLOOP_LOG(EVENT_MGR_C)
#define VMM_ASSERT(__condition) VMM_ASSERT_LOG(EVENT_MGR_C, __condition)
#include "vmm_objects.h"
#include "guest_cpu.h"
#include "lock.h"
#include "event_mgr.h"
#include "common_libc.h"
#include "vmm_dbg.h"
#include "heap.h"
#include "hash64_api.h"
#include "memory_allocator.h"
#include "guest.h"
#include "list.h"
#include "vmm_callback.h"
#ifdef JLMDEBUG
#include "jlmdebug.h"
#endif

#define OBSERVERS_LIMIT         5
#define NO_EVENT_SPECIFIC_LIMIT ((UINT32)-1)


typedef struct _EVENT_ENTRY
{
    VMM_READ_WRITE_LOCK lock;
    event_callback      call[OBSERVERS_LIMIT];
} EVENT_ENTRY, *PEVENT_ENTRY;

typedef struct _CPU_EVENTS
{
    EVENT_ENTRY event[EVENTS_COUNT];
} CPU_EVENTS, *PCPU_EVENTS;

typedef struct _GUEST_EVENTS
{
    EVENT_ENTRY  event[EVENTS_COUNT];
    LIST_ELEMENT link;
    GUEST_ID     guest_id;
    UINT8        pad[6];
} GUEST_EVENTS;

typedef struct _EVENT_MANAGER
{
    HASH64_HANDLE gcpu_events;   // per-gcpu observer tables, keyed by (guest_id, guest_cpu_id)
    LIST_ELEMENT  guest_events;  // per-guest observer tables
    EVENT_ENTRY   general_event[EVENTS_COUNT]; // events not related to a particular gcpu, e.g. guest create
} EVENT_MANAGER;

UINT32        host_physical_cpus;
EVENT_MANAGER event_mgr;

/*
 * EVENT_CHARACTERISTICS:
 * Specifies event-specific characteristics: observer limit, allowed
 * registration scope and event name.
 * This list must be IDENTICAL(!) to the UVMM_EVENT_INTERNAL enumeration.
 */
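/*
 * A minimal sketch of the EVENT_CHARACTERISTICS layout that the initializers
 * below assume. The authoritative definition lives in event_mgr.h; the field
 * names here are inferred from the accesses in this file
 * (.specific_observers_limits, .scope) and the name field is an assumption:
 *
 *   typedef struct _EVENT_CHARACTERISTICS {
 *       UINT32 specific_observers_limits; // or NO_EVENT_SPECIFIC_LIMIT
 *       UINT32 scope;                     // EVENT_GCPU_SCOPE / EVENT_GUEST_SCOPE /
 *                                         // EVENT_GLOBAL_SCOPE bits, or EVENT_ALL_SCOPE
 *       CHAR8  *name;                     // assumed field name for the event-name string
 *   } EVENT_CHARACTERISTICS;
 */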
EVENT_CHARACTERISTICS events_characteristics[] =
{
    // { observers limit,      scope,            "event name" }
    // emulator
    {NO_EVENT_SPECIFIC_LIMIT, EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_EMULATOR_BEFORE_MEM_WRITE"},
    {NO_EVENT_SPECIFIC_LIMIT, EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_EMULATOR_AFTER_MEM_WRITE"},
    {NO_EVENT_SPECIFIC_LIMIT, EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_EMULATOR_AS_GUEST_ENTER"},
    {NO_EVENT_SPECIFIC_LIMIT, EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_EMULATOR_AS_GUEST_LEAVE"},

    // guest cpu CR writes
    {NO_EVENT_SPECIFIC_LIMIT, EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_GCPU_AFTER_GUEST_CR0_WRITE"},
    {NO_EVENT_SPECIFIC_LIMIT, EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_GCPU_AFTER_GUEST_CR3_WRITE"},
    {NO_EVENT_SPECIFIC_LIMIT, EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_GCPU_AFTER_GUEST_CR4_WRITE"},

    // guest cpu invalidate page
    {NO_EVENT_SPECIFIC_LIMIT, EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_GCPU_INVALIDATE_PAGE"},
    {1,                       EVENT_GCPU_SCOPE, (CHAR8 *)"EVENT_GCPU_PAGE_FAULT"},

    // guest cpu msr writes
    {NO_EVENT_SPECIFIC_LIMIT, EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_GCPU_AFTER_EFER_MSR_WRITE"},
    {NO_EVENT_SPECIFIC_LIMIT, EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_GCPU_AFTER_PAT_MSR_WRITE"},
    {NO_EVENT_SPECIFIC_LIMIT, EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_GCPU_AFTER_MTRR_MSR_WRITE"},

    // guest activity state
    {NO_EVENT_SPECIFIC_LIMIT, EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_GCPU_ACTIVITY_STATE_CHANGE"},
    {NO_EVENT_SPECIFIC_LIMIT, EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_GCPU_ENTERING_S3"},
    {NO_EVENT_SPECIFIC_LIMIT, EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_GCPU_RETRUNED_FROM_S3"},

    // EPT events
    {1,                       EVENT_GCPU_SCOPE, (CHAR8 *)"EVENT_GCPU_EPT_MISCONFIGURATION"},
    {1,                       EVENT_GCPU_SCOPE, (CHAR8 *)"EVENT_GCPU_EPT_VIOLATION"},

    // MTF events
    {1,                       EVENT_GCPU_SCOPE, (CHAR8 *)"EVENT_GCPU_MTF"},

    // GPM modification
    {NO_EVENT_SPECIFIC_LIMIT, EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_BEGIN_GPM_MODIFICATION_BEFORE_CPUS_STOPPED"},
    {NO_EVENT_SPECIFIC_LIMIT, EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_BEGIN_GPM_MODIFICATION_AFTER_CPUS_STOPPED"},
    {NO_EVENT_SPECIFIC_LIMIT, EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_END_GPM_MODIFICATION_BEFORE_CPUS_RESUMED"},
    {NO_EVENT_SPECIFIC_LIMIT, EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_END_GPM_MODIFICATION_AFTER_CPUS_RESUMED"},

    // guest memory modification
    {NO_EVENT_SPECIFIC_LIMIT, EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_BEGIN_GUEST_MEMORY_MODIFICATION"},
    {NO_EVENT_SPECIFIC_LIMIT, EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_END_GUEST_MEMORY_MODIFICATION"},

    // guest lifecycle
    {NO_EVENT_SPECIFIC_LIMIT, EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_GUEST_CREATE"},
    {NO_EVENT_SPECIFIC_LIMIT, EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_GUEST_DESTROY"},

    // gcpu lifecycle
    {NO_EVENT_SPECIFIC_LIMIT, EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_GCPU_ADD"},
    {NO_EVENT_SPECIFIC_LIMIT, EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_GCPU_REMOVE"},
    {NO_EVENT_SPECIFIC_LIMIT, EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_GUEST_LAUNCH"},

    {1,                       EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_GUEST_CPU_BREAKPOINT"},
    {1,                       EVENT_ALL_SCOPE,  (CHAR8 *)"EVENT_GUEST_CPU_SINGLE_STEP"},
};


static BOOLEAN event_manager_add_gcpu(GUEST_CPU_HANDLE gcpu, void *pv);
static BOOLEAN event_register_internal(PEVENT_ENTRY p_event,
                UVMM_EVENT_INTERNAL e,  // in: event
                event_callback call     // in: callback to register on event e
                );

#if 0 // not defined
static BOOLEAN event_unregister_internal(PEVENT_ENTRY p_event,
                UVMM_EVENT_INTERNAL e, event_callback call);
#endif

static BOOLEAN event_raise_internal(PEVENT_ENTRY p_event,
                UVMM_EVENT_INTERNAL e,
                GUEST_CPU_HANDLE gcpu,  // in: guest cpu
                void *p                 // in: pointer to event specific structure
                );
static BOOLEAN event_global_raise(UVMM_EVENT_INTERNAL e, GUEST_CPU_HANDLE gcpu, void *p);
BOOLEAN event_guest_raise(UVMM_EVENT_INTERNAL e, GUEST_CPU_HANDLE gcpu, void *p);
static BOOLEAN event_gcpu_raise(UVMM_EVENT_INTERNAL e, GUEST_CPU_HANDLE gcpu, void *p);
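/*
 * Observer lookup helpers. The per-gcpu tables live in the gcpu_events hash,
 * keyed by packing the guest id and the guest cpu id into a single 64-bit
 * value. Assuming GUEST_ID is a 16-bit type, a gcpu with guest_id == 2 and
 * guest_cpu_id == 3 hashes under
 *
 *   (UINT64)(2 << (8 * sizeof(GUEST_ID)) | 3) == 0x20003
 *
 * so distinct (guest, cpu) pairs cannot collide as long as both ids fit in
 * 16 bits.
 */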
static EVENT_ENTRY * get_gcpu_observers(UVMM_EVENT_INTERNAL e, GUEST_CPU_HANDLE gcpu)
{
    const VIRTUAL_CPU_ID* p_vcpu;
    PCPU_EVENTS p_cpu_events = NULL;
    EVENT_ENTRY *p_event = NULL;
    BOOLEAN res;

    p_vcpu = guest_vcpu(gcpu);
    VMM_ASSERT(p_vcpu);
    res = hash64_lookup(event_mgr.gcpu_events,
            (UINT64) (p_vcpu->guest_id << (8 * sizeof(GUEST_ID)) | p_vcpu->guest_cpu_id),
            (UINT64 *) &p_cpu_events);
    (void)res;
    if(p_cpu_events != NULL) {
        p_event = &(p_cpu_events->event[e]);
    }
    return p_event;
}

static EVENT_ENTRY * get_guest_observers(UVMM_EVENT_INTERNAL e, GUEST_HANDLE guest)
{
    EVENT_ENTRY *p_event = NULL;
    GUEST_ID guest_id = guest_get_id(guest);
    LIST_ELEMENT *iter = NULL;

    LIST_FOR_EACH(&event_mgr.guest_events, iter) {
        GUEST_EVENTS *p_guest_events;
        p_guest_events = LIST_ENTRY(iter, GUEST_EVENTS, link);
        if(p_guest_events->guest_id == guest_id) {
            p_event = &p_guest_events->event[e];
            break;
        }
    }
    return p_event;
}

static EVENT_ENTRY * get_global_observers(UVMM_EVENT_INTERNAL e)
{
    return &(event_mgr.general_event[e]);
}
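/*
 * Worked example for event_observers_limit() below: EVENT_GCPU_PAGE_FAULT is
 * declared in events_characteristics[] with a specific limit of 1, so only a
 * single observer slot is usable for it; EVENT_GUEST_CREATE uses
 * NO_EVENT_SPECIFIC_LIMIT, so the full array bound OBSERVERS_LIMIT (5 slots)
 * applies.
 */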
static UINT32 event_observers_limit(UVMM_EVENT_INTERNAL e)
{
    UINT32 observers_limits = 0;

    VMM_ASSERT(e < ARRAY_SIZE(events_characteristics));

    /*
     * See if the event has a specific observers limit. (If none, use the
     * array boundary limit.)
     */
    if(events_characteristics[e].specific_observers_limits == NO_EVENT_SPECIFIC_LIMIT) {
        observers_limits = OBSERVERS_LIMIT;
    }
    else {
        observers_limits = (UINT32)events_characteristics[e].specific_observers_limits;
        VMM_ASSERT(observers_limits <= OBSERVERS_LIMIT);
    }
    VMM_ASSERT(observers_limits > 0);

    return observers_limits;
}


UINT32 event_manager_initialize(UINT32 num_of_host_cpus)
{
    PEVENT_ENTRY general_event;
    int i;
    GUEST_HANDLE guest = NULL;
    GUEST_ID guest_id = INVALID_GUEST_ID;
    GUEST_ECONTEXT context;

    /*
     * Assert that all events are registered both in events_characteristics
     * and in the events enumeration UVMM_EVENT_INTERNAL
     */
    VMM_ASSERT(ARRAY_SIZE(events_characteristics) == EVENTS_COUNT);
    host_physical_cpus = num_of_host_cpus;
    vmm_memset(&event_mgr, 0, sizeof(event_mgr));
    event_mgr.gcpu_events = hash64_create_default_hash(host_physical_cpus * host_physical_cpus);
    for(i = 0; i < EVENTS_COUNT; i++) {
        general_event = &(event_mgr.general_event[i]);
        lock_initialize_read_write_lock(&(general_event->lock));
    }
    list_init(&event_mgr.guest_events);
    for(guest = guest_first(&context); guest != NULL; guest = guest_next(&context)) {
        guest_id = guest_get_id(guest);
        event_manager_guest_initialize(guest_id);
    }
    // make sure gcpus added after this point get observer tables too
    event_global_register(EVENT_GCPU_ADD, event_manager_add_gcpu);
    return 0;
}

UINT32 event_manager_guest_initialize(GUEST_ID guest_id)
{
    GUEST_CPU_HANDLE gcpu;
    GUEST_GCPU_ECONTEXT gcpu_context;
    GUEST_HANDLE guest = guest_handle(guest_id);
    GUEST_EVENTS *p_new_guest_events;
    PEVENT_ENTRY event;
    int i;

    p_new_guest_events = vmm_malloc(sizeof(*p_new_guest_events));
    VMM_ASSERT(p_new_guest_events);
    vmm_memset(p_new_guest_events, 0, sizeof(*p_new_guest_events));
    // init lock for each event
    for(i = 0; i < EVENTS_COUNT; i++) {
        event = &(p_new_guest_events->event[i]);
        lock_initialize_read_write_lock(&(event->lock));
    }
    p_new_guest_events->guest_id = guest_id;
    /* for each guest/cpu we keep the event (callbacks) array */
    for(gcpu = guest_gcpu_first(guest, &gcpu_context); gcpu; gcpu = guest_gcpu_next(&gcpu_context)) {
        event_manager_gcpu_initialize(gcpu);
    }
    list_add(&event_mgr.guest_events, &p_new_guest_events->link);

    return 0;
}

#pragma warning( push )
#pragma warning( disable : 4100 ) // disable non-referenced formal parameters

static BOOLEAN event_manager_add_gcpu(GUEST_CPU_HANDLE gcpu, void *pv UNUSED)
{
    event_manager_gcpu_initialize(gcpu);
    return TRUE;
}

#pragma warning( pop )
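/*
 * Initialization order, as a sketch (the call sites live elsewhere in the
 * VMM; the sequencing shown is what this file's code implies):
 *
 *   event_manager_initialize(num_host_cpus);   // once at boot: global tables
 *                                              // plus tables for existing guests
 *   event_manager_guest_initialize(guest_id);  // when a guest is created later
 *
 * gcpus added after boot need no explicit call: event_manager_initialize()
 * registered event_manager_add_gcpu() on EVENT_GCPU_ADD, so raising
 * EVENT_GCPU_ADD for the new gcpu allocates its observer table.
 */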
UINT32 event_manager_gcpu_initialize(GUEST_CPU_HANDLE gcpu)
{
    const VIRTUAL_CPU_ID* p_vcpu = NULL;
    PCPU_EVENTS gcpu_events = NULL;
    PEVENT_ENTRY event = NULL;
    int i;

#ifdef JLMDEBUG1
    bprint("event_manager_gcpu_initialize\n");
#endif
    p_vcpu = guest_vcpu(gcpu);
    VMM_ASSERT(p_vcpu);
    gcpu_events = (CPU_EVENTS *) vmm_malloc(sizeof(CPU_EVENTS));
    VMM_ASSERT(gcpu_events);

    VMM_LOG(mask_anonymous, level_trace,
        "event mgr add gcpu guest id=%d cpu id=%d with key %p\n",
        p_vcpu->guest_id, p_vcpu->guest_cpu_id,
        (UINT64) (p_vcpu->guest_id << (8 * sizeof(GUEST_ID)) | p_vcpu->guest_cpu_id));
    hash64_insert(event_mgr.gcpu_events,
        (UINT64) (p_vcpu->guest_id << (8 * sizeof(GUEST_ID)) | p_vcpu->guest_cpu_id),
        (UINT64) gcpu_events);
    // init lock for each event
    for(i = 0; i < EVENTS_COUNT; i++) {
        event = &(gcpu_events->event[i]);
        lock_initialize_read_write_lock(&(event->lock));
    }
    return 0;
}

#ifdef INCLUDE_UNUSED_CODE
void event_cleanup_event_manger(void)
{
    return;
}
#endif

BOOLEAN event_register_internal(PEVENT_ENTRY p_event,
                UVMM_EVENT_INTERNAL e, event_callback call)
{
    UINT32 i = 0;
    UINT32 observers_limits;
    BOOLEAN registered = FALSE;

#ifdef JLMDEBUG
    bprint("event_register_internal\n");
#endif
    observers_limits = event_observers_limit(e);
    lock_acquire_writelock(&p_event->lock);
    // Find free observer slot
    while (i < observers_limits && p_event->call[i])
        ++i;
    if (i < observers_limits) {
        p_event->call[i] = call;
        registered = TRUE;
    }
    else {
        VMM_DEADLOOP();  // no free slot: observer limit for this event exceeded
    }
    lock_release_writelock(&p_event->lock);
    return registered;
}


BOOLEAN event_global_register(UVMM_EVENT_INTERNAL e, event_callback call)
{
    PEVENT_ENTRY list;

#ifdef JLMDEBUG
    bprint("event_global_register\n");
#endif
    if (call == 0)
        return FALSE;
    if (e >= EVENTS_COUNT)
        return FALSE;
    if (0 == (events_characteristics[e].scope & EVENT_GLOBAL_SCOPE))
        return FALSE;
    list = get_global_observers(e);
    return event_register_internal(list, e, call);
}


#ifdef ENABLE_VTLB
BOOLEAN event_guest_register(UVMM_EVENT_INTERNAL e,
                GUEST_HANDLE guest, event_callback call)
{
    PEVENT_ENTRY list;
    BOOLEAN registered = FALSE;

    if (call == 0)
        return FALSE;
    if (e >= EVENTS_COUNT)
        return FALSE;
    if (0 == (events_characteristics[e].scope & EVENT_GUEST_SCOPE))
        return FALSE;
    list = get_guest_observers(e, guest);
    if (NULL != list) {
        registered = event_register_internal(list, e, call);
    }
    return registered;
}
#endif


BOOLEAN event_gcpu_register(UVMM_EVENT_INTERNAL e,
                GUEST_CPU_HANDLE gcpu, event_callback call)
{
    PEVENT_ENTRY list;
    BOOLEAN registered = FALSE;

#ifdef JLMDEBUG
    bprint("event_gcpu_register\n");
#endif
    if (call == 0)
        return FALSE;
    if (e >= EVENTS_COUNT)
        return FALSE;
    if (0 == (events_characteristics[e].scope & EVENT_GCPU_SCOPE))
        return FALSE;
    list = get_gcpu_observers(e, gcpu);
    if (NULL != list) {
        registered = event_register_internal(list, e, call);
    }
    return registered;
}
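/*
 * Registration usage, as a hedged sketch: a hypothetical EPT module wanting
 * per-gcpu notification of EPT violations (EVENT_GCPU_EPT_VIOLATION is
 * declared EVENT_GCPU_SCOPE with an observer limit of 1) might do:
 *
 *   static BOOLEAN my_ept_violation_handler(GUEST_CPU_HANDLE gcpu, void *pv)
 *   {
 *       // pv points at the event-specific structure supplied by the raiser
 *       return TRUE;
 *   }
 *
 *   if (!event_gcpu_register(EVENT_GCPU_EPT_VIOLATION, gcpu,
 *                            my_ept_violation_handler)) {
 *       VMM_LOG(mask_anonymous, level_error, "EPT observer registration failed\n");
 *   }
 *
 * The handler name and the failure handling are illustrative only.
 */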
#ifdef INCLUDE_UNUSED_CODE
BOOLEAN event_unregister_internal(PEVENT_ENTRY p_event,
                UVMM_EVENT_INTERNAL e, event_callback call)
{
    UINT32 i = 0;
    UINT32 observers_limits;
    BOOLEAN unregistered = FALSE;

    observers_limits = event_observers_limit(e);
    lock_acquire_writelock(&p_event->lock);
    while (i < observers_limits && p_event->call[i]) {
        if (p_event->call[i] == call) {
            unregistered = TRUE;
            // Match, delete entry (promote following entries, one entry forward)
            while ((i+1) < observers_limits && p_event->call[i+1]) {
                p_event->call[i] = p_event->call[i+1];
                ++i;
            }
            p_event->call[i] = 0;
            break;
        }
        ++i;
    } // while (i < observers_limits && p_event->call[i])
    lock_release_writelock(&p_event->lock);
    return unregistered;
}

BOOLEAN event_global_unregister(UVMM_EVENT_INTERNAL e, event_callback call)
{
    PEVENT_ENTRY list;
    BOOLEAN unregistered = FALSE;

    if (call == 0)
        return FALSE;
    if (e >= EVENTS_COUNT)
        return FALSE;
    list = get_global_observers(e);
    if (NULL != list) {
        unregistered = event_unregister_internal(list, e, call);
    }
    return unregistered;
}

BOOLEAN event_guest_unregister(UVMM_EVENT_INTERNAL e,
                GUEST_HANDLE guest, event_callback call)
{
    PEVENT_ENTRY list;
    BOOLEAN unregistered = FALSE;

    if (call == 0)
        return FALSE;
    if (e >= EVENTS_COUNT)
        return FALSE;
    list = get_guest_observers(e, guest);
    if (NULL != list) {
        unregistered = event_unregister_internal(list, e, call);
    }
    return unregistered;
}
#endif

#ifdef ENABLE_VTLB
BOOLEAN event_gcpu_unregister(UVMM_EVENT_INTERNAL e,
                GUEST_CPU_HANDLE gcpu, event_callback call)
{
    PEVENT_ENTRY list;
    BOOLEAN unregistered = FALSE;

    if (call == 0) return FALSE;
    if (e >= EVENTS_COUNT) return FALSE;

    list = get_gcpu_observers(e, gcpu);
    if (NULL != list) {
        unregistered = event_unregister_internal(list, e, call);
    }
    return unregistered;
}
#endif

BOOLEAN event_raise_internal(PEVENT_ENTRY p_event, UVMM_EVENT_INTERNAL e,
                GUEST_CPU_HANDLE gcpu, void *p)
{
    UINT32 i = 0;
    UINT32 observers_limits;
    event_callback call[OBSERVERS_LIMIT];
    BOOLEAN event_is_handled = FALSE;

    observers_limits = event_observers_limit(e);
    lock_acquire_readlock(&p_event->lock);
#ifdef JLMDEBUG1
    bprint("event_raise_internal observers limit: %d, LIMIT: %d\n",
        observers_limits, OBSERVERS_LIMIT);
#endif
    VMM_ASSERT(observers_limits <= OBSERVERS_LIMIT);
    // snapshot the observer list so callbacks run without holding the lock
    vmm_memcpy(call, p_event->call, sizeof(call));
    lock_release_readlock(&p_event->lock);
    while(i < observers_limits && call[i]) {
        call[i](gcpu, p);
        event_is_handled = TRUE;
        ++i;
    }
    return event_is_handled;
}


BOOLEAN event_global_raise(UVMM_EVENT_INTERNAL e, GUEST_CPU_HANDLE gcpu, void *p)
{
    PEVENT_ENTRY list;

    list = get_global_observers(e);
    return event_raise_internal(list, e, gcpu, p);
}


BOOLEAN event_guest_raise(UVMM_EVENT_INTERNAL e, GUEST_CPU_HANDLE gcpu, void *p)
{
    GUEST_HANDLE guest;
    PEVENT_ENTRY list;
    BOOLEAN event_handled = FALSE;

#ifdef JLMDEBUG1
    bprint("event_guest_raise gcpu: %p\n", gcpu);
#endif
    VMM_ASSERT(gcpu);
    guest = gcpu_guest_handle(gcpu);
    VMM_ASSERT(guest);
    list = get_guest_observers(e, guest);
    if (NULL != list) {
        event_handled = event_raise_internal(list, e, gcpu, p);
    }
    return event_handled;
}


BOOLEAN event_gcpu_raise(UVMM_EVENT_INTERNAL e, GUEST_CPU_HANDLE gcpu, void *p)
{
    PEVENT_ENTRY list;
    BOOLEAN event_handled = FALSE;

#ifdef JLMDEBUG1
    bprint("event_gcpu_raise\n");
#endif
    list = get_gcpu_observers(e, gcpu);
    if (NULL != list) {
        event_handled = event_raise_internal(list, e, gcpu, p);
    }
    return event_handled;
}


BOOLEAN event_raise(UVMM_EVENT_INTERNAL e, GUEST_CPU_HANDLE gcpu, void *p)
{
    BOOLEAN raised = FALSE;

#ifdef JLMDEBUG
    bprint("event_raise(%d), EVENTS_COUNT: %d, gcpu: %p\n",
        e, EVENTS_COUNT, gcpu);
#endif
    VMM_ASSERT(e < EVENTS_COUNT);
    if (e < EVENTS_COUNT) {
        if (NULL != gcpu) {  // try to raise GCPU-scope, then GUEST-scope events
            raised = event_gcpu_raise(e, gcpu, p);
            raised = raised || event_guest_raise(e, gcpu, p);
        }
        raised = raised || event_global_raise(e, gcpu, p);
    }
    return raised;
}
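/*
 * Raising usage, as a hedged sketch: a vmexit handler (hypothetical name,
 * and the payload type is assumed; the real event-specific structures are
 * defined alongside the event declarations) would raise the event and fall
 * back to default handling when no observer consumed it:
 *
 *   EVENT_GCPU_EPT_VIOLATION_DATA data;   // assumed payload type
 *   ...
 *   if (!event_raise(EVENT_GCPU_EPT_VIOLATION, gcpu, &data)) {
 *       // no gcpu-, guest- or global-scope observer handled the violation
 *       handle_unexpected_ept_violation(gcpu);  // illustrative fallback
 *   }
 */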
#ifdef ENABLE_VTLB
BOOLEAN event_is_registered(UVMM_EVENT_INTERNAL e, GUEST_CPU_HANDLE gcpu,
                event_callback call)
{
    PEVENT_ENTRY list;
    UINT32 i = 0;
    UINT32 observers_limits;
    BOOLEAN res = FALSE;

    if (call == 0)
        return FALSE;
    if (e >= EVENTS_COUNT)
        return FALSE;
    list = get_gcpu_observers(e, gcpu);
    if (list == NULL)
        return FALSE;
    observers_limits = event_observers_limit(e);
    lock_acquire_readlock(&list->lock);
    // Search the registered observers for a matching callback
    while (i < observers_limits && list->call[i]) {
        if (list->call[i] == call) {
            res = TRUE;
            break;
        }
        ++i;
    }
    lock_release_readlock(&list->lock);
    return res;
}
#endif
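/*
 * Concurrency note: writers (event_register_internal and, when compiled in,
 * event_unregister_internal) mutate call[] under the write lock, while
 * event_raise_internal only snapshots call[] under the read lock and then
 * invokes the callbacks with no lock held. A callback can therefore still be
 * invoked shortly after it has been unregistered on another cpu; observers
 * must tolerate such late deliveries.
 */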