github.com/jlmucb/cloudproxy@v0.0.0-20170830161738-b5aa0b619bc4/cpvmm/vmm/vmexit/vmcall.c (about)

/*
 * Copyright (c) 2013 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *     http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "file_codes.h"
#define VMM_DEADLOOP()          VMM_DEADLOOP_LOG(VMCALL_C)
#define VMM_ASSERT(__condition) VMM_ASSERT_LOG(VMCALL_C, __condition)
#include "vmm_defs.h"
#include "heap.h"
#include "hw_utils.h"
#include "guest.h"
#include "guest_cpu.h"
#include "gpm_api.h"
#include "vmexit.h"
#include "vmcall.h"
#include "vmm_dbg.h"
#include "list.h"
#include "lock.h"
#include "memory_allocator.h"
#include "../guest/guest_cpu/unrestricted_guest.h"
#ifdef JLMDEBUG
#include "jlmdebug.h"
#endif


#ifdef VMCALL_NOT_ALLOWED_FROM_RING_1_TO_3
#include "guest_cpu_vmenter_event.h"

// Mask for the low two bits of a segment selector (the RPL field)
#define DESCRIPTOR_CPL_BIT 0x3
#endif

// MAX_ACTIVE_VMCALLS_PER_GUEST must be a power of 2
#define MAX_ACTIVE_VMCALLS_PER_GUEST   64
#define UNALLOCATED_VMCALL             VMCALL_LAST_USED_INTERNAL

#define VMCALL_IS_VALID(__vmcall_id) ((__vmcall_id) != UNALLOCATED_VMCALL)

typedef struct {
    VMCALL_HANDLER  vmcall_handler;
    BOOLEAN         vmcall_special; // e.g. for emulator termination
    VMCALL_ID       vmcall_id;
} VMCALL_ENTRY;

typedef struct {
    GUEST_ID guest_id;
    UINT8    padding[2];
    UINT32   filled_entries_count;
    VMCALL_ENTRY vmcall_table[MAX_ACTIVE_VMCALLS_PER_GUEST];
    LIST_ELEMENT list[1];
} GUEST_VMCALL_ENTRIES;

typedef struct {
    LIST_ELEMENT guest_vmcall_entries[1];
} VMCALL_GLOBAL_STATE;

static VMCALL_GLOBAL_STATE         vmcall_global_state;  // for all guests

static VMM_STATUS vmcall_unimplemented(GUEST_CPU_HANDLE gcpu, ADDRESS *arg1, ADDRESS *arg2, ADDRESS *arg3);
VMM_STATUS vmcall_print_string(GUEST_CPU_HANDLE gcpu, ADDRESS *p_string, ADDRESS *is_real_guest, ADDRESS *arg3);

static VMEXIT_HANDLING_STATUS vmcall_common_handler(GUEST_CPU_HANDLE gcpu);

static GUEST_VMCALL_ENTRIES* vmcall_find_guest_vmcalls(GUEST_ID guest_id);

static VMCALL_ENTRY* vmcall_get_vmcall_entry(GUEST_ID     guest_id,
                                             VMCALL_ID    vmcall_id);
#ifdef ENABLE_INT15_VIRTUALIZATION
BOOLEAN handle_int15_vmcall(GUEST_CPU_HANDLE gcpu);
#endif
void vmcall_intialize( void )
{
    vmm_memset( &vmcall_global_state, 0, sizeof(vmcall_global_state) );
    list_init(vmcall_global_state.guest_vmcall_entries);
}

void vmcall_guest_intialize(
    GUEST_ID    guest_id)
{
    UINT32       id;
    GUEST_VMCALL_ENTRIES *guest_vmcalls;
    VMCALL_ENTRY *vmcall_entry;

    VMM_LOG(mask_uvmm, level_trace,"vmcall_guest_intialize start\r\n");
    guest_vmcalls = (GUEST_VMCALL_ENTRIES *)vmm_malloc(sizeof(GUEST_VMCALL_ENTRIES));
    VMM_ASSERT(guest_vmcalls);
    guest_vmcalls->guest_id = guest_id;
    guest_vmcalls->filled_entries_count = 0;
    list_add(vmcall_global_state.guest_vmcall_entries, guest_vmcalls->list);
    vmexit_install_handler( guest_id, vmcall_common_handler,
        Ia32VmxExitBasicReasonVmcallInstruction);
    for (id = 0; id < MAX_ACTIVE_VMCALLS_PER_GUEST; ++id) {
        vmcall_entry = &guest_vmcalls->vmcall_table[id];
        vmcall_entry->vmcall_handler = vmcall_unimplemented;
        vmcall_entry->vmcall_id = UNALLOCATED_VMCALL;
    }
    VMM_LOG(mask_uvmm, level_trace,"vmcall_guest_intialize end\r\n");
}

void vmcall_register(
    GUEST_ID        guest_id,
    VMCALL_ID       vmcall_id,
    VMCALL_HANDLER  handler,
    BOOLEAN         special_call)
{
    VMCALL_ENTRY *vmcall_entry;

    VMM_ASSERT(NULL != handler);
    // if already registered, check that all params are the same
    vmcall_entry = vmcall_get_vmcall_entry(guest_id, vmcall_id);
    if (NULL != vmcall_entry) {
        if ((vmcall_entry->vmcall_id      == vmcall_id) &&
            (vmcall_entry->vmcall_handler == handler)   &&
            (vmcall_entry->vmcall_special == special_call)) {
            return;
        }
        VMM_LOG(mask_uvmm, level_trace,
                "VMCALL %d is already registered for Guest %d with different params\n",
                vmcall_id, guest_id);
        VMM_ASSERT(FALSE);
    }

    // grab a free slot: unused entries keep UNALLOCATED_VMCALL as their id
    vmcall_entry = vmcall_get_vmcall_entry(guest_id, UNALLOCATED_VMCALL);
    VMM_ASSERT(vmcall_entry);
    VMM_LOG(mask_uvmm, level_trace,"vmcall_register: guest %d vmcall_id %d vmcall_entry %p\r\n",
        guest_id, vmcall_id, vmcall_entry);

    vmcall_entry->vmcall_handler = handler;
    vmcall_entry->vmcall_special = special_call;
    vmcall_entry->vmcall_id      = vmcall_id;
}

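/*
 * Editorial sketch (not part of the original file, compiled out by an
 * example-only guard): how a component might register a handler with the
 * API above. The handler signature matches VMCALL_HANDLER as used in this
 * file; VMCALL_MY_SERVICE and my_service_handler are hypothetical
 * placeholders, not names from this codebase.
 */
#ifdef VMCALL_REGISTRATION_EXAMPLE
static VMM_STATUS my_service_handler(GUEST_CPU_HANDLE gcpu,
                                     ADDRESS *arg1, ADDRESS *arg2, ADDRESS *arg3)
{
    (void)gcpu;
    // Arguments arrive from guest RDX/RDI/RSI; values written through the
    // pointers propagate back to the guest only when VMM_OK is returned.
    *arg1 = *arg1 + *arg2 + *arg3;
    return VMM_OK;
}

static void my_component_setup(GUEST_ID guest_id)
{
    // vmcall_guest_intialize(guest_id) must already have run for this guest.
    // FALSE marks this as a regular (non-special) VMCALL.
    vmcall_register(guest_id, VMCALL_MY_SERVICE, my_service_handler, FALSE);
}
#endif
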
#ifdef VMCALL_NOT_ALLOWED_FROM_RING_1_TO_3

// Return TRUE if the guest issuing the VMCALL is running in ring 0;
// otherwise inject a #UD exception and return FALSE.
BOOLEAN vmcall_check_guest_dpl_is_ring0(GUEST_CPU_HANDLE gcpu)
{
    VMCS_OBJECT* vmcs = gcpu_get_vmcs(gcpu);
    UINT64 guest_cs_selector = vmcs_read(vmcs, VMCS_GUEST_CS_SELECTOR);

    // the low two bits of the CS selector (the RPL field) equal the CPL
    if (BITMAP_GET(guest_cs_selector, DESCRIPTOR_CPL_BIT) == 0) {
        return TRUE;
    }
    VMM_DEBUG_CODE(VMM_LOG(mask_uvmm, level_error,
                        "CPU%d: %s: Error: VMCALL invoked from ring > 0. CPL=%d.\n",
                        hw_cpu_id(), __FUNCTION__,
                        BITMAP_GET(guest_cs_selector, DESCRIPTOR_CPL_BIT)));
    gcpu_inject_invalid_opcode_exception(gcpu);
    return FALSE;
}
#endif

// Common VMEXIT handler for the VMCALL instruction. The guest passes
// VMM_NATIVE_VMCALL_SIGNATURE in RAX, the VMCALL_ID in RCX, and up to
// three arguments in RDX, RDI and RSI.
VMEXIT_HANDLING_STATUS vmcall_common_handler(GUEST_CPU_HANDLE gcpu)
{
    GUEST_HANDLE guest      = gcpu_guest_handle(gcpu);
    GUEST_ID     guest_id   = guest_get_id(guest);
    VMCALL_ID    vmcall_id;
    ADDRESS      arg1, arg2, arg3;
    VMM_STATUS   ret_value;
    VMCALL_HANDLER vmcall_function;
    BOOLEAN      is_vmcall_special = FALSE;
    VMCALL_ENTRY *vmcall_entry = NULL;
    VMEXIT_HANDLING_STATUS handle_status;

#ifdef ENABLE_INT15_VIRTUALIZATION
    if (is_unrestricted_guest_supported())
        if (handle_int15_vmcall(gcpu))
            return VMEXIT_HANDLED;
#endif
#ifdef VMCALL_NOT_ALLOWED_FROM_RING_1_TO_3
    if (!vmcall_check_guest_dpl_is_ring0(gcpu))
        return VMEXIT_HANDLED;
#endif

    vmcall_id = (VMCALL_ID) gcpu_get_native_gp_reg(gcpu, IA32_REG_RCX);
    if (VMM_NATIVE_VMCALL_SIGNATURE == gcpu_get_native_gp_reg(gcpu, IA32_REG_RAX)) {
        vmcall_entry = vmcall_get_vmcall_entry(guest_id, vmcall_id);
    }
    if (NULL != vmcall_entry) {
        VMM_ASSERT(vmcall_entry->vmcall_id == vmcall_id);

        vmcall_function = vmcall_entry->vmcall_handler;
        is_vmcall_special = vmcall_entry->vmcall_special;
    }
    else {
        if (GUEST_LEVEL_2 == gcpu_get_guest_level(gcpu)) {
            // VMCALL will be delivered to the level#1 VMM for processing
            vmcall_function = NULL;
        }
        else {
            VMM_LOG(mask_uvmm, level_trace,"ERROR: vmcall %d is not implemented\n", vmcall_id);
            vmcall_function = vmcall_unimplemented;
            is_vmcall_special = FALSE;
        }
    }
    if (NULL != vmcall_function) {
        if (TRUE == is_vmcall_special) {
            vmcall_function(gcpu, NULL, NULL, NULL);
        }
        else {
            arg1 = gcpu_get_native_gp_reg(gcpu, IA32_REG_RDX);
            arg2 = gcpu_get_native_gp_reg(gcpu, IA32_REG_RDI);
            arg3 = gcpu_get_native_gp_reg(gcpu, IA32_REG_RSI);

            /* Invoke the vmcall_function registered for this vmcall_id */
            ret_value = vmcall_function(gcpu, &arg1, &arg2, &arg3);

            if (ret_value == VMM_OK) {
                // return arguments back to the Guest, in case they were changed
                gcpu_set_native_gp_reg(gcpu, IA32_REG_RDX, arg1);
                gcpu_set_native_gp_reg(gcpu, IA32_REG_RDI, arg2);
                gcpu_set_native_gp_reg(gcpu, IA32_REG_RSI, arg3);

                /* Skip the VMCALL instruction only if the handler returned VMM_OK */
                gcpu_skip_guest_instruction(gcpu);
            }
        }
        handle_status = VMEXIT_HANDLED;
    }
    else {
        VMM_LOG(mask_uvmm, level_error,
                "CPU%d: %s: Error: VMEXIT_NOT_HANDLED\n",
                hw_cpu_id(), __FUNCTION__);
        handle_status = VMEXIT_NOT_HANDLED;
    }
    return handle_status;
}

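/*
 * Editorial sketch (not part of the original file, compiled out by an
 * example-only guard): how a ring-0 guest could issue a VMCALL matching
 * the convention dispatched above, assuming GCC-style inline assembly.
 * On VMM_OK the hypervisor writes the possibly updated arguments back to
 * RDX/RDI/RSI and advances the guest RIP past the VMCALL instruction; the
 * handler's status is not returned to the guest in RAX.
 */
#ifdef VMCALL_GUEST_SIDE_EXAMPLE
static inline void guest_issue_vmcall(UINT64 vmcall_id, UINT64 *arg1,
                                      UINT64 *arg2, UINT64 *arg3)
{
    UINT64 rax = VMM_NATIVE_VMCALL_SIGNATURE;  // identifies a native VMCALL
    UINT64 rdx = *arg1, rdi = *arg2, rsi = *arg3;

    __asm__ __volatile__("vmcall"
                         : "+a"(rax), "+d"(rdx), "+D"(rdi), "+S"(rsi)
                         : "c"(vmcall_id)
                         : "memory");

    *arg1 = rdx;   // arguments may have been updated by the handler
    *arg2 = rdi;
    *arg3 = rsi;
}
#endif
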
#pragma warning( push )
#pragma warning( disable : 4100 )  // Suppress warnings about unreferenced formal parameters
VMM_STATUS vmcall_unimplemented( GUEST_CPU_HANDLE gcpu USED_IN_DEBUG_ONLY,
    ADDRESS *arg1 UNUSED, ADDRESS *arg2 UNUSED, ADDRESS *arg3 UNUSED)
{
    VMM_LOG(mask_uvmm, level_error,
                "CPU%d: %s: Error: Unimplemented VMCALL invoked on Guest ",
                hw_cpu_id(), __FUNCTION__);
    PRINT_GCPU_IDENTITY(gcpu);
    VMM_LOG(mask_uvmm, level_error,"\n");
#ifdef ENABLE_TMSL_API_PROTECTION
    gcpu_inject_invalid_opcode_exception(gcpu);
#endif
    return VMM_ERROR;
}
#ifdef INCLUDE_UNUSED_CODE
VMM_STATUS vmcall_print_string( GUEST_CPU_HANDLE gcpu,
    ADDRESS *string_gva, ADDRESS *is_real_guest, ADDRESS *arg3 UNUSED)
{
    if (TRUE == *is_real_guest) {
        GUEST_HANDLE    guest_handle;
        GPM_HANDLE      guest_phy_memory;
        GPA             string_gpa;
        HVA             string_hva;

        string_gpa = *string_gva; // TODO:: translate GVA to GPA (do guest page walk)

        // translate GPA to HVA
        guest_handle = gcpu_guest_handle(gcpu);
        VMM_ASSERT(guest_handle);
        guest_phy_memory = gcpu_get_current_gpm(guest_handle);
        VMM_ASSERT(guest_phy_memory);
        if (FALSE == gpm_gpa_to_hva(guest_phy_memory, string_gpa, &string_hva)) {
            VMM_LOG(mask_uvmm, level_trace,"Bad VM Print\n");
        }
        else {
            VMM_LOG(mask_uvmm, level_trace,"%s", (char *) string_hva);
        }
    }
    else {
        // it is Host memory space, so GVA == HVA
        VMM_LOG(mask_uvmm, level_trace,"%s", (char *) *string_gva);
    }

    return VMM_OK;
}
#endif

#pragma warning( pop )

static
GUEST_VMCALL_ENTRIES* vmcall_find_guest_vmcalls(GUEST_ID guest_id)
{
    LIST_ELEMENT *iter = NULL;
    GUEST_VMCALL_ENTRIES *guest_vmcalls = NULL;

    LIST_FOR_EACH(vmcall_global_state.guest_vmcall_entries, iter) {
        guest_vmcalls = LIST_ENTRY(iter, GUEST_VMCALL_ENTRIES, list);
        if (guest_vmcalls->guest_id == guest_id) {
            return guest_vmcalls;
        }
    }
    return NULL;
}

static VMCALL_ENTRY* find_guest_vmcall_entry( GUEST_VMCALL_ENTRIES* guest_vmcalls,
                                              VMCALL_ID call_id )
{
    UINT32 idx;

    for (idx = 0; idx < MAX_ACTIVE_VMCALLS_PER_GUEST; ++idx) {
        if (guest_vmcalls->vmcall_table[idx].vmcall_id == call_id) {
            return &(guest_vmcalls->vmcall_table[idx]);
        }
    }
    return NULL;
}

static VMCALL_ENTRY* vmcall_get_vmcall_entry(GUEST_ID guest_id, VMCALL_ID vmcall_id)
{
    GUEST_VMCALL_ENTRIES *guest_vmcalls;
    VMCALL_ENTRY *vmcall_entry;

    guest_vmcalls = vmcall_find_guest_vmcalls(guest_id);
    if (NULL == guest_vmcalls) {
        VMM_ASSERT(0);
        return NULL;
    }
    vmcall_entry = find_guest_vmcall_entry(guest_vmcalls, vmcall_id);
    return vmcall_entry;
}