github.com/jlmucb/cloudproxy@v0.0.0-20170830161738-b5aa0b619bc4/cpvmm/vmm/vmx/vmcs.c (about)

     1  /*
     2   * Copyright (c) 2013 Intel Corporation
     3   *
     4   * Licensed under the Apache License, Version 2.0 (the "License");
     5   * you may not use this file except in compliance with the License.
     6   * You may obtain a copy of the License at
     7   *     http://www.apache.org/licenses/LICENSE-2.0
     8   * Unless required by applicable law or agreed to in writing, software
     9   * distributed under the License is distributed on an "AS IS" BASIS,
    10   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    11   * See the License for the specific language governing permissions and
    12   * limitations under the License.
    13   */
    14  
    15  #include "vmm_defs.h"
    16  #include "vmm_dbg.h"
    17  #include "heap.h"
    18  #include "vmx_vmcs.h"
    19  #include "vmcs_init.h"
    20  #include "vmcs_api.h"
    21  #include "libc.h"
    22  #include "host_memory_manager_api.h"
    23  #include "memory_allocator.h"
    24  #include "vmcs_internal.h"
    25  #include "cli.h"
    26  #include "vmm_api.h"
    27  #include "scheduler.h"
    28  #include "isr.h"
    29  #include "memory_dump.h"
    30  #include "file_codes.h"
    31  #define VMM_DEADLOOP()          VMM_DEADLOOP_LOG(VMCS_C)
    32  #define VMM_ASSERT(__condition) VMM_ASSERT_LOG(VMCS_C, __condition)
    33  #ifdef JLMDEBUG
    34  #include "jlmdebug.h"
    35  #endif
    36  
    37  #pragma warning (disable : 4710)
    38  
// Shorthand for the access-rights values stored in g_field_data[].access.
#define NO_EXIST            VMCS_NOT_EXISTS
#define READONLY            VMCS_READABLE
#define WRITABLE           (VMCS_READABLE | VMCS_WRITABLE)
#define WRITABLE_IN_CACHE  (VMCS_READABLE | VMCS_WRITABLE_IN_CACHE)


// Test the per-field access rights recorded in g_field_data.
// A field counts as writable if it is writable either directly or in cache.
#define FIELD_IS_READABLE(__field)  (0 != (g_field_data[__field].access & VMCS_READABLE))
#define FIELD_IS_WRITEABLE(__field) (0 != (g_field_data[__field].access & (VMCS_WRITABLE | VMCS_WRITABLE_IN_CACHE)))

// Values for VMCS_ENCODING.supports_high_encoding: whether a separate
// "high" (upper 32 bits) encoding exists for the field.
#define FULL_ENC_ONLY   0
#define SUPP_HIGH_ENC   1

//
// Minimum size of allocated MSR list
//
#define MIN_SIZE_OF_MSR_LIST  4
    55  
    56  
// Access-rights byte for one VMCS field (combination of VMCS_READABLE /
// VMCS_WRITABLE / VMCS_WRITABLE_IN_CACHE, or VMCS_NOT_EXISTS).
typedef UINT8 FIELD_ACCESS_TYPE;

// field encoding and naming
// One row of the g_field_data metadata table.
typedef struct _VMCS_ENCODING {
    UINT32             encoding;                // hardware VMCS field encoding
    FIELD_ACCESS_TYPE  access;                  // current access rights
    UINT8              supports_high_encoding;  // FULL_ENC_ONLY or SUPP_HIGH_ENC
    UINT8              pad[2];
    const char*        name;                    // printable name for debug output
} VMCS_ENCODING;
    67  
// Per-field VMCS metadata, indexed by the VMCS_FIELD enumeration — the row
// order must match that enum exactly (the terminating VMCS_NO_COMPONENT row
// corresponds to VMCS_FIELD_COUNT).  Columns: hardware encoding, access
// rights, high-encoding support flag, padding, printable name.
// NOTE: rows marked NO_EXIST may be upgraded to WRITABLE/READONLY at run
// time by enable_vmcs_2_0_fields() when the CPU reports the feature.
static VMCS_ENCODING g_field_data[] = {
    { VM_X_VPID,                                NO_EXIST,         FULL_ENC_ONLY, {0}, "VMCS_VPID" },
    { VM_X_EPTP_INDEX,                          NO_EXIST,         FULL_ENC_ONLY, {0}, "VMCS_EPTP_INDEX" },
    { VM_X_CONTROL_VECTOR_PIN_EVENTS,           WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_CONTROL_VECTOR_PIN_EVENTS" },
    { VM_X_CONTROL_VECTOR_PROCESSOR_EVENTS,     WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS" },
    { VM_X_CONTROL2_VECTOR_PROCESSOR_EVENTS,    WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_CONTROL2_VECTOR_PROCESSOR_EVENTS" },
    { VM_X_EXCEPTION_BITMAP,                    WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_EXCEPTION_BITMAP" },
    { VM_X_CR3_TARGET_COUNT,                    WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_CR3_TARGET_COUNT" },
    { VM_X_CR0_MASK,                            WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_CR0_MASK" },
    { VM_X_CR4_MASK,                            WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_CR4_MASK" },
    { VM_X_CR0_READ_SHADOW,                     WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_CR0_READ_SHADOW" },
    { VM_X_CR4_READ_SHADOW,                     WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_CR4_READ_SHADOW" },
    { VM_X_PAGE_FAULT_ERROR_CODE_MASK,          WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_PAGE_FAULT_ERROR_CODE_MASK" },
    { VM_X_PAGE_FAULT_ERROR_CODE_MATCH,         WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_PAGE_FAULT_ERROR_CODE_MATCH" },
    { VM_EXIT_CONTROL_VECTOR,                   WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_EXIT_CONTROL_VECTOR" },
    { VM_EXIT_MSR_STORE_COUNT,                  WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_EXIT_MSR_STORE_COUNT" },
    { VM_EXIT_MSR_LOAD_COUNT,                   WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_EXIT_MSR_LOAD_COUNT" },
    { VM_ENTER_CONTROL_VECTOR,                  WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_ENTER_CONTROL_VECTOR" },
    { VM_ENTER_INTERRUPT_INFO,                  WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_ENTER_INTERRUPT_INFO" },
    { VM_ENTER_EXCEPTION_ERROR_CODE,            WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_ENTER_EXCEPTION_ERROR_CODE" },
    { VM_ENTER_INSTRUCTION_LENGTH,              WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_ENTER_INSTRUCTION_LENGTH" },
    { VM_ENTER_MSR_LOAD_COUNT,                  WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_ENTER_MSR_LOAD_COUNT" },
    { VM_X_IO_BITMAP_ADDRESS_A,                 WRITABLE,         SUPP_HIGH_ENC, {0}, "VMCS_IO_BITMAP_ADDRESS_A" },
    { VM_X_IO_BITMAP_ADDRESS_B,                 WRITABLE,         SUPP_HIGH_ENC, {0}, "VMCS_IO_BITMAP_ADDRESS_B" },
    { VM_X_MSR_BITMAP_ADDRESS,                  WRITABLE,         SUPP_HIGH_ENC, {0}, "VMCS_MSR_BITMAP_ADDRESS" },
    { VM_EXIT_MSR_STORE_ADDRESS,                WRITABLE,         SUPP_HIGH_ENC, {0}, "VMCS_EXIT_MSR_STORE_ADDRESS" },
    { VM_EXIT_MSR_LOAD_ADDRESS,                 WRITABLE,         SUPP_HIGH_ENC, {0}, "VMCS_EXIT_MSR_LOAD_ADDRESS" },
    { VM_ENTER_MSR_LOAD_ADDRESS,                WRITABLE,         SUPP_HIGH_ENC, {0}, "VMCS_ENTER_MSR_LOAD_ADDRESS" },
    { VM_X_OSV_CONTROLLING_VMCS_ADDRESS,        WRITABLE,         SUPP_HIGH_ENC, {0}, "VMCS_OSV_CONTROLLING_VMCS_ADDRESS" },
    { VM_X_TSC_OFFSET,                          WRITABLE,         SUPP_HIGH_ENC, {0}, "VMCS_TSC_OFFSET" },
    { VM_EXIT_PHYSICAL_ADDRESS,                 NO_EXIST,         SUPP_HIGH_ENC, {0}, "VMCS_EXIT_INFO_GUEST_PHYSICAL_ADDRESS" },
    { VM_EXIT_INFO_INSTRUCTION_ERROR_CODE,      READONLY,         FULL_ENC_ONLY, {0}, "VMCS_EXIT_INFO_INSTRUCTION_ERROR_CODE" },
    { VM_EXIT_INFO_REASON,                      READONLY,         FULL_ENC_ONLY, {0}, "VMCS_EXIT_INFO_REASON" },
    { VM_EXIT_INFO_EXCEPTION_INFO,              READONLY,         FULL_ENC_ONLY, {0}, "VMCS_EXIT_INFO_EXCEPTION_INFO" },
    { VM_EXIT_INFO_EXCEPTION_ERROR_CODE,        READONLY,         FULL_ENC_ONLY, {0}, "VMCS_EXIT_INFO_EXCEPTION_ERROR_CODE" },
    { VM_EXIT_INFO_IDT_VECTORING,               WRITABLE_IN_CACHE,FULL_ENC_ONLY, {0}, "VMCS_EXIT_INFO_IDT_VECTORING" },
    { VM_EXIT_INFO_IDT_VECTORING_ERROR_CODE,    READONLY,         FULL_ENC_ONLY, {0}, "VMCS_EXIT_INFO_IDT_VECTORING_ERROR_CODE" },
    { VM_EXIT_INFO_INSTRUCTION_LENGTH,          READONLY,         FULL_ENC_ONLY, {0}, "VMCS_EXIT_INFO_INSTRUCTION_LENGTH" },
    { VM_EXIT_INFO_INSTRUCTION_INFO,            READONLY,         FULL_ENC_ONLY, {0}, "VMCS_EXIT_INFO_INSTRUCTION_INFO" },
    { VM_EXIT_INFO_QUALIFICATION,               READONLY,         FULL_ENC_ONLY, {0}, "VMCS_EXIT_INFO_QUALIFICATION" },
    { VM_EXIT_INFO_IO_RCX,                      READONLY,         FULL_ENC_ONLY, {0}, "VMCS_EXIT_INFO_IO_RCX" },
    { VM_EXIT_INFO_IO_RSI,                      READONLY,         FULL_ENC_ONLY, {0}, "VMCS_EXIT_INFO_IO_RSI" },
    { VM_EXIT_INFO_IO_RDI,                      READONLY,         FULL_ENC_ONLY, {0}, "VMCS_EXIT_INFO_IO_RDI" },
    { VM_EXIT_INFO_IO_RIP,                      READONLY,         FULL_ENC_ONLY, {0}, "VMCS_EXIT_INFO_IO_RIP" },
    { VM_EXIT_INFO_GUEST_LINEAR_ADDRESS,        READONLY,         FULL_ENC_ONLY, {0}, "VMCS_EXIT_INFO_GUEST_LINEAR_ADDRESS" },
    { VM_X_VIRTUAL_APIC_ADDRESS,                WRITABLE,         SUPP_HIGH_ENC, {0}, "VMCS_VIRTUAL_APIC_ADDRESS" },
    { VM_X_APIC_ACCESS_ADDRESS,                 WRITABLE,         SUPP_HIGH_ENC, {0}, "VMCS_APIC_ACCESS_ADDRESS" },
    { VM_EXIT_TPR_THRESHOLD,                    WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_EXIT_TPR_THRESHOLD" },
    { VM_X_EPTP_ADDRESS,                        NO_EXIST,         SUPP_HIGH_ENC, {0}, "VMCS_EPTP_ADDRESS" },
    { VM_X_PREEMTION_TIMER,                     NO_EXIST,         FULL_ENC_ONLY, {0}, "VMCS_PREEMPTION_TIMER" },
    // guest-state area
    { GUEST_CR0,                                WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_CR0" },
    { GUEST_CR3,                                WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_CR3" },
    { GUEST_CR4,                                WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_CR4" },
    { GUEST_DR7,                                WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_DR7" },
    { GUEST_ES_SELECTOR,                        WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_ES_SELECTOR" },
    { GUEST_ES_BASE,                            WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_ES_BASE" },
    { GUEST_ES_LIMIT,                           WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_ES_LIMIT" },
    { GUEST_ES_AR,                              WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_ES_AR" },
    { GUEST_CS_SELECTOR,                        WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_CS_SELECTOR" },
    { GUEST_CS_BASE,                            WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_CS_BASE" },
    { GUEST_CS_LIMIT,                           WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_CS_LIMIT" },
    { GUEST_CS_AR,                              WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_CS_AR" },
    { GUEST_SS_SELECTOR,                        WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_SS_SELECTOR" },
    { GUEST_SS_BASE,                            WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_SS_BASE" },
    { GUEST_SS_LIMIT,                           WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_SS_LIMIT" },
    { GUEST_SS_AR,                              WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_SS_AR" },
    { GUEST_DS_SELECTOR,                        WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_DS_SELECTOR" },
    { GUEST_DS_BASE,                            WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_DS_BASE" },
    { GUEST_DS_LIMIT,                           WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_DS_LIMIT" },
    { GUEST_DS_AR,                              WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_DS_AR" },
    { GUEST_FS_SELECTOR,                        WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_FS_SELECTOR" },
    { GUEST_FS_BASE,                            WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_FS_BASE" },
    { GUEST_FS_LIMIT,                           WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_FS_LIMIT" },
    { GUEST_FS_AR,                              WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_FS_AR" },
    { GUEST_GS_SELECTOR,                        WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_GS_SELECTOR" },
    { GUEST_GS_BASE,                            WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_GS_BASE" },
    { GUEST_GS_LIMIT,                           WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_GS_LIMIT" },
    { GUEST_GS_AR,                              WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_GS_AR" },
    { GUEST_LDTR_SELECTOR,                      WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_LDTR_SELECTOR" },
    { GUEST_LDTR_BASE,                          WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_LDTR_BASE" },
    { GUEST_LDTR_LIMIT,                         WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_LDTR_LIMIT" },
    { GUEST_LDTR_AR,                            WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_LDTR_AR" },
    { GUEST_TR_SELECTOR,                        WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_TR_SELECTOR" },
    { GUEST_TR_BASE,                            WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_TR_BASE" },
    { GUEST_TR_LIMIT,                           WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_TR_LIMIT" },
    { GUEST_TR_AR,                              WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_TR_AR" },
    { GUEST_GDTR_BASE,                          WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_GDTR_BASE" },
    { GUEST_GDTR_LIMIT,                         WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_GDTR_LIMIT" },
    { GUEST_IDTR_BASE,                          WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_IDTR_BASE" },
    { GUEST_IDTR_LIMIT,                         WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_IDTR_LIMIT" },
    { GUEST_ESP,                                WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_RSP" },
    { GUEST_EIP,                                WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_RIP" },
    { GUEST_EFLAGS,                             WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_RFLAGS" },
    { GUEST_PEND_DBE,                           WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_PEND_DBE" },
    { GUEST_WORKING_VMCS_PTR,                   WRITABLE,         SUPP_HIGH_ENC, {0}, "VMCS_GUEST_WORKING_VMCS_PTR" },
    { GUEST_DEBUG_CONTROL,                      WRITABLE,         SUPP_HIGH_ENC, {0}, "VMCS_GUEST_DEBUG_CONTROL" },
    { GUEST_INTERRUPTIBILITY,                   WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_INTERRUPTIBILITY" },
    { GUEST_SLEEP_STATE,                        WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_SLEEP_STATE" },
    { GUEST_SMBASE,                             WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_SMBASE" },
    { GUEST_SYSENTER_CS,                        WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_SYSENTER_CS" },
    { GUEST_SYSENTER_ESP,                       WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_SYSENTER_ESP" },
    { GUEST_SYSENTER_EIP,                       WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_GUEST_SYSENTER_EIP" },
    { GUEST_PAT,                                NO_EXIST,         SUPP_HIGH_ENC, {0}, "VMCS_GUEST_PAT" },
    { GUEST_EFER,                               WRITABLE,         SUPP_HIGH_ENC, {0}, "VMCS_GUEST_EFER" },
    { GUEST_IA32_PERF_GLOBAL_CTRL,              NO_EXIST,         SUPP_HIGH_ENC, {0}, "GUEST_IA32_PERF_GLOBAL_CTRL" },
    { GUEST_PDPTR0,                             NO_EXIST,         SUPP_HIGH_ENC, {0}, "VMCS_GUEST_PDPTR0" },
    { GUEST_PDPTR1,                             NO_EXIST,         SUPP_HIGH_ENC, {0}, "VMCS_GUEST_PDPTR1" },
    { GUEST_PDPTR2,                             NO_EXIST,         SUPP_HIGH_ENC, {0}, "VMCS_GUEST_PDPTR2" },
    { GUEST_PDPTR3,                             NO_EXIST,         SUPP_HIGH_ENC, {0}, "VMCS_GUEST_PDPTR3" },
    // host-state area
    { HOST_CR0,                                 WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_HOST_CR0" },
    { HOST_CR3,                                 WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_HOST_CR3" },
    { HOST_CR4,                                 WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_HOST_CR4" },
    { HOST_ES_SELECTOR,                         WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_HOST_ES_SELECTOR" },
    { HOST_CS_SELECTOR,                         WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_HOST_CS_SELECTOR" },
    { HOST_SS_SELECTOR,                         WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_HOST_SS_SELECTOR" },
    { HOST_DS_SELECTOR,                         WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_HOST_DS_SELECTOR" },
    { HOST_FS_SELECTOR,                         WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_HOST_FS_SELECTOR" },
    { HOST_FS_BASE,                             WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_HOST_FS_BASE" },
    { HOST_GS_SELECTOR,                         WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_HOST_GS_SELECTOR" },
    { HOST_GS_BASE,                             WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_HOST_GS_BASE" },
    { HOST_TR_SELECTOR,                         WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_HOST_TR_SELECTOR" },
    { HOST_TR_BASE,                             WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_HOST_TR_BASE" },
    { HOST_GDTR_BASE,                           WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_HOST_GDTR_BASE" },
    { HOST_IDTR_BASE,                           WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_HOST_IDTR_BASE" },
    { HOST_ESP,                                 WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_HOST_RSP" },
    { HOST_EIP,                                 WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_HOST_RIP" },
    { HOST_SYSENTER_CS,                         WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_HOST_SYSENTER_CS" },
    { HOST_SYSENTER_ESP,                        WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_HOST_SYSENTER_ESP" },
    { HOST_SYSENTER_EIP,                        WRITABLE,         FULL_ENC_ONLY, {0}, "VMCS_HOST_SYSENTER_EIP" },
    { HOST_PAT,                                 NO_EXIST,         SUPP_HIGH_ENC, {0}, "VMCS_HOST_PAT" },
    { HOST_EFER,                                WRITABLE,         SUPP_HIGH_ENC, {0}, "VMCS_HOST_EFER" },
    { HOST_IA32_PERF_GLOBAL_CTRL,               NO_EXIST,         SUPP_HIGH_ENC, {0}, "HOST_IA32_PERF_GLOBAL_CTRL" },
    { VM_X_CR3_TARGET_VALUE(0),                 WRITABLE,         FULL_ENC_ONLY, {0}, "VM_X_CR3_TARGET_VALUE_0" },
    { VM_X_CR3_TARGET_VALUE(1),                 WRITABLE,         FULL_ENC_ONLY, {0}, "VM_X_CR3_TARGET_VALUE_1" },
    { VM_X_CR3_TARGET_VALUE(2),                 WRITABLE,         FULL_ENC_ONLY, {0}, "VM_X_CR3_TARGET_VALUE_2" },
    { VM_X_CR3_TARGET_VALUE(3),                 WRITABLE,         FULL_ENC_ONLY, {0}, "VM_X_CR3_TARGET_VALUE_3" },
#ifdef FAST_VIEW_SWITCH
    { VM_X_VMFUNC_CONTROL,                      NO_EXIST,         SUPP_HIGH_ENC, {0}, "VMCS_VMFUNC_CONTROL" },
    { VM_X_VMFUNC_EPTP_LIST_ADDRESS,            NO_EXIST,         SUPP_HIGH_ENC, {0}, "VMCS_VMFUNC_EPTP_LIST_ADDRESS" },
#endif
    { VM_X_VE_INFO_ADDRESS,                     NO_EXIST,         SUPP_HIGH_ENC, {0}, "VMCS_VE_INFO_ADDRESS" },
    // terminator — must stay last (index VMCS_FIELD_COUNT)
    { VMCS_NO_COMPONENT,                        NO_EXIST,         FULL_ENC_ONLY, {0}, "VMCS_FIELD_COUNT" }
};
   211  
// Field ids that make up the guest-state group (used for group iteration,
// e.g. by vmcs_print_group()).
static VMCS_FIELD g_guest_state_fields[] = {
    VMCS_GUEST_CR0,
    VMCS_GUEST_CR3,
    VMCS_GUEST_CR4,
    VMCS_GUEST_DR7,
    VMCS_GUEST_ES_SELECTOR,
    VMCS_GUEST_ES_BASE,
    VMCS_GUEST_ES_LIMIT,
    VMCS_GUEST_ES_AR,
    VMCS_GUEST_CS_SELECTOR,
    VMCS_GUEST_CS_BASE,
    VMCS_GUEST_CS_LIMIT,
    VMCS_GUEST_CS_AR,
    VMCS_GUEST_SS_SELECTOR,
    VMCS_GUEST_SS_BASE,
    VMCS_GUEST_SS_LIMIT,
    VMCS_GUEST_SS_AR,
    VMCS_GUEST_DS_SELECTOR,
    VMCS_GUEST_DS_BASE,
    VMCS_GUEST_DS_LIMIT,
    VMCS_GUEST_DS_AR,
    VMCS_GUEST_FS_SELECTOR,
    VMCS_GUEST_FS_BASE,
    VMCS_GUEST_FS_LIMIT,
    VMCS_GUEST_FS_AR,
    VMCS_GUEST_GS_SELECTOR,
    VMCS_GUEST_GS_BASE,
    VMCS_GUEST_GS_LIMIT,
    VMCS_GUEST_GS_AR,
    VMCS_GUEST_LDTR_SELECTOR,
    VMCS_GUEST_LDTR_BASE,
    VMCS_GUEST_LDTR_LIMIT,
    VMCS_GUEST_LDTR_AR,
    VMCS_GUEST_TR_SELECTOR,
    VMCS_GUEST_TR_BASE,
    VMCS_GUEST_TR_LIMIT,
    VMCS_GUEST_TR_AR,
    VMCS_GUEST_GDTR_BASE,
    VMCS_GUEST_GDTR_LIMIT,
    VMCS_GUEST_IDTR_BASE,
    VMCS_GUEST_IDTR_LIMIT,
    VMCS_GUEST_RSP,
    VMCS_GUEST_RIP,
    VMCS_GUEST_RFLAGS,
    VMCS_GUEST_PEND_DBE,
    VMCS_GUEST_WORKING_VMCS_PTR,
    VMCS_GUEST_DEBUG_CONTROL,
    VMCS_GUEST_INTERRUPTIBILITY,
    VMCS_GUEST_SLEEP_STATE,
    VMCS_GUEST_SMBASE,
    VMCS_GUEST_SYSENTER_CS,
    VMCS_GUEST_SYSENTER_ESP,
    VMCS_GUEST_SYSENTER_EIP,
    VMCS_GUEST_PAT,
    VMCS_GUEST_EFER,
    VMCS_GUEST_PDPTR0,
    VMCS_GUEST_PDPTR1,
    VMCS_GUEST_PDPTR2,
    VMCS_GUEST_PDPTR3,
    VMCS_PREEMPTION_TIMER
};
   273  
// Field ids that make up the host-state group.
static VMCS_FIELD g_host_state_fields[] = {
    VMCS_HOST_CR0,
    VMCS_HOST_CR3,
    VMCS_HOST_CR4,
    VMCS_HOST_ES_SELECTOR,
    VMCS_HOST_CS_SELECTOR,
    VMCS_HOST_SS_SELECTOR,
    VMCS_HOST_DS_SELECTOR,
    VMCS_HOST_FS_SELECTOR,
    VMCS_HOST_FS_BASE,
    VMCS_HOST_GS_SELECTOR,
    VMCS_HOST_GS_BASE,
    VMCS_HOST_TR_SELECTOR,
    VMCS_HOST_TR_BASE,
    VMCS_HOST_GDTR_BASE,
    VMCS_HOST_IDTR_BASE,
    VMCS_HOST_RSP,
    VMCS_HOST_RIP,
    VMCS_HOST_SYSENTER_CS,
    VMCS_HOST_SYSENTER_ESP,
    VMCS_HOST_SYSENTER_EIP,
    VMCS_HOST_PAT,
    VMCS_HOST_EFER
};
   298  
// Field ids that make up the control / exit-information group.
static VMCS_FIELD g_control_fields[] = {
    VMCS_VPID,
    VMCS_CONTROL_VECTOR_PIN_EVENTS,
    VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS,
    VMCS_CONTROL2_VECTOR_PROCESSOR_EVENTS,
    VMCS_EXCEPTION_BITMAP,
    VMCS_CR3_TARGET_COUNT,
    VMCS_CR0_MASK,
    VMCS_CR4_MASK,
    VMCS_CR0_READ_SHADOW,
    VMCS_CR4_READ_SHADOW,
    VMCS_PAGE_FAULT_ERROR_CODE_MASK,
    VMCS_PAGE_FAULT_ERROR_CODE_MATCH,
    VMCS_EXIT_CONTROL_VECTOR,
    VMCS_EXIT_MSR_STORE_COUNT,
    VMCS_EXIT_MSR_LOAD_COUNT,
    VMCS_ENTER_CONTROL_VECTOR,
    VMCS_ENTER_INTERRUPT_INFO,
    VMCS_ENTER_EXCEPTION_ERROR_CODE,
    VMCS_ENTER_INSTRUCTION_LENGTH,
    VMCS_ENTER_MSR_LOAD_COUNT,
    VMCS_IO_BITMAP_ADDRESS_A,
    VMCS_IO_BITMAP_ADDRESS_B,
    VMCS_MSR_BITMAP_ADDRESS,
    VMCS_EXIT_MSR_STORE_ADDRESS,
    VMCS_EXIT_MSR_LOAD_ADDRESS,
    VMCS_ENTER_MSR_LOAD_ADDRESS,
    VMCS_OSV_CONTROLLING_VMCS_ADDRESS,
    VMCS_TSC_OFFSET,
    VMCS_EXIT_INFO_GUEST_PHYSICAL_ADDRESS,
    VMCS_EXIT_INFO_GUEST_LINEAR_ADDRESS,
    VMCS_EXIT_INFO_INSTRUCTION_ERROR_CODE,
    VMCS_EXIT_INFO_REASON,
    VMCS_EXIT_INFO_EXCEPTION_INFO,
    VMCS_EXIT_INFO_EXCEPTION_ERROR_CODE,
    VMCS_EXIT_INFO_IDT_VECTORING,
    VMCS_EXIT_INFO_IDT_VECTORING_ERROR_CODE,
    VMCS_EXIT_INFO_INSTRUCTION_LENGTH,
    VMCS_EXIT_INFO_INSTRUCTION_INFO,
    VMCS_EXIT_INFO_QUALIFICATION,
    VMCS_EXIT_INFO_IO_RCX,
    VMCS_EXIT_INFO_IO_RSI,
    VMCS_EXIT_INFO_IO_RDI,
    VMCS_EXIT_INFO_IO_RIP,
    VMCS_VIRTUAL_APIC_ADDRESS,
    VMCS_APIC_ACCESS_ADDRESS,
    VMCS_EXIT_TPR_THRESHOLD,
    VMCS_EPTP_ADDRESS,
    VMCS_CR3_TARGET_VALUE_0,
    VMCS_CR3_TARGET_VALUE_1,
    VMCS_CR3_TARGET_VALUE_2,
    VMCS_CR3_TARGET_VALUE_3,
#ifdef FAST_VIEW_SWITCH
    VMCS_VMFUNC_CONTROL,
    VMCS_VMFUNC_EPTP_LIST_ADDRESS
#endif
};
   356  
   357  /*      translation encoding -> field enum */
   358  
   359  #define NUMBER_OF_ENCODING_TYPES    16
   360  #define MAX_ENCODINGS_OF_SAME_TYPE  32
   361  
   362  // this array is parallel to array of encoding tables
   363  //    /* encoding type */
   364  ///*1*/    0x0000,     /* 0000_00xx_xxxx_xxx0 */
   365  ///*2*/    0x0800,     /* 0000_10xx_xxxx_xxx0 */
   366  ///*3*/    0x0C00,     /* 0000_11xx_xxxx_xxx0 */
   367  ///*4*/    0x2000,     /* 0010_00xx_xxxx_xxx0 */
   368  ///*5*/    0x2400,     /* 0010_01xx_xxxx_xxx0 */
   369  ///*6*/    0x2800,     /* 0010_10xx_xxxx_xxx0 */
   370  ///*7*/    0x2C00,     /* 0010_11xx_xxxx_xxx0 */
   371  ///*8*/    0x4000,     /* 0100_00xx_xxxx_xxx0 */
   372  ///*9*/    0x4400,     /* 0100_01xx_xxxx_xxx0 */
   373  ///*10*/   0x4800,     /* 0100_10xx_xxxx_xxx0 */
   374  ///*11*/   0x4C00,     /* 0100_11xx_xxxx_xxx0 */
   375  ///*12*/   0x6000,     /* 0110_00xx_xxxx_xxx0 */
   376  ///*13*/   0x6400,     /* 0110_01xx_xxxx_xxx0 */
   377  ///*14*/   0x6800,     /* 0110_10xx_xxxx_xxx0 */
   378  ///*15*/   0x6C00,     /* 0110_11xx_xxxx_xxx0 */
   379  //
   380  // In the above table we see that encoding looks like the following
   381  //  0mm0_nnxx_xxxx_xxxA
   382  // Where actual encoding type is mmnn and A is a FULL/HIGH selector
   383  // assuming that bits signed as 0 must be 0
// Bit-field layout of a VMCS encoding: 0mm0_nnxx_xxxx_xxxA (see comment
// above).  mmnn selects the encoding type, x bits select the entry index,
// and A selects the FULL vs HIGH variant of the field.
#define ENC_MUST_BE_ZERO_BITS 0x9000
#define ENC_M_BITS            0x6000
#define ENC_M_BITS_SHIFT      11
#define ENC_N_BITS            0x0C00
#define ENC_N_BITS_SHIFT      10
#define ENC_X_BITS            0x03FE
#define ENC_BITS_SHIFT        1
#define ENC_HIGH_TYPE_BIT     0x1

// Extract the encoding type (row of g_enc_2_field) from an encoding.
#define ENC_TYPE_FROM_ENCODING( enc )                                          \
    ((((enc) & ENC_M_BITS) >> ENC_M_BITS_SHIFT) |                              \
     (((enc) & ENC_N_BITS) >> ENC_N_BITS_SHIFT))
// Extract the entry index (column of g_enc_2_field) from an encoding.
#define ENTRY_IDX_FROM_ENCODING( enc ) (((enc) & ENC_X_BITS) >> ENC_BITS_SHIFT)
// TRUE when the encoding selects the "high" (upper 32 bits) variant.
#define IS_ENCODING_HIGH_TYPE( enc )   (((enc) & ENC_HIGH_TYPE_BIT) == ENC_HIGH_TYPE_BIT)
   398  
   399  
// Reverse-lookup entry: maps a hardware encoding back to its VMCS_FIELD id.
typedef struct _ENC_2_FIELD_ENTRY {
    UINT32      valid;      // non-zero when this slot holds a valid mapping
    VMCS_FIELD  field_id;
} ENC_2_FIELD_ENTRY;

// Lookup table [encoding type][entry index]; filled by init_enc_2_field_tables().
static ENC_2_FIELD_ENTRY g_enc_2_field[NUMBER_OF_ENCODING_TYPES][MAX_ENCODINGS_OF_SAME_TYPE];
   406  
   407  VMM_DEBUG_CODE(
   408  static void vmcs_print_group(const struct _VMCS_OBJECT *obj, const VMCS_FIELD *fields_to_print, UINT32 count);
   409  )
   410  static void enable_vmcs_2_0_fields(const VMCS_HW_CONSTRAINTS* constraints);
   411  static void init_enc_2_field_tables(void);
   412  BOOLEAN string_is_substring(const char *bigstring, const char *smallstring);
   413  BOOLEAN strings_are_substrings(const char *bigstring, UINT32 num_of_smallstrings, char *smallstring[]);
   414  
   415  
   416  BOOLEAN vmcs_field_is_supported(VMCS_FIELD field_id)
   417  {
   418      return (field_id < VMCS_FIELD_COUNT && 
   419              field_id < (NELEMENTS(g_field_data)-1) &&
   420              g_field_data[field_id].access != NO_EXIST);
   421  }
   422  
   423  void vmcs_write_nocheck(struct _VMCS_OBJECT *vmcs, VMCS_FIELD field_id, UINT64 value)
   424  {
   425      VMM_ASSERT(vmcs);
   426      VMM_ASSERT(field_id < VMCS_FIELD_COUNT);
   427      if (field_id < VMCS_FIELD_COUNT) {
   428          vmcs->vmcs_write(vmcs, field_id, value);
   429      }
   430  }
   431  
   432  void vmcs_write(struct _VMCS_OBJECT *vmcs, VMCS_FIELD field_id, UINT64 value)
   433  {
   434      VMM_ASSERT(field_id < VMCS_FIELD_COUNT);
   435      VMM_ASSERT(FIELD_IS_WRITEABLE(field_id));
   436      if (field_id < VMCS_FIELD_COUNT &&
   437          (vmcs->skip_access_checking || FIELD_IS_WRITEABLE(field_id))) {
   438          vmcs->vmcs_write(vmcs, field_id, value);
   439      }
   440  }
   441  
   442  
   443  UINT64 vmcs_read(const struct _VMCS_OBJECT *vmcs, VMCS_FIELD field_id)
   444  {
   445      UINT64 value;
   446  #ifdef JLMDEBUG1
   447      bprint("vmcs_read entry\n");
   448  #endif
   449  
   450      VMM_ASSERT(vmcs);
   451      VMM_ASSERT(field_id<VMCS_FIELD_COUNT);
   452      VMM_ASSERT(FIELD_IS_READABLE(field_id));
   453      if (field_id<VMCS_FIELD_COUNT && (vmcs->skip_access_checking || FIELD_IS_READABLE(field_id))) {
   454          value = vmcs->vmcs_read(vmcs, field_id);
   455      }
   456      else {
   457          value = UINT64_ALL_ONES;
   458      }
   459  #ifdef JLMDEBUG1
   460      bprint("vmcs_read returning 0x%016lx\n", value);
   461  #endif
   462      return value;
   463  }
   464  
   465  
   466  void vmcs_update(struct _VMCS_OBJECT *vmcs, VMCS_FIELD field_id, 
   467                   UINT64 value, UINT64 bits_to_update)
   468  {
   469      UINT64 result_value;
   470  
   471  #ifdef JLMDEBUG1
   472      bprint("vmcs_update\n");
   473  #endif
   474      VMM_ASSERT(field_id < VMCS_FIELD_COUNT);
   475      result_value = vmcs_read(vmcs, field_id);
   476      value &= bits_to_update;     // clear all bits except bits_to_update
   477      result_value &= ~bits_to_update;    // clear bits_to_update
   478      result_value |= value;
   479      vmcs_write(vmcs, field_id, result_value);
   480  }
   481  
   482  #ifdef INCLUDE_UNUSED_CODE
   483  void vmcs_copy(struct _VMCS_OBJECT *vmcs_dst, const struct _VMCS_OBJECT *vmcs_src)
   484  {
   485      VMCS_FIELD  field_id;
   486      UINT64      value;
   487  
   488      VMM_ASSERT(vmcs_dst->level != VMCS_MERGED);
   489  
   490      for (field_id = 0; field_id < VMCS_FIELD_COUNT; ++field_id) {
   491          if (FIELD_IS_READABLE(field_id)) {
   492              value = vmcs_read(vmcs_src, field_id);
   493              vmcs_write_nocheck(vmcs_dst, field_id, value);
   494          }
   495      }
   496  }
   497  
   498  void vmcs_store(struct _VMCS_OBJECT *vmcs, UINT64 *buffer)
   499  {
   500      VMCS_FIELD field_id;
   501  
   502      for (field_id = 0; field_id < VMCS_FIELD_COUNT; ++field_id) {
   503          if (FIELD_IS_READABLE(field_id)) {
   504              buffer[field_id] = vmcs_read(vmcs, field_id);
   505          }
   506      }
   507  }
   508  
   509  
   510  void vmcs_load(struct _VMCS_OBJECT *vmcs, UINT64 *buffer)
   511  {
   512      VMCS_FIELD field_id;
   513  
   514      for (field_id = 0; field_id < VMCS_FIELD_COUNT; ++field_id) {
   515          if (FIELD_IS_WRITEABLE(field_id)) {
   516              vmcs_write(vmcs, field_id, buffer[field_id]);
   517          }
   518      }
   519  }
   520  #endif
   521  
   522  UINT32 vmcs_get_field_encoding(VMCS_FIELD field_id, RW_ACCESS *p_access)
   523  {
   524      if (field_id < VMCS_FIELD_COUNT) {
   525          if (NULL != p_access)
   526              *p_access = (RW_ACCESS)g_field_data[field_id].access;
   527          return g_field_data[field_id].encoding;
   528      }
   529      return VMCS_NO_COMPONENT;
   530  }
   531  
   532  // Init the package
   533  void vmcs_manager_init(void)
   534  {
   535      const VMCS_HW_CONSTRAINTS* constraints;
   536      constraints = vmcs_hw_get_vmx_constraints();
   537      enable_vmcs_2_0_fields( constraints );
   538      init_enc_2_field_tables();
   539  }
   540  
   541  // Enable VMCS 2.0 fields if exist
   542  void enable_vmcs_2_0_fields( const VMCS_HW_CONSTRAINTS* constraints )
   543  {
   544      if (constraints->ept_supported) {
   545          g_field_data[VMCS_EPTP_ADDRESS].access = WRITABLE;
   546          g_field_data[VMCS_EXIT_INFO_GUEST_PHYSICAL_ADDRESS].access = READONLY;
   547          g_field_data[VMCS_GUEST_PDPTR0].access = WRITABLE;
   548          g_field_data[VMCS_GUEST_PDPTR1].access = WRITABLE;
   549          g_field_data[VMCS_GUEST_PDPTR2].access = WRITABLE;
   550          g_field_data[VMCS_GUEST_PDPTR3].access = WRITABLE;
   551      }
   552  
   553      if (constraints->vpid_supported) {
   554          g_field_data[VMCS_VPID].access = WRITABLE;
   555      }
   556  
   557      if ((1 == constraints->may1_vm_entry_ctrl.Bits.LoadEfer) ||
   558          (1 == constraints->may1_vm_exit_ctrl.Bits.SaveEfer)) {
   559          g_field_data[VMCS_GUEST_EFER].access = WRITABLE;
   560      }
   561  
   562      if ((1 == constraints->may1_vm_entry_ctrl.Bits.LoadPat) ||
   563          (1 == constraints->may1_vm_exit_ctrl.Bits.SavePat)) {
   564          g_field_data[VMCS_GUEST_PAT].access = WRITABLE;
   565      }
   566  
   567      if (1 == constraints->may1_pin_based_exec_ctrl.Bits.VmxTimer) {
   568          g_field_data[VMCS_PREEMPTION_TIMER].access = WRITABLE;
   569      }
   570  
   571      if (1 == constraints->may1_vm_exit_ctrl.Bits.LoadEfer) {
   572          g_field_data[VMCS_HOST_EFER].access = WRITABLE;
   573      }
   574  
   575      if (1 == constraints->may1_vm_exit_ctrl.Bits.LoadPat) {
   576          g_field_data[VMCS_HOST_PAT].access = WRITABLE;
   577      }
   578  
   579      if (1 == constraints->may1_vm_exit_ctrl.Bits.Load_IA32_PERF_GLOBAL_CTRL) {
   580          g_field_data[VMCS_GUEST_IA32_PERF_GLOBAL_CTRL].access = WRITABLE;
   581      }
   582  
   583      if (1 == constraints->may1_vm_entry_ctrl.Bits.Load_IA32_PERF_GLOBAL_CTRL) {
   584          g_field_data[VMCS_HOST_IA32_PERF_GLOBAL_CTRL].access = WRITABLE;
   585      }
   586  #ifdef FAST_VIEW_SWITCH
   587      if (constraints->vmfunc_supported) {
   588          g_field_data[VMCS_VMFUNC_CONTROL].access = WRITABLE;
   589          g_field_data[VMCS_VMFUNC_EPTP_LIST_ADDRESS].access = WRITABLE;
   590      }
   591  #endif
   592      if (constraints->ve_supported) {
   593          g_field_data[VMCS_EPTP_INDEX].access = WRITABLE;
   594          g_field_data[VMCS_VE_INFO_ADDRESS].access = WRITABLE;
   595      }
   596  }
   597  
   598  static BOOLEAN get_enc_2_field_entry( UINT32 encoding, ENC_2_FIELD_ENTRY** entry )
   599  {
   600      UINT32  enc_type  = ENC_TYPE_FROM_ENCODING(encoding);
   601      UINT32  entry_idx = ENTRY_IDX_FROM_ENCODING(encoding);
   602  
   603      VMM_ASSERT( NULL != entry );
   604  
   605      if ((encoding & ENC_MUST_BE_ZERO_BITS) != 0) {
   606          VMM_LOG(mask_anonymous, level_trace,"ERROR: VMCS Encoding %P contains bits that assumed to be 0\n", encoding);
   607          return FALSE;
   608      }
   609      if (enc_type >= NUMBER_OF_ENCODING_TYPES) {
   610          VMM_LOG(mask_anonymous, level_trace,"ERROR: VMCS Encoding %P means that need more encoding types %P>=%P\n",
   611                      encoding, enc_type, NUMBER_OF_ENCODING_TYPES);
   612          return FALSE;
   613      }
   614      if (entry_idx >= MAX_ENCODINGS_OF_SAME_TYPE) {
   615          VMM_LOG(mask_anonymous, level_trace,"ERROR: VMCS Encoding %P means that need more entries per type %P>=%P\n",
   616                      encoding, entry_idx, MAX_ENCODINGS_OF_SAME_TYPE);
   617          return FALSE;
   618      }
   619      *entry = &(g_enc_2_field[ enc_type ][ entry_idx ]);
   620      return TRUE;
   621  }
   622  
   623  static void init_enc_2_field_tables(void)
   624  {
   625      VMCS_FIELD          cur_field;
   626      VMCS_ENCODING*      enc;
   627      ENC_2_FIELD_ENTRY*  enc_2_field_entry= NULL;
   628      BOOLEAN             ok;
   629  
   630      vmm_memset( g_enc_2_field, 0, sizeof(g_enc_2_field) );
   631  
   632      // loop though all supported fields and fill the enc->field table
   633      for (cur_field = (VMCS_FIELD)0; cur_field < VMCS_FIELD_COUNT; ++cur_field) {
   634          enc = g_field_data + (UINT32)cur_field;
   635          if (NO_EXIST == enc->access) {
   636              continue;
   637          }
   638          ok = get_enc_2_field_entry( enc->encoding, &enc_2_field_entry );
   639          // BEFORE_VMLAUNCH
   640          VMM_ASSERT( ok );
   641          // BEFORE_VMLAUNCH
   642          VMM_ASSERT( FALSE == enc_2_field_entry->valid );
   643          enc_2_field_entry->valid    = TRUE;
   644          enc_2_field_entry->field_id = cur_field;
   645      }
   646  }
   647  
   648  VMCS_FIELD vmcs_get_field_id_by_encoding( UINT32 encoding, OPTIONAL BOOLEAN* is_HIGH_part )
   649  {
   650      ENC_2_FIELD_ENTRY*  enc_2_field_entry;
   651      BOOLEAN             found;
   652      VMCS_FIELD          field;
   653  
   654      found = get_enc_2_field_entry( encoding, &enc_2_field_entry );
   655  
   656      if ((!found) || (!enc_2_field_entry->valid)) {
   657  //        VMM_LOG(mask_anonymous, level_trace,"ERROR: VMCS Encoding %P is unknown\n", encoding);
   658          return VMCS_FIELD_COUNT;
   659      }
   660  
   661      field = enc_2_field_entry->field_id;
   662      if (IS_ENCODING_HIGH_TYPE(encoding)) {
   663          if (SUPP_HIGH_ENC != g_field_data[field].supports_high_encoding) {
   664              VMM_LOG(mask_anonymous, level_trace,"ERROR: VMCS Encoding %P does not map to a known HIGH type encoding\n",
   665                          encoding);
   666              return VMCS_FIELD_COUNT;
   667          }
   668      }
   669  
   670      if (is_HIGH_part) {
   671          *is_HIGH_part = IS_ENCODING_HIGH_TYPE(encoding);
   672      }
   673  
   674      return field;
   675  }
   676  
   677  #ifdef CLI_INCLUDE
   678  void vmcs_print_group( const struct _VMCS_OBJECT* obj, const VMCS_FIELD* fields_to_print, UINT32 count )
   679  {
   680      UINT32 i;
   681      UINT64 value;
   682      const VMCS_ENCODING* field_desc;
   683  
   684      for (i = 0; i < count; ++i) {
   685          field_desc = &g_field_data[fields_to_print[i]];
   686          if (field_desc->access == NO_EXIST) {
   687              continue;
   688          }
   689          value = vmcs_read(obj, fields_to_print[i]);
   690          CLI_PRINT("%40s (0x%04X) = %P\n", field_desc->name, field_desc->encoding, value );
   691      }
   692  }
   693  
   694  BOOLEAN string_is_substring(const char *bigstring, const char *smallstring)
   695  {
   696      BOOLEAN match = TRUE;
   697      int ib, is;
   698  
   699      for (ib = 0, is = 0; bigstring[ib] != 0 && smallstring[is] != 0; ++ib) {
   700          if (bigstring[ib] == smallstring[is]) {
   701              is++;
   702              match = TRUE;
   703          }
   704          else {
   705              is = 0;
   706              match = FALSE;
   707          }
   708      }
   709      if (smallstring[is] != 0)
   710          match = FALSE;
   711      return match;
   712  }
   713  
   714  // Returns TRUE if all smallstrings are part of a bigstring
   715  BOOLEAN strings_are_substrings(const char *bigstring, UINT32 num_of_smallstrings, char *smallstring[])
   716  {
   717      UINT32 i;
   718      for (i = 0; i < num_of_smallstrings; ++i) {
   719          if (FALSE == string_is_substring(bigstring, smallstring[i])) {
   720              return FALSE;
   721          }
   722      }
   723      return TRUE;
   724  }
   725  
   726  void vmcs_print_all_filtered(
   727      const struct _VMCS_OBJECT* obj, UINT32 num_of_filters, char *filters[])
   728  {
   729      UINT32 i;
   730      UINT64 value;
   731      const VMCS_ENCODING *field_desc;
   732  
   733      for (i = 0; i < NELEMENTS(g_field_data); ++i) {
   734          field_desc = &g_field_data[i];
   735          if (field_desc->access == NO_EXIST) {
   736              continue;
   737          }
   738          if (strings_are_substrings(field_desc->name, num_of_filters, filters)) {
   739              value = vmcs_read(obj, (VMCS_FIELD)i);
   740              CLI_PRINT("%40s (0x%04X) = %P\n", field_desc->name, field_desc->encoding, value );
   741          }
   742      }
   743  }
   744  
// Print the guest-state group of VMCS fields, framed by CLI banners.
void vmcs_print_guest_state( const struct _VMCS_OBJECT* obj )
{
    CLI_PRINT("------------- VMCS Guest State --------------\n");
    vmcs_print_group(obj, g_guest_state_fields, NELEMENTS(g_guest_state_fields));
    CLI_PRINT("------------- END OF VMCS Guest State -------\n\n");
}

// Print the host-state group of VMCS fields, framed by CLI banners.
void vmcs_print_host_state( const struct _VMCS_OBJECT* obj )
{
    CLI_PRINT("------------- VMCS Host State --------------\n");
    vmcs_print_group(obj, g_host_state_fields, NELEMENTS(g_host_state_fields));
    CLI_PRINT("------------- END OF VMCS Host State -------\n\n");
}

// Print the control group of VMCS fields, framed by CLI banners.
void vmcs_print_controls( const struct _VMCS_OBJECT* obj )
{
    CLI_PRINT("------------- VMCS Controls --------------\n");
    vmcs_print_group(obj, g_control_fields, NELEMENTS(g_control_fields));
    CLI_PRINT("------------- END OF VMCS Controls -------\n\n");
}

// Print all three VMCS groups: controls, guest state, host state.
void vmcs_print_all( const struct _VMCS_OBJECT* obj )
{
    vmcs_print_controls(obj);
    vmcs_print_guest_state(obj);
    vmcs_print_host_state(obj);

}
   773  #endif
   774  const char* vmcs_get_field_name( VMCS_FIELD field_id )
   775  {
   776      const char* name;
   777      const VMCS_ENCODING* enc = (field_id < VMCS_FIELD_COUNT) ? &g_field_data[field_id] : NULL;
   778  
   779      if (enc) {
   780          name = enc->name;
   781      }
   782      else {
   783          name = "UNKNOWN VMCS FIELD";
   784      }
   785  
   786      return name;
   787  }
   788  
// Zero the counts of all three VMCS MSR lists (exit-store, exit-load,
// enter-load). The list addresses themselves are left unchanged.
void vmcs_clear_all_msr_lists(struct _VMCS_OBJECT* vmcs) {
    vmcs_write(vmcs, VMCS_EXIT_MSR_STORE_COUNT, 0);
    vmcs_write(vmcs, VMCS_EXIT_MSR_LOAD_COUNT, 0);
    vmcs_write(vmcs, VMCS_ENTER_MSR_LOAD_COUNT, 0);
}

// Reset all three MSR lists to the pristine state: invalid address,
// zero count, and zero allocated capacity (so nothing is assumed
// allocated).
void vmcs_init_all_msr_lists(struct _VMCS_OBJECT* vmcs) {
    vmcs_write(vmcs, VMCS_EXIT_MSR_STORE_ADDRESS, VMCS_INVALID_ADDRESS);
    vmcs_write(vmcs, VMCS_EXIT_MSR_LOAD_ADDRESS, VMCS_INVALID_ADDRESS);
    vmcs_write(vmcs, VMCS_ENTER_MSR_LOAD_ADDRESS, VMCS_INVALID_ADDRESS);
    vmcs_clear_all_msr_lists(vmcs);
    // capacity counters track the allocation size of each list buffer
    vmcs->max_num_of_vmexit_store_msrs = 0;
    vmcs->max_num_of_vmexit_load_msrs = 0;
    vmcs->max_num_of_vmenter_load_msrs = 0;
}
   804  
   805  static void vmcs_free_msr_list(UINT64 msr_list_addr, BOOLEAN address_is_in_hpa) {
   806      if (msr_list_addr != VMCS_INVALID_ADDRESS) {
   807          HVA msr_list_addr_hva;
   808  
   809          if (address_is_in_hpa) {
   810              if (!hmm_hpa_to_hva(msr_list_addr, &msr_list_addr_hva)) {
   811                  VMM_LOG(mask_anonymous, level_trace,
   812                          "%s: Could not retrieve HVA of MSR list\n", __FUNCTION__);
   813                  VMM_DEADLOOP();
   814              }
   815          }
   816          else {
   817              msr_list_addr_hva = msr_list_addr;
   818          }
   819  
   820          vmm_mfree((void*)msr_list_addr_hva);
   821      }
   822  }
   823  
   824  void vmcs_destroy_all_msr_lists_internal(struct _VMCS_OBJECT* vmcs,
   825                                           BOOLEAN addresses_are_in_hpa) {
   826      UINT64 exit_store_addr;
   827      UINT64 exit_load_addr;
   828      UINT64 enter_load_addr;
   829  
   830      exit_store_addr = vmcs_read(vmcs, VMCS_EXIT_MSR_STORE_ADDRESS);
   831      exit_load_addr = vmcs_read(vmcs, VMCS_EXIT_MSR_LOAD_ADDRESS);
   832      enter_load_addr = vmcs_read(vmcs, VMCS_ENTER_MSR_LOAD_ADDRESS);
   833  
   834      vmcs_free_msr_list(exit_store_addr, addresses_are_in_hpa);
   835      vmcs_free_msr_list(exit_load_addr, addresses_are_in_hpa);
   836      vmcs_free_msr_list(enter_load_addr, addresses_are_in_hpa);
   837  
   838      vmcs_init_all_msr_lists(vmcs);
   839  }
   840  
   841  static void vmcs_alloc_msr_list(IN UINT32 requested_num_of_msrs,
   842                   OUT void** msl_list_memory, OUT UINT32* allocated_num_of_msrs) {
   843      UINT32 num_of_msrs;
   844  
   845      // Fund closes power of 2
   846      num_of_msrs = (requested_num_of_msrs > 8) ? requested_num_of_msrs : MIN_SIZE_OF_MSR_LIST;
   847      for (; !IS_POW_OF_2(num_of_msrs); num_of_msrs++);
   848  
   849      *msl_list_memory = vmm_malloc_aligned(num_of_msrs * sizeof(IA32_VMX_MSR_ENTRY), sizeof(IA32_VMX_MSR_ENTRY));
   850      if (*msl_list_memory == NULL) {
   851          VMM_LOG(mask_anonymous, level_trace,
   852                  "%s: Failed to allocate memory for MSR list\n", __FUNCTION__);
   853          VMM_DEADLOOP();
   854          *allocated_num_of_msrs = 0;
   855      }
   856      else {
   857          *allocated_num_of_msrs = num_of_msrs;
   858      }
   859  }
   860  
   861  static void vmcs_copy_msr_list(IA32_VMX_MSR_ENTRY* copy_to,
   862                   IA32_VMX_MSR_ENTRY* copy_from, UINT32 num_of_msrs)
   863  {
   864      for(; num_of_msrs > 0; num_of_msrs--) {
   865          *copy_to++ = *copy_from++;
   866      }
   867  }
   868  
   869  
// Add (or update) the MSR <msr_index, value> in the MSR list that the
// (list_address, list_count) VMCS field pair describes.
// - If the MSR is already present, its value is overwritten in place.
// - Otherwise it is appended; if the list is full or not yet allocated,
//   a bigger buffer is allocated, the old contents copied over and the
//   old buffer freed.
// *max_msrs_counter tracks the allocated capacity of the list buffer.
// is_addres_hpa says whether the address stored in the VMCS is an HPA
// (merged VMCS) or a plain HVA.
void vmcs_add_msr_to_list(struct _VMCS_OBJECT* vmcs, UINT32 msr_index, UINT64  value,
                          VMCS_FIELD list_address, VMCS_FIELD list_count,
                          UINT32* max_msrs_counter, BOOLEAN is_addres_hpa)
{
    UINT64               msr_list_addr = vmcs_read(vmcs, list_address);
    UINT32               msr_list_count = (UINT32)vmcs_read(vmcs, list_count);
    IA32_VMX_MSR_ENTRY*  msr_list_addr_ptr = NULL;
    IA32_VMX_MSR_ENTRY*  new_msr_ptr;
    UINT32               i;

    // Retrieve pointer to a MSR list.
    if (msr_list_addr != VMCS_INVALID_ADDRESS) {
        // Get pointer to MSR list
        if (is_addres_hpa) {
            // Address that is written in VMCS is HPA, convert it to pointer
            if (!hmm_hpa_to_hva((HPA)msr_list_addr, (HVA*)&msr_list_addr_ptr)) {
                VMM_LOG(mask_anonymous, level_trace,
                        "%s: Failed to retrieve HVA of MSR list from HPA=%P\n", 
                        __FUNCTION__, msr_list_addr);
                VMM_DEADLOOP();
            }
        }
        else {
            // Address that is written in VMCS is HVA
            msr_list_addr_ptr = (IA32_VMX_MSR_ENTRY*)msr_list_addr;
        }

        // The list must exist and be aligned on the entry size.
        VMM_ASSERT(msr_list_addr_ptr != NULL);
        VMM_ASSERT(ALIGN_BACKWARD((UINT64)msr_list_addr_ptr, sizeof(IA32_VMX_MSR_ENTRY)) == (UINT64)msr_list_addr_ptr);
    }

    VMM_ASSERT(*max_msrs_counter <= 256);

    // Check that MSR is not already in a list.
    // On exit new_msr_ptr points at the matching entry, or one past the
    // last entry if the MSR was not found (i == msr_list_count).
    for (i = 0, new_msr_ptr = msr_list_addr_ptr; i < msr_list_count; i++, new_msr_ptr++)
        if ((new_msr_ptr != NULL) && (new_msr_ptr->MsrIndex == msr_index))
            break;

    if (i >= msr_list_count) {
        // MSR not found: append it.
        // Check if a MSR list should be re/allocated.
        if (msr_list_count >= *max_msrs_counter) {
            // The list is full or not allocated, expand it
            IA32_VMX_MSR_ENTRY*  new_msr_list_addr_ptr = NULL;
            UINT32               new_max_counter;

            VMM_ASSERT(*max_msrs_counter < 256);

            // Allocate new list
            vmcs_alloc_msr_list(msr_list_count + 1, (void**)&new_msr_list_addr_ptr, &new_max_counter);
            VMM_ASSERT(new_msr_list_addr_ptr != NULL);

            // Migrate existing entries and release the old buffer.
            if (msr_list_count != 0) {
                vmcs_copy_msr_list(new_msr_list_addr_ptr, msr_list_addr_ptr, msr_list_count);
                vmm_mfree(msr_list_addr_ptr);
            }

            msr_list_addr_ptr = new_msr_list_addr_ptr;

            // Publish the new buffer address into the VMCS, converting
            // back to HPA when the VMCS stores physical addresses.
            if (is_addres_hpa) {
                UINT64  msr_list_addr_hpa;

                if (!hmm_hva_to_hpa((UINT64)msr_list_addr_ptr, &msr_list_addr_hpa)) {
                    VMM_LOG(mask_anonymous, level_trace,
                            "%s: Failed to retrieve HPA of MSR list\n", 
                            __FUNCTION__);
                    VMM_DEADLOOP();
                }
                vmcs_write(vmcs, list_address, msr_list_addr_hpa);
            }
            else {
                vmcs_write(vmcs, list_address, (UINT64)msr_list_addr_ptr);
            }

            *max_msrs_counter = new_max_counter;
        }
        // Append at the end of the (possibly new) buffer.
        new_msr_ptr = msr_list_addr_ptr + msr_list_count;
        vmcs_write(vmcs, list_count, msr_list_count + 1);
    }

    // Write the entry (either the matched slot or the appended one).
    if (new_msr_ptr != NULL) {
            new_msr_ptr->MsrIndex = msr_index;
            new_msr_ptr->Reserved = 0;
            new_msr_ptr->MsrData = value;
    }
}
   955  
// Remove msr_index from the MSR list described by the (list_address,
// list_count) VMCS field pair, if present. Remaining entries after the
// deleted one are shifted down by one and the count is decremented.
// The buffer itself is never freed or shrunk here. is_addres_hpa says
// whether the stored address is an HPA (merged VMCS) or an HVA.
void vmcs_delete_msr_from_list(struct _VMCS_OBJECT*  vmcs, UINT32 msr_index,
                 VMCS_FIELD list_address, VMCS_FIELD list_count, BOOLEAN is_addres_hpa)
{
    UINT64               msr_list_addr = vmcs_read(vmcs, list_address);
    UINT32               msr_list_count = (UINT32)vmcs_read(vmcs, list_count);
    IA32_VMX_MSR_ENTRY*  msr_list_addr_ptr = NULL;
    IA32_VMX_MSR_ENTRY*  msr_ptr;
    UINT32               i;

    // Nothing to do for an empty or unallocated list.
    if (msr_list_count != 0 && msr_list_addr != VMCS_INVALID_ADDRESS) {
        // Get pointer to MSR list
        if (is_addres_hpa) {
            // Address that is written in VMCS is HPA, convert it to pointer
            if (!hmm_hpa_to_hva((HPA) msr_list_addr, (HVA *) &msr_list_addr_ptr))
            {
                VMM_LOG(mask_anonymous, level_trace,"%s: Failed to retrieve HVA of MSR list from HPA=%P\n", __FUNCTION__, msr_list_addr);
                VMM_DEADLOOP();
            }
        }
        else {
            // Address that is written in VMCS is HVA
            msr_list_addr_ptr = (IA32_VMX_MSR_ENTRY*)msr_list_addr;
        }

        // The list must exist and be aligned on the entry size.
        VMM_ASSERT(msr_list_addr_ptr != NULL);
        VMM_ASSERT(ALIGN_BACKWARD((UINT64) msr_list_addr_ptr, sizeof(IA32_VMX_MSR_ENTRY)) == (UINT64) msr_list_addr_ptr);

        // Look for that MSR in a list.
        for (i = 0, msr_ptr = msr_list_addr_ptr; i < msr_list_count; i++, msr_ptr++) {
            if (msr_ptr->MsrIndex == msr_index) {
                // Shift the rest of a list by one up.
                UINT32  msrs_to_copy = msr_list_count - i - 1;

                // Forward overlapping copy is safe here (see
                // vmcs_copy_msr_list).
                if (msrs_to_copy > 0)
                    vmcs_copy_msr_list(msr_ptr, msr_ptr + 1, msrs_to_copy);

                // Save new list size.
                vmcs_write(vmcs, list_count, msr_list_count - 1);

                break;
            }
        }
    }
}
  1000  
// Add the MSR to both the VMEXIT-store and the VMENTER-load lists.
// When the two lists are distinct and the enter-load list is already
// allocated, the MSR is added to each independently. Otherwise the MSR
// is added once to the exit-store list and the enter-load
// address/count are aliased to it, so the two lists share one buffer.
void vmcs_add_msr_to_vmexit_store_and_vmenter_load_lists_internal(struct _VMCS_OBJECT* vmcs,
                    UINT32 msr_index, UINT64 value, BOOLEAN is_msr_list_addr_hpa) {
    UINT64 vmexit_store_addr = vmcs_read(vmcs, VMCS_EXIT_MSR_STORE_ADDRESS);
    UINT64 vmenter_load_addr = vmcs_read(vmcs, VMCS_ENTER_MSR_LOAD_ADDRESS);

    if ((vmexit_store_addr != vmenter_load_addr) && (vmenter_load_addr != VMCS_INVALID_ADDRESS)) {
        // Separate lists: update each one.
        vmcs_add_msr_to_vmexit_store_list_internal(vmcs, msr_index, value, is_msr_list_addr_hpa);
        vmcs_add_msr_to_vmenter_load_list_internal(vmcs, msr_index, value, is_msr_list_addr_hpa);
    }
    else {
        // Shared (or not yet allocated) list: add once, then point the
        // enter-load fields at the exit-store list.
        vmcs_add_msr_to_vmexit_store_list_internal(vmcs, msr_index, value, is_msr_list_addr_hpa);
        VMM_ASSERT(vmcs_read(vmcs, VMCS_EXIT_MSR_STORE_ADDRESS) != VMCS_INVALID_ADDRESS);
        VMM_ASSERT(vmcs_read(vmcs, VMCS_EXIT_MSR_STORE_COUNT) > 0);

        vmcs_write(vmcs, VMCS_ENTER_MSR_LOAD_ADDRESS, vmcs_read(vmcs, VMCS_EXIT_MSR_STORE_ADDRESS));
        vmcs_write(vmcs, VMCS_ENTER_MSR_LOAD_COUNT, vmcs_read(vmcs, VMCS_EXIT_MSR_STORE_COUNT));
    }
}
  1019  
// Remove the MSR from both the VMEXIT-store and VMENTER-load lists.
// Distinct lists are updated independently; for a shared list the MSR
// is removed once from the exit-store list and the enter-load count is
// re-synchronized (the address already aliases the same buffer).
void vmcs_delete_msr_from_vmexit_store_and_vmenter_load_lists_internal(struct _VMCS_OBJECT*  vmcs,
                     UINT32 msr_index, BOOLEAN is_msr_list_addr_hpa)
{
    UINT64 vmexit_store_addr = vmcs_read(vmcs, VMCS_EXIT_MSR_STORE_ADDRESS);
    UINT64 vmenter_load_addr = vmcs_read(vmcs, VMCS_ENTER_MSR_LOAD_ADDRESS);

    if (vmexit_store_addr != vmenter_load_addr) {
        vmcs_delete_msr_from_vmexit_store_list_internal(vmcs, msr_index, is_msr_list_addr_hpa);
        vmcs_delete_msr_from_vmenter_load_list_internal(vmcs, msr_index, is_msr_list_addr_hpa);
    }
    else if (vmenter_load_addr != VMCS_INVALID_ADDRESS) {
        // Shared list: delete once, then mirror the new count.
        vmcs_delete_msr_from_vmexit_store_list_internal(vmcs, msr_index, is_msr_list_addr_hpa);

        vmcs_write(vmcs, VMCS_ENTER_MSR_LOAD_COUNT, vmcs_read(vmcs, VMCS_EXIT_MSR_STORE_COUNT));
    }
}
  1036  
// Point the VMEXIT MSR-load list at an externally managed buffer.
// The capacity counter is zeroed so the list will never be extended or
// reallocated by vmcs_add_msr_to_list (this VMCS does not own the
// buffer).
void vmcs_assign_vmexit_msr_load_list(struct _VMCS_OBJECT* vmcs, UINT64 address_value,
                                      UINT64 count_value) {
    vmcs_write(vmcs, VMCS_EXIT_MSR_LOAD_ADDRESS, address_value);
    vmcs_write(vmcs, VMCS_EXIT_MSR_LOAD_COUNT, count_value);
    vmcs->max_num_of_vmexit_load_msrs = 0; // in order to disable extension of current list
}
  1043  
  1044  static BOOLEAN vmcs_is_msr_in_list(struct _VMCS_OBJECT* vmcs, VMCS_FIELD list_address_field,
  1045                              VMCS_FIELD list_count_field, UINT32 msr_index) {
  1046      UINT64 msr_list_addr = vmcs_read(vmcs, list_address_field);
  1047      UINT32 msr_list_count = (UINT32)vmcs_read(vmcs, list_count_field);
  1048      IA32_VMX_MSR_ENTRY* msr_list_addr_ptr = NULL;
  1049      UINT32 i;
  1050  
  1051      if (msr_list_count == 0) {
  1052          return FALSE;
  1053      }
  1054  
  1055      VMM_ASSERT(msr_list_addr != VMCS_INVALID_ADDRESS);
  1056  
  1057      if (vmcs_get_level(vmcs) == VMCS_MERGED) {
  1058          if (!hmm_hpa_to_hva(msr_list_addr, (HVA*)(&msr_list_addr_ptr))) {
  1059              VMM_LOG(mask_anonymous, level_trace,"%s: Failed to retrieve HVA of MSR list\n", __FUNCTION__);
  1060              VMM_DEADLOOP();
  1061          }
  1062      }
  1063      else {
  1064          msr_list_addr_ptr = (IA32_VMX_MSR_ENTRY*)msr_list_addr;
  1065      }
  1066  
  1067      for (i = 0; i < msr_list_count; i++) {
  1068          if (msr_list_addr_ptr[i].MsrIndex == msr_index) {
  1069              return TRUE;
  1070          }
  1071      }
  1072      return FALSE;
  1073  }
  1074  
// Membership test for the VMEXIT MSR-store list.
BOOLEAN vmcs_is_msr_in_vmexit_store_list(struct _VMCS_OBJECT* vmcs, UINT32 msr_index) {
    return vmcs_is_msr_in_list(vmcs, VMCS_EXIT_MSR_STORE_ADDRESS, VMCS_EXIT_MSR_STORE_COUNT, msr_index);
}

// Membership test for the VMEXIT MSR-load list.
BOOLEAN vmcs_is_msr_in_vmexit_load_list(struct _VMCS_OBJECT* vmcs, UINT32 msr_index) {
    return vmcs_is_msr_in_list(vmcs, VMCS_EXIT_MSR_LOAD_ADDRESS, VMCS_EXIT_MSR_LOAD_COUNT, msr_index);
}

// Membership test for the VMENTER MSR-load list.
BOOLEAN vmcs_is_msr_in_vmenter_load_list(struct _VMCS_OBJECT* vmcs, UINT32 msr_index) {
    return vmcs_is_msr_in_list(vmcs, VMCS_ENTER_MSR_LOAD_ADDRESS, VMCS_ENTER_MSR_LOAD_COUNT, msr_index);
}
  1086  
  1087  
  1088  void vmcs_print_msr_list(struct _VMCS_OBJECT* vmcs, VMCS_FIELD address,
  1089                           VMCS_FIELD count, BOOLEAN is_addr_in_hpa) {
  1090      UINT32 i;
  1091      UINT32 max = (UINT32)vmcs_read(vmcs, count);
  1092      UINT64 addr = vmcs_read(vmcs, address);
  1093      IA32_VMX_MSR_ENTRY* msr_list_ptr = NULL;
  1094      CLI_PRINT("MSR list:\n");
  1095      CLI_PRINT("=============\n");
  1096  
  1097      if (addr != VMCS_INVALID_ADDRESS) {
  1098          if (is_addr_in_hpa) {
  1099              if (!hmm_hpa_to_hva(addr, (HVA*)&msr_list_ptr)) {
  1100                  CLI_PRINT("%s: Failed to translate HPA to HVA\n", __FUNCTION__);
  1101                  VMM_DEADLOOP();
  1102              }
  1103          }
  1104          else {
  1105              msr_list_ptr = (IA32_VMX_MSR_ENTRY*)addr;
  1106          }
  1107   
  1108          for (i = 0; i < max; i++) {
  1109              CLI_PRINT("\t0x%x : %P\n", msr_list_ptr[i].MsrIndex, msr_list_ptr[i].MsrData);
  1110          }
  1111      }
  1112      else {
  1113              CLI_PRINT("%s: Invalid VMCS address. \n", __FUNCTION__);
  1114      }
  1115      CLI_PRINT("=============\n\n");
  1116  }
  1117  
// Dump the VMENTER MSR-load list; merged-level VMCS stores an HPA.
void vmcs_print_vmenter_msr_load_list(struct _VMCS_OBJECT* vmcs) {
    vmcs_print_msr_list(vmcs, VMCS_ENTER_MSR_LOAD_ADDRESS, VMCS_ENTER_MSR_LOAD_COUNT, (vmcs_get_level(vmcs) == VMCS_MERGED));
}

// Dump the VMEXIT MSR-store list; merged-level VMCS stores an HPA.
void vmcs_print_vmexit_msr_store_list(struct _VMCS_OBJECT* vmcs) {
    vmcs_print_msr_list(vmcs, VMCS_EXIT_MSR_STORE_ADDRESS, VMCS_EXIT_MSR_STORE_COUNT, (vmcs_get_level(vmcs) == VMCS_MERGED));
}
  1125  
  1126  extern GPM_HANDLE gcpu_get_current_gpm(GUEST_HANDLE guest);
  1127  extern BOOLEAN gpm_gpa_to_hva(GPM_HANDLE gpm_handle, GPA gpa, HVA* hva);
  1128  
  1129  BOOLEAN vmm_copy_to_guest_phy_addr(GUEST_CPU_HANDLE gcpu, void* gpa,
  1130                                     UINT32 size, void* hva)
  1131  {
  1132      UINT64 gpa_dst = (UINT64)gpa;
  1133      UINT64 hva_dst = 0;
  1134      UINT8 *hva_src = (UINT8*)hva;
  1135      GUEST_HANDLE guest;
  1136  
  1137      VMM_ASSERT(gcpu);
  1138      guest = gcpu_guest_handle(gcpu);
  1139  
  1140      if (!gpm_gpa_to_hva(gcpu_get_current_gpm(guest), gpa_dst, &hva_dst)) {
  1141          VMM_LOG(mask_uvmm, level_error,"%s: Failed to convert gpa=%P to hva\n", __FUNCTION__, gpa_dst);
  1142          return FALSE;
  1143      }
  1144  
  1145      vmm_memcpy((void*)hva_dst, hva_src, size);
  1146  
  1147      return TRUE;
  1148  }
  1149  
// Save initial vmcs state for deadloop/assert handler
// Snapshot all readable+writable control, guest-state and host-state
// fields of this CPU's VMCS into a lazily-allocated per-CPU buffer
// (g_initial_vmcs[cpu_id]). The field iteration order here MUST stay
// identical to vmcs_restore_initial(), which replays the same buffer
// with the same sequential index j.
void vmcs_store_initial(GUEST_CPU_HANDLE gcpu, CPU_ID cpu_id)
{
    VMCS_FIELD   field_id;
    VMCS_OBJECT* vmcs;
    UINT32       i, j, count;
    UINT64       *initial_vmcs;

    // Upper bound on the number of saved fields (some are skipped).
    count = NELEMENTS(g_control_fields)+NELEMENTS(g_guest_state_fields)+
                    NELEMENTS(g_host_state_fields);
    vmcs = gcpu_get_vmcs(gcpu);
    // Allocate the per-CPU snapshot buffer on first use only.
    if (g_initial_vmcs[cpu_id] == 0) {
        g_initial_vmcs[cpu_id] = (UINT64)vmm_malloc(sizeof(UINT64) * count);
    }
    initial_vmcs = (UINT64 *)g_initial_vmcs[cpu_id];
    if (initial_vmcs == NULL) {
        VMM_LOG(mask_anonymous, level_trace, 
                "%s: Failed to allocate memory\n", __FUNCTION__);
        return;
    }
    j = 0;
    // save control fields
    for (i = 0; i < NELEMENTS(g_control_fields); i++) {
        field_id = g_control_fields[i];
        if (FIELD_IS_READABLE(field_id) && FIELD_IS_WRITEABLE(field_id)) {
            initial_vmcs[j++] = vmcs_read(vmcs, field_id);
        }
    }
    // save guest fields
    for (i = 0; i < NELEMENTS(g_guest_state_fields); i++) {
        field_id = g_guest_state_fields[i];
        if (FIELD_IS_READABLE(field_id) && FIELD_IS_WRITEABLE(field_id)) {
            initial_vmcs[j++] = vmcs_read(vmcs, field_id);
        }
    }
    // save host fields
    for (i = 0; i < NELEMENTS(g_host_state_fields); i++) {
        field_id = g_host_state_fields[i];
        if (FIELD_IS_READABLE(field_id) && FIELD_IS_WRITEABLE(field_id)) {
            initial_vmcs[j++] = vmcs_read(vmcs, field_id);
            }
    }
}
  1193  
// Restore initial vmcs state for deadloop/assert handler
// Replay the per-CPU snapshot taken by vmcs_store_initial() back into
// the current VMCS, then reset EPTP to the guest's default EPT. The
// field iteration order and the readable+writable filter MUST match
// vmcs_store_initial() exactly, since the buffer is indexed by the
// same sequential counter j. No-op if no snapshot was ever taken.
void vmcs_restore_initial(GUEST_CPU_HANDLE gcpu)
{
    VMCS_FIELD      field_id;
    VMCS_OBJECT*    vmcs;
    UINT32          i, j;
    UINT64          *initial_vmcs;
    CPU_ID          cpu_id;
    GUEST_HANDLE    guest;
    UINT64          eptp;
    UINT64	    default_ept_root_table_hpa = 0;
    UINT32	    default_ept_gaw = 0;

    cpu_id = hw_cpu_id();
    if (g_initial_vmcs[cpu_id] == 0)
        return;
    vmcs = gcpu_get_vmcs(gcpu);
    initial_vmcs = (UINT64 *)g_initial_vmcs[cpu_id];
    // write vmcs directly to HW
    vmcs_sw_shadow_disable[cpu_id] = TRUE;
    j = 0;
    // restore control fields
    for (i = 0; i < NELEMENTS(g_control_fields); i++) {
        field_id = g_control_fields[i];
        if (FIELD_IS_READABLE(field_id) && FIELD_IS_WRITEABLE(field_id)) {
            vmcs_write(vmcs, field_id, initial_vmcs[j++]);
        }
    }
    // restore guest fields
    for (i = 0; i < NELEMENTS(g_guest_state_fields); i++) {
        field_id = g_guest_state_fields[i];
        if (FIELD_IS_READABLE(field_id) && FIELD_IS_WRITEABLE(field_id)) {
            vmcs_write(vmcs, field_id, initial_vmcs[j++]);
        }
    }
    // restore host fields
    for (i = 0; i < NELEMENTS(g_host_state_fields); i++) {
        field_id = g_host_state_fields[i];
        if (FIELD_IS_READABLE(field_id) && FIELD_IS_WRITEABLE(field_id)) {
            vmcs_write(vmcs, field_id, initial_vmcs[j++]);
        }
    }
    // Set EPTP to default EPT
    guest = gcpu_guest_handle(gcpu);
    ept_get_default_ept(guest, &default_ept_root_table_hpa, &default_ept_gaw);
    eptp = ept_compute_eptp(guest, default_ept_root_table_hpa, default_ept_gaw);
    vmcs_write(vmcs, VMCS_EPTP_ADDRESS, eptp);
}
  1242  
  1243  // required buffer byte size for control-532, guest-592, host-222
  1244  // do not use malloc for tmp buffer, corrupted memory will trigger nested assert
  1245  // causing deadloop handler to hang
  1246  #define MAX_VMCS_BUF_SIZE       650
  1247  
  1248  // format vmcs info and write to guest buffer
// format vmcs info and write to guest buffer
// Serializes the existing fields of one VMCS group into a stack buffer
// as: UINT16 entry_count, followed by entry_count VMCS_ENTRY records,
// then copies that blob to the guest physical address debug_gpa.
// NOTE(review): assumes sizeof(UINT16) + count*sizeof(VMCS_ENTRY)
// never exceeds MAX_VMCS_BUF_SIZE for any group passed in -- confirm
// against the size comment above the #define.
static void vmcs_dump_group(GUEST_CPU_HANDLE gcpu, const struct _VMCS_OBJECT* vmcs,
                  const VMCS_FIELD* fields_to_print, UINT32 count, UINT64 debug_gpa)
{
    char                 buf[MAX_VMCS_BUF_SIZE], *bufptr;
    UINT32               i;
    UINT16               entry_count;
    const VMCS_ENCODING* field_desc;
    VMCS_ENTRY           entry;

    entry_count = 0;

    // skip over the count field
    bufptr = (char *)buf + sizeof(UINT16);

    for (i = 0; i < count; i++) {
        // copy only the existing fields to guest buffer
        field_desc = &g_field_data[fields_to_print[i]];
        if (field_desc->access == NO_EXIST) {
            continue;
        }

        entry.index = fields_to_print[i];
        entry.value = vmcs_read(vmcs, fields_to_print[i]);
        entry_count++;

        // copy vmcs entry to tmp buffer
        vmm_memcpy(bufptr, (void *)&entry, sizeof(VMCS_ENTRY));
        bufptr = bufptr + sizeof(VMCS_ENTRY);
    }

    // save count to beginning of buffer
    vmm_memcpy(buf, (void *)&entry_count, sizeof(UINT16));

    // copy vmcs group to guest buffer
    if (!vmm_copy_to_guest_phy_addr(gcpu, (void*)(debug_gpa),
                                   sizeof(UINT16) + (sizeof(VMCS_ENTRY)*entry_count),
                                   (void*)buf)) {
        VMM_LOG(mask_uvmm, level_error,
            "CPU%d: %s: Error: Could not copy vmcs message back to guest\n",
            hw_cpu_id(), __FUNCTION__);
        }
}
  1291  
  1292  
  1293  // write all vmcs fields to guest buffer
  1294  void vmcs_dump_all(GUEST_CPU_HANDLE gcpu)
  1295  {
  1296      VMCS_OBJECT *vmcs;
  1297      UINT64 debug_gpa;
  1298      UINT32 control_size, guest_size, host_size;
  1299  
  1300      // guest buffer is 4K size and vmcs starts at the 2K offset
  1301      control_size = sizeof(UINT16) + (sizeof(VMCS_ENTRY) * NELEMENTS(g_control_fields));
  1302      guest_size   = sizeof(UINT16) + (sizeof(VMCS_ENTRY) * NELEMENTS(g_guest_state_fields));
  1303      host_size    = sizeof(UINT16) + (sizeof(VMCS_ENTRY) * NELEMENTS(g_host_state_fields));
  1304  
  1305      if ((control_size+guest_size+host_size) > VMCS_SIZE) {
  1306          VMM_LOG(mask_uvmm, level_error, "%s: Error: Debug info exceeds guest buffer size\n",
  1307              __FUNCTION__);
  1308          return;
  1309      }
  1310      vmcs = gcpu_get_vmcs(gcpu);
  1311      // write control fields to guest buffer
  1312      debug_gpa = g_debug_gpa + OFFSET_VMCS;
  1313      vmcs_dump_group(gcpu, vmcs, g_control_fields, NELEMENTS(g_control_fields), debug_gpa);
  1314      // write guest fields to guest buffer
  1315      debug_gpa = debug_gpa + control_size;
  1316      vmcs_dump_group(gcpu, vmcs, g_guest_state_fields, NELEMENTS(g_guest_state_fields), debug_gpa);
  1317      // write host fields to guest buffer
  1318      debug_gpa = debug_gpa + guest_size;
  1319      vmcs_dump_group(gcpu, vmcs, g_host_state_fields, NELEMENTS(g_host_state_fields), debug_gpa);
  1320  }
  1321  
  1322  extern BOOLEAN ept_is_ept_supported(void);
  1323  BOOLEAN vmm_get_vmcs_guest_state(GUEST_CPU_HANDLE gcpu, VMM_GUEST_STATE GuestStateId, 
  1324                                   VMM_GUEST_STATE_VALUE *value)
  1325  {
  1326      VMCS_OBJECT* vmcs;
  1327      VMCS_FIELD vmcs_field_id;
  1328      VM_ENTRY_CONTROLS vmentry_control;
  1329  
  1330      VMM_ASSERT(gcpu);
  1331  
  1332      vmcs = gcpu_get_vmcs(gcpu);
  1333      VMM_ASSERT(vmcs);
  1334  
  1335      if (((UINT32) GuestStateId) > ((UINT32)(NUM_OF_VMM_GUEST_STATE - 1)))
  1336          return FALSE;
  1337  
  1338      if (GuestStateId < VMM_GUEST_CR0) {
  1339          value->value = gcpu_get_gp_reg(gcpu, (VMM_IA32_GP_REGISTERS)GuestStateId);
  1340      }
  1341      else if (GuestStateId == VMM_GUEST_RIP) {
  1342          value->value = gcpu_get_gp_reg(gcpu, IA32_REG_RIP);
  1343      }
  1344      else if (GuestStateId == VMM_GUEST_PAT) {
  1345          value->value = gcpu_get_msr_reg(gcpu, IA32_VMM_MSR_PAT);
  1346      }
  1347      else if (GuestStateId == VMM_GUEST_EFER) {
  1348          value->value = gcpu_get_msr_reg(gcpu, IA32_VMM_MSR_EFER);
  1349      }
  1350      else if (GuestStateId == VMM_GUEST_CR8) {
  1351          value->value = gcpu_get_control_reg(gcpu, IA32_CTRL_CR8);
  1352      }
  1353      else if (GuestStateId == VMM_GUEST_IA32_PERF_GLOBAL_CTRL) {
  1354          vmentry_control.Uint32 = (UINT32) vmcs_read(vmcs, VMCS_ENTER_CONTROL_VECTOR);
  1355          if(vmentry_control.Bits.Load_IA32_PERF_GLOBAL_CTRL) {
  1356              value->value = vmcs_read(vmcs, VMCS_GUEST_IA32_PERF_GLOBAL_CTRL);
  1357          }
  1358          else {
  1359              value->value = UINT64_ALL_ONES;
  1360          }
  1361      }
  1362      else if(GuestStateId == VMM_GUEST_INTERRUPTIBILITY) {
  1363          value->value = gcpu_get_interruptibility_state(gcpu);
  1364      }
  1365      else {
  1366          if ((GuestStateId == VMM_GUEST_PREEMPTION_TIMER) && ept_is_ept_supported())
  1367              vmcs_field_id = VMCS_PREEMPTION_TIMER;
  1368          else if ((GuestStateId >= VMM_GUEST_CR0) && (GuestStateId <= VMM_GUEST_SYSENTER_EIP))
  1369              vmcs_field_id = (VMCS_FIELD)(GuestStateId - VMM_GUEST_CR0 + VMCS_GUEST_CR0);
  1370          else if (ept_is_ept_supported() && (GuestStateId >= VMM_GUEST_PDPTR0) && (GuestStateId <= VMM_GUEST_PDPTR3))
  1371              vmcs_field_id = (VMCS_FIELD)(GuestStateId - VMM_GUEST_PDPTR0 + VMCS_GUEST_PDPTR0);
  1372          else
  1373              return FALSE;
  1374          value->value = vmcs_read(vmcs, vmcs_field_id);
  1375      }
  1376      return TRUE;
  1377  }
  1378  
  1379  BOOLEAN vmm_set_vmcs_guest_state(GUEST_CPU_HANDLE gcpu, VMM_GUEST_STATE GuestStateId, 
  1380                                   VMM_GUEST_STATE_VALUE value)
  1381  {
  1382      VMCS_OBJECT* vmcs;
  1383      VMCS_FIELD vmcs_field_id;
  1384  
  1385      VMM_ASSERT(gcpu);
  1386  
  1387      vmcs = gcpu_get_vmcs(gcpu);
  1388      VMM_ASSERT(vmcs);
  1389  
  1390      if (((UINT32) GuestStateId) > ((UINT32)(NUM_OF_VMM_GUEST_STATE - 1)))
  1391          return FALSE;
  1392  
  1393      if (GuestStateId < VMM_GUEST_CR0) {
  1394          gcpu_set_gp_reg(gcpu, (VMM_IA32_GP_REGISTERS)GuestStateId, value.value);
  1395      }
  1396      else if (GuestStateId == VMM_GUEST_PAT) {
  1397          gcpu_set_msr_reg(gcpu, IA32_VMM_MSR_PAT, value.value);
  1398      }
  1399      else if (GuestStateId == VMM_GUEST_EFER) {
  1400          gcpu_set_msr_reg(gcpu, IA32_VMM_MSR_EFER, value.value);
  1401      }
  1402      else if (GuestStateId == VMM_GUEST_CR0 || GuestStateId == VMM_GUEST_CR4 || GuestStateId == VMM_GUEST_IA32_PERF_GLOBAL_CTRL) {
  1403          //TBD;New functionality,needs to be implemented if required in future
  1404          return FALSE;
  1405      }
  1406      else if(GuestStateId == VMM_GUEST_RIP) {
  1407          if(TRUE == value.skip_rip)
  1408              gcpu_skip_guest_instruction(gcpu);
  1409          else
  1410              vmcs_write(vmcs, VMCS_GUEST_RIP, value.value);
  1411      }
  1412      else if(GuestStateId == VMM_GUEST_INTERRUPTIBILITY) {
  1413          gcpu_set_interruptibility_state(gcpu, (UINT32)value.value);
  1414      }
  1415      else {
  1416          if ((GuestStateId == VMM_GUEST_PREEMPTION_TIMER) && ept_is_ept_supported())
  1417              vmcs_field_id = VMCS_PREEMPTION_TIMER;
  1418          else if ((GuestStateId >= VMM_GUEST_CR0) && (GuestStateId <= VMM_GUEST_SYSENTER_EIP))
  1419              vmcs_field_id = (VMCS_FIELD)(GuestStateId - VMM_GUEST_CR0 + VMCS_GUEST_CR0);
  1420          else if (ept_is_ept_supported() && (GuestStateId >= VMM_GUEST_PDPTR0) && (GuestStateId <= VMM_GUEST_PDPTR3))
  1421              vmcs_field_id = (VMCS_FIELD)(GuestStateId - VMM_GUEST_PDPTR0 + VMCS_GUEST_PDPTR0);
  1422          else
  1423              return FALSE;
  1424          vmcs_write(vmcs, vmcs_field_id, value.value);
  1425      }
  1426      return TRUE;
  1427  }