github.com/jlmucb/cloudproxy@v0.0.0-20170830161738-b5aa0b619bc4/cpvmm/vmm/vmx/vmcs_merge_split.c

/*
 * Copyright (c) 2013 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *     http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "file_codes.h"
#define VMM_DEADLOOP()          VMM_DEADLOOP_LOG(VMCS_MERGE_SPLIT_C)
#define VMM_ASSERT(__condition) VMM_ASSERT_LOG(VMCS_MERGE_SPLIT_C, __condition)
#include <vmm_defs.h>
#include <vmm_dbg.h>
#include <vmcs_api.h>
#include <vmx_ctrl_msrs.h>
#include <vmx_vmcs.h>
#include <pfec.h>
#include <host_memory_manager_api.h>
#include <guest.h>
#include <guest_cpu.h>
#include <em64t_defs.h>
#include <gpm_api.h>
#include <ia32_defs.h>
#include "vmcs_internal.h"
#ifdef JLMDEBUG
#include "jlmdebug.h"
#endif

// do not report warnings on unused parameters
#pragma warning( disable: 4100 )

typedef UINT32 MSR_LIST_COPY_MODE; // bitmask
#define MSR_LIST_COPY_NO_CHANGE 0x0
#define MSR_LIST_COPY_WITH_EFER_CHANGE 0x1
#define MSR_LIST_COPY_AND_SET_32_BIT_MODE_IN_EFER (0x00 | MSR_LIST_COPY_WITH_EFER_CHANGE)
#define MSR_LIST_COPY_AND_SET_64_BIT_MODE_IN_EFER (0x10 | MSR_LIST_COPY_WITH_EFER_CHANGE)
#define MSR_LIST_COPY_UPDATE_GCPU 0x100
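
// The MSR_LIST_COPY_MODE values above form a small bitmask (a reading of the
// definitions together with their use in ms_merge_msr_list below): bit 0
// requests an EFER fixup in the merged list, bit 4 selects 64-bit (rather
// than 32-bit) mode for that fixup, and bit 8 additionally pushes each merged
// entry into the guest-CPU register cache. For example,
//   MSR_LIST_COPY_AND_SET_64_BIT_MODE_IN_EFER | MSR_LIST_COPY_UPDATE_GCPU
// evaluates to 0x111: change EFER, set LME/LMA, and update the GCPU.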

typedef enum {
    MS_HVA,
    MS_GPA,
    MS_HPA
} MS_MEM_ADDRESS_TYPE;
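
// How an address read from a VMCS field must be interpreted before it can be
// dereferenced (see ms_retrieve_ptr_to_additional_memory below): MS_HVA means
// the field already holds a host virtual address, MS_GPA a guest physical
// address that is translated through the guest's GPM, and MS_HPA a host
// physical address that only needs the HPA->HVA mapping.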

static void ms_merge_timer_to_level2(VMCS_OBJECT *vmcs_0, VMCS_OBJECT *vmcs_1, VMCS_OBJECT *vmcs_m);
static void ms_split_timer_from_level2(VMCS_OBJECT *vmcs_0, VMCS_OBJECT *vmcs_1, VMCS_OBJECT *vmcs_m);

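// Overview (as the naming in this file suggests): three VMCS layers are kept
// per guest CPU. Level-0 is this VMM's own VMCS, level-1 is the VMCS prepared
// by the nested (guest) VMM, and "merged" is the VMCS the hardware actually
// executes. Running a level-2 guest means merging level-0 and level-1 state
// into the merged VMCS (ms_merge_to_level2); returning control to the
// level-1 VMM means splitting state back out of it (ms_split_from_level2,
// ms_merge_to_level1).
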
static void ms_copy_guest_state_to_level1_vmcs(IN GUEST_CPU_HANDLE gcpu, IN BOOLEAN copy_crs) {
    IN VMCS_OBJECT* level1_vmcs = vmcs_hierarchy_get_vmcs(gcpu_get_vmcs_hierarchy(gcpu), VMCS_LEVEL_1);
    IN VMCS_OBJECT* merged_vmcs = vmcs_hierarchy_get_vmcs(gcpu_get_vmcs_hierarchy(gcpu), VMCS_MERGED);
    UINT64 value;
    UINT16 selector;
    UINT64 base;
    UINT32 limit;
    UINT32 ar;
    UINT64 vmentry_control;

    if (copy_crs) {
        value = gcpu_get_control_reg_layered(gcpu, IA32_CTRL_CR0, VMCS_MERGED);
        gcpu_set_control_reg_layered(gcpu, IA32_CTRL_CR0, value, VMCS_LEVEL_1);

        value = gcpu_get_control_reg_layered(gcpu, IA32_CTRL_CR3, VMCS_MERGED);
        gcpu_set_control_reg_layered(gcpu, IA32_CTRL_CR3, value, VMCS_LEVEL_1);

        value = gcpu_get_control_reg_layered(gcpu, IA32_CTRL_CR4, VMCS_MERGED);
        gcpu_set_control_reg_layered(gcpu, IA32_CTRL_CR4, value, VMCS_LEVEL_1);
    }

    value = gcpu_get_debug_reg_layered(gcpu, IA32_REG_DR7, VMCS_MERGED);
    gcpu_set_debug_reg_layered(gcpu, IA32_REG_DR7, value, VMCS_LEVEL_1);

    gcpu_get_segment_reg_layered(gcpu, IA32_SEG_ES, &selector, &base, &limit, &ar, VMCS_MERGED);
    gcpu_set_segment_reg_layered(gcpu, IA32_SEG_ES, selector, base, limit, ar, VMCS_LEVEL_1);

    gcpu_get_segment_reg_layered(gcpu, IA32_SEG_CS, &selector, &base, &limit, &ar, VMCS_MERGED);
    gcpu_set_segment_reg_layered(gcpu, IA32_SEG_CS, selector, base, limit, ar, VMCS_LEVEL_1);

    gcpu_get_segment_reg_layered(gcpu, IA32_SEG_SS, &selector, &base, &limit, &ar, VMCS_MERGED);
    gcpu_set_segment_reg_layered(gcpu, IA32_SEG_SS, selector, base, limit, ar, VMCS_LEVEL_1);

    gcpu_get_segment_reg_layered(gcpu, IA32_SEG_DS, &selector, &base, &limit, &ar, VMCS_MERGED);
    gcpu_set_segment_reg_layered(gcpu, IA32_SEG_DS, selector, base, limit, ar, VMCS_LEVEL_1);

    gcpu_get_segment_reg_layered(gcpu, IA32_SEG_FS, &selector, &base, &limit, &ar, VMCS_MERGED);
    gcpu_set_segment_reg_layered(gcpu, IA32_SEG_FS, selector, base, limit, ar, VMCS_LEVEL_1);

    gcpu_get_segment_reg_layered(gcpu, IA32_SEG_GS, &selector, &base, &limit, &ar, VMCS_MERGED);
    gcpu_set_segment_reg_layered(gcpu, IA32_SEG_GS, selector, base, limit, ar, VMCS_LEVEL_1);

    gcpu_get_segment_reg_layered(gcpu, IA32_SEG_LDTR, &selector, &base, &limit, &ar, VMCS_MERGED);
    gcpu_set_segment_reg_layered(gcpu, IA32_SEG_LDTR, selector, base, limit, ar, VMCS_LEVEL_1);

    gcpu_get_segment_reg_layered(gcpu, IA32_SEG_TR, &selector, &base, &limit, &ar, VMCS_MERGED);
    gcpu_set_segment_reg_layered(gcpu, IA32_SEG_TR, selector, base, limit, ar, VMCS_LEVEL_1);

    gcpu_get_gdt_reg_layered(gcpu, &base, &limit, VMCS_MERGED);
    gcpu_set_gdt_reg_layered(gcpu, base, limit, VMCS_LEVEL_1);

    gcpu_get_idt_reg_layered(gcpu, &base, &limit, VMCS_MERGED);
    gcpu_set_idt_reg_layered(gcpu, base, limit, VMCS_LEVEL_1);

    value = gcpu_get_gp_reg_layered(gcpu, IA32_REG_RSP, VMCS_MERGED);
    gcpu_set_gp_reg_layered(gcpu, IA32_REG_RSP, value, VMCS_LEVEL_1);

    value = gcpu_get_gp_reg_layered(gcpu, IA32_REG_RIP, VMCS_MERGED);
    gcpu_set_gp_reg_layered(gcpu, IA32_REG_RIP, value, VMCS_LEVEL_1);

    value = gcpu_get_gp_reg_layered(gcpu, IA32_REG_RFLAGS, VMCS_MERGED);
    gcpu_set_gp_reg_layered(gcpu, IA32_REG_RFLAGS, value, VMCS_LEVEL_1);

    value = gcpu_get_msr_reg_layered(gcpu, IA32_VMM_MSR_SYSENTER_CS, VMCS_MERGED);
    gcpu_set_msr_reg_layered(gcpu, IA32_VMM_MSR_SYSENTER_CS, value, VMCS_LEVEL_1);

    value = gcpu_get_msr_reg_layered(gcpu, IA32_VMM_MSR_SYSENTER_ESP, VMCS_MERGED);
    gcpu_set_msr_reg_layered(gcpu, IA32_VMM_MSR_SYSENTER_ESP, value, VMCS_LEVEL_1);

    value = gcpu_get_msr_reg_layered(gcpu, IA32_VMM_MSR_SYSENTER_EIP, VMCS_MERGED);
    gcpu_set_msr_reg_layered(gcpu, IA32_VMM_MSR_SYSENTER_EIP, value, VMCS_LEVEL_1);

    value = gcpu_get_pending_debug_exceptions_layered(gcpu, VMCS_MERGED);
    gcpu_set_pending_debug_exceptions_layered(gcpu, value, VMCS_LEVEL_1);

    value = gcpu_get_msr_reg_layered(gcpu, IA32_VMM_MSR_SMBASE, VMCS_MERGED);
    gcpu_set_msr_reg_layered(gcpu, IA32_VMM_MSR_SMBASE, value, VMCS_LEVEL_1);

    value = gcpu_get_msr_reg_layered(gcpu, IA32_VMM_MSR_DEBUGCTL, VMCS_MERGED);
    gcpu_set_msr_reg_layered(gcpu, IA32_VMM_MSR_DEBUGCTL, value, VMCS_LEVEL_1);

    if (vmcs_field_is_supported(VMCS_GUEST_IA32_PERF_GLOBAL_CTRL))
    {
        value = gcpu_get_msr_reg_layered(gcpu, IA32_VMM_MSR_PERF_GLOBAL_CTRL, VMCS_MERGED);
        gcpu_set_msr_reg_layered(gcpu, IA32_VMM_MSR_PERF_GLOBAL_CTRL, value, VMCS_LEVEL_1);
    }

    value = vmcs_read(merged_vmcs, VMCS_GUEST_WORKING_VMCS_PTR);
    vmcs_write(level1_vmcs, VMCS_GUEST_WORKING_VMCS_PTR, value);

    value = gcpu_get_interruptibility_state_layered(gcpu, VMCS_MERGED);
    gcpu_set_interruptibility_state_layered(gcpu, (UINT32)value, VMCS_LEVEL_1);

    value = gcpu_get_activity_state_layered(gcpu, VMCS_MERGED);
    gcpu_set_activity_state_layered(gcpu, (IA32_VMX_VMCS_GUEST_SLEEP_STATE)value, VMCS_LEVEL_1);

    // The IA32e-mode-guest bit is part of the guest state, so copy it here
#define VMENTER_IA32E_MODE_GUEST 0x200
    vmentry_control = vmcs_read(merged_vmcs, VMCS_ENTER_CONTROL_VECTOR);
    vmcs_update(level1_vmcs, VMCS_ENTER_CONTROL_VECTOR, vmentry_control, VMENTER_IA32E_MODE_GUEST);

    // TODO VMCS v2 fields
}

static void ms_copy_guest_state_from_level1(IN GUEST_CPU_HANDLE gcpu, IN BOOLEAN copy_crs) {
    IN VMCS_OBJECT* level1_vmcs = vmcs_hierarchy_get_vmcs(gcpu_get_vmcs_hierarchy(gcpu), VMCS_LEVEL_1);
    IN VMCS_OBJECT* merged_vmcs = vmcs_hierarchy_get_vmcs(gcpu_get_vmcs_hierarchy(gcpu), VMCS_MERGED);
    UINT64 value;
    UINT16 selector;
    UINT64 base;
    UINT32 limit;
    UINT32 ar;

    if (copy_crs) {
        value = gcpu_get_control_reg_layered(gcpu, IA32_CTRL_CR0, VMCS_LEVEL_1);
        gcpu_set_control_reg_layered(gcpu, IA32_CTRL_CR0, value, VMCS_MERGED);

        value = gcpu_get_control_reg_layered(gcpu, IA32_CTRL_CR3, VMCS_LEVEL_1);
        gcpu_set_control_reg_layered(gcpu, IA32_CTRL_CR3, value, VMCS_MERGED);

        value = gcpu_get_control_reg_layered(gcpu, IA32_CTRL_CR4, VMCS_LEVEL_1);
        gcpu_set_control_reg_layered(gcpu, IA32_CTRL_CR4, value, VMCS_MERGED);
    }

    value = gcpu_get_debug_reg_layered(gcpu, IA32_REG_DR7, VMCS_LEVEL_1);
    gcpu_set_debug_reg_layered(gcpu, IA32_REG_DR7, value, VMCS_MERGED);

    gcpu_get_segment_reg_layered(gcpu, IA32_SEG_ES, &selector, &base, &limit, &ar, VMCS_LEVEL_1);
    gcpu_set_segment_reg_layered(gcpu, IA32_SEG_ES, selector, base, limit, ar, VMCS_MERGED);

    gcpu_get_segment_reg_layered(gcpu, IA32_SEG_CS, &selector, &base, &limit, &ar, VMCS_LEVEL_1);
    gcpu_set_segment_reg_layered(gcpu, IA32_SEG_CS, selector, base, limit, ar, VMCS_MERGED);

    gcpu_get_segment_reg_layered(gcpu, IA32_SEG_SS, &selector, &base, &limit, &ar, VMCS_LEVEL_1);
    gcpu_set_segment_reg_layered(gcpu, IA32_SEG_SS, selector, base, limit, ar, VMCS_MERGED);

    gcpu_get_segment_reg_layered(gcpu, IA32_SEG_DS, &selector, &base, &limit, &ar, VMCS_LEVEL_1);
    gcpu_set_segment_reg_layered(gcpu, IA32_SEG_DS, selector, base, limit, ar, VMCS_MERGED);

    gcpu_get_segment_reg_layered(gcpu, IA32_SEG_FS, &selector, &base, &limit, &ar, VMCS_LEVEL_1);
    gcpu_set_segment_reg_layered(gcpu, IA32_SEG_FS, selector, base, limit, ar, VMCS_MERGED);

    gcpu_get_segment_reg_layered(gcpu, IA32_SEG_GS, &selector, &base, &limit, &ar, VMCS_LEVEL_1);
    gcpu_set_segment_reg_layered(gcpu, IA32_SEG_GS, selector, base, limit, ar, VMCS_MERGED);

    gcpu_get_segment_reg_layered(gcpu, IA32_SEG_LDTR, &selector, &base, &limit, &ar, VMCS_LEVEL_1);
    gcpu_set_segment_reg_layered(gcpu, IA32_SEG_LDTR, selector, base, limit, ar, VMCS_MERGED);

    gcpu_get_segment_reg_layered(gcpu, IA32_SEG_TR, &selector, &base, &limit, &ar, VMCS_LEVEL_1);
    gcpu_set_segment_reg_layered(gcpu, IA32_SEG_TR, selector, base, limit, ar, VMCS_MERGED);

    gcpu_get_gdt_reg_layered(gcpu, &base, &limit, VMCS_LEVEL_1);
    gcpu_set_gdt_reg_layered(gcpu, base, limit, VMCS_MERGED);

    gcpu_get_idt_reg_layered(gcpu, &base, &limit, VMCS_LEVEL_1);
    gcpu_set_idt_reg_layered(gcpu, base, limit, VMCS_MERGED);

    value = gcpu_get_gp_reg_layered(gcpu, IA32_REG_RSP, VMCS_LEVEL_1);
    gcpu_set_gp_reg_layered(gcpu, IA32_REG_RSP, value, VMCS_MERGED);

    value = gcpu_get_gp_reg_layered(gcpu, IA32_REG_RIP, VMCS_LEVEL_1);
    gcpu_set_gp_reg_layered(gcpu, IA32_REG_RIP, value, VMCS_MERGED);

    value = gcpu_get_gp_reg_layered(gcpu, IA32_REG_RFLAGS, VMCS_LEVEL_1);
    gcpu_set_gp_reg_layered(gcpu, IA32_REG_RFLAGS, value, VMCS_MERGED);

    value = gcpu_get_msr_reg_layered(gcpu, IA32_VMM_MSR_SYSENTER_CS, VMCS_LEVEL_1);
    gcpu_set_msr_reg_layered(gcpu, IA32_VMM_MSR_SYSENTER_CS, value, VMCS_MERGED);

    value = gcpu_get_msr_reg_layered(gcpu, IA32_VMM_MSR_SYSENTER_ESP, VMCS_LEVEL_1);
    gcpu_set_msr_reg_layered(gcpu, IA32_VMM_MSR_SYSENTER_ESP, value, VMCS_MERGED);

    value = gcpu_get_msr_reg_layered(gcpu, IA32_VMM_MSR_SYSENTER_EIP, VMCS_LEVEL_1);
    gcpu_set_msr_reg_layered(gcpu, IA32_VMM_MSR_SYSENTER_EIP, value, VMCS_MERGED);

    value = gcpu_get_pending_debug_exceptions_layered(gcpu, VMCS_LEVEL_1);
    gcpu_set_pending_debug_exceptions_layered(gcpu, value, VMCS_MERGED);

    value = gcpu_get_msr_reg_layered(gcpu, IA32_VMM_MSR_SMBASE, VMCS_LEVEL_1);
    gcpu_set_msr_reg_layered(gcpu, IA32_VMM_MSR_SMBASE, value, VMCS_MERGED);

    value = gcpu_get_msr_reg_layered(gcpu, IA32_VMM_MSR_DEBUGCTL, VMCS_LEVEL_1);
    gcpu_set_msr_reg_layered(gcpu, IA32_VMM_MSR_DEBUGCTL, value, VMCS_MERGED);

    if (vmcs_field_is_supported(VMCS_GUEST_IA32_PERF_GLOBAL_CTRL)) {
        value = gcpu_get_msr_reg_layered(gcpu, IA32_VMM_MSR_PERF_GLOBAL_CTRL, VMCS_LEVEL_1);
        gcpu_set_msr_reg_layered(gcpu, IA32_VMM_MSR_PERF_GLOBAL_CTRL, value, VMCS_MERGED);
    }
    value = vmcs_read(level1_vmcs, VMCS_GUEST_WORKING_VMCS_PTR);
    vmcs_write(merged_vmcs, VMCS_GUEST_WORKING_VMCS_PTR, value);

    value = gcpu_get_interruptibility_state_layered(gcpu, VMCS_LEVEL_1);
    gcpu_set_interruptibility_state_layered(gcpu, (UINT32)value, VMCS_MERGED);

    value = (UINT64)gcpu_get_activity_state_layered(gcpu, VMCS_LEVEL_1);
    gcpu_set_activity_state_layered(gcpu, (IA32_VMX_VMCS_GUEST_SLEEP_STATE)value, VMCS_MERGED);

    // TODO VMCS v2 fields
}

static void ms_copy_data_fields(IN OUT VMCS_OBJECT* vmcs_to, IN VMCS_OBJECT* vmcs_from) {
    UINT64 value;

    value = vmcs_read(vmcs_from, VMCS_EXIT_INFO_INSTRUCTION_ERROR_CODE);
    vmcs_write_nocheck(vmcs_to, VMCS_EXIT_INFO_INSTRUCTION_ERROR_CODE, value);

    value = vmcs_read(vmcs_from, VMCS_EXIT_INFO_REASON);
    vmcs_write_nocheck(vmcs_to, VMCS_EXIT_INFO_REASON, value);

    value = vmcs_read(vmcs_from, VMCS_EXIT_INFO_EXCEPTION_INFO);
    vmcs_write_nocheck(vmcs_to, VMCS_EXIT_INFO_EXCEPTION_INFO, value);

    value = vmcs_read(vmcs_from, VMCS_EXIT_INFO_EXCEPTION_ERROR_CODE);
    vmcs_write_nocheck(vmcs_to, VMCS_EXIT_INFO_EXCEPTION_ERROR_CODE, value);

    value = vmcs_read(vmcs_from, VMCS_EXIT_INFO_IDT_VECTORING);
    vmcs_write_nocheck(vmcs_to, VMCS_EXIT_INFO_IDT_VECTORING, value);

    value = vmcs_read(vmcs_from, VMCS_EXIT_INFO_IDT_VECTORING_ERROR_CODE);
    vmcs_write_nocheck(vmcs_to, VMCS_EXIT_INFO_IDT_VECTORING_ERROR_CODE, value);

    value = vmcs_read(vmcs_from, VMCS_EXIT_INFO_INSTRUCTION_LENGTH);
    vmcs_write_nocheck(vmcs_to, VMCS_EXIT_INFO_INSTRUCTION_LENGTH, value);

    value = vmcs_read(vmcs_from, VMCS_EXIT_INFO_INSTRUCTION_INFO);
    vmcs_write_nocheck(vmcs_to, VMCS_EXIT_INFO_INSTRUCTION_INFO, value);

    value = vmcs_read(vmcs_from, VMCS_EXIT_INFO_QUALIFICATION);
    vmcs_write_nocheck(vmcs_to, VMCS_EXIT_INFO_QUALIFICATION, value);

    value = vmcs_read(vmcs_from, VMCS_EXIT_INFO_IO_RCX);
    vmcs_write_nocheck(vmcs_to, VMCS_EXIT_INFO_IO_RCX, value);

    value = vmcs_read(vmcs_from, VMCS_EXIT_INFO_IO_RSI);
    vmcs_write_nocheck(vmcs_to, VMCS_EXIT_INFO_IO_RSI, value);

    value = vmcs_read(vmcs_from, VMCS_EXIT_INFO_IO_RDI);
    vmcs_write_nocheck(vmcs_to, VMCS_EXIT_INFO_IO_RDI, value);

    value = vmcs_read(vmcs_from, VMCS_EXIT_INFO_IO_RIP);
    vmcs_write_nocheck(vmcs_to, VMCS_EXIT_INFO_IO_RIP, value);

    value = vmcs_read(vmcs_from, VMCS_EXIT_INFO_GUEST_LINEAR_ADDRESS);
    vmcs_write_nocheck(vmcs_to, VMCS_EXIT_INFO_GUEST_LINEAR_ADDRESS, value);
    // TODO: Copy VMCS v2 fields
}
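
// Note: the fields copied above are VM-exit information fields, which the
// hardware treats as read-only; vmcs_write_nocheck (rather than vmcs_write)
// is presumably used so that the write to the software-maintained level-1
// VMCS bypasses that writability check.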

static void ms_copy_host_state(IN OUT VMCS_OBJECT* vmcs_to, IN VMCS_OBJECT* vmcs_from) {
    UINT64 value;

    value = vmcs_read(vmcs_from, VMCS_HOST_CR0);
    vmcs_write(vmcs_to, VMCS_HOST_CR0, value);

    value = vmcs_read(vmcs_from, VMCS_HOST_CR3);
    vmcs_write(vmcs_to, VMCS_HOST_CR3, value);

    value = vmcs_read(vmcs_from, VMCS_HOST_CR4);
    vmcs_write(vmcs_to, VMCS_HOST_CR4, value);

    value = vmcs_read(vmcs_from, VMCS_HOST_ES_SELECTOR);
    vmcs_write(vmcs_to, VMCS_HOST_ES_SELECTOR, value);

    value = vmcs_read(vmcs_from, VMCS_HOST_CS_SELECTOR);
    vmcs_write(vmcs_to, VMCS_HOST_CS_SELECTOR, value);

    value = vmcs_read(vmcs_from, VMCS_HOST_SS_SELECTOR);
    vmcs_write(vmcs_to, VMCS_HOST_SS_SELECTOR, value);

    value = vmcs_read(vmcs_from, VMCS_HOST_DS_SELECTOR);
    vmcs_write(vmcs_to, VMCS_HOST_DS_SELECTOR, value);

    value = vmcs_read(vmcs_from, VMCS_HOST_FS_SELECTOR);
    vmcs_write(vmcs_to, VMCS_HOST_FS_SELECTOR, value);

    value = vmcs_read(vmcs_from, VMCS_HOST_FS_BASE);
    vmcs_write(vmcs_to, VMCS_HOST_FS_BASE, value);

    value = vmcs_read(vmcs_from, VMCS_HOST_GS_SELECTOR);
    vmcs_write(vmcs_to, VMCS_HOST_GS_SELECTOR, value);

    value = vmcs_read(vmcs_from, VMCS_HOST_GS_BASE);
    vmcs_write(vmcs_to, VMCS_HOST_GS_BASE, value);

    value = vmcs_read(vmcs_from, VMCS_HOST_TR_SELECTOR);
    vmcs_write(vmcs_to, VMCS_HOST_TR_SELECTOR, value);

    value = vmcs_read(vmcs_from, VMCS_HOST_TR_BASE);
    vmcs_write(vmcs_to, VMCS_HOST_TR_BASE, value);

    value = vmcs_read(vmcs_from, VMCS_HOST_GDTR_BASE);
    vmcs_write(vmcs_to, VMCS_HOST_GDTR_BASE, value);

    value = vmcs_read(vmcs_from, VMCS_HOST_IDTR_BASE);
    vmcs_write(vmcs_to, VMCS_HOST_IDTR_BASE, value);

    value = vmcs_read(vmcs_from, VMCS_HOST_RSP);
    vmcs_write(vmcs_to, VMCS_HOST_RSP, value);

    value = vmcs_read(vmcs_from, VMCS_HOST_RIP);
    vmcs_write(vmcs_to, VMCS_HOST_RIP, value);

    value = vmcs_read(vmcs_from, VMCS_HOST_SYSENTER_CS);
    vmcs_write(vmcs_to, VMCS_HOST_SYSENTER_CS, value);

    value = vmcs_read(vmcs_from, VMCS_HOST_SYSENTER_ESP);
    vmcs_write(vmcs_to, VMCS_HOST_SYSENTER_ESP, value);

    value = vmcs_read(vmcs_from, VMCS_HOST_SYSENTER_EIP);
    vmcs_write(vmcs_to, VMCS_HOST_SYSENTER_EIP, value);

    // TODO VMCS v2 fields
}

static BOOLEAN may_cause_vmexit_on_page_fault(IN GUEST_CPU_HANDLE gcpu, IN VMCS_LEVEL level) {
    UINT32 possible_pfec_mask = (1 << VMM_PFEC_NUM_OF_USED_BITS) - 1;
    UINT32 vmcs_pfec_mask;
    UINT32 vmcs_pfec_match;
    IA32_VMCS_EXCEPTION_BITMAP exception_ctrls;

    gcpu_get_pf_error_code_mask_and_match_layered(gcpu, level, &vmcs_pfec_mask, &vmcs_pfec_match);

    exception_ctrls.Uint32 = (UINT32)gcpu_get_exceptions_map_layered(gcpu, level);

    if (exception_ctrls.Bits.PF == 1) {

        if ((vmcs_pfec_match & possible_pfec_mask) != vmcs_pfec_match) {
            // There are bits which are set in PFEC_MATCH, but will be
            // cleared in the actual PFEC
            return FALSE;
        }

        if ((vmcs_pfec_mask & vmcs_pfec_match) != vmcs_pfec_match) {
            // There are bits which are set in PFEC_MATCH, but are
            // cleared in PFEC_MASK
            return FALSE;
        }

        // There can still be values of PFEC_MASK and PFEC_MATCH that will
        // never cause VMExits on PF.
        return TRUE;
    }
    else {
        if ((vmcs_pfec_match == 0x00000000) &&
            ((vmcs_pfec_mask & possible_pfec_mask) == 0)) {
            return FALSE;
        }

        return TRUE;
    }
}
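
// The VMX rule being approximated above: a page fault causes a VM exit iff
// ((PFEC & PFEC_MASK) == PFEC_MATCH) equals the PF bit of the exception
// bitmap. For example, with the PF bit set, PFEC_MASK = PFEC_MATCH = 1 exits
// only on faults on present pages, while a PFEC_MATCH with bits outside
// possible_pfec_mask can never be matched, so no page fault causes an exit.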

static UINT64 ms_merge_cr_shadow(IN GUEST_CPU_HANDLE gcpu, IN VMM_IA32_CONTROL_REGISTERS reg) {
    UINT64 level1_shadow = gcpu_get_guest_visible_control_reg_layered(gcpu, reg, VMCS_LEVEL_1);
    UINT64 level0_mask;
    UINT64 level1_mask;
    UINT64 level1_reg = gcpu_get_control_reg_layered(gcpu, reg, VMCS_LEVEL_1);
    UINT64 merged_shadow;
    UINT64 mask_tmp;

    if (reg == IA32_CTRL_CR0) {
        level0_mask = gcpu_get_cr0_reg_mask_layered(gcpu, VMCS_LEVEL_0);
        level1_mask = gcpu_get_cr0_reg_mask_layered(gcpu, VMCS_LEVEL_1);
    }
    else {
        VMM_ASSERT(reg == IA32_CTRL_CR4);
        level0_mask = gcpu_get_cr4_reg_mask_layered(gcpu, VMCS_LEVEL_0);
        level1_mask = gcpu_get_cr4_reg_mask_layered(gcpu, VMCS_LEVEL_1);
    }

    merged_shadow = level1_shadow;

    // Clear all bits that are 0 in level1_mask
    merged_shadow &= level1_mask;

    // Copy bits that are 0 in level1_mask and 1 in level0_mask
    // from level1_reg
    mask_tmp = (level0_mask ^ level1_mask) & level0_mask;
    merged_shadow |= (mask_tmp & level1_reg);

    return merged_shadow;
}
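
// Illustrative example (made-up masks): suppose level0_mask = 0x11 and
// level1_mask = 0x01. Bit 0 is virtualized by the level-1 VMM, so the merged
// shadow takes it from the level-1 shadow; bit 4 is virtualized only by
// level-0, so the level-2 guest must see level-1's real value, taken from
// level1_reg. Bits in neither mask are ignored by the hardware shadow.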

static void* ms_retrieve_ptr_to_additional_memory(IN VMCS_OBJECT* vmcs, IN VMCS_FIELD field,
                                           IN MS_MEM_ADDRESS_TYPE mem_type) {
    UINT64 addr_value = vmcs_read(vmcs, field);
    UINT64 addr_hpa;
    UINT64 addr_hva;
    MAM_ATTRIBUTES attrs;

    if (mem_type == MS_HVA) {
        return (void*)addr_value;
    }

    if (mem_type == MS_GPA) {
        GUEST_CPU_HANDLE gcpu = vmcs_get_owner(vmcs);
        GUEST_HANDLE guest = gcpu_guest_handle(gcpu);
        GPM_HANDLE gpm = gcpu_get_current_gpm(guest);
        if (!gpm_gpa_to_hpa(gpm, addr_value, &addr_hpa, &attrs)) {
            VMM_DEADLOOP();
        }
    }
    else {
        VMM_ASSERT(mem_type == MS_HPA);
        addr_hpa = addr_value;
    }

    if (!hmm_hpa_to_hva(addr_hpa, &addr_hva)) {
        VMM_DEADLOOP();
    }

    return (void*)addr_hva;
}

static void ms_merge_bitmaps(IN void* bitmap0, IN void* bitmap1,
                      IN OUT void* merged_bitmap) {
    UINT64 bitmap0_hva = (UINT64)bitmap0;
    UINT64 bitmap1_hva = (UINT64)bitmap1;
    UINT64 merged_bitmap_hva = (UINT64)merged_bitmap;
    UINT64 merged_bitmap_hva_final = merged_bitmap_hva + PAGE_4KB_SIZE;

    VMM_ASSERT((bitmap0 != NULL) || (bitmap1 != NULL));
    VMM_ASSERT(merged_bitmap);

    while (merged_bitmap_hva < merged_bitmap_hva_final) {
        UINT64 value0 = (bitmap0 == NULL) ? (UINT64)0 : *((UINT64*)bitmap0_hva);
        UINT64 value1 = (bitmap1 == NULL) ? (UINT64)0 : *((UINT64*)bitmap1_hva);
        UINT64 merged_value = value0 | value1;

        *((UINT64*)merged_bitmap_hva) = merged_value;

        bitmap0_hva += sizeof(UINT64);
        bitmap1_hva += sizeof(UINT64);
        merged_bitmap_hva += sizeof(UINT64);
    }
}
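
// In short: the merged bitmap is the bitwise OR of the two source pages,
// 64 bits at a time, over one 4KB page. A NULL source acts as an all-zero
// bitmap; either input may be NULL, but not both.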

#if 0  // Debug support
static BOOLEAN ms_is_msr_in_list(IN IA32_VMX_MSR_ENTRY* list, IN UINT32 msr_index,
                          IN UINT32 count, OUT UINT64* value) {
    UINT32 i;

    for (i = count; i > 0; i--) {
        if (list[i - 1].MsrIndex == msr_index) {
            if (value != NULL) {
                *value = list[i - 1].MsrData;
            }
            return TRUE;
        }
    }
    return FALSE;
}
#endif

static void ms_merge_msr_list(IN GUEST_CPU_HANDLE gcpu, IN VMCS_OBJECT* merged_vmcs,
                       IN IA32_VMX_MSR_ENTRY* first_list, IN IA32_VMX_MSR_ENTRY* second_list,
                       IN UINT32 first_list_count, IN UINT32 second_list_count,
                       IN MSR_LIST_COPY_MODE copy_mode, IN VMCS_ADD_MSR_FUNC add_msr_func,
                       IN VMCS_CLEAR_MSR_LIST_FUNC clear_list_func,
                       IN VMCS_IS_MSR_IN_LIST_FUNC is_msr_in_list_func,
                       IN VMCS_FIELD msr_list_addr_field,
                       IN VMCS_FIELD msr_list_count_field) {
    UINT32 i;

    clear_list_func(merged_vmcs);

    for (i = 0; i < first_list_count; i++) {
        add_msr_func(merged_vmcs, first_list[i].MsrIndex, first_list[i].MsrData);
    }

    for (i = 0; i < second_list_count; i++) {
        if (!is_msr_in_list_func(merged_vmcs, second_list[i].MsrIndex)) {
            add_msr_func(merged_vmcs, second_list[i].MsrIndex, second_list[i].MsrData);
        }
    }

    if (copy_mode != MSR_LIST_COPY_NO_CHANGE) {
        IA32_VMX_MSR_ENTRY* merged_list = ms_retrieve_ptr_to_additional_memory(merged_vmcs, msr_list_addr_field, MS_HPA);
        UINT32 merged_list_count = (UINT32)vmcs_read(merged_vmcs, msr_list_count_field);

        for (i = 0; i < merged_list_count; i++) {
            if ((copy_mode & MSR_LIST_COPY_WITH_EFER_CHANGE) &&
                (merged_list[i].MsrIndex == IA32_MSR_EFER)) {
                IA32_EFER_S* efer = (IA32_EFER_S*)(&(merged_list[i].MsrData));
                efer->Bits.LME = ((copy_mode & MSR_LIST_COPY_AND_SET_64_BIT_MODE_IN_EFER) == MSR_LIST_COPY_AND_SET_64_BIT_MODE_IN_EFER) ? 1 : 0;
                efer->Bits.LMA = efer->Bits.LME;
            }

            if (copy_mode & MSR_LIST_COPY_UPDATE_GCPU) {
                gcpu_set_msr_reg_by_index_layered(gcpu, merged_list[i].MsrIndex, merged_list[i].MsrData, VMCS_MERGED);
            }
        }
    }
}
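
// Merge semantics of ms_merge_msr_list: entries from first_list win; an
// entry from second_list is added only if its MSR index is not already
// present. The call sites below pass the level-1 list first where level-1
// values must take precedence. The optional second pass then patches
// EFER.LME/LMA and/or mirrors every merged entry into the guest-CPU
// register cache, according to copy_mode.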

static
void ms_split_msr_lists(IN GUEST_CPU_HANDLE gcpu, IN IA32_VMX_MSR_ENTRY* merged_list,
                        IN UINT32 merged_list_count) {
    UINT32 i;

    // Propagate every merged entry to both the level-0 and level-1 lists
    for (i = 0; i < merged_list_count; i++) {
        gcpu_set_msr_reg_by_index_layered(gcpu, merged_list[i].MsrIndex, merged_list[i].MsrData, VMCS_LEVEL_0);
        gcpu_set_msr_reg_by_index_layered(gcpu, merged_list[i].MsrIndex, merged_list[i].MsrData, VMCS_LEVEL_1);
    }
}

static void ms_perform_cr_split(IN GUEST_CPU_HANDLE gcpu, IN VMM_IA32_CONTROL_REGISTERS reg) {
    UINT64 level1_mask;
    UINT64 merged_mask;
    UINT64 merged_shadow = gcpu_get_guest_visible_control_reg_layered(gcpu, reg, VMCS_MERGED);
    UINT64 level1_reg = gcpu_get_control_reg_layered(gcpu, reg, VMCS_LEVEL_1);
    UINT64 merged_reg = gcpu_get_control_reg_layered(gcpu, reg, VMCS_MERGED);
    UINT64 bits_to_take_from_merged_reg;
    UINT64 bits_to_take_from_merged_shadow;

    if (reg == IA32_CTRL_CR0) {
        level1_mask = gcpu_get_cr0_reg_mask_layered(gcpu, VMCS_LEVEL_1);
        merged_mask = gcpu_get_cr0_reg_mask_layered(gcpu, VMCS_MERGED);
    }
    else {
        VMM_ASSERT(reg == IA32_CTRL_CR4);
        level1_mask = gcpu_get_cr4_reg_mask_layered(gcpu, VMCS_LEVEL_1);
        merged_mask = gcpu_get_cr4_reg_mask_layered(gcpu, VMCS_MERGED);
    }

    // There should not be any bit that is set in level1_mask and cleared in merged_mask
    VMM_ASSERT(((~merged_mask) & level1_mask) == 0);

    bits_to_take_from_merged_reg = ~merged_mask;
    bits_to_take_from_merged_shadow = (merged_mask ^ level1_mask); // bits that are 1 in merged_mask and 0 in level1_mask

    level1_reg = (level1_reg & level1_mask) |
                 (merged_reg & bits_to_take_from_merged_reg) |
                 (merged_shadow & bits_to_take_from_merged_shadow);
    gcpu_set_control_reg_layered(gcpu, reg, level1_reg, VMCS_LEVEL_1);
}
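
// Illustrative example (made-up masks): with merged_mask = 0x11 and
// level1_mask = 0x01, bit 0 stays as level-1 already has it, bit 4 (owned by
// level-0 only) is refreshed from the merged shadow, and any bit outside
// merged_mask is taken from the real merged register, since the level-2
// guest may have changed it directly.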

void ms_merge_to_level2(IN GUEST_CPU_HANDLE gcpu, IN BOOLEAN merge_only_dirty) {
    // TODO: merge only dirty
    VMCS_HIERARCHY* hierarchy = gcpu_get_vmcs_hierarchy(gcpu);
    VMCS_OBJECT* level0_vmcs = vmcs_hierarchy_get_vmcs(hierarchy, VMCS_LEVEL_0);
    VMCS_OBJECT* level1_vmcs = vmcs_hierarchy_get_vmcs(hierarchy, VMCS_LEVEL_1);
    VMCS_OBJECT* merged_vmcs = vmcs_hierarchy_get_vmcs(hierarchy, VMCS_MERGED);
    PROCESSOR_BASED_VM_EXECUTION_CONTROLS controls0;
    PROCESSOR_BASED_VM_EXECUTION_CONTROLS controls1;
    PROCESSOR_BASED_VM_EXECUTION_CONTROLS2 controls0_2;
    PROCESSOR_BASED_VM_EXECUTION_CONTROLS2 controls1_2;
    PROCESSOR_BASED_VM_EXECUTION_CONTROLS merged_controls;
    PROCESSOR_BASED_VM_EXECUTION_CONTROLS2 merged_controls_2;

    VMM_ASSERT(level0_vmcs && level1_vmcs);

    if ((merge_only_dirty) &&
        (!vmcs_is_dirty(level0_vmcs)) &&
        (!vmcs_is_dirty(level1_vmcs))) {
        return;
    }

    // Copy guest state from level-1 vmcs
    ms_copy_guest_state_from_level1(gcpu, TRUE /* copy CRs */);

    // Merging controls

    controls0.Uint32 = (UINT32)gcpu_get_processor_ctrls_layered(gcpu, VMCS_LEVEL_0);
    controls1.Uint32 = (UINT32)gcpu_get_processor_ctrls_layered(gcpu, VMCS_LEVEL_1);
    controls0_2.Uint32 = (UINT32)gcpu_get_processor_ctrls2_layered(gcpu, VMCS_LEVEL_0);
    controls1_2.Uint32 = (UINT32)gcpu_get_processor_ctrls2_layered(gcpu, VMCS_LEVEL_1);
    merged_controls.Uint32 = (UINT32)gcpu_get_processor_ctrls_layered(gcpu, VMCS_MERGED);
    merged_controls_2.Uint32 = (UINT32)gcpu_get_processor_ctrls2_layered(gcpu, VMCS_MERGED);

    // Pin-based controls
    {
        UINT32 value0 = (UINT32)gcpu_get_pin_ctrls_layered(gcpu, VMCS_LEVEL_0);
        UINT32 value1 = (UINT32)gcpu_get_pin_ctrls_layered(gcpu, VMCS_LEVEL_1);
        UINT32 merged_value = value0 | value1;

        gcpu_set_pin_ctrls_layered(gcpu, VMCS_MERGED, merged_value);
    }

    // Exceptions bitmap
    {
        UINT32 value0 = (UINT32)gcpu_get_exceptions_map_layered(gcpu, VMCS_LEVEL_0);
        UINT32 value1 = (UINT32)gcpu_get_exceptions_map_layered(gcpu, VMCS_LEVEL_1);
        UINT32 merged_value = value0 | value1;

        gcpu_set_exceptions_map_layered(gcpu, VMCS_MERGED, merged_value);
    }

    // Primary and secondary processor-based controls
    {
        BOOLEAN is_ia32e_mode = FALSE;
        VM_ENTRY_CONTROLS entry_ctrls;

        // bit 2
        merged_controls.Bits.SoftwareInterrupt = controls0.Bits.SoftwareInterrupt | controls1.Bits.SoftwareInterrupt;

        // bit 3
        merged_controls.Bits.UseTscOffsetting = controls0.Bits.UseTscOffsetting | controls1.Bits.UseTscOffsetting;

        // bit 7
        merged_controls.Bits.Hlt = controls0.Bits.Hlt | controls1.Bits.Hlt;

        // bit 9
        merged_controls.Bits.Invlpg = controls0.Bits.Invlpg | controls1.Bits.Invlpg;

        // bit 10
        merged_controls.Bits.Mwait = controls0.Bits.Mwait | controls1.Bits.Mwait;

        // bit 11
        merged_controls.Bits.Rdpmc = controls0.Bits.Rdpmc | controls1.Bits.Rdpmc;

        // bit 12
        merged_controls.Bits.Rdtsc = controls0.Bits.Rdtsc | controls1.Bits.Rdtsc;

        // bit 19
        entry_ctrls.Uint32 = (UINT32)gcpu_get_enter_ctrls_layered(gcpu, VMCS_LEVEL_1);
        is_ia32e_mode = entry_ctrls.Bits.Ia32eModeGuest;
        if (is_ia32e_mode) {
            merged_controls.Bits.Cr8Load = controls0.Bits.Cr8Load | controls1.Bits.Cr8Load;
        }

        // bit 20
        if (is_ia32e_mode) {
            merged_controls.Bits.Cr8Store = controls0.Bits.Cr8Store | controls1.Bits.Cr8Store;
        }

        // bit 21
        // TPR shadow is currently not supported
        // TODO: Support for TPR shadow in layering
        VMM_ASSERT(controls0.Bits.TprShadow == 0);
        VMM_ASSERT(controls1.Bits.TprShadow == 0);

        // bit 22
        merged_controls.Bits.NmiWindow = controls0.Bits.NmiWindow | controls1.Bits.NmiWindow;

        // bit 23
        merged_controls.Bits.MovDr = controls0.Bits.MovDr | controls1.Bits.MovDr;

        // bits 24 and 25
        if (((controls0.Bits.UnconditionalIo == 1) && (controls0.Bits.ActivateIoBitmaps == 0)) ||
            ((controls1.Bits.UnconditionalIo == 1) && (controls1.Bits.ActivateIoBitmaps == 0))) {

            merged_controls.Bits.UnconditionalIo = 1;
            merged_controls.Bits.ActivateIoBitmaps = 0;
        }
        else {
            merged_controls.Bits.UnconditionalIo = 0;
            merged_controls.Bits.ActivateIoBitmaps = controls0.Bits.ActivateIoBitmaps | controls1.Bits.ActivateIoBitmaps;
        }

        // bit 28
        merged_controls.Bits.UseMsrBitmaps = controls0.Bits.UseMsrBitmaps & controls1.Bits.UseMsrBitmaps;

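        // Note the AND above, in contrast to the OR used for most bits: if
        // either level runs without MSR bitmaps, every MSR access must cause
        // a VM exit, so the merged VMCS may use bitmaps only when both
        // levels do.
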
        // bit 29
        merged_controls.Bits.Monitor = controls0.Bits.Monitor | controls1.Bits.Monitor;

        // bit 30
        merged_controls.Bits.Pause = controls0.Bits.Pause | controls1.Bits.Pause;

        // bit 31
        merged_controls.Bits.SecondaryControls = controls0.Bits.SecondaryControls | controls1.Bits.SecondaryControls;

        gcpu_set_processor_ctrls_layered(gcpu, VMCS_MERGED, merged_controls.Uint32);

        // Secondary controls
        if (controls0.Bits.SecondaryControls == 0) {
            controls0_2.Uint32 = 0;
        }

        if (controls1.Bits.SecondaryControls == 0) {
            controls1_2.Uint32 = 0;
        }

        merged_controls_2.Uint32 = controls0_2.Uint32 | controls1_2.Uint32;

        gcpu_set_processor_ctrls2_layered(gcpu, VMCS_MERGED, merged_controls_2.Uint32);
    }

    // Executive VMCS pointer
    {
        UINT64 value = vmcs_read(level1_vmcs, VMCS_OSV_CONTROLLING_VMCS_ADDRESS);
        vmcs_write(merged_vmcs, VMCS_OSV_CONTROLLING_VMCS_ADDRESS, value);
    }

    // Entry controls
    {
        UINT32 value = (UINT32)gcpu_get_enter_ctrls_layered(gcpu, VMCS_LEVEL_1);
        gcpu_set_enter_ctrls_layered(gcpu, VMCS_MERGED, value);

#ifdef DEBUG
        {
            VM_ENTRY_CONTROLS ctrls;
            ctrls.Uint32 = value;
            VMM_ASSERT(ctrls.Bits.Load_IA32_PERF_GLOBAL_CTRL == 0);
        }
#endif
    }

    // Interruption-information field
    {
        UINT32 value = (UINT32)vmcs_read(level1_vmcs, VMCS_ENTER_INTERRUPT_INFO);
        vmcs_write(merged_vmcs, VMCS_ENTER_INTERRUPT_INFO, value);
    }

    // Exception error code
    {
        UINT32 value = (UINT32)vmcs_read(level1_vmcs, VMCS_ENTER_EXCEPTION_ERROR_CODE);
        vmcs_write(merged_vmcs, VMCS_ENTER_EXCEPTION_ERROR_CODE, value);
    }

    // Instruction length
    {
        UINT32 value = (UINT32)vmcs_read(level1_vmcs, VMCS_ENTER_INSTRUCTION_LENGTH);
        vmcs_write(merged_vmcs, VMCS_ENTER_INSTRUCTION_LENGTH, value);
    }

    // TSC offset
    {
        if (merged_controls.Bits.UseTscOffsetting) {
            UINT64 final_value = 0;

            if ((controls0.Bits.UseTscOffsetting == 1) &&
                (controls1.Bits.UseTscOffsetting == 0)) {

                final_value = vmcs_read(level0_vmcs, VMCS_TSC_OFFSET);
            }
            else if ((controls0.Bits.UseTscOffsetting == 0) &&
                     (controls1.Bits.UseTscOffsetting == 1)) {

                final_value = vmcs_read(level1_vmcs, VMCS_TSC_OFFSET);
            }
            else {
                UINT64 value0 = vmcs_read(level0_vmcs, VMCS_TSC_OFFSET);
                UINT64 value1 = vmcs_read(level1_vmcs, VMCS_TSC_OFFSET);

                VMM_ASSERT(controls0.Bits.UseTscOffsetting == 1);
                VMM_ASSERT(controls1.Bits.UseTscOffsetting == 1);

                final_value = value0 + value1;
            }

            vmcs_write(merged_vmcs, VMCS_TSC_OFFSET, final_value);
        }
    }

    // APIC-access address
    {
        if ((merged_controls.Bits.SecondaryControls == 1) &&
            (merged_controls_2.Bits.VirtualizeAPIC == 1)) {

            // TODO: Implement APIC-access merge
            VMM_DEADLOOP();
        }
    }

    // TPR shadow address
    {
        if (merged_controls.Bits.TprShadow == 1) {
            // TODO: Implement TPR-shadow merge
            VMM_DEADLOOP();
        }
    }

    // "Page-fault error-code mask" and "Page-fault error-code match"
    {
        IA32_VMCS_EXCEPTION_BITMAP exception_ctrls;

        exception_ctrls.Uint32 = (UINT32)gcpu_get_exceptions_map_layered(gcpu, VMCS_MERGED);

        if (may_cause_vmexit_on_page_fault(gcpu, VMCS_LEVEL_0) ||
            may_cause_vmexit_on_page_fault(gcpu, VMCS_LEVEL_1)) {

            if (exception_ctrls.Bits.PF == 1) {
                gcpu_set_pf_error_code_mask_and_match_layered(gcpu, VMCS_MERGED, 0x00000000, 0x00000000);
            }
            else {
                gcpu_set_pf_error_code_mask_and_match_layered(gcpu, VMCS_MERGED, 0x00000000, 0xffffffff);
            }
        }
        else {
            if (exception_ctrls.Bits.PF == 1) {
                gcpu_set_pf_error_code_mask_and_match_layered(gcpu, VMCS_MERGED, 0x00000000, 0xffffffff);
            }
            else {
                gcpu_set_pf_error_code_mask_and_match_layered(gcpu, VMCS_MERGED, 0x00000000, 0x00000000);
            }
        }
    }

    // CR3 target count
    {
        // Target list is not supported
        vmcs_write(merged_vmcs, VMCS_CR3_TARGET_COUNT, 0);
    }

    // VM-exit controls
    {
        VM_EXIT_CONTROLS merged_exit_controls;

        merged_exit_controls.Uint32 = (UINT32)gcpu_get_exit_ctrls_layered(gcpu, VMCS_LEVEL_0);
        merged_exit_controls.Bits.AcknowledgeInterruptOnExit = 0; // The only difference

        gcpu_set_exit_ctrls_layered(gcpu, VMCS_MERGED, merged_exit_controls.Uint32);

        // VTUNE is not supported
        VMM_ASSERT(merged_exit_controls.Bits.Load_IA32_PERF_GLOBAL_CTRL == 0);
    }

    // Attention: ms_merge_timer_to_level2 must be called after all other
    // control fields have already been merged
    if (vmcs_field_is_supported(VMCS_PREEMPTION_TIMER))
    {
        ms_merge_timer_to_level2(level0_vmcs, level1_vmcs, merged_vmcs);
    }

    // TPR threshold
    {
        if (merged_controls.Bits.TprShadow == 1) {
            // TODO: Implement TPR-threshold merge
            VMM_DEADLOOP();
        }
    }

    // CR0 guest/host mask
    {
        UINT64 mask0 = gcpu_get_cr0_reg_mask_layered(gcpu, VMCS_LEVEL_0);
        UINT64 mask1 = gcpu_get_cr0_reg_mask_layered(gcpu, VMCS_LEVEL_1);
        UINT64 merged_mask = mask0 | mask1;

        gcpu_set_cr0_reg_mask_layered(gcpu, VMCS_MERGED, merged_mask);
    }

    // CR4 guest/host mask
    {
        UINT64 mask0 = gcpu_get_cr4_reg_mask_layered(gcpu, VMCS_LEVEL_0);
        UINT64 mask1 = gcpu_get_cr4_reg_mask_layered(gcpu, VMCS_LEVEL_1);
        UINT64 merged_mask = mask0 | mask1;

        gcpu_set_cr4_reg_mask_layered(gcpu, VMCS_MERGED, merged_mask);
    }

    // CR0 shadow
    {
        UINT64 shadow = ms_merge_cr_shadow(gcpu, IA32_CTRL_CR0);
        gcpu_set_guest_visible_control_reg_layered(gcpu, IA32_CTRL_CR0, shadow, VMCS_MERGED);
    }

    // CR3 pseudo shadow
    {
        UINT64 value = gcpu_get_control_reg_layered(gcpu, IA32_CTRL_CR3, VMCS_LEVEL_1);
        gcpu_set_guest_visible_control_reg_layered(gcpu, IA32_CTRL_CR3, value, VMCS_MERGED);
    }

    // CR4 shadow
    {
        UINT64 shadow = ms_merge_cr_shadow(gcpu, IA32_CTRL_CR4);
        gcpu_set_guest_visible_control_reg_layered(gcpu, IA32_CTRL_CR4, shadow, VMCS_MERGED);
    }

    // I/O bitmaps A and B
    {
        if (merged_controls.Bits.ActivateIoBitmaps == 1) {
            void* level0_bitmap_A;
            void* level0_bitmap_B;
            void* level1_bitmap_A;
            void* level1_bitmap_B;
            void* merged_bitmap_A;
            void* merged_bitmap_B;

            if (controls0.Bits.ActivateIoBitmaps == 1) {
                level0_bitmap_A = ms_retrieve_ptr_to_additional_memory(level0_vmcs, VMCS_IO_BITMAP_ADDRESS_A, MS_HVA);
                level0_bitmap_B = ms_retrieve_ptr_to_additional_memory(level0_vmcs, VMCS_IO_BITMAP_ADDRESS_B, MS_HVA);
            }
            else {
                level0_bitmap_A = NULL;
                level0_bitmap_B = NULL;
            }

            if (controls1.Bits.ActivateIoBitmaps == 1) {
                level1_bitmap_A = ms_retrieve_ptr_to_additional_memory(level1_vmcs, VMCS_IO_BITMAP_ADDRESS_A, MS_HVA);
                level1_bitmap_B = ms_retrieve_ptr_to_additional_memory(level1_vmcs, VMCS_IO_BITMAP_ADDRESS_B, MS_HVA);
            }
            else {
                level1_bitmap_A = NULL;
                level1_bitmap_B = NULL;
            }

            merged_bitmap_A = ms_retrieve_ptr_to_additional_memory(merged_vmcs, VMCS_IO_BITMAP_ADDRESS_A, MS_HPA);
            merged_bitmap_B = ms_retrieve_ptr_to_additional_memory(merged_vmcs, VMCS_IO_BITMAP_ADDRESS_B, MS_HPA);

            ms_merge_bitmaps(level0_bitmap_A, level1_bitmap_A, merged_bitmap_A);
            ms_merge_bitmaps(level0_bitmap_B, level1_bitmap_B, merged_bitmap_B);
        }
    }

    // MSR bitmap
    {
        if (merged_controls.Bits.UseMsrBitmaps == 1) {
            void* level0_bitmap;
            void* level1_bitmap;
            void* merged_bitmap;

            level0_bitmap = ms_retrieve_ptr_to_additional_memory(level0_vmcs, VMCS_MSR_BITMAP_ADDRESS, MS_HVA);
            level1_bitmap = ms_retrieve_ptr_to_additional_memory(level1_vmcs, VMCS_MSR_BITMAP_ADDRESS, MS_HVA);
            merged_bitmap = ms_retrieve_ptr_to_additional_memory(merged_vmcs, VMCS_MSR_BITMAP_ADDRESS, MS_HPA);

            ms_merge_bitmaps(level0_bitmap, level1_bitmap, merged_bitmap);
        }
    }

    // VMExit MSR-store address and count
    {
        IA32_VMX_MSR_ENTRY* level0_list = ms_retrieve_ptr_to_additional_memory(level0_vmcs, VMCS_EXIT_MSR_STORE_ADDRESS, MS_HVA);
        UINT32 level0_list_count = (UINT32)vmcs_read(level0_vmcs, VMCS_EXIT_MSR_STORE_COUNT);
        IA32_VMX_MSR_ENTRY* level1_list = ms_retrieve_ptr_to_additional_memory(level1_vmcs, VMCS_EXIT_MSR_STORE_ADDRESS, MS_HVA);
        UINT32 level1_list_count = (UINT32)vmcs_read(level1_vmcs, VMCS_EXIT_MSR_STORE_COUNT);

        if ((level0_list_count + level1_list_count) > 256) {
            // TODO: proper handling of VMExit MSR-store list when it must be > 256 entries
            VMM_DEADLOOP();
        }

        ms_merge_msr_list(gcpu, merged_vmcs, level1_list, level0_list, level1_list_count,
                          level0_list_count, MSR_LIST_COPY_NO_CHANGE,
                          vmcs_add_msr_to_vmexit_store_list, vmcs_clear_vmexit_store_list,
                          vmcs_is_msr_in_vmexit_store_list, VMCS_EXIT_MSR_STORE_ADDRESS,
                          VMCS_EXIT_MSR_STORE_COUNT);
    }

    // VMExit MSR-load address and count
    {
        IA32_VMX_MSR_ENTRY* level0_list = ms_retrieve_ptr_to_additional_memory(level0_vmcs, VMCS_EXIT_MSR_LOAD_ADDRESS, MS_HVA);
        UINT32 level0_list_count = (UINT32)vmcs_read(level0_vmcs, VMCS_EXIT_MSR_LOAD_COUNT);

        if (level0_list_count > 256) {
            // TODO: proper handling of VMExit MSR-load list when it must be > 256 entries
            VMM_DEADLOOP();
        }

        ms_merge_msr_list(gcpu, merged_vmcs, level0_list, NULL, level0_list_count, 0,
                          MSR_LIST_COPY_NO_CHANGE, vmcs_add_msr_to_vmexit_load_list,
                          vmcs_clear_vmexit_load_list, vmcs_is_msr_in_vmexit_load_list,
                          VMCS_EXIT_MSR_LOAD_ADDRESS, VMCS_EXIT_MSR_LOAD_COUNT);
    }

    // VMEnter MSR-load address and count
    {
        IA32_VMX_MSR_ENTRY* level0_list = ms_retrieve_ptr_to_additional_memory(level0_vmcs, VMCS_ENTER_MSR_LOAD_ADDRESS, MS_HVA);
        UINT32 level0_list_count = (UINT32)vmcs_read(level0_vmcs, VMCS_ENTER_MSR_LOAD_COUNT);
        IA32_VMX_MSR_ENTRY* level1_list = ms_retrieve_ptr_to_additional_memory(level1_vmcs, VMCS_ENTER_MSR_LOAD_ADDRESS, MS_HVA);
        UINT32 level1_list_count = (UINT32)vmcs_read(level1_vmcs, VMCS_ENTER_MSR_LOAD_COUNT);
        VM_ENTRY_CONTROLS entry_ctrls;
        MSR_LIST_COPY_MODE copy_mode;

        if ((level0_list_count + level1_list_count) > 512) {
            // TODO: proper handling of VMEnter MSR-load list when it must be > 512 entries
            VMM_DEADLOOP();
        }

        entry_ctrls.Uint32 = (UINT32)gcpu_get_enter_ctrls_layered(gcpu, VMCS_MERGED);
        if (entry_ctrls.Bits.Ia32eModeGuest) {
            copy_mode = MSR_LIST_COPY_AND_SET_64_BIT_MODE_IN_EFER | MSR_LIST_COPY_UPDATE_GCPU;
        }
        else {
            copy_mode = MSR_LIST_COPY_AND_SET_32_BIT_MODE_IN_EFER | MSR_LIST_COPY_UPDATE_GCPU;
        }

        ms_merge_msr_list(gcpu,
                          merged_vmcs,
                          level1_list,
                          level0_list,
                          level1_list_count,
                          level0_list_count,
                          copy_mode,
                          vmcs_add_msr_to_vmenter_load_list,
                          vmcs_clear_vmenter_load_list,
                          vmcs_is_msr_in_vmenter_load_list,
                          VMCS_ENTER_MSR_LOAD_ADDRESS,
                          VMCS_ENTER_MSR_LOAD_COUNT);
    }

    // Copy host state from level-0 vmcs
    ms_copy_host_state(merged_vmcs, level0_vmcs);
}

void ms_split_from_level2(IN GUEST_CPU_HANDLE gcpu) {
    VMCS_HIERARCHY* hierarchy = gcpu_get_vmcs_hierarchy(gcpu);
    VMCS_OBJECT* level1_vmcs = vmcs_hierarchy_get_vmcs(hierarchy, VMCS_LEVEL_1);
    VMCS_OBJECT* merged_vmcs = vmcs_hierarchy_get_vmcs(hierarchy, VMCS_MERGED);

    // ---UPDATE MSR LISTS IN LEVEL0 and LEVEL1 VMCSs---
    {
        IA32_VMX_MSR_ENTRY* merged_list = ms_retrieve_ptr_to_additional_memory(merged_vmcs, VMCS_EXIT_MSR_STORE_ADDRESS, MS_HPA);
        UINT32 merged_list_count = (UINT32)vmcs_read(merged_vmcs, VMCS_EXIT_MSR_STORE_COUNT);

        ms_split_msr_lists(gcpu, merged_list, merged_list_count);
    }

    // Copy guest state to the level-1 vmcs
    ms_copy_guest_state_to_level1_vmcs(gcpu, FALSE /* do not copy CRs */);

    // CR3 - actual CR3 is stored as "visible" CR3
    {
        UINT64 value = gcpu_get_guest_visible_control_reg_layered(gcpu, IA32_CTRL_CR3, VMCS_MERGED);
        gcpu_set_control_reg_layered(gcpu, IA32_CTRL_CR3, value, VMCS_LEVEL_1);
    }

    // CR0/CR4 update
    ms_perform_cr_split(gcpu, IA32_CTRL_CR0);
    ms_perform_cr_split(gcpu, IA32_CTRL_CR4);

    ms_copy_data_fields(level1_vmcs, merged_vmcs);

    if (vmcs_field_is_supported(VMCS_PREEMPTION_TIMER)) {
        ms_split_timer_from_level2(
            vmcs_hierarchy_get_vmcs(hierarchy, VMCS_LEVEL_0),
            level1_vmcs, merged_vmcs);
    }
}

void ms_merge_to_level1(IN GUEST_CPU_HANDLE gcpu,
                        IN BOOLEAN was_vmexit_from_level1,
                        IN BOOLEAN merge_only_dirty UNUSED) {
    // TODO: merge only dirty
    VMCS_HIERARCHY* hierarchy = gcpu_get_vmcs_hierarchy(gcpu);
    VMCS_OBJECT* level0_vmcs = vmcs_hierarchy_get_vmcs(hierarchy, VMCS_LEVEL_0);
    VMCS_OBJECT* level1_vmcs = vmcs_hierarchy_get_vmcs(hierarchy, VMCS_LEVEL_1);
    VMCS_OBJECT* merged_vmcs = vmcs_hierarchy_get_vmcs(hierarchy, VMCS_MERGED);

    if (!was_vmexit_from_level1) {
        // (level-2) --> (level-1) vmexit; copy the host area of the level-1 vmcs into the guest area of the merged vmcs
        VM_EXIT_CONTROLS exit_ctrls;

        // merged exit controls will be identical to "level-0" exit controls
        exit_ctrls.Uint32 = (UINT32)gcpu_get_exit_ctrls_layered(gcpu, VMCS_LEVEL_0);

        // ES segment
        {
            IA32_SELECTOR selector;
            IA32_VMX_VMCS_GUEST_AR ar;

            selector.sel16 = (UINT16)vmcs_read(level1_vmcs, VMCS_HOST_ES_SELECTOR);

            ar.Uint32 = 0;
            ar.Bits.SegmentType = 1;
            ar.Bits.DescriptorPrivilegeLevel = 0;
            ar.Bits.SegmentPresent = 1;
            ar.Bits.DefaultOperationSize = exit_ctrls.Bits.Ia32eModeHost ? 0 : 1;
            ar.Bits.Granularity = 1;
            ar.Bits.Null = (selector.bits.index == 0) ? 1 : 0; // unusable when the selector index is 0

            gcpu_set_segment_reg_layered(gcpu, IA32_SEG_ES, selector.sel16, 0, 0xffffffff, ar.Uint32, VMCS_MERGED);
        }
  1117  
  1118          // CS segment
  1119          {
  1120              IA32_SELECTOR selector;
  1121              IA32_VMX_VMCS_GUEST_AR ar;
  1122  
  1123              selector.sel16= (UINT16)vmcs_read(level1_vmcs, VMCS_HOST_CS_SELECTOR);
  1124  
  1125              ar.Uint32 = 0;
  1126              ar.Bits.SegmentType = 11;
  1127              ar.Bits.DescriptorType = 1;
  1128              ar.Bits.DescriptorPrivilegeLevel = 0;
  1129              ar.Bits.SegmentPresent = 1;
  1130              ar.Bits.Reserved_1 = exit_ctrls.Bits.Ia32eModeHost ? 1 : 0;
  1131              ar.Bits.DefaultOperationSize = exit_ctrls.Bits.Ia32eModeHost ? 0 : 1;
  1132              ar.Bits.Granularity = 1;
  1133              ar.Bits.Null = 0; // usable
  1134  
  1135              gcpu_set_segment_reg_layered(gcpu, IA32_SEG_CS, selector.sel16, 0, 0xffffffff, ar.Uint32, VMCS_MERGED);
  1136          }
  1137  
  1138          // SS segment
  1139          {
  1140              IA32_SELECTOR selector;
  1141              IA32_VMX_VMCS_GUEST_AR ar;
  1142  
  1143              selector.sel16 = (UINT16)vmcs_read(level1_vmcs, VMCS_HOST_SS_SELECTOR);
  1144  
  1145              ar.Uint32 = 0;
  1146              ar.Bits.SegmentType = 1;
  1147              ar.Bits.DescriptorPrivilegeLevel = 0;
  1148              ar.Bits.SegmentPresent = 1;
  1149              ar.Bits.DefaultOperationSize = exit_ctrls.Bits.Ia32eModeHost ? 0 : 1;
  1150              ar.Bits.Null = (selector.bits.index == 0) ? 1 : 0; // unusable in case the index is 0
  1151  
  1152              gcpu_set_segment_reg_layered(gcpu, IA32_SEG_SS, selector.sel16, 0, 0xffffffff, ar.Uint32, VMCS_MERGED);
  1153          }
  1154  
  1155          // DS segment
  1156          {
  1157              IA32_SELECTOR selector;
  1158              IA32_VMX_VMCS_GUEST_AR ar;
  1159  
  1160              selector.sel16 = (UINT16)vmcs_read(level1_vmcs, VMCS_HOST_DS_SELECTOR);
  1161  
  1162              ar.Uint32 = 0;
  1163              ar.Bits.SegmentType = 1;
  1164              ar.Bits.DescriptorPrivilegeLevel = 0;
  1165              ar.Bits.SegmentPresent = 1;
  1166              ar.Bits.DefaultOperationSize = exit_ctrls.Bits.Ia32eModeHost ? 0 : 1;
  1167              ar.Bits.Granularity = 1;
  1168              ar.Bits.Null = (selector.bits.index == 0) ? 1 : 0; // unusable in case the index is 0
  1169  
  1170              gcpu_set_segment_reg_layered(gcpu, IA32_SEG_DS, selector.sel16, 0, 0xffffffff, ar.Uint32, VMCS_MERGED);
  1171          }
  1172  
  1173          // FS segment
  1174          {
  1175              IA32_SELECTOR selector;
  1176              UINT64 base = vmcs_read(level1_vmcs, VMCS_HOST_FS_BASE);
  1177              IA32_VMX_VMCS_GUEST_AR ar;
  1178  
  1179              selector.sel16 = (UINT16)vmcs_read(level1_vmcs, VMCS_HOST_FS_SELECTOR);
  1180  
            ar.Uint32 = 0;
            ar.Bits.SegmentType = 3;    // read/write, accessed data segment
            ar.Bits.DescriptorType = 1; // code/data descriptor
            ar.Bits.DescriptorPrivilegeLevel = 0;
            ar.Bits.SegmentPresent = 1;
            ar.Bits.DefaultOperationSize = exit_ctrls.Bits.Ia32eModeHost ? 0 : 1;
            ar.Bits.Granularity = 1;
            ar.Bits.Null = (selector.bits.index == 0) ? 1 : 0; // unusable when the selector index is 0
  1188  
  1189              gcpu_set_segment_reg_layered(gcpu, IA32_SEG_FS, selector.sel16, base, 0xffffffff, ar.Uint32, VMCS_MERGED);
  1190          }
  1191  
  1192          // GS segment
  1193          {
  1194              IA32_SELECTOR selector;
  1195              UINT64 base = vmcs_read(level1_vmcs, VMCS_HOST_GS_BASE);
  1196              IA32_VMX_VMCS_GUEST_AR ar;
  1197  
  1198              selector.sel16 = (UINT16)vmcs_read(level1_vmcs, VMCS_HOST_GS_SELECTOR);
  1199  
            ar.Uint32 = 0;
            ar.Bits.SegmentType = 3;    // read/write, accessed data segment
            ar.Bits.DescriptorType = 1; // code/data descriptor
            ar.Bits.DescriptorPrivilegeLevel = 0;
            ar.Bits.SegmentPresent = 1;
            ar.Bits.DefaultOperationSize = exit_ctrls.Bits.Ia32eModeHost ? 0 : 1;
            ar.Bits.Granularity = 1;
            ar.Bits.Null = (selector.bits.index == 0) ? 1 : 0; // unusable when the selector index is 0
  1207  
  1208              gcpu_set_segment_reg_layered(gcpu, IA32_SEG_GS, selector.sel16, base, 0xffffffff, ar.Uint32, VMCS_MERGED);
  1209          }
  1210  
  1211          // TR segment
  1212          {
  1213              IA32_SELECTOR selector;
  1214              UINT64 base = vmcs_read(level1_vmcs, VMCS_HOST_TR_BASE);
  1215              IA32_VMX_VMCS_GUEST_AR ar;
  1216  
  1217              selector.sel16 = (UINT16)vmcs_read(level1_vmcs, VMCS_HOST_TR_SELECTOR);
  1218  
  1219              ar.Uint32 = 0;
  1220              ar.Bits.SegmentType = 11;
  1221              ar.Bits.DescriptorType = 0;
  1222              ar.Bits.DescriptorPrivilegeLevel = 0;
  1223              ar.Bits.SegmentPresent = 1;
  1224              ar.Bits.DefaultOperationSize = 0;
  1225              ar.Bits.Granularity = 0;
  1226              ar.Bits.Null = 0; // usable
  1227  
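            // limit = 0x67 (103): the minimal limit of a 32-bit TSS; this is also
            // the value the CPU loads into TR.limit on VMEXIT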
  1228              gcpu_set_segment_reg_layered(gcpu, IA32_SEG_TR, selector.sel16, base, 0x67, ar.Uint32, VMCS_MERGED);
  1229          }
  1230  
  1231          // LDTR
  1232          {
  1233              IA32_VMX_VMCS_GUEST_AR ar;
  1234              ar.Uint32 = 0;
  1235              ar.Bits.Null = 1; // unusable
  1236  
  1237              gcpu_set_segment_reg_layered(gcpu, IA32_SEG_LDTR, 0, 0, 0, ar.Uint32, VMCS_MERGED);
  1238          }
  1239  
  1240  
  1241          // GDTR IDTR
  1242          {
  1243              UINT64 base;
  1244  
  1245              base = vmcs_read(level1_vmcs, VMCS_HOST_GDTR_BASE);
  1246              gcpu_set_gdt_reg_layered(gcpu, base, 0xffff, VMCS_MERGED);
  1247  
  1248              base = vmcs_read(level1_vmcs, VMCS_HOST_IDTR_BASE);
  1249              gcpu_set_idt_reg_layered(gcpu, base, 0xffff, VMCS_MERGED);
  1250          }
  1251  
  1252          // RFLAGS
  1253          {
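            // RFLAGS = 0x2: all flags cleared (bit 1 is reserved and always set)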
  1254              gcpu_set_native_gp_reg_layered(gcpu, IA32_REG_RFLAGS, 0x2, VMCS_MERGED);
  1255          }
  1256  
  1257          // RSP, RIP
  1258          {
  1259              UINT64 value;
  1260  
  1261              value = vmcs_read(level1_vmcs, VMCS_HOST_RIP);
  1262              gcpu_set_native_gp_reg_layered(gcpu, IA32_REG_RIP, value, VMCS_MERGED);
  1263  
  1264              value = vmcs_read(level1_vmcs, VMCS_HOST_RSP);
  1265              gcpu_set_native_gp_reg_layered(gcpu, IA32_REG_RSP, value, VMCS_MERGED);
  1266          }
  1267  
  1268          // SYSENTER_CS, SYSENTER_ESP, SYSENTER_EIP
  1269          {
  1270              UINT64 value;
  1271  
  1272              value = vmcs_read(level1_vmcs, VMCS_HOST_SYSENTER_CS);
  1273              gcpu_set_msr_reg_layered(gcpu, IA32_VMM_MSR_SYSENTER_CS, value, VMCS_MERGED);
  1274  
  1275              value = vmcs_read(level1_vmcs, VMCS_HOST_SYSENTER_ESP);
  1276              gcpu_set_msr_reg_layered(gcpu, IA32_VMM_MSR_SYSENTER_ESP, value, VMCS_MERGED);
  1277  
  1278              value = vmcs_read(level1_vmcs, VMCS_HOST_SYSENTER_EIP);
  1279              gcpu_set_msr_reg_layered(gcpu, IA32_VMM_MSR_SYSENTER_EIP, value, VMCS_MERGED);
  1280          }
  1281  
  1282          // DR7
  1283          {
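            // 0x400 is the architectural reset value of DR7 (bit 10 is always set)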
  1284              gcpu_set_debug_reg_layered(gcpu, IA32_REG_DR7, 0x400, VMCS_MERGED);
  1285          }
  1286  
  1287          // IA32_PERF_GLOBAL_CTRL
  1288          if (vmcs_field_is_supported(VMCS_HOST_IA32_PERF_GLOBAL_CTRL) &&
  1289              vmcs_field_is_supported(VMCS_GUEST_IA32_PERF_GLOBAL_CTRL))
  1290          {
  1291              UINT64 value;
  1292  
  1293              value = vmcs_read(level1_vmcs, VMCS_HOST_IA32_PERF_GLOBAL_CTRL);
  1294              vmcs_write(merged_vmcs, VMCS_GUEST_IA32_PERF_GLOBAL_CTRL, value);
  1295          }
  1296  
  1297          // SMBASE
  1298          {
  1299              gcpu_set_msr_reg_layered(gcpu, IA32_VMM_MSR_SMBASE, 0, VMCS_MERGED);
  1300          }
  1301  
  1302          // VMCS link pointer
  1303          {
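            // all-ones marks the link pointer as "no shadow VMCS"; VM entry
            // requires this value when VMCS shadowing is not in use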
  1304              vmcs_write(merged_vmcs, VMCS_OSV_CONTROLLING_VMCS_ADDRESS, ~((UINT64)0));
  1305          }
  1306  
  1307          // CR0, CR3, CR4
  1308          {
  1309              UINT64 value;
  1310  
  1311              value = vmcs_read(level1_vmcs, VMCS_HOST_CR0);
  1312              gcpu_set_control_reg_layered(gcpu, IA32_CTRL_CR0, value, VMCS_MERGED);
  1313              gcpu_set_guest_visible_control_reg_layered(gcpu, IA32_CTRL_CR0, value, VMCS_MERGED);
  1314  
  1315              value = vmcs_read(level1_vmcs, VMCS_HOST_CR3);
  1316              gcpu_set_control_reg_layered(gcpu, IA32_CTRL_CR3, value, VMCS_MERGED);
  1317              gcpu_set_guest_visible_control_reg_layered(gcpu, IA32_CTRL_CR3, value, VMCS_MERGED);
  1318  
  1319              value = vmcs_read(level1_vmcs, VMCS_HOST_CR4);
  1320              gcpu_set_control_reg_layered(gcpu, IA32_CTRL_CR4, value, VMCS_MERGED);
  1321              gcpu_set_guest_visible_control_reg_layered(gcpu, IA32_CTRL_CR4, value, VMCS_MERGED);
  1322          }
  1323  
  1324          // Interruptibility state
  1325          {
  1326              IA32_VMX_VMCS_GUEST_INTERRUPTIBILITY interruptibility;
  1327              IA32_VMX_EXIT_REASON reason;
  1328  
  1329  
  1330              interruptibility.Uint32 = 0;
  1331              reason.Uint32 = (UINT32)vmcs_read(level1_vmcs, VMCS_EXIT_INFO_REASON);
  1332              if (reason.Bits.BasicReason == Ia32VmxExitBasicReasonSoftwareInterruptExceptionNmi) {
  1333                  IA32_VMX_VMCS_VM_EXIT_INFO_IDT_VECTORING vectoring_info;
  1334  
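                // the VM-exit interruption-information field has the same layout as
                // the IDT-vectoring information field used for the cast below;
                // interruption type 2 denotes an NMI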
  1335                  vectoring_info.Uint32 = (UINT32)vmcs_read(level1_vmcs, VMCS_EXIT_INFO_EXCEPTION_INFO);
  1336                  if (vectoring_info.Bits.InterruptType == 2) {
  1337                      // NMI
  1338                      interruptibility.Bits.BlockNmi = 1;
  1339                  }
  1340              }
  1341              gcpu_set_interruptibility_state_layered(gcpu, interruptibility.Uint32, VMCS_MERGED);
  1342          }
  1343  
  1344          // Activity state
  1345          {
  1346              gcpu_set_activity_state_layered(gcpu, Ia32VmxVmcsGuestSleepStateActive, VMCS_MERGED);
  1347          }
  1348  
  1349          // IA32_DEBUGCTL
  1350          {
  1351              gcpu_set_msr_reg_layered(gcpu, IA32_VMM_MSR_DEBUGCTL, 0, VMCS_MERGED);
  1352          }
  1353  
  1354          // Pending debug exceptions
  1355          {
  1356              gcpu_set_pending_debug_exceptions_layered(gcpu, 0, VMCS_MERGED);
  1357          }
  1358  
  1359          // Preemption Timer
  1360          vmcs_write(merged_vmcs,
  1361                     VMCS_PREEMPTION_TIMER,
  1362                     vmcs_read(level0_vmcs, VMCS_PREEMPTION_TIMER));
  1363  
  1364      }
  1365  
    // Most of the control state is copied from level-0
  1367      {
  1368          UINT64 value;
  1369          UINT32 pf_mask;
  1370          UINT32 pf_match;
  1371  
  1372          value = gcpu_get_pin_ctrls_layered(gcpu, VMCS_LEVEL_0);
  1373          gcpu_set_pin_ctrls_layered(gcpu, VMCS_MERGED, value);
  1374  
  1375          value = gcpu_get_exceptions_map_layered(gcpu, VMCS_LEVEL_0);
  1376          gcpu_set_exceptions_map_layered(gcpu, VMCS_MERGED, value);
  1377  
  1378          value = gcpu_get_processor_ctrls_layered(gcpu, VMCS_LEVEL_0);
  1379          gcpu_set_processor_ctrls_layered(gcpu, VMCS_MERGED, value);
  1380  
  1381          value = gcpu_get_processor_ctrls2_layered(gcpu, VMCS_LEVEL_0);
  1382          gcpu_set_processor_ctrls2_layered(gcpu, VMCS_MERGED, value);
  1383  
  1384  
  1385          value = gcpu_get_enter_ctrls_layered(gcpu, VMCS_LEVEL_0);
  1386          gcpu_set_enter_ctrls_layered(gcpu, VMCS_MERGED, (UINT32)value);
  1387  #ifdef DEBUG
  1388          {
  1389              VM_ENTRY_CONTROLS controls;
  1390              controls.Uint32 = (UINT32)value;
  1391  
  1392              // VTUNE is not supported
  1393              VMM_ASSERT(controls.Bits.Load_IA32_PERF_GLOBAL_CTRL == 0);
  1394          }
  1395  #endif
  1396  
  1397          value = gcpu_get_exit_ctrls_layered(gcpu, VMCS_LEVEL_0);
  1398          gcpu_set_exit_ctrls_layered(gcpu, VMCS_MERGED, (UINT32)value);
  1399  #ifdef DEBUG
  1400          {
  1401              VM_EXIT_CONTROLS controls;
  1402              controls.Uint32 = (UINT32)value;
  1403  
  1404              // VTUNE is not supported
  1405              VMM_ASSERT(controls.Bits.Load_IA32_PERF_GLOBAL_CTRL == 0);
  1406          }
  1407  #endif
  1408  
  1409          value = gcpu_get_cr0_reg_mask_layered(gcpu, VMCS_LEVEL_0);
  1410          gcpu_set_cr0_reg_mask_layered(gcpu, VMCS_MERGED, value);
  1411  
  1412          value = gcpu_get_cr4_reg_mask_layered(gcpu, VMCS_LEVEL_0);
  1413          gcpu_set_cr4_reg_mask_layered(gcpu, VMCS_MERGED, value);
  1414  
  1415          value = vmcs_read(level0_vmcs, VMCS_OSV_CONTROLLING_VMCS_ADDRESS);
  1416          vmcs_write(merged_vmcs, VMCS_OSV_CONTROLLING_VMCS_ADDRESS, value);
  1417  
  1418          value = vmcs_read(level0_vmcs, VMCS_ENTER_INTERRUPT_INFO);
  1419          vmcs_write(merged_vmcs, VMCS_ENTER_INTERRUPT_INFO, value);
  1420  
  1421          value = vmcs_read(level0_vmcs, VMCS_ENTER_EXCEPTION_ERROR_CODE);
  1422          vmcs_write(merged_vmcs, VMCS_ENTER_EXCEPTION_ERROR_CODE, value);
  1423  
  1424          value = vmcs_read(level0_vmcs, VMCS_ENTER_INSTRUCTION_LENGTH);
  1425          vmcs_write(merged_vmcs, VMCS_ENTER_INSTRUCTION_LENGTH, value);
  1426  
  1427          value = vmcs_read(level0_vmcs, VMCS_TSC_OFFSET);
  1428          vmcs_write(merged_vmcs, VMCS_TSC_OFFSET, value);
  1429  
  1430          value = vmcs_read(level0_vmcs, VMCS_APIC_ACCESS_ADDRESS);
  1431          vmcs_write(merged_vmcs, VMCS_APIC_ACCESS_ADDRESS, value);
  1432  
  1433          value = vmcs_read(level0_vmcs, VMCS_VIRTUAL_APIC_ADDRESS);
  1434          vmcs_write(merged_vmcs, VMCS_VIRTUAL_APIC_ADDRESS, value);
  1435  
  1436          gcpu_get_pf_error_code_mask_and_match_layered(gcpu, VMCS_LEVEL_0, &pf_mask, &pf_match);
  1437          gcpu_set_pf_error_code_mask_and_match_layered(gcpu, VMCS_MERGED, pf_mask, pf_match);
  1438  
  1439          value = vmcs_read(level0_vmcs, VMCS_CR3_TARGET_COUNT);
  1440          vmcs_write(merged_vmcs, VMCS_CR3_TARGET_COUNT, value);
  1441  
  1442          value = vmcs_read(level0_vmcs, VMCS_CR3_TARGET_VALUE_0);
  1443          vmcs_write(merged_vmcs, VMCS_CR3_TARGET_VALUE_0, value);
  1444  
  1445          value = vmcs_read(level0_vmcs, VMCS_CR3_TARGET_VALUE_1);
  1446          vmcs_write(merged_vmcs, VMCS_CR3_TARGET_VALUE_1, value);
  1447  
  1448          value = vmcs_read(level0_vmcs, VMCS_CR3_TARGET_VALUE_2);
  1449          vmcs_write(merged_vmcs, VMCS_CR3_TARGET_VALUE_2, value);
  1450  
  1451          value = vmcs_read(level0_vmcs, VMCS_CR3_TARGET_VALUE_3);
  1452          vmcs_write(merged_vmcs, VMCS_CR3_TARGET_VALUE_3, value);
  1453  
  1454          value = vmcs_read(level0_vmcs, VMCS_EXIT_TPR_THRESHOLD);
  1455          vmcs_write(merged_vmcs, VMCS_EXIT_TPR_THRESHOLD, value);
  1456  
  1457          value = gcpu_get_guest_visible_control_reg_layered(gcpu, IA32_CTRL_CR0, VMCS_LEVEL_0);
  1458          gcpu_set_guest_visible_control_reg_layered(gcpu, IA32_CTRL_CR0, value, VMCS_MERGED);
  1459  
  1460          value = gcpu_get_guest_visible_control_reg_layered(gcpu, IA32_CTRL_CR4, VMCS_LEVEL_0);
  1461          gcpu_set_guest_visible_control_reg_layered(gcpu, IA32_CTRL_CR4, value, VMCS_MERGED);
  1462  
  1463      }
  1464  
  1465      // I/O bitmaps A and B
  1466      {
  1467          PROCESSOR_BASED_VM_EXECUTION_CONTROLS merged_controls;
  1468  
  1469          merged_controls.Uint32 = (UINT32)gcpu_get_processor_ctrls_layered(gcpu, VMCS_LEVEL_0);
  1470          if (merged_controls.Bits.ActivateIoBitmaps == 1) {
  1471              void* level0_bitmap_A;
  1472              void* level0_bitmap_B;
  1473              void* merged_bitmap_A;
  1474              void* merged_bitmap_B;
  1475  
  1476              level0_bitmap_A = ms_retrieve_ptr_to_additional_memory(level0_vmcs, VMCS_IO_BITMAP_ADDRESS_A, MS_HVA);
  1477              level0_bitmap_B = ms_retrieve_ptr_to_additional_memory(level0_vmcs, VMCS_IO_BITMAP_ADDRESS_B, MS_HVA);
  1478  
  1479              VMM_ASSERT(level0_bitmap_A != NULL);
  1480              VMM_ASSERT(level0_bitmap_B != NULL);
  1481  
  1482              merged_bitmap_A = ms_retrieve_ptr_to_additional_memory(merged_vmcs, VMCS_IO_BITMAP_ADDRESS_A, MS_HPA);
  1483              merged_bitmap_B = ms_retrieve_ptr_to_additional_memory(merged_vmcs, VMCS_IO_BITMAP_ADDRESS_B, MS_HPA);
  1484  
  1485              VMM_ASSERT(merged_bitmap_A != NULL);
  1486              VMM_ASSERT(merged_bitmap_B != NULL);
  1487  
  1488              ms_merge_bitmaps(level0_bitmap_A, NULL, merged_bitmap_A);
  1489              ms_merge_bitmaps(level0_bitmap_B, NULL, merged_bitmap_B);
  1490  
  1491          }
  1492      }
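
    // NOTE: ms_merge_bitmaps() is defined earlier in this file; judging by its
    // call sites it ORs two 4KB bitmaps into the destination, treating a NULL
    // source as all-zeroes (so an exit occurs if either level requests one).
    // A minimal sketch of that behavior - names and the PAGE_4KB_SIZE constant
    // are illustrative assumptions, not the actual implementation:
    //
    //     static void merge_bitmaps_sketch(const UINT64* a, const UINT64* b, UINT64* merged)
    //     {
    //         UINT32 i;
    //         for (i = 0; i < PAGE_4KB_SIZE / sizeof(UINT64); ++i) {
    //             merged[i] = (a ? a[i] : 0) | (b ? b[i] : 0);
    //         }
    //     }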
  1493  
  1494      // MSR bitmap
  1495      {
  1496          PROCESSOR_BASED_VM_EXECUTION_CONTROLS merged_controls;
  1497  
  1498          merged_controls.Uint32 = (UINT32)gcpu_get_processor_ctrls_layered(gcpu, VMCS_LEVEL_0);
  1499  
  1500          if (merged_controls.Bits.UseMsrBitmaps == 1) {
  1501              void* level0_bitmap;
  1502              void* merged_bitmap;
  1503  
  1504              level0_bitmap = ms_retrieve_ptr_to_additional_memory(level0_vmcs, VMCS_MSR_BITMAP_ADDRESS, MS_HVA);
  1505              merged_bitmap = ms_retrieve_ptr_to_additional_memory(merged_vmcs, VMCS_MSR_BITMAP_ADDRESS, MS_HPA);
  1506  
  1507              ms_merge_bitmaps(level0_bitmap, NULL, merged_bitmap);
  1508          }
  1509      }
  1510  
  1511      // VMExit MSR-store address and count
  1512      {
  1513          IA32_VMX_MSR_ENTRY* level0_list = ms_retrieve_ptr_to_additional_memory(level0_vmcs, VMCS_EXIT_MSR_STORE_ADDRESS, MS_HVA);
  1514          UINT32 level0_list_count = (UINT32)vmcs_read(level0_vmcs, VMCS_EXIT_MSR_STORE_COUNT);
  1515  
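        // 256 entries of 16 bytes each (the architectural MSR-entry format) fill
        // exactly one 4KB page - presumably the size of the backing MSR-list area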
  1516          if (level0_list_count > 256) {
  1517              // TODO: proper handling
  1518              VMM_DEADLOOP();
  1519          }
  1520  
  1521          ms_merge_msr_list(gcpu, merged_vmcs, level0_list, NULL, level0_list_count,
  1522                            0, MSR_LIST_COPY_NO_CHANGE, vmcs_add_msr_to_vmexit_store_list,
  1523                            vmcs_clear_vmexit_store_list, vmcs_is_msr_in_vmexit_store_list,
  1524                            VMCS_EXIT_MSR_STORE_ADDRESS, VMCS_EXIT_MSR_STORE_COUNT);
  1525      }
  1526  
  1527      // VMExit MSR-load address and count
  1528      {
  1529          IA32_VMX_MSR_ENTRY* level0_list = ms_retrieve_ptr_to_additional_memory(level0_vmcs, VMCS_EXIT_MSR_LOAD_ADDRESS, MS_HVA);
  1530          UINT32 level0_list_count = (UINT32)vmcs_read(level0_vmcs, VMCS_EXIT_MSR_LOAD_COUNT);
  1531  
  1532          if (level0_list_count > 256) {
  1533              // TODO: proper handling
  1534              VMM_DEADLOOP();
  1535          }
  1536  
  1537          ms_merge_msr_list(gcpu, merged_vmcs, level0_list, NULL, level0_list_count, 0,
  1538                            MSR_LIST_COPY_NO_CHANGE, vmcs_add_msr_to_vmexit_load_list,
  1539                            vmcs_clear_vmexit_load_list, vmcs_is_msr_in_vmexit_load_list,
  1540                            VMCS_EXIT_MSR_LOAD_ADDRESS, VMCS_EXIT_MSR_LOAD_COUNT);
  1541      }
  1542  
  1543      // VMEnter MSR-load address and count
  1544      {
  1545          IA32_VMX_MSR_ENTRY* level0_list = ms_retrieve_ptr_to_additional_memory(level0_vmcs, VMCS_ENTER_MSR_LOAD_ADDRESS, MS_HVA);
  1546          UINT32 level0_list_count = (UINT32)vmcs_read(level0_vmcs, VMCS_ENTER_MSR_LOAD_COUNT);
  1547          IA32_VMX_MSR_ENTRY* level1_list = ms_retrieve_ptr_to_additional_memory(level1_vmcs, VMCS_EXIT_MSR_LOAD_ADDRESS, MS_HVA);
  1548          UINT32 level1_list_count = (UINT32)vmcs_read(level1_vmcs, VMCS_EXIT_MSR_LOAD_COUNT);
  1549          VM_ENTRY_CONTROLS entry_ctrls;
  1550          MSR_LIST_COPY_MODE copy_mode;
  1551  
  1552          if ((level0_list_count + level1_list_count) > 256) {
  1553              // TODO: proper handling
  1554              VMM_DEADLOOP();
  1555          }
  1556  
  1557          entry_ctrls.Uint32 = (UINT32)gcpu_get_enter_ctrls_layered(gcpu, VMCS_MERGED);
  1558          if (entry_ctrls.Bits.Ia32eModeGuest) {
  1559              copy_mode = MSR_LIST_COPY_AND_SET_64_BIT_MODE_IN_EFER | MSR_LIST_COPY_UPDATE_GCPU;
  1560          }
  1561          else {
  1562              copy_mode = MSR_LIST_COPY_AND_SET_32_BIT_MODE_IN_EFER | MSR_LIST_COPY_UPDATE_GCPU;
  1563          }
  1564  
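        // the EFER-related copy modes direct ms_merge_msr_list to force any
        // IA32_EFER entry in the resulting list to match the merged "IA-32e mode
        // guest" setting (presumably via EFER.LME/LMA), and the UPDATE_GCPU flag
        // additionally refreshes the GCPU's cached view of the affected MSRs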
  1565          ms_merge_msr_list(gcpu, merged_vmcs, level1_list, level0_list, level1_list_count,
  1566                            level0_list_count, copy_mode, vmcs_add_msr_to_vmenter_load_list,
  1567                            vmcs_clear_vmenter_load_list, vmcs_is_msr_in_vmenter_load_list,
  1568                            VMCS_ENTER_MSR_LOAD_ADDRESS, VMCS_ENTER_MSR_LOAD_COUNT);
  1569      }
  1570  
  1571      // Copy host state from level-0 vmcs
  1572      ms_copy_host_state(merged_vmcs, level0_vmcs);
  1573  }
  1574  
  1575  
/*
    Merge Algorithm:
    ---------------
        If VMCS#1.Timer-Enabled == FALSE ==> copy timer state from VMCS#0
        else if VMCS#0.Timer-Enabled == FALSE ==> copy timer state from VMCS#1
        else do a real merge:
            Save-Value = 1
            Enable = 1
            Counter = minimum of the two counters

    Split Algorithm:
    ---------------
        Control information is not split
        if Save-Value == 0, Counter is not changed
        else
            if (Counter[i] <= Counter[1-i]) Counter[i] = Counter[m]
            else Counter[i] = Counter[m] + Counter[i] - Counter[1-i]

    VMEXIT-request Analysis Algorithm: (implemented in another file)
    ---------------------------------
        if Save-Value == 0               VMEXIT-requested = TRUE;
        else if (counter#0 == counter#1) VMEXIT-requested = TRUE;
        else                             VMEXIT-requested = FALSE;
*/
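
/*
    Worked example (illustrative numbers only): suppose both levels enable the
    timer with Counter[0] = 100 and Counter[1] = 60.
        Merge:  Counter[m] = MIN(100, 60) = 60, Enable = 1, Save-Value = 1
        Run:    the merged counter ticks down; say Counter[m] = 10 at VMEXIT
        Split:  Counter[1] = 10                    (60 <= 100, take Counter[m])
                Counter[0] = 10 + (100 - 60) = 50  (keep the surplus over level 1)
    Both counters preserve the relative distance they had before the merge.
*/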
  1600  void ms_merge_timer_to_level2(VMCS_OBJECT *vmcs_0, VMCS_OBJECT *vmcs_1, VMCS_OBJECT *vmcs_m)
  1601  {
  1602      PIN_BASED_VM_EXECUTION_CONTROLS merged_pin_exec;
  1603      VM_EXIT_CONTROLS                merged_vmexit_ctrls;
  1604      UINT32                          merged_counter_value;
  1605      PIN_BASED_VM_EXECUTION_CONTROLS pin_exec[2];
  1606      UINT32                          counter_value[2];
  1607  
  1608      pin_exec[0].Uint32 = (UINT32)vmcs_read(vmcs_0, VMCS_CONTROL_VECTOR_PIN_EVENTS);
  1609      pin_exec[1].Uint32 = (UINT32)vmcs_read(vmcs_1, VMCS_CONTROL_VECTOR_PIN_EVENTS);
  1610      merged_pin_exec.Uint32 = (UINT32)vmcs_read(vmcs_m, VMCS_CONTROL_VECTOR_PIN_EVENTS);
  1611      merged_vmexit_ctrls.Uint32 = (UINT32)vmcs_read(vmcs_m, VMCS_EXIT_CONTROL_VECTOR);
  1612  
  1613      merged_pin_exec.Bits.VmxTimer = pin_exec[0].Bits.VmxTimer || pin_exec[1].Bits.VmxTimer;
  1614  
  1615      if (0 == merged_pin_exec.Bits.VmxTimer) {
  1616          // VMX Timer disabled
  1617          merged_vmexit_ctrls.Bits.SaveVmxTimer = 0;
  1618          merged_counter_value = 0;
  1619      }
  1620      else {
  1621          VM_EXIT_CONTROLS vmexit_ctrls;
  1622  
  1623          // VMX Timer enabled at least in one VMCS
  1624          if (0 == pin_exec[1].Bits.VmxTimer) {
  1625              // copy from vmcs#0
  1626              vmexit_ctrls.Uint32 = (UINT32) vmcs_read(vmcs_0, VMCS_EXIT_CONTROL_VECTOR);
  1627              merged_vmexit_ctrls.Bits.SaveVmxTimer = vmexit_ctrls.Bits.SaveVmxTimer;
  1628              merged_counter_value = (UINT32) vmcs_read(vmcs_0, VMCS_PREEMPTION_TIMER);
  1629          }
  1630          else if (0 == pin_exec[0].Bits.VmxTimer) {
  1631              // copy from vmcs#1
  1632              vmexit_ctrls.Uint32 = (UINT32) vmcs_read(vmcs_1, VMCS_EXIT_CONTROL_VECTOR);
  1633              merged_vmexit_ctrls.Bits.SaveVmxTimer = vmexit_ctrls.Bits.SaveVmxTimer;
  1634              merged_counter_value = (UINT32) vmcs_read(vmcs_1, VMCS_PREEMPTION_TIMER);
  1635          }
  1636          else {
            // VMX Timer is enabled in both VMCSs,
            // so do a real merge: take the minimum of the two counters
  1639              merged_vmexit_ctrls.Bits.SaveVmxTimer = 1;
  1640              counter_value[0] = (UINT32) vmcs_read(vmcs_0, VMCS_PREEMPTION_TIMER);
  1641              counter_value[1] = (UINT32) vmcs_read(vmcs_1, VMCS_PREEMPTION_TIMER);
  1642              merged_counter_value = MIN(counter_value[0], counter_value[1]);
  1643          }
  1644      }
  1645      vmcs_write(vmcs_m, VMCS_CONTROL_VECTOR_PIN_EVENTS, (UINT64) merged_pin_exec.Uint32);
  1646      vmcs_write(vmcs_m, VMCS_EXIT_CONTROL_VECTOR, (UINT64) merged_vmexit_ctrls.Uint32);
  1647      vmcs_write(vmcs_m, VMCS_PREEMPTION_TIMER, (UINT64) merged_counter_value);
  1648  }
  1649  
  1650  void ms_split_timer_from_level2(VMCS_OBJECT *vmcs_0, VMCS_OBJECT *vmcs_1, VMCS_OBJECT *vmcs_m)
  1651  {
  1652      PIN_BASED_VM_EXECUTION_CONTROLS pin_exec[2];
  1653      VM_EXIT_CONTROLS                vmexit_ctrls[2];
  1654      UINT32                          old_counter[2];
  1655      UINT32                          new_counter;
  1656      int i;
  1657  
  1658      pin_exec[0].Uint32     = (UINT32) vmcs_read(vmcs_0, VMCS_CONTROL_VECTOR_PIN_EVENTS);
  1659      pin_exec[1].Uint32     = (UINT32) vmcs_read(vmcs_1, VMCS_CONTROL_VECTOR_PIN_EVENTS);
  1660      vmexit_ctrls[0].Uint32 = (UINT32) vmcs_read(vmcs_0, VMCS_EXIT_CONTROL_VECTOR);
  1661      vmexit_ctrls[1].Uint32 = (UINT32) vmcs_read(vmcs_1, VMCS_EXIT_CONTROL_VECTOR);
  1662      old_counter[0]         = (UINT32) vmcs_read(vmcs_0, VMCS_PREEMPTION_TIMER);
  1663      old_counter[1]         = (UINT32) vmcs_read(vmcs_1, VMCS_PREEMPTION_TIMER);
  1664  
    for (i = 0; i < 2; ++i) {
        VMCS_OBJECT *vmcs_i = (0 == i) ? vmcs_0 : vmcs_1;

        if (1 == pin_exec[i].Bits.VmxTimer && 1 == vmexit_ctrls[i].Bits.SaveVmxTimer) {
            if (0 == pin_exec[1 - i].Bits.VmxTimer) {
                // the other level does not use the timer - restore this level's
                // original value
                new_counter = old_counter[i];
            }
            else {
                if (old_counter[i] <= old_counter[1 - i]) {
                    // this level's deadline came first - take the merged residual value
                    new_counter = (UINT32) vmcs_read(vmcs_m, VMCS_PREEMPTION_TIMER);
                }
                else {
                    // keep this level's surplus over the other level's counter
                    new_counter = (UINT32) vmcs_read(vmcs_m, VMCS_PREEMPTION_TIMER)
                        + (old_counter[i] - old_counter[1 - i]);
                }
            }
            // write the split value back to the VMCS it belongs to
            vmcs_write(vmcs_i, VMCS_PREEMPTION_TIMER, (UINT64) new_counter);
        }
    }
}