github.com/jlmucb/cloudproxy@v0.0.0-20170830161738-b5aa0b619bc4/cpvmm/vmm/startup/layout_host_memory_for_mbr_loader.c

/*
 * Copyright (c) 2013 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *     http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "file_codes.h"
#define VMM_DEADLOOP()          VMM_DEADLOOP_LOG(LAYOUT_HOST_MEMORY_FOR_MBR_LOADER_C)
#define VMM_ASSERT(__condition) VMM_ASSERT_LOG(LAYOUT_HOST_MEMORY_FOR_MBR_LOADER_C, __condition)
#include "layout_host_memory.h"
#include "e820_abstraction.h"
#include "gpm_api.h"
#include "guest.h"
#include "guest_cpu.h"
#include "vmm_dbg.h"

#define FOUR_GIGABYTE 0x100000000
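// Guest-physical addresses below FOUR_GIGABYTE are identity-mapped into the
// primary guest up front; E820 ranges that extend above it are mapped
// individually in the loop below.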

//  Read the input data structure and create all guests.
//  NOTE: the current implementation is valid only for an MBR loader. For the
//        driver-loading scenario it should be changed.

extern UINT32 g_is_post_launch;
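// Presumably set by the loader when the VMM is started after the guest OS is
// already running ("post-launch"); in that case the page list supplied in
// application_params is unmapped from the primary guest below.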

// init memory layout
// This function initializes the "memory layout object" and the primary guest
// memory layout. When there are no secondary guests, the memory layout object
// is not required.
//
// For the primary guest:
//   - All memory up to 4G is mapped, except for the VMM and secondary guest areas
//   - Only explicitly specified memory above 4G is mapped. Mappings in the >4G
//     region for the primary guest should be added on demand
//
// For secondary guests:
//   - All secondary guests are loaded below 4G
BOOLEAN init_memory_layout_from_mbr(
#if 0
                    // JLM(FIX)
                    int num_excluded,
#endif
                    const VMM_MEMORY_LAYOUT* vmm_memory_layout,
                    GPM_HANDLE primary_guest_gpm, BOOLEAN are_secondary_guests_exist,
                    const VMM_APPLICATION_PARAMS_STRUCT* application_params)
{
    E820_ABSTRACTION_RANGE_ITERATOR         e820_iter;
    const INT15_E820_MEMORY_MAP_ENTRY_EXT*  e820_entry = NULL;
    BOOLEAN                                 ok;
    UINT64                                  range_start;
    UINT64                                  range_end;
    INT15_E820_RANGE_TYPE                   range_type;
    INT15_E820_MEMORY_MAP_EXT_ATTRIBUTES    range_attr;
    UINT64                                  page_index;
    UINT64                                 *entry_list;

    // BEFORE_VMLAUNCH. CRITICAL check that should not fail.
    VMM_ASSERT(e820_abstraction_is_initialized());

    if (global_policy_uses_vtlb()) {
        mam_rwx_attrs.uint32 = 0x5;
        mam_rw_attrs.uint32  = 0x1;
        mam_ro_attrs.uint32  = 0x0;
    }
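    // NOTE: the literal values above presumably encode the read/write/execute
    // permission bits expected by the memory attribute map when the VTLB
    // policy is active; the defaults are overridden only in that case.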

    // 1. First map the 0-4G host region into the primary guest
    ok = gpm_add_mapping( primary_guest_gpm, 0, 0, FOUR_GIGABYTE, mam_rwx_attrs );
    VMM_LOG(mask_anonymous, level_trace,"Primary guest GPM: add 0-4G region\r\n");
    // BEFORE_VMLAUNCH. CRITICAL check that should not fail.
    VMM_ASSERT( ok == TRUE );

    // 2. Add real memory to the "memory layout object" and to the primary guest
    //    if the memory range is above 4G.
    //    In post-launch mode this step should be skipped.
    for (e820_iter = e820_abstraction_iterator_get_first(E820_ORIGINAL_MAP);
        e820_iter != E820_ABSTRACTION_NULL_ITERATOR;
        e820_iter = e820_abstraction_iterator_get_next(E820_ORIGINAL_MAP, e820_iter)) {
        e820_entry = e820_abstraction_iterator_get_range_details(e820_iter);

        range_start = e820_entry->basic_entry.base_address;
        range_end   = range_start + e820_entry->basic_entry.length;
        range_type  = e820_entry->basic_entry.address_range_type;
        range_attr  = e820_entry->extended_attributes;

        // align range start and end on 4K boundaries
        range_start = ALIGN_FORWARD(range_start, PAGE_4KB_SIZE);
        range_end   = ALIGN_BACKWARD(range_end, PAGE_4KB_SIZE);
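        // ALIGN_FORWARD rounds the start up and ALIGN_BACKWARD rounds the end
        // down, so only 4KB pages fully contained in the E820 range are kept.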

        VMM_DEBUG_CODE({
            if (range_start != e820_entry->basic_entry.base_address) {
                VMM_LOG(mask_anonymous, level_trace,"init_memory_layout_from_mbr WARNING: aligning E820 range start from %P to %P\n",
                    e820_entry->basic_entry.base_address, range_start);
            }

            if (range_end != e820_entry->basic_entry.base_address + e820_entry->basic_entry.length) {
                VMM_LOG(mask_anonymous, level_trace,"init_memory_layout_from_mbr WARNING: aligning E820 range end from %P to %P\n",
                    e820_entry->basic_entry.base_address + e820_entry->basic_entry.length,
                    range_end);
            }
        })

        if (range_end <= range_start) {
            // after alignment the range became invalid
            VMM_LOG(mask_anonymous, level_trace,"init_memory_layout_from_mbr WARNING: skipping invalid E820 memory range from %P to %P\n",
                 range_start, range_end);
            continue;
        }

        // add memory to the "memory layout object" if this is real memory
        // below 4G
        if (are_secondary_guests_exist && (range_start < FOUR_GIGABYTE) &&
            range_attr.Bits.enabled && (!range_attr.Bits.non_volatile)) {
            UINT64 top = (range_end < FOUR_GIGABYTE) ? range_end : FOUR_GIGABYTE;
            (void)top;
            if ((range_type == INT15_E820_ADDRESS_RANGE_TYPE_MEMORY) ||
                (range_type == INT15_E820_ADDRESS_RANGE_TYPE_ACPI)) {
                // here we need to add a call to the "memory layout object"
                // to fill it with the range_start-top range;
                // the assignment below only keeps the compiler happy
                top = 0;
            }
        }

        // add memory to the primary guest if this is memory above 4G
        if (range_end > FOUR_GIGABYTE) {
            // map only the part of the range that lies above 4G
            UINT64 bottom = (range_start < FOUR_GIGABYTE) ? FOUR_GIGABYTE : range_start;

            if (bottom < range_end) {
                VMM_LOG(mask_anonymous, level_trace,"Primary guest GPM: add memory above 4GB base %p size %p\r\n",
                        bottom, range_end - bottom);
                ok = gpm_add_mapping( primary_guest_gpm, bottom, bottom, range_end - bottom, mam_rwx_attrs );
                // BEFORE_VMLAUNCH. CRITICAL check that should not fail.
                VMM_ASSERT( ok == TRUE );
            }
        }
    }
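    // At this point the primary guest GPM identity-maps all of 0-4G plus every
    // usable E820 range above 4G; the VMM's own memory is carved out next.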

#if 1
#if 1   // UNMAP EVMM
    // now remove the VMM area from the primary guest
    ok = gpm_remove_mapping(primary_guest_gpm,
                            vmm_memory_layout[0].base_address,
                            vmm_memory_layout[0].total_size);
    VMM_LOG(mask_anonymous, level_trace,"Primary guest GPM: remove uvmm image base %p size 0x%x\r\n",
            vmm_memory_layout[uvmm_image].base_address,
            vmm_memory_layout[uvmm_image].total_size);
#endif
#if 0
    // also remove the thunk area from the primary guest;
    // in post-launch mode this should be skipped.
    ok = gpm_remove_mapping(primary_guest_gpm,
                            vmm_memory_layout[thunk_image].base_address,
                            vmm_memory_layout[thunk_image].total_size);
    VMM_LOG(mask_anonymous, level_trace,
            "Primary guest GPM: remove thunk image base %p size 0x%x\r\n",
            vmm_memory_layout[thunk_image].base_address,
            vmm_memory_layout[thunk_image].total_size);
#endif

    if (g_is_post_launch) {
        VMM_ASSERT(application_params != NULL);
        entry_list = (UINT64 *)application_params->address_entry_list;

        for (page_index = 0; page_index < application_params->entry_number && ok; page_index++) {
            ok = gpm_remove_mapping(primary_guest_gpm, (GPA)entry_list[page_index], PAGE_4KB_SIZE);
            //VMM_LOG(mask_anonymous, level_trace,"Primary guest GPM: remove heap page base %p 4K size\r\n", (GPA)entry_list[page_index]);
        }
    }
#else
    int i;
    for (i = 0; i < num_excluded; i++) {
        ok = gpm_remove_mapping( primary_guest_gpm, vmm_memory_layout[i].base_address,
                                 vmm_memory_layout[i].total_size );
    }
#endif

    //gpm_print(primary_guest_gpm);

    // BEFORE_VMLAUNCH. CRITICAL check that should not fail.
    VMM_ASSERT( ok == TRUE );

    return TRUE;
}
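
/*
 * Minimal usage sketch (illustrative only; the variable names below are
 * placeholders, not identifiers defined in this file). Startup code is
 * presumably expected to call this routine once, after the E820 abstraction
 * has been initialized and the primary guest's GPM handle exists:
 *
 *     BOOLEAN ok = init_memory_layout_from_mbr(startup_memory_layout,
 *                                              primary_guest_gpm,
 *                                              FALSE,  // no secondary guests
 *                                              startup_application_params);
 *     VMM_ASSERT(ok == TRUE);
 */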