github.com/jlmucb/cloudproxy@v0.0.0-20170830161738-b5aa0b619bc4/cpvmm/vmm/utils/cache64.c

/*
 * Copyright (c) 2013 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *     http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "vmm_defs.h"
#include "vmm_dbg.h"
#include "common_libc.h"
#include "memory_allocator.h"
#include "hw_utils.h"   // for hw_cpu_id(); an assumption -- the declaration
                        // may already arrive transitively via a header above
#include "cache64.h"
#include "file_codes.h"

#define VMM_DEADLOOP()          VMM_DEADLOOP_LOG(CACHE64_C)
#define VMM_ASSERT(__condition) VMM_ASSERT_LOG(CACHE64_C, __condition)

struct CACHE64_STRUCT {
    UINT32  num_of_entries;
    UINT16  bitmap_size; // in bytes
    UINT16  flags;
    UINT64 *table;
    UINT8  *dirty_bits;
    UINT8  *valid_bits;
};
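
// Layout note: table[] holds one UINT64 per entry, while valid_bits and
// dirty_bits are parallel bitmaps with one bit per entry, so bitmap_size is
// BITARRAY_SIZE_IN_BYTES(num_of_entries), i.e. ceil(num_of_entries / 8).
// The per-cache `flags` word carries cache-wide state: cache64_write sets
// CACHE_DIRTY_FLAG there, and the flush-all/invalidate-all paths below clear
// the corresponding bits.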

//
// Helper macros
//
#define CACHE_FIELD_IS_VALID(__cache, __entry_no)   BITARRAY_GET((__cache)->valid_bits, __entry_no)
#define CACHE_FIELD_SET_VALID(__cache, __entry_no)  BITARRAY_SET((__cache)->valid_bits, __entry_no)
#define CACHE_FIELD_CLR_VALID(__cache, __entry_no)  BITARRAY_CLR((__cache)->valid_bits, __entry_no)

#define CACHE_FIELD_IS_DIRTY(__cache, __entry_no)   BITARRAY_GET((__cache)->dirty_bits, __entry_no)
#define CACHE_FIELD_SET_DIRTY(__cache, __entry_no)  BITARRAY_SET((__cache)->dirty_bits, __entry_no)
#define CACHE_FIELD_CLR_DIRTY(__cache, __entry_no)  BITARRAY_CLR((__cache)->dirty_bits, __entry_no)

#define ENUMERATE_DIRTY_ENTRIES(__cache, __func, __arg)                        \
    BITARRAY_ENUMERATE( (__cache)->dirty_bits,                                 \
                        (__cache)->num_of_entries,                             \
                        __func, __arg)
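
// Note (inferred from cache64_flush_dirty below): BITARRAY_ENUMERATE is
// expected to invoke __func(entry_no, __arg) for every set dirty bit and to
// clear each bit as it goes -- otherwise the flush-all path would leave the
// dirty bitmap stale, since it only memsets the bitmap when function == NULL.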

// Allocate a cache of num_of_entries 64-bit slots plus its valid/dirty
// bitmaps; returns NULL if any of the four allocations fails.
CACHE64_OBJECT cache64_create(UINT32 num_of_entries)
{
    struct CACHE64_STRUCT *cache;
    UINT64 *table;
    UINT8  *dirty_bits;
    UINT8  *valid_bits;
    UINT16 bitmap_size = (UINT16) BITARRAY_SIZE_IN_BYTES(num_of_entries);

    cache = vmm_malloc(sizeof(struct CACHE64_STRUCT));
    table = vmm_malloc(sizeof(UINT64) * num_of_entries);
    dirty_bits = vmm_malloc(bitmap_size);
    valid_bits = vmm_malloc(bitmap_size);

    if (NULL != cache && NULL != table && NULL != dirty_bits && NULL != valid_bits) {
        // everything is OK. fill the fields
        cache->num_of_entries = num_of_entries;
        cache->bitmap_size    = bitmap_size;
        cache->flags          = 0;
        cache->table          = table;
        cache->dirty_bits     = dirty_bits;
        cache->valid_bits     = valid_bits;

        vmm_memset(table, 0, sizeof(*table) * num_of_entries);
        vmm_memset(dirty_bits, 0, bitmap_size);
        vmm_memset(valid_bits, 0, bitmap_size);
    }
    else {
        // at least one allocation failed; free whatever did succeed
        VMM_LOG(mask_anonymous, level_trace,"[cache64] %s: Allocation failed\n", __FUNCTION__);
        if (NULL != cache) {
            vmm_mfree(cache);
        }
        if (NULL != table) {
            vmm_mfree(table);
        }
        if (NULL != dirty_bits) {
            vmm_mfree(dirty_bits);
        }
        if (NULL != valid_bits) {
            vmm_mfree(valid_bits);
        }
        cache = NULL;
    }

    return cache;
}
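
#ifdef CACHE64_USAGE_EXAMPLE   // illustrative guard, not defined by any build
// Minimal lifecycle sketch. CACHE64_USAGE_EXAMPLE and the entry count are
// assumptions for illustration; only the cache64_* calls come from this file.
static void cache64_example_lifecycle(void)
{
    CACHE64_OBJECT cache = cache64_create(64);  // 64 entries -> 8-byte bitmaps

    if (NULL == cache) {
        return;   // allocation failed; nothing to clean up
    }
    // ... use cache64_write()/cache64_read() here ...
    cache64_destroy(cache);
}
#endif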

// Per-CPU kill switch set elsewhere in the VMM: when TRUE for the current
// CPU, cache64_write silently drops the write and cache64_read always misses.
extern BOOLEAN vmcs_sw_shadow_disable[];

// Store `value` in slot entry_no and mark it valid; the entry (and the whole
// cache) is marked dirty only if the value actually changed or was invalid.
void cache64_write(CACHE64_OBJECT cache, UINT64 value, UINT32 entry_no)
{
    if (vmcs_sw_shadow_disable[hw_cpu_id()])
        return;
    VMM_ASSERT(cache);
    VMM_ASSERT(entry_no < cache->num_of_entries);
    if (entry_no < cache->num_of_entries) {
        if (!(cache->table[entry_no]==value && CACHE_FIELD_IS_VALID(cache, entry_no))) {
            cache->table[entry_no] = value;
            CACHE_FIELD_SET_DIRTY(cache, entry_no);
            CACHE_FIELD_SET_VALID(cache, entry_no);
            BITMAP_SET(cache->flags, CACHE_DIRTY_FLAG);
        }
    }
}

// Copy slot entry_no into *p_value; returns TRUE on a valid hit, FALSE on a
// miss (or when the per-CPU shadow cache is disabled).
BOOLEAN cache64_read(CACHE64_OBJECT cache, UINT64 *p_value, UINT32 entry_no)
{
    BOOLEAN is_valid = FALSE;

    if (vmcs_sw_shadow_disable[hw_cpu_id()])
        return FALSE;
    VMM_ASSERT(cache);
    VMM_ASSERT(entry_no < cache->num_of_entries);
    VMM_ASSERT(p_value);
    if (entry_no < cache->num_of_entries) {
        if (CACHE_FIELD_IS_VALID(cache, entry_no)) {
            *p_value = cache->table[entry_no];
            is_valid = TRUE;
        }
    }
    return is_valid;
}
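
#ifdef CACHE64_USAGE_EXAMPLE   // illustrative guard, not defined by any build
// Write-then-read sketch: the entry numbers and values are made up for
// illustration. A read of a never-written entry returns FALSE.
static void cache64_example_rw(CACHE64_OBJECT cache)
{
    UINT64 value = 0;

    cache64_write(cache, 0x1234, 7);             // marks entry 7 valid+dirty
    if (cache64_read(cache, &value, 7)) {        // hit: value == 0x1234
        // ...
    }
    (void)cache64_read(cache, &value, 8);        // miss if entry 8 never written
}
#endif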

#ifdef INCLUDE_UNUSED_CODE
// Like cache64_read, but reports the entry's state as a combination of
// CACHE_VALID_FLAG and CACHE_DIRTY_FLAG instead of a plain BOOLEAN.
UINT32 cache64_read_raw(CACHE64_OBJECT cache, UINT64 *p_value, UINT32 entry_no)
{
    UINT32 cache_flags = 0;

    VMM_ASSERT(cache);
    VMM_ASSERT(entry_no < cache->num_of_entries);
    VMM_ASSERT(p_value);

    if (entry_no < cache->num_of_entries) {
        if (CACHE_FIELD_IS_VALID(cache, entry_no)) {
            *p_value = cache->table[entry_no];
            cache_flags = CACHE_VALID_FLAG;
            if (CACHE_FIELD_IS_DIRTY(cache, entry_no)) {
                cache_flags |= CACHE_DIRTY_FLAG;
            }
        }
    }
    return cache_flags;
}
#endif

// Invalidate a single entry, or, when entry_no is out of range, invalidate
// the whole cache and clear both bitmaps.
void cache64_invalidate(CACHE64_OBJECT cache, UINT32 entry_no)
{
    VMM_ASSERT(cache);

    if (entry_no < cache->num_of_entries) {
        // invalidate specific entry
        CACHE_FIELD_CLR_VALID(cache, entry_no);
    }
    else {
        // invalidate all entries
        BITMAP_CLR(cache->flags, CACHE_VALID_FLAG);
        vmm_memset(cache->valid_bits, 0, cache->bitmap_size);
        vmm_memset(cache->dirty_bits, 0, cache->bitmap_size);
    }
}
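
// The out-of-range sentinel above doubles as "invalidate everything": callers
// can pass any value >= num_of_entries, e.g. (UINT32)-1. A hedged sketch:
//
//     cache64_invalidate(cache, (UINT32)-1);   // drop all cached entries
//
// Note that invalidate-all also zeroes the dirty bitmap, discarding any
// not-yet-flushed writes.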

// Flush dirty entries through <function>: a single entry when entry_no is in
// range, otherwise every dirty entry in the cache.
void cache64_flush_dirty(
    CACHE64_OBJECT cache,
    UINT32 entry_no,
    CACHE64_FIELD_PROCESS_FUNCTION function, // if function == NULL, just clear the dirty bits
    void *arg
)
{
    VMM_ASSERT(cache);

    if (entry_no < cache->num_of_entries) {
        // flush specific entry
        if (CACHE_FIELD_IS_DIRTY(cache, entry_no)) {
            CACHE_FIELD_CLR_DIRTY(cache, entry_no);
            if (NULL != function) {
                function(entry_no, arg);
            }
        }
    }
    else {
        // flush all entries
        BITMAP_CLR(cache->flags, CACHE_DIRTY_FLAG);

        if (NULL != function) {
            ENUMERATE_DIRTY_ENTRIES(cache, function, arg);
        }
        else {
            vmm_memset(cache->dirty_bits, 0, cache->bitmap_size);
        }
    }
}
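
#ifdef CACHE64_USAGE_EXAMPLE   // illustrative guard, not defined by any build
// Flush-callback sketch. The (entry_no, arg) signature is inferred from how
// cache64_flush_dirty invokes `function` above; the writeback target is an
// invented example, not part of this file.
static void example_writeback(UINT32 entry_no, void *arg)
{
    CACHE64_OBJECT cache = (CACHE64_OBJECT) arg;
    UINT64 value = 0;

    // the entry stays valid during a flush, so it can still be read here
    if (cache64_read(cache, &value, entry_no)) {
        // ... write `value` for `entry_no` back to its backing store ...
    }
}

static void cache64_example_flush(CACHE64_OBJECT cache)
{
    if (cache64_is_dirty(cache)) {
        // an out-of-range entry_no selects the flush-all path
        cache64_flush_dirty(cache, (UINT32)-1, example_writeback, cache);
    }
}
#endif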

#ifdef INCLUDE_LAYERING
void cache64_flush_to_memory(CACHE64_OBJECT cache, void *p_dest,
                             UINT32 max_bytes)
{
    UINT32 cache_size;

    VMM_ASSERT(cache);
    VMM_ASSERT(p_dest);

    // compute the size only after the asserts, so NULL is never dereferenced
    cache_size = sizeof(*cache->table) * cache->num_of_entries;
    if (cache_size > max_bytes) {
        VMM_LOG(mask_anonymous, level_trace,"[cache64] %s: Warning!!! Destination size less than required\n", __FUNCTION__);
        cache_size = max_bytes;
    }
    vmm_memcpy(p_dest, cache->table, cache_size);
}
#endif

// TRUE if the cache-wide CACHE_DIRTY_FLAG is set; cache64_write sets it and
// only the flush-all path of cache64_flush_dirty clears it.
BOOLEAN cache64_is_dirty(CACHE64_OBJECT cache)
{
    VMM_ASSERT(cache);

    return (0 != BITMAP_GET(cache->flags, CACHE_DIRTY_FLAG));
}

// Free the table, both bitmaps, and the cache object itself.
void cache64_destroy(CACHE64_OBJECT cache)
{
    VMM_ASSERT(cache);
    vmm_mfree(cache->table);
    vmm_mfree(cache->dirty_bits);
    vmm_mfree(cache->valid_bits);
    vmm_mfree(cache);
}