github.com/jlmucb/cloudproxy@v0.0.0-20170830161738-b5aa0b619bc4/cpvmm/vmm/utils/heap.c (about)

/*
 * Copyright (c) 2013 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *     http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */


#include "vmm_defs.h"
#include "common_libc.h"
#include "lock.h"
#include "heap.h"
#include "vmm_dbg.h"
#include "file_codes.h"
#include "profiling.h"
#ifdef JLMDEBUG
#include "jlmdebug.h"
#endif

#ifndef VMM_DEADLOOP
#define VMM_DEADLOOP()          VMM_DEADLOOP_LOG(HEAP_C)
#endif

#ifndef VMM_ASSERT
#define VMM_ASSERT(__condition) VMM_ASSERT_LOG(HEAP_C, __condition)
#endif

#define CHECK_ADDRESS_IN_RANGE(addr, range_start, size) \
    (((UINT64)(addr)) >= ((UINT64)(range_start)) && ((UINT64)(addr)) <= ((UINT64)(range_start)) + (size))

// fully parenthesized so the macros expand safely inside larger expressions
#define HEAP_PAGE_TO_POINTER(__page_no) \
    (((__page_no) >= ex_heap_start_page) ? \
        (void *) (ADDRESS) (ex_heap_base + (((__page_no) - ex_heap_start_page - 1) * PAGE_4KB_SIZE)) : \
        (void *) (ADDRESS) (heap_base + ((__page_no) * PAGE_4KB_SIZE)))
#define HEAP_POINTER_TO_PAGE(__pointer) \
    ((CHECK_ADDRESS_IN_RANGE(__pointer, ex_heap_base, ex_heap_pages * PAGE_4KB_SIZE)) ? \
        (HEAP_PAGE_INT) ((((ADDRESS)(__pointer) - ex_heap_base)) / PAGE_4KB_SIZE) + ex_heap_start_page + 1 : \
        (HEAP_PAGE_INT) ((((ADDRESS)(__pointer) - heap_base)) / PAGE_4KB_SIZE))
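
/*
 * Worked example of the page <-> pointer mapping (illustrative numbers, not
 * from a real configuration): suppose the primary heap manages pages 0..99
 * and vmm_heap_extend() later sets ex_heap_start_page = 100, reserving page
 * 100 itself as the always-in-use boundary page. Then:
 *   HEAP_PAGE_TO_POINTER(5)   == heap_base    + 5 * PAGE_4KB_SIZE
 *   HEAP_PAGE_TO_POINTER(101) == ex_heap_base + 0 * PAGE_4KB_SIZE
 *   HEAP_POINTER_TO_PAGE(heap_base + 5 * PAGE_4KB_SIZE) == 5
 *   HEAP_POINTER_TO_PAGE(ex_heap_base)                  == 101
 * Until vmm_heap_extend() runs, vmm_heap_initialize() sets
 * ex_heap_start_page past every valid page number, so only the primary
 * branch of each macro is taken.
 */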

typedef struct {
    VMM_FREE_MEM_CALLBACK_FUNC callback_func;
    void* context;
} FREE_MEM_CALLBACK_DESC;

#define HEAP_MAX_NUM_OF_RECORDED_CALLBACKS 20

static HEAP_PAGE_DESCRIPTOR *heap_array;
static ADDRESS              heap_base;    // address at which the heap is located
static HEAP_PAGE_INT        heap_total_pages;  // actual number of pages
static UINT32               heap_total_size = 0;
static VMM_LOCK             heap_lock;
static FREE_MEM_CALLBACK_DESC free_mem_callbacks[HEAP_MAX_NUM_OF_RECORDED_CALLBACKS];
static UINT32 num_of_registered_callbacks = 0;

static HEAP_PAGE_INT        heap_pages = 0;
static ADDRESS              ex_heap_base = 0;
static HEAP_PAGE_INT        ex_heap_pages = 0;
static HEAP_PAGE_INT        ex_heap_start_page = 0;
static HEAP_PAGE_INT        max_used_pages = 0;

extern UINT32 g_heap_pa_num;

HEAP_PAGE_INT vmm_heap_get_total_pages(void)
{
    return heap_total_pages;
}


// FUNCTION : vmm_heap_get_max_used_pages()
// PURPOSE  : Returns the maximum number of uVmm heap pages used
//            by the post-launch vmm
// ARGUMENTS:
// RETURNS  : HEAP_PAGE_INT - max number of heap pages used
HEAP_PAGE_INT vmm_heap_get_max_used_pages(void)
{
    return max_used_pages;
}


// FUNCTION : vmm_heap_initialize()
// PURPOSE  : Partition memory for memory allocation / free services.
//          : Calculate the actual number of pages.
// ARGUMENTS: IN ADDRESS heap_buffer_address - address at which the heap is located
//          : IN size_t  heap_buffer_size - in bytes
// RETURNS  : Last occupied address
ADDRESS vmm_heap_initialize( IN ADDRESS heap_buffer_address, IN size_t  heap_buffer_size)
{
    ADDRESS unaligned_heap_base;
    HEAP_PAGE_INT number_of_pages;
    HEAP_PAGE_INT i;

    // to be on the safe side
    heap_buffer_address = ALIGN_FORWARD(heap_buffer_address, sizeof(ADDRESS));

    // record total size of the whole heap area
    heap_total_size = (UINT32)ALIGN_FORWARD(heap_buffer_size, PAGE_4KB_SIZE);

    // heap descriptors are placed at the beginning
    heap_array = (HEAP_PAGE_DESCRIPTOR *) heap_buffer_address;

    // calculate how many pages we can support before alignment
    number_of_pages = (HEAP_PAGE_INT) ((heap_buffer_size + (g_heap_pa_num * PAGE_4KB_SIZE))
                              / (PAGE_4KB_SIZE + sizeof(HEAP_PAGE_DESCRIPTOR)));
    ex_heap_start_page = number_of_pages + 1;
    VMM_LOG(mask_anonymous, level_trace,"HEAP INIT: number_of_pages = %d\n", number_of_pages);

    // heap_base can start immediately after the end of heap_array...
    unaligned_heap_base = (ADDRESS) &heap_array[number_of_pages];

    // ...but must be aligned to the next 4KB boundary
    heap_base = ALIGN_FORWARD(unaligned_heap_base, PAGE_4KB_SIZE);    // here the 4K pages start
    //VMM_LOG(mask_anonymous, level_trace,"HEAP INIT: heap_base is at %P\n", heap_base);

    // decrement heap size, due to descriptor allocation and alignment
    heap_buffer_size -= heap_base - heap_buffer_address;
    //VMM_LOG(mask_anonymous, level_trace,"HEAP INIT: heap_buffer_size = %P\n", heap_buffer_size);

    // now we can get the actual number of available 4K pages
    heap_total_pages = (HEAP_PAGE_INT) (heap_buffer_size / PAGE_4KB_SIZE);
    heap_pages = heap_total_pages;
    VMM_LOG(mask_anonymous, level_trace,"HEAP INIT: heap_total_pages = %P\n", heap_total_pages);

    // BEFORE_VMLAUNCH. Can't hit this condition in POSTLAUNCH. Keep the
    // ASSERT for now.
    VMM_ASSERT(heap_total_pages > 0);

    for (i = 0; i < heap_total_pages ; ++i) {
        heap_array[i].in_use = 0;
        heap_array[i].number_of_pages = (heap_total_pages - i);
    }

    //VMM_DEBUG_CODE(vmm_heap_show());
    lock_initialize(&heap_lock);
    return heap_base + (heap_total_pages * PAGE_4KB_SIZE);
}
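
/*
 * Usage sketch (not part of this file): how startup code might hand a raw
 * buffer to vmm_heap_initialize(). The HEAP_USAGE_EXAMPLE guard and the
 * 16-page buffer are hypothetical; real callers pass the memory layout
 * chosen by the loader, and UINT8 is assumed to come from vmm_defs.h.
 */
#ifdef HEAP_USAGE_EXAMPLE
static UINT8 example_heap_buffer[16 * PAGE_4KB_SIZE];

static void example_heap_init(void)
{
    // returns the first address past the managed pages
    ADDRESS heap_end = vmm_heap_initialize((ADDRESS)(size_t)example_heap_buffer,
                                           sizeof(example_heap_buffer));
    VMM_LOG(mask_anonymous, level_trace,"HEAP EXAMPLE: heap ends at %P\n", heap_end);
}
#endif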


// FUNCTION : vmm_heap_extend()
// PURPOSE  : Extend the heap with an additional memory block and
//          : update the actual number of pages.
// ARGUMENTS: IN ADDRESS ex_heap_buffer_address - address at which the extension is located
//          : IN size_t  ex_heap_buffer_size - in bytes
// RETURNS  : Last occupied address
ADDRESS vmm_heap_extend( IN ADDRESS ex_heap_buffer_address,
    IN size_t  ex_heap_buffer_size)
{
    size_t  heap_buffer_size;
    HEAP_PAGE_INT i;

    lock_acquire(&heap_lock);

    VMM_LOG(mask_anonymous, level_print_always,"HEAP EXT: Max Used Initial Memory %dKB\n", (max_used_pages * 4));
    // extend can be called only once.
    // BEFORE_VMLAUNCH
    VMM_ASSERT(ex_heap_base == 0);

    // the extended heap cannot overlap with the previous heap
    // BEFORE_VMLAUNCH
    VMM_ASSERT(!CHECK_ADDRESS_IN_RANGE(ex_heap_buffer_address, heap_array, heap_total_size));
    // BEFORE_VMLAUNCH
    VMM_ASSERT(!CHECK_ADDRESS_IN_RANGE(heap_array, ex_heap_buffer_address, ex_heap_buffer_size));

    ex_heap_base = ALIGN_FORWARD(ex_heap_buffer_address, sizeof(ADDRESS));

    // record total size of the whole heap area
    heap_total_size += (UINT32)ALIGN_FORWARD(ex_heap_buffer_size, PAGE_4KB_SIZE);

    heap_buffer_size = ex_heap_buffer_size - (ex_heap_base - ex_heap_buffer_address);

    // leave one dummy page for the boundary, which is always marked as used
    ex_heap_pages = (HEAP_PAGE_INT) (heap_buffer_size / PAGE_4KB_SIZE) + 1;

    ex_heap_start_page = heap_total_pages;
    heap_total_pages += ex_heap_pages;

    // BEFORE_VMLAUNCH
    VMM_ASSERT(heap_total_pages > 0);

    heap_array[ex_heap_start_page].in_use = 1;
    heap_array[ex_heap_start_page].number_of_pages = 1;

    for (i = ex_heap_start_page + 1; i < heap_total_pages ; ++i) {
        heap_array[i].in_use = 0;
        heap_array[i].number_of_pages = (heap_total_pages - i);
    }

    lock_release(&heap_lock);
    return ex_heap_base + (ex_heap_pages * PAGE_4KB_SIZE);
}
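
/*
 * Sketch (hypothetical guard and buffer): extending the heap exactly once
 * with a second, disjoint block after vmm_heap_initialize() has run. The
 * extension's page descriptors still live in the original heap_array, which
 * is why the block must not overlap it (the ASSERTs above) and why extend
 * may only be called a single time.
 */
#ifdef HEAP_USAGE_EXAMPLE
static UINT8 example_ext_buffer[8 * PAGE_4KB_SIZE];

static void example_heap_extend(void)
{
    ADDRESS ext_end = vmm_heap_extend((ADDRESS)(size_t)example_ext_buffer,
                                      sizeof(example_ext_buffer));
    VMM_LOG(mask_anonymous, level_trace,"HEAP EXAMPLE: extension ends at %P\n", ext_end);
}
#endif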

#if (defined ENABLE_VTD_KEEP_CODE && defined ENABLE_VTD) || defined DEBUG
void vmm_heap_get_details(OUT HVA* base_addr, OUT UINT32* size) {
    *base_addr = (HVA)heap_array;
    *size = heap_total_size;
}
#endif

static void * page_alloc_unprotected(
#ifdef DEBUG
    char *file_name,
    INT32 line_number,
#endif
    HEAP_PAGE_INT number_of_pages)
{
    HEAP_PAGE_INT i;
    HEAP_PAGE_INT allocated_page_no;
    void *p_buffer = NULL;

    if (number_of_pages == 0) {
        return NULL;
    }

    // first-fit scan over the free-region descriptors
    for (i = 0; i < heap_total_pages ; ++i) {
        if ((0 == heap_array[i].in_use) && (number_of_pages <= heap_array[i].number_of_pages)) {
            VMM_ASSERT((i + heap_array[i].number_of_pages) <= heap_total_pages); // validity check

            // found a suitable buffer
            allocated_page_no = i;
            p_buffer = HEAP_PAGE_TO_POINTER(allocated_page_no);
            heap_array[allocated_page_no].in_use = 1;
            heap_array[allocated_page_no].number_of_pages = number_of_pages;
#ifdef DEBUG
            heap_array[i].file_name = file_name;
            heap_array[i].line_number = line_number;
#endif
            // mark the next number_of_pages-1 pages as in_use
            for (i = allocated_page_no + 1; i < (allocated_page_no + number_of_pages); ++i) {
                heap_array[i].in_use = 1;
                heap_array[i].number_of_pages = 0;
            }

            if (max_used_pages < (allocated_page_no + number_of_pages))
                max_used_pages = allocated_page_no + number_of_pages;
            break;    // leave the outer loop
        }
    }

    if (NULL == p_buffer) {
        VMM_LOG(mask_anonymous, level_trace,"ERROR: (%s %d)  Failed to allocate %d pages\n", __FILE__, __LINE__, number_of_pages);
    }

    TMSL_PROFILING_MEMORY_ALLOC((UINT64)p_buffer, number_of_pages * PAGE_4KB_SIZE, PROF_MEM_CONTEXT_TMSL);
    return p_buffer;
}


// FUNCTION : vmm_page_allocate()
// PURPOSE  : Allocates a contiguous buffer of the given size.
//            NOTE: the buffer is NOT zero-filled; see vmm_memory_allocate()
// ARGUMENTS: IN HEAP_PAGE_INT number_of_pages - size of the buffer in 4K pages
// RETURNS  : void*  address of the allocated buffer if OK, NULL if failed
void* vmm_page_allocate(
#ifdef DEBUG
    char    *file_name,
    INT32   line_number,
#endif
    IN HEAP_PAGE_INT number_of_pages)
{
    void *p_buffer = NULL;

    lock_acquire(&heap_lock);
    p_buffer = page_alloc_unprotected(
#ifdef DEBUG
                     file_name, line_number,
#endif
                     number_of_pages);
    lock_release(&heap_lock);
    return p_buffer;
}
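
/*
 * Round-trip sketch (hypothetical HEAP_USAGE_EXAMPLE guard): a two-page
 * allocation and its release. The DEBUG build threads __FILE__/__LINE__
 * through the allocator, mirroring how this file calls
 * page_alloc_unprotected() internally.
 */
#ifdef HEAP_USAGE_EXAMPLE
static void example_page_round_trip(void)
{
    void *two_pages = vmm_page_allocate(
#ifdef DEBUG
        __FILE__, __LINE__,
#endif
        2);

    if (two_pages != NULL) {
        // vmm_page_allocate() does not zero the buffer; do it explicitly
        vmm_memset(two_pages, 0, 2 * PAGE_4KB_SIZE);
        vmm_page_free(two_pages);
    }
}
#endif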


// FUNCTION : vmm_page_allocate_scattered()
// PURPOSE  : Fills the given array with addresses of allocated 4K pages
// ARGUMENTS: IN HEAP_PAGE_INT number_of_pages - number of 4K pages
//          : OUT void * p_page_array[] - contains the addresses of the allocated pages
// RETURNS  : number of successfully allocated pages
HEAP_PAGE_INT vmm_page_allocate_scattered(
#ifdef DEBUG
    char    *file_name,
    INT32   line_number,
#endif
    IN HEAP_PAGE_INT number_of_pages,
    OUT void * p_page_array[])
{
    HEAP_PAGE_INT i;
    HEAP_PAGE_INT number_of_allocated_pages;

    lock_acquire(&heap_lock);

    for (i = 0; i < number_of_pages; ++i) {
        p_page_array[i] = page_alloc_unprotected(
#ifdef DEBUG
                                 file_name, line_number,
#endif
                                 1);
        if (NULL == p_page_array[i]) {
            VMM_LOG(mask_anonymous, level_trace,"ERROR: (%s %d)  Failed to allocate pages %d..%d\n", __FILE__, __LINE__, i+1, number_of_pages);
            break;    // leave the loop
        }
    }
    lock_release(&heap_lock);

    number_of_allocated_pages = i;

    // fill the entries which failed to be allocated with NULLs
    for ( ; i < number_of_pages; ++i) {
        p_page_array[i] = NULL;
    }
    return number_of_allocated_pages;
}
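
/*
 * Scattered-allocation sketch (hypothetical guard): the function returns how
 * many pages it actually got and NULL-fills the rest of the array, so the
 * caller can release a partial result safely.
 */
#ifdef HEAP_USAGE_EXAMPLE
static void example_scattered(void)
{
    void *pages[4];
    HEAP_PAGE_INT got;
    HEAP_PAGE_INT i;

    got = vmm_page_allocate_scattered(
#ifdef DEBUG
        __FILE__, __LINE__,
#endif
        4, pages);

    // entries [got..3] are NULL on partial failure; free only what we got
    for (i = 0; i < got; ++i) {
        vmm_page_free(pages[i]);
    }
}
#endif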


static void vmm_mark_pages_free(
    HEAP_PAGE_INT page_from,
    HEAP_PAGE_INT page_to,
    HEAP_PAGE_INT pages_to_release)
{
    HEAP_PAGE_INT i;

    for (i = page_from; i < page_to; ++i) {
        heap_array[i].in_use = 0;
        heap_array[i].number_of_pages = pages_to_release - (i - page_from);
    }
}


// FUNCTION : vmm_page_free()
// PURPOSE  : Release a previously allocated buffer
// ARGUMENTS: IN void *p_buffer - buffer to be released
// RETURNS  : void
void vmm_page_free(IN void *p_buffer)
{
    HEAP_PAGE_INT release_from_page_id;    // first page to release
    HEAP_PAGE_INT release_to_page_id;      // page next to the last one to release
    HEAP_PAGE_INT pages_to_release;        // number of pages to be released
    ADDRESS address;

    address = (ADDRESS) (size_t) p_buffer;

    if (!(CHECK_ADDRESS_IN_RANGE(address, heap_base, heap_pages * PAGE_4KB_SIZE) ||
         CHECK_ADDRESS_IN_RANGE(address, ex_heap_base, ex_heap_pages * PAGE_4KB_SIZE)) ||
        (address & PAGE_4KB_MASK) != 0)
    {
        VMM_LOG(mask_anonymous, level_trace,"ERROR: (%s %d)  Buffer %p is out of heap space\n", __FILE__, __LINE__, p_buffer);
        // BEFORE_VMLAUNCH. Freeing an out-of-range buffer is a critical error.
        VMM_DEADLOOP();
        return;
    }
    lock_acquire(&heap_lock);

    release_from_page_id = HEAP_POINTER_TO_PAGE(p_buffer);

    //VMM_LOG(mask_anonymous, level_trace,"HEAP: trying to free page_id %d\n", release_from_page_id);

    if (0 == heap_array[release_from_page_id].in_use ||
        0 == heap_array[release_from_page_id].number_of_pages) {
        VMM_LOG(mask_anonymous, level_trace,"ERROR: (%s %d)  Page %d is not in use\n", __FILE__, __LINE__, release_from_page_id);
        // BEFORE_VMLAUNCH. CRITICAL check that should not fail.
        VMM_DEADLOOP();
        // release the lock in case VMM_DEADLOOP() is compiled out
        lock_release(&heap_lock);
        return;
    }

    pages_to_release = heap_array[release_from_page_id].number_of_pages;

    // check if the page next to the last released one is free,
    // and if so merge both regions
    release_to_page_id = release_from_page_id + pages_to_release;

    if (release_to_page_id < heap_total_pages &&
        0 == heap_array[release_to_page_id].in_use  &&
        (release_to_page_id + heap_array[release_to_page_id].number_of_pages) <= heap_total_pages) {
        pages_to_release += heap_array[release_to_page_id].number_of_pages;
    }

    // move backward, to grab all free pages, trying to prevent fragmentation
    while (release_from_page_id > 0  &&
           0 == heap_array[release_from_page_id - 1].in_use &&
           0 != heap_array[release_from_page_id - 1].number_of_pages) // 3rd check is for sanity only
    {
        release_from_page_id--;
        pages_to_release++;
    }

    vmm_mark_pages_free(release_from_page_id, release_to_page_id, pages_to_release);

    lock_release(&heap_lock);
    TMSL_PROFILING_MEMORY_FREE((UINT64)p_buffer, PROF_MEM_CONTEXT_TMSL);
}
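
/*
 * Coalescing walk-through (illustrative page numbers): suppose pages 10-11
 * are free, pages 12-13 are the buffer being freed, and pages 14-15 are a
 * previously freed two-page region.
 *   - pages_to_release starts at 2 (the descriptor at page 12);
 *   - the forward check finds page 14 free and adds its count (now 4);
 *   - the backward scan walks release_from_page_id down to 10 (now 6);
 *   - vmm_mark_pages_free(10, 14, 6) restamps pages 10..13, so the
 *     descriptor at page 10 advertises a single 6-page free run, while
 *     pages 14-15 keep their already-consistent free descriptors.
 */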

// FUNCTION : vmm_page_buff_size()
// PURPOSE  : Identify the number of pages in a previously allocated buffer
// ARGUMENTS: IN void *p_buffer - the buffer
// RETURNS  : UINT32 - number of pages this buffer is using
UINT32 vmm_page_buff_size(IN void *p_buffer)
{
    HEAP_PAGE_INT buffer_page_id;    // first page of the buffer
    UINT32 num_pages;                // number of pages in the buffer
    ADDRESS address;

    address = (ADDRESS) (size_t) p_buffer;

    if (!(CHECK_ADDRESS_IN_RANGE(address, heap_base, heap_pages * PAGE_4KB_SIZE) ||
         CHECK_ADDRESS_IN_RANGE(address, ex_heap_base, ex_heap_pages * PAGE_4KB_SIZE)) ||
        (address & PAGE_4KB_MASK) != 0) {
        VMM_LOG(mask_anonymous, level_trace,"ERROR: (%s %d)  Buffer %p is out of heap space\n", __FILE__, __LINE__, p_buffer);
        VMM_DEADLOOP();
        return 0;
    }

    buffer_page_id = HEAP_POINTER_TO_PAGE(p_buffer);

    if (0 == heap_array[buffer_page_id].in_use ||
        0 == heap_array[buffer_page_id].number_of_pages) {
        VMM_LOG(mask_anonymous, level_trace,"ERROR: (%s %d)  Page %d is not in use\n", __FILE__, __LINE__, buffer_page_id);
        VMM_DEADLOOP();
        return 0;
    }

    num_pages = (UINT32) heap_array[buffer_page_id].number_of_pages;
    return num_pages;
}
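
/*
 * Size-query sketch (hypothetical guard): the page count recorded in the
 * first descriptor of an allocation is exactly what the allocator was asked
 * for, so a 3-page buffer reports 3.
 */
#ifdef HEAP_USAGE_EXAMPLE
static void example_buff_size(void)
{
    void *buf = vmm_page_allocate(
#ifdef DEBUG
        __FILE__, __LINE__,
#endif
        3);

    if (buf != NULL) {
        VMM_ASSERT(vmm_page_buff_size(buf) == 3);
        vmm_page_free(buf);
    }
}
#endif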


// FUNCTION : vmm_memory_allocate()
// PURPOSE  : Allocates a contiguous buffer of the given size, filled with zeroes
// ARGUMENTS: IN UINT32 size - size of the buffer in bytes
// RETURNS  : void*  address of the allocated buffer if OK, NULL if failed
void* vmm_memory_allocate(
#ifdef DEBUG
    char    *file_name,
    INT32   line_number,
#endif
    IN UINT32 size)
{
    void *p_buffer = NULL;

    if (size == 0) {
        return NULL;
    }
    size = (UINT32) ALIGN_FORWARD(size, PAGE_4KB_SIZE);
    p_buffer = vmm_page_allocate(
#ifdef DEBUG
                            file_name,
                            line_number,
#endif
                            (HEAP_PAGE_INT) (size / PAGE_4KB_SIZE));
    if (NULL != p_buffer) {
        vmm_memset(p_buffer, 0, size);
    }
    return p_buffer;
}
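
/*
 * Byte-granular sketch (hypothetical guard): a 100-byte request is rounded
 * up to one 4KB page and comes back zero-filled, unlike vmm_page_allocate().
 * Allocations are page-backed, so vmm_page_free() is used to release them
 * here; any byte-level free wrapper would live outside this file.
 */
#ifdef HEAP_USAGE_EXAMPLE
static void example_memory_allocate(void)
{
    void *mem = vmm_memory_allocate(
#ifdef DEBUG
        __FILE__, __LINE__,
#endif
        100);    // internally rounded up to PAGE_4KB_SIZE

    if (mem != NULL) {
        vmm_page_free(mem);
    }
}
#endif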

#ifdef ENABLE_VTLB
HEAP_ALLOC_HANDLE vmm_heap_register_free_mem_callback(VMM_FREE_MEM_CALLBACK_FUNC callback_func, void* context) {
    UINT32 free_index;

    if (num_of_registered_callbacks == HEAP_MAX_NUM_OF_RECORDED_CALLBACKS) {
        return HEAP_INVALID_ALLOC_HANDLE;
    }

    free_index = num_of_registered_callbacks;
    num_of_registered_callbacks++;

    free_mem_callbacks[free_index].callback_func = callback_func;
    free_mem_callbacks[free_index].context = context;
    return (HEAP_ALLOC_HANDLE)free_index;
}
#endif

void* vmm_memory_allocate_must_succeed(
#ifdef DEBUG
    char    *file_name,
    INT32   line_number,
#endif
    HEAP_ALLOC_HANDLE handle,
    UINT32 size) {
    void* allocated_mem = vmm_memory_allocate(
#ifdef DEBUG
                                          file_name,
                                          line_number,
#endif
                                          size);
    UINT32 request_owner = (UINT32)handle;

    if (allocated_mem == NULL) {
        UINT32 i;

        // ask every other registered owner to release whatever memory it can
        for (i = 0; i < num_of_registered_callbacks; i++) {
            if (i == request_owner) {
                continue;
            }

            free_mem_callbacks[i].callback_func(free_mem_callbacks[i].context);
        }

        // retry now that the callbacks have had a chance to free memory
        allocated_mem = vmm_memory_allocate(
#ifdef DEBUG
                                           file_name,
                                           line_number,
#endif
                                           size);
        // BEFORE_VMLAUNCH. Must succeed.
        VMM_ASSERT(allocated_mem != NULL);
    }
    return allocated_mem;
}
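
/*
 * Emergency-reclaim sketch (hypothetical callback; ENABLE_VTLB builds only,
 * since registration is compiled out otherwise). On a failed allocation,
 * vmm_memory_allocate_must_succeed() invokes every callback except the
 * requester's own, then retries and asserts on a second failure. The
 * callback signature below assumes VMM_FREE_MEM_CALLBACK_FUNC takes the
 * registered context and returns void; see heap.h for the actual typedef.
 */
#if defined(HEAP_USAGE_EXAMPLE) && defined(ENABLE_VTLB)
static void example_reclaim(void* context)
{
    (void)context;
    // a real callback would release cached allocations here
}

static void example_must_succeed(void)
{
    HEAP_ALLOC_HANDLE h = vmm_heap_register_free_mem_callback(example_reclaim, NULL);
    void *mem = vmm_memory_allocate_must_succeed(
#ifdef DEBUG
        __FILE__, __LINE__,
#endif
        h, PAGE_4KB_SIZE);
    vmm_page_free(mem);
}
#endif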


#ifdef DEBUG

void vmm_heap_show(void)
{
    HEAP_PAGE_INT i;

    VMM_LOG(mask_anonymous, level_trace,"Heap Show: total_pages=%d\n", heap_total_pages);
    VMM_LOG(mask_anonymous, level_trace,"---------------------\n");

    for (i = 0; i < heap_total_pages; ) {
        VMM_LOG(mask_anonymous, level_trace,"Pages %d..%d ", i, i + heap_array[i].number_of_pages - 1);

        if (heap_array[i].in_use) {
            VMM_LOG(mask_anonymous, level_trace,"allocated in %s line=%d\n", heap_array[i].file_name, heap_array[i].line_number);
        }
        else {
            VMM_LOG(mask_anonymous, level_trace,"free\n");
        }

        i += heap_array[i].number_of_pages;
    }
    VMM_LOG(mask_anonymous, level_trace,"---------------------\n");
}
#endif