github.com/jlmucb/cloudproxy@v0.0.0-20170830161738-b5aa0b619bc4/cpvmm/vmm/dbg/trace.c (about)

     1  /*
     2   * Copyright (c) 2013 Intel Corporation
     3   *
     4   * Licensed under the Apache License, Version 2.0 (the "License");
     5   * you may not use this file except in compliance with the License.
     6   * You may obtain a copy of the License at
     7   *     http://www.apache.org/licenses/LICENSE-2.0
     8   * Unless required by applicable law or agreed to in writing, software
     9   * distributed under the License is distributed on an "AS IS" BASIS,
    10   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    11   * See the License for the specific language governing permissions and
    12   * limitations under the License.
    13   */
    14  
    15  /*
    16     Trace mechanism
    17  */
    18  
    19  #include "trace.h"
    20  #include "heap.h"
    21  #include "common_libc.h"
    22  
#pragma warning( disable : 4100) // warning C4100: unreferenced formal parameter

// Advance ring index x by one, wrapping back to slot 0 after the last slot.
#define CYCLIC_INCREMENT(x)   do { (x)++; if ((x) == MAX_RECORDS_IN_BUFFER) (x) = 0; } while (0)

// Advance x but saturate at the last slot (increment then undo at the limit),
// so a non-recyclable buffer keeps rewriting its final record instead of wrapping.
#define NON_CYCLIC_INCREMENT(x)   do { (x)++; if ((x) == MAX_RECORDS_IN_BUFFER) (x) --; } while (0)

// Invoke apply_function(vm_index, cpu_index, buffer_index, param) once for
// every trace buffer: all guests x all guest CPUs x MAX_TRACE_BUFFERS each.
// Relies on the file-scope trace_state being initialized.
#define FOREACH_BUFFER(apply_function, param) \
do { \
    UINT32 vm_index = 0, cpu_index = 0, buffer_index = 0; \
    for (vm_index = 0; vm_index < trace_state->max_num_guests; vm_index++)  { \
        for (cpu_index = 0; cpu_index < trace_state->max_num_guest_cpus; cpu_index++) { \
            for (buffer_index = 0; buffer_index < MAX_TRACE_BUFFERS; buffer_index++) { \
                apply_function(vm_index, cpu_index, buffer_index, param); \
            } \
        } \
    } \
} while (0)
    40  
// One slot in a trace ring buffer.
typedef struct {
    BOOLEAN               valid;   // TRUE while the slot holds a live record
    UINT32                index;   // global sequence number (allocation order across ALL buffers)
    TRACE_RECORD_DATA     data;    // payload copied in by add_record()
    struct _TRACE_BUFFER *buffer;  // back-pointer to the owning buffer
} TRACE_RECORD;
    47  
    48  
// A fixed-size ring of trace records belonging to one (vm, cpu, buffer) slot.
typedef struct _TRACE_BUFFER {
    UINT32        vm_index;          // coordinates of this buffer, set at init
    UINT32        cpu_index;
    UINT32        buffer_index;
    UINT32        next_record_index; // write cursor; also the read cursor for removal
    TRACE_RECORD  records[MAX_RECORDS_IN_BUFFER];
} TRACE_BUFFER;
    56  
// Global trace state, allocated once by trace_init().
typedef struct _TRACE_STATE {
    UINT32        global_counter;    // next record sequence number, shared by all buffers
    BOOLEAN       locked;            // when TRUE, trace_add_record() rejects new records
    UINT32		  max_num_guests;
    UINT32		  max_num_guest_cpus;
    TRACE_BUFFER  buffers[1]; // first of max_num_guests * max_num_guest_cpus * MAX_TRACE_BUFFERS
                              // buffers allocated contiguously (C89 "struct hack" trailing array)
} TRACE_STATE;
    64  
static BOOLEAN trace_initialized = FALSE; // set once by trace_init()
static TRACE_STATE *trace_state = NULL;   // heap-allocated global trace state
static BOOLEAN trace_recyclable = TRUE;   // TRUE: rings wrap; FALSE: saturate at last slot
    68  
    69  
// Address of the buffer for (vm_index, cpu_index, buffer_index) within the
// flat buffers[] array: vm-major, then cpu, then buffer slot.
#define GET_BUFFER(vm_index, cpu_index, buffer_index) \
    (trace_state->buffers + \
    vm_index * trace_state->max_num_guest_cpus * MAX_TRACE_BUFFERS \
    + cpu_index * MAX_TRACE_BUFFERS \
    + buffer_index)
    75  
    76  static void initialize_trace_buffer( UINT32 vm_index, UINT32 cpu_index,
    77                          UINT32 buffer_index, void*    param  UNUSED)
    78  {
    79      TRACE_BUFFER *buffer;
    80      UINT32 record_index;
    81  
    82      buffer = GET_BUFFER(vm_index, cpu_index, buffer_index);
    83  
    84      buffer->vm_index = vm_index;
    85      buffer->cpu_index = cpu_index;
    86      buffer->buffer_index = buffer_index;
    87  
    88      for (record_index = 0; record_index < MAX_RECORDS_IN_BUFFER; record_index++) {
    89          buffer->records[record_index].valid = FALSE;
    90          buffer->records[record_index].buffer = buffer;
    91      }
    92  
    93      buffer->next_record_index = 0;
    94  }
    95  
    96  BOOLEAN trace_init( UINT32 max_num_guests, UINT32 max_num_guest_cpus)
    97  {
    98      if (trace_initialized) {
    99          return FALSE;
   100      }
   101      trace_state = vmm_memory_alloc(sizeof(TRACE_STATE) +
   102          max_num_guests * max_num_guest_cpus * MAX_TRACE_BUFFERS * sizeof(TRACE_BUFFER) - 1); // trace_state already includes one buffer
   103      if(NULL == trace_state) {
   104          return FALSE;
   105      }
   106  
   107      trace_state->global_counter = 0;
   108      trace_state->locked = FALSE;
   109      trace_state->max_num_guests = max_num_guests;
   110      trace_state->max_num_guest_cpus = max_num_guest_cpus;
   111  
   112      FOREACH_BUFFER(initialize_trace_buffer, NULL);
   113  
   114      trace_initialized = TRUE;
   115      return TRUE;
   116  }
   117  
   118  static void
   119  add_record( TRACE_BUFFER *buffer, TRACE_RECORD_DATA *data)
   120  {
   121      TRACE_RECORD *record = &buffer->records[buffer->next_record_index];
   122  
   123      record->valid = TRUE;
   124      record->index = trace_state->global_counter++;
   125  
   126      record->data.tsc        = data->tsc;
   127      record->data.exit_reason = data->exit_reason;
   128      record->data.guest_eip   = data->guest_eip;
   129      vmm_strcpy_s(record->data.string, MAX_STRING_LENGTH, data->string);
   130  
   131      if (trace_recyclable)
   132          CYCLIC_INCREMENT(buffer->next_record_index);
   133      else
   134          NON_CYCLIC_INCREMENT(buffer->next_record_index);
   135  }
   136  
   137  BOOLEAN trace_add_record( IN  UINT32  vm_index, IN  UINT32  cpu_index,
   138                   IN  UINT32  buffer_index, IN  TRACE_RECORD_DATA *data)
   139  {
   140      if (!trace_initialized || trace_state->locked || data == NULL
   141          || vm_index >= trace_state->max_num_guests || cpu_index >= trace_state->max_num_guest_cpus
   142          || buffer_index >= MAX_TRACE_BUFFERS) {
   143              return FALSE;
   144      }
   145      add_record(GET_BUFFER(vm_index, cpu_index, buffer_index), data);
   146      return TRUE;
   147  }
   148  
   149  static void remove_record( TRACE_RECORD *record)
   150  {
   151      record->valid = FALSE;
   152      CYCLIC_INCREMENT(record->buffer->next_record_index);
   153  }
   154  
   155  static void set_buffer_pointer_to_oldest_record( UINT32  vm_index, UINT32  cpu_index,
   156                                      UINT32  buffer_index, void* param UNUSED)
   157  {
   158      TRACE_BUFFER *buffer = GET_BUFFER(vm_index, cpu_index, buffer_index);
   159  
   160      if (!buffer->records[buffer->next_record_index].valid) {
   161          UINT32 i;
   162  
   163          for (i = 0; i < MAX_RECORDS_IN_BUFFER; i++) {
   164              if (buffer->records[i].valid) {
   165                  break; // found
   166              }
   167          }
   168          buffer->next_record_index = (i < MAX_RECORDS_IN_BUFFER)? i: 0;
   169      }
   170  }
   171  
   172  static void find_buffer_with_oldest_record( UINT32  vm_index, UINT32  cpu_index,
   173                                 UINT32  buffer_index, void   *param)
   174  {
   175      TRACE_RECORD **oldest_record_ptr = (TRACE_RECORD **)param;
   176      TRACE_BUFFER  *buffer = GET_BUFFER(vm_index, cpu_index, buffer_index);
   177      TRACE_RECORD  *record = &buffer->records[buffer->next_record_index];
   178  
   179      if (record->valid) {
   180          if ((*oldest_record_ptr == NULL) ||                // this record is the first record encountered
   181              (record->index < (*oldest_record_ptr)->index)) // this record is older than the oldest record
   182          {
   183              *oldest_record_ptr = record;
   184          }
   185      }
   186  }
   187  
   188  static TRACE_RECORD *
   189  find_oldest_record( void)
   190  {
   191      TRACE_RECORD *oldest_record = NULL;
   192  
   193      // find the oldest record in each buffer
   194      FOREACH_BUFFER(set_buffer_pointer_to_oldest_record, NULL);
   195  
   196      // find the globally oldest record
   197      FOREACH_BUFFER(find_buffer_with_oldest_record, &oldest_record);
   198  
   199      return oldest_record;
   200  }
   201  
   202  BOOLEAN trace_remove_oldest_record( OUT UINT32 *vm_index, OUT UINT32 *cpu_index,
   203                             OUT UINT32 *buffer_index, OUT UINT32 *record_index,
   204                             OUT TRACE_RECORD_DATA *data)
   205  {
   206      TRACE_RECORD *oldest_record;
   207  
   208      if (!trace_initialized) {
   209          return FALSE;
   210      }
   211  
   212      oldest_record = find_oldest_record();
   213      if (oldest_record == NULL) {
   214          return FALSE;
   215      }
   216  
   217      remove_record(oldest_record);
   218  
   219      if (vm_index  != NULL)    *vm_index  = oldest_record->buffer->vm_index;
   220      if (cpu_index != NULL)    *cpu_index = oldest_record->buffer->cpu_index;
   221      if (buffer_index != NULL) *buffer_index = oldest_record->buffer->buffer_index;
   222      if (record_index != NULL) *record_index = oldest_record->index;
   223      if (data != NULL) {
   224          data->exit_reason = oldest_record->data.exit_reason;
   225          data->guest_eip   = oldest_record->data.guest_eip;
   226          data->tsc        = oldest_record->data.tsc;
   227          vmm_strcpy_s(data->string, MAX_STRING_LENGTH, oldest_record->data.string);
   228      }
   229  
   230      return TRUE;
   231  }
   232  
   233  BOOLEAN trace_lock( void)
   234  {
   235      if (!trace_initialized || trace_state->locked) {
   236          return FALSE;
   237      }
   238      trace_state->locked = TRUE;
   239      return TRUE;
   240  }
   241  
   242  BOOLEAN trace_unlock( void)
   243  {
   244      if (!trace_initialized || !trace_state->locked) {
   245          return FALSE;
   246      }
   247      trace_state->locked = FALSE;
   248      return TRUE;
   249  }
   250  
   251  void trace_set_recyclable(BOOLEAN recyclable)
   252  {
   253      trace_recyclable = recyclable;
   254  }
   255