github.com/jlmucb/cloudproxy@v0.0.0-20170830161738-b5aa0b619bc4/cpvmm/vmm/guest/scheduler/scheduler.c

/*
 * Copyright (c) 2013 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *     http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "file_codes.h"
#define VMM_DEADLOOP()          VMM_DEADLOOP_LOG(SCHEDULER_C)
#define VMM_ASSERT(__condition) VMM_ASSERT_LOG(SCHEDULER_C, __condition)
#include "scheduler.h"
#include "hw_utils.h"
#include "heap.h"
#include "guest.h"
#include "vmm_dbg.h"
#include "list.h"
#include "memory_allocator.h"
#include "lock.h"
#ifdef JLMDEBUG
#include "jlmdebug.h"
#endif



// Guest Scheduler
// Principles:
// 1. The scheduler works independently on each host CPU.
// 2. Schedulers on different host CPUs may communicate to make a common decision.


// Scheduler vCPU object: one per registered guest CPU
struct _SCHEDULER_VCPU_OBJECT;
typedef struct _SCHEDULER_VCPU_OBJECT {
    GUEST_CPU_HANDLE               gcpu;
    CPU_ID                         host_cpu;
    UINT16                         flags;
    UINT32                         reserved;
    struct _SCHEDULER_VCPU_OBJECT* next_same_host_cpu;
    struct _SCHEDULER_VCPU_OBJECT* next_all_cpus;
} SCHEDULER_VCPU_OBJECT;

// SCHEDULER_VCPU_OBJECT flags
#define VCPU_ALLOCATED_FLAG 1
#define VCPU_READY_FLAG     2

// Per-host-CPU scheduler state: the list of vCPUs assigned to this
// host CPU and the vCPU that is currently scheduled on it.
typedef struct _SCHEDULER_CPU_STATE {
    SCHEDULER_VCPU_OBJECT*  vcpu_obj_list;
    SCHEDULER_VCPU_OBJECT*  current_vcpu_obj;
} SCHEDULER_CPU_STATE;
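
// Each SCHEDULER_VCPU_OBJECT sits on two singly linked lists at once:
// - the global registration list headed by g_registered_vcpus, chained
//   through next_all_cpus (used by gcpu_2_vcpu_obj lookups), and
// - the per-host-CPU list headed by g_scheduler_state[host_cpu].vcpu_obj_list,
//   chained through next_same_host_cpu (used by the iterator functions and
//   the round-robin selection below).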

SCHEDULER_VCPU_OBJECT* scheduler_get_current_vcpu_for_guest( GUEST_ID guest_id );
static UINT32 g_host_cpus_count  = 0;
static UINT32 g_registered_vcpus_count = 0;

// head of the global list of all registered vCPU objects
static SCHEDULER_VCPU_OBJECT* g_registered_vcpus = NULL;

// scheduler state per host CPU, indexed by host CPU id
static SCHEDULER_CPU_STATE* g_scheduler_state = NULL;

// lock to support guest addition while performing scheduling operations
static VMM_READ_WRITE_LOCK g_registration_lock[1];


// Walk the global registration list and return the scheduler object that
// wraps the given guest CPU, or NULL if it was never registered.
static SCHEDULER_VCPU_OBJECT* gcpu_2_vcpu_obj( GUEST_CPU_HANDLE gcpu )
{
    SCHEDULER_VCPU_OBJECT *vcpu_obj = NULL;

    for(vcpu_obj = g_registered_vcpus; vcpu_obj != NULL;
                    vcpu_obj = vcpu_obj->next_all_cpus) {
        if(vcpu_obj->gcpu == gcpu) {
            return vcpu_obj;
        }
    }
    return NULL;
}

// Per-host-CPU list helpers.
// Push a vCPU object onto the list of vCPUs assigned to its host CPU.
void add_to_per_cpu_list(SCHEDULER_VCPU_OBJECT* vcpu_obj)
{
    CPU_ID host_cpu = vcpu_obj->host_cpu;
    SCHEDULER_CPU_STATE* state = &(g_scheduler_state[host_cpu]);

#ifdef JLMDEBUG
    bprint("add_to_per_cpu_list, vcpu_obj: %p\n", vcpu_obj);
    bprint("add_to_per_cpu_list, host_cpu: %d\n", vcpu_obj->host_cpu);
#endif
#ifdef JLMDEBUG1
    bprint("add_to_per_cpu_list, state: %p\n", state);
#endif
    vcpu_obj->next_same_host_cpu = state->vcpu_obj_list;
    state->vcpu_obj_list = vcpu_obj;
#ifdef JLMDEBUG1
    bprint("add_to_per_cpu_list, done\n");
#endif
    return;
}

// Initialization: allocate and zero the per-host-CPU scheduler state.
void scheduler_init( UINT16 number_of_host_cpus )
{
    UINT32 memory_for_state  = 0;

    vmm_memset(g_registration_lock, 0, sizeof(g_registration_lock));
    g_host_cpus_count = number_of_host_cpus;
    VMM_ASSERT( number_of_host_cpus != 0 );
    // count needed memory amount
    memory_for_state = sizeof(SCHEDULER_CPU_STATE)*g_host_cpus_count;
    lock_initialize_read_write_lock(g_registration_lock);
#ifdef JLMDEBUG
    bprint("g_host_cpus_count: %d, memory_for_state: %d\n",
           g_host_cpus_count, memory_for_state);
#endif
    g_scheduler_state = (SCHEDULER_CPU_STATE*) vmm_malloc(memory_for_state);
#ifdef JLMDEBUG
    if(g_scheduler_state == 0) {
        bprint("Can't allocate scheduler state\n");
        LOOP_FOREVER
    }
#endif
    VMM_ASSERT(g_scheduler_state != 0);
    // zero the whole array, not just the first entry
    vmm_memset((void*)g_scheduler_state, 0, memory_for_state);
}
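
// Illustrative only: a minimal bring-up sketch of this scheduler's API, under
// the assumption that earlier boot code already created the guest CPU handle
// and that scheduler_init()/scheduler_register_gcpu()/
// scheduler_select_initial_gcpu() are declared in scheduler.h. The guard macro
// SCHEDULER_USAGE_EXAMPLE, the helper name below, and the use of TRUE from the
// common VMM type headers are assumptions, not part of the real build; the
// sketch only shows the intended call order.
#ifdef SCHEDULER_USAGE_EXAMPLE
static GUEST_CPU_HANDLE example_bring_up_one_vcpu( GUEST_CPU_HANDLE gcpu,
                                                   UINT16 number_of_host_cpus )
{
    // one-time global init (normally done once during VMM bring-up)
    scheduler_init(number_of_host_cpus);
    // bind the guest CPU to the current host CPU and mark it ready
    scheduler_register_gcpu(gcpu, hw_cpu_id(), TRUE);
    // pick the first ready vCPU on this host CPU; returns NULL if none is ready
    return scheduler_select_initial_gcpu();
}
#endif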

// Register a guest CPU with the scheduler and bind it to the given host CPU.
void scheduler_register_gcpu(GUEST_CPU_HANDLE gcpu_handle, CPU_ID host_cpu_id,
                             BOOLEAN schedule_immediately )
{
    SCHEDULER_VCPU_OBJECT* vcpu_obj = NULL;

    vcpu_obj = (SCHEDULER_VCPU_OBJECT*) vmm_malloc(sizeof(SCHEDULER_VCPU_OBJECT));
#ifdef JLMDEBUG
    bprint("scheduler_register_gcpu, host %d, registered gcpus %d\n",
            host_cpu_id, g_registered_vcpus_count);
#endif
    VMM_ASSERT(vcpu_obj);
    interruptible_lock_acquire_writelock(g_registration_lock);
    vcpu_obj->next_all_cpus = g_registered_vcpus;
    g_registered_vcpus = vcpu_obj;
    hw_interlocked_increment((INT32*)&g_registered_vcpus_count);
    vcpu_obj->gcpu  = gcpu_handle;
    vcpu_obj->flags = 0;
    vcpu_obj->host_cpu = host_cpu_id;
    vcpu_obj->flags |= VCPU_ALLOCATED_FLAG;
    if (schedule_immediately) {
        vcpu_obj->flags |= (UINT16)VCPU_READY_FLAG;
    }
    // add to the per-host-cpu list
    add_to_per_cpu_list(vcpu_obj);
    lock_release_writelock(g_registration_lock);
#ifdef JLMDEBUG
    bprint("scheduler_register_gcpu done, registered gcpus: %d\n", g_registered_vcpus_count);
#endif
}

// Get the GUEST_CPU_HANDLE currently scheduled on this host CPU
GUEST_CPU_HANDLE scheduler_current_gcpu( void )
{
    SCHEDULER_VCPU_OBJECT* vcpu_obj = NULL;
    vcpu_obj = g_scheduler_state[hw_cpu_id()].current_vcpu_obj;
    VMM_ASSERT( vcpu_obj != NULL );
    return vcpu_obj == NULL ? NULL : vcpu_obj->gcpu;
}

// Get the host CPU id to which the given guest CPU is assigned.
// Assumes gcpu is valid; the caller must validate it.
UINT16 scheduler_get_host_cpu_id( GUEST_CPU_HANDLE gcpu )
{
    SCHEDULER_VCPU_OBJECT* vcpu_obj = NULL;

    interruptible_lock_acquire_readlock(g_registration_lock);
    vcpu_obj = gcpu_2_vcpu_obj(gcpu);
    VMM_ASSERT(vcpu_obj);
    lock_release_readlock(g_registration_lock);
    return vcpu_obj->host_cpu;
}

// Iterator over the guest CPUs assigned to a given host CPU
GUEST_CPU_HANDLE
scheduler_same_host_cpu_gcpu_next( SCHEDULER_GCPU_ITERATOR* ctx )
{
    SCHEDULER_VCPU_OBJECT* vcpu_obj;

    VMM_ASSERT( ctx );
    vcpu_obj = *ctx;
    if (vcpu_obj) {
        vcpu_obj = vcpu_obj->next_same_host_cpu;
        *ctx = vcpu_obj;
    }
    return (vcpu_obj ? vcpu_obj->gcpu : NULL);
}

GUEST_CPU_HANDLE scheduler_same_host_cpu_gcpu_first( SCHEDULER_GCPU_ITERATOR* ctx,
                                    CPU_ID host_cpu_id)
{
    SCHEDULER_VCPU_OBJECT* vcpu_obj;

    if (host_cpu_id >= g_host_cpus_count)
        return NULL;
    if (!ctx)
        return NULL;
    VMM_ASSERT(g_scheduler_state);
    vcpu_obj = g_scheduler_state[host_cpu_id].vcpu_obj_list;
    *ctx = vcpu_obj;
    return (vcpu_obj ? vcpu_obj->gcpu : NULL);
}
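
// Illustrative only: how a caller would walk every guest CPU bound to the
// current host CPU with the iterator pair above. SCHEDULER_GCPU_ITERATOR is
// assumed to be the opaque iterator type declared in scheduler.h; the guard
// macro and the helper name below are hypothetical, not part of the real build.
#ifdef SCHEDULER_USAGE_EXAMPLE
static UINT32 example_count_gcpus_on_this_host_cpu( void )
{
    SCHEDULER_GCPU_ITERATOR it;
    GUEST_CPU_HANDLE        gcpu;
    UINT32                  count = 0;

    // first() seeds the iterator from this host CPU's list; next() advances it
    for (gcpu = scheduler_same_host_cpu_gcpu_first(&it, hw_cpu_id());
         gcpu != NULL;
         gcpu = scheduler_same_host_cpu_gcpu_next(&it)) {
        ++count;  // each gcpu on this host CPU is visited exactly once
    }
    return count;
}
#endif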


// Scheduling
// Select the first ready vCPU on this host CPU and make it current.
GUEST_CPU_HANDLE scheduler_select_initial_gcpu(void)
{
    CPU_ID                 host_cpu = hw_cpu_id();
    SCHEDULER_CPU_STATE*   state = &(g_scheduler_state[host_cpu]);
    SCHEDULER_VCPU_OBJECT* next_vcpu = state->vcpu_obj_list;

#ifdef JLMDEBUG
    bprint("scheduler_select_initial_gcpu\n");
#endif
    // very simple implementation: assume only one guest per host CPU
    if (!(next_vcpu && ((next_vcpu->flags&VCPU_READY_FLAG)!=0))) {
        return NULL;
    }
    state->current_vcpu_obj = next_vcpu;
    // load full state of new guest from memory
    gcpu_swap_in(state->current_vcpu_obj->gcpu);
    return next_vcpu->gcpu;
}

// Round-robin to the next ready vCPU on this host CPU, swapping guest
// state out/in if the current vCPU changes.
GUEST_CPU_HANDLE scheduler_select_next_gcpu( void )
{
    CPU_ID                 host_cpu = hw_cpu_id();
    SCHEDULER_CPU_STATE*   state = &(g_scheduler_state[host_cpu]);
    SCHEDULER_VCPU_OBJECT* next_vcpu = NULL;

    if(state->current_vcpu_obj != NULL) {
        next_vcpu = state->current_vcpu_obj->next_same_host_cpu;
    }
    if(next_vcpu == NULL) {
        next_vcpu = state->vcpu_obj_list;
    }
    // very simple implementation: assume only one guest per host CPU
    if (!(next_vcpu && ((next_vcpu->flags&VCPU_READY_FLAG)!=0))) {
        return NULL;
    }
    if (state->current_vcpu_obj != next_vcpu) {
        if (state->current_vcpu_obj != NULL) {
            // save full state of prev. guest in memory
            gcpu_swap_out(state->current_vcpu_obj->gcpu);
        }
        state->current_vcpu_obj = next_vcpu;
        // load full state of new guest from memory
        gcpu_swap_in(state->current_vcpu_obj->gcpu);
    }
    return next_vcpu->gcpu;
}

// Schedule the given guest CPU on this host CPU.
// Assumes gcpu is valid; the caller must validate it.
GUEST_CPU_HANDLE scheduler_schedule_gcpu( GUEST_CPU_HANDLE gcpu )
{
    CPU_ID                 host_cpu = hw_cpu_id();
    SCHEDULER_CPU_STATE*   state = NULL;
    SCHEDULER_VCPU_OBJECT* next_vcpu = gcpu_2_vcpu_obj(gcpu);

    if (!(next_vcpu && ((next_vcpu->flags&VCPU_READY_FLAG)!=0))) {
        return NULL;
    }
    state = &(g_scheduler_state[host_cpu]);
    if (state->current_vcpu_obj != next_vcpu) {
        if (state->current_vcpu_obj != NULL) {
            // save full state of prev. guest in memory
            gcpu_swap_out(state->current_vcpu_obj->gcpu);
        }
        state->current_vcpu_obj = next_vcpu;
        // load full state of new guest from memory
        gcpu_swap_in(state->current_vcpu_obj->gcpu);
    }
    return state->current_vcpu_obj->gcpu;
}

// Return the gcpu of the given guest that is assigned to this host CPU,
// or NULL if the guest has no vCPU here.
GUEST_CPU_HANDLE scheduler_get_current_gcpu_for_guest( GUEST_ID guest_id )
{
    SCHEDULER_VCPU_OBJECT* vcpu_obj;
    const VIRTUAL_CPU_ID* vcpuid = NULL;
    VMM_ASSERT(g_scheduler_state);
    for (vcpu_obj = g_scheduler_state[ hw_cpu_id() ].vcpu_obj_list;
         NULL != vcpu_obj;
         vcpu_obj = vcpu_obj->next_same_host_cpu) {
        vcpuid = guest_vcpu(vcpu_obj->gcpu);
        // paranoid check: if this assertion fails, memory may be corrupted
        VMM_ASSERT(vcpuid);
        if (vcpuid->guest_id == guest_id) {
            return vcpu_obj->gcpu; // found
        }
    }
    return NULL;
}
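
// Illustrative only: a sketch of switching this host CPU to whichever vCPU of
// a given guest is bound here, combining the two functions above. The guard
// macro and the helper name are hypothetical; real callers of
// scheduler_schedule_gcpu() may handle the not-ready case differently.
#ifdef SCHEDULER_USAGE_EXAMPLE
static GUEST_CPU_HANDLE example_switch_to_guest( GUEST_ID guest_id )
{
    GUEST_CPU_HANDLE gcpu = scheduler_get_current_gcpu_for_guest(guest_id);

    if (gcpu == NULL) {
        return NULL;   // the guest has no vCPU on this host CPU
    }
    // performs the gcpu_swap_out()/gcpu_swap_in() transition if needed and
    // returns NULL when the target vCPU is not marked ready
    return scheduler_schedule_gcpu(gcpu);
}
#endif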

#ifdef INCLUDE_UNUSED_CODE
// Not MT-safe. Must be called when all CPUs are stopped.
GUEST_CPU_HANDLE scheduler_get_current_gcpu_on_host_cpu( CPU_ID host_cpu_id )
{
    if(g_scheduler_state[host_cpu_id].current_vcpu_obj == NULL) {
        return NULL;
    }
    return g_scheduler_state[host_cpu_id].current_vcpu_obj->gcpu;
}
#endif