github.com/jlmucb/cloudproxy@v0.0.0-20170830161738-b5aa0b619bc4/cpvmm/vmm/ipc/ipc.c

     1  /*
     2   * Copyright (c) 2013 Intel Corporation
     3   *
     4   * Licensed under the Apache License, Version 2.0 (the "License");
     5   * you may not use this file except in compliance with the License.
     6   * You may obtain a copy of the License at
     7   *     http://www.apache.org/licenses/LICENSE-2.0
     8   * Unless required by applicable law or agreed to in writing, software
     9   * distributed under the License is distributed on an "AS IS" BASIS,
    10   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    11   * See the License for the specific language governing permissions and
    12   * limitations under the License.
    13   */
    14  
    15  #include "file_codes.h"
    16  #define VMM_DEADLOOP()          VMM_DEADLOOP_LOG(IPC_C)
    17  #define VMM_ASSERT(__condition) VMM_ASSERT_LOG(IPC_C, __condition)
    18  #include "hw_interlocked.h"
    19  #include "vmm_defs.h"
    20  #include "ipc_impl.h"
    21  #include "scheduler.h"
    22  #include "vmcs_actual.h"
    23  #include "vmx_ctrl_msrs.h"
    24  #include "list.h"
    25  #include "heap.h"
    26  #include "guest_cpu_vmenter_event.h"
    27  #include "vmm_dbg.h"
    28  #include "guest.h"
    29  #include "cli.h"
    30  #include "vmx_nmi.h"
    31  #include "hw_includes.h"
    32  
    33  #ifdef JLMDEBUG
    34  #include "jlmdebug.h"
    35  #endif
    36  
    37  extern UINT64 hw_interlocked_increment64(INT64* p_counter);
    38  
    39  
    40  #pragma warning( disable : 4100)        // unreferenced formal parameter
    41  
    42  
    43  static UINT16                  num_of_host_processors = 0;
    44  static GUEST_ID                nmi_owner_guest_id = 0;
    45  static char                    *ipc_state_memory = NULL;
    46  
    47  // per-CPU contexts for IPC bookkeeping
    48  static IPC_CPU_CONTEXT         *ipc_cpu_contexts = NULL;
    49  
    50  // Acknowledgement array: a row of per-receiver ack slots for each sending CPU.
    51  static volatile UINT32         *ipc_ack_array = NULL;
    52  
    53  // Per CPU activity state -- active/not-active (Wait-for-SIPI)
    54  static volatile IPC_CPU_ACTIVITY_STATE  *cpu_activity_state = NULL;
    55  
    56  // IPC send lock in order to have only one send in progress.
    57  static VMM_LOCK                send_lock;
    58  
    59  // Forward declaration of message preprocessing function.
    60  static BOOLEAN ipc_preprocess_message(IPC_CPU_CONTEXT *ipc, CPU_ID dst, IPC_MESSAGE_TYPE  msg_type);
    61  
    62  // Forward declaration of the IPC CLI registration function.
    63  static void ipc_cli_register(void);
    64  
    65  
    66  // Debug variables.
    67  static INT32                   debug_not_resend = 0;
    68  
    69  
    70  static UINT32 ipc_get_max_pending_messages(UINT32 number_of_host_processors)
    71  {
    72      // The max IPC message queue length for each processor: one slot per potential sender.
    73      return number_of_host_processors;
    74  }
    75  
    76  static UINT32 ipc_get_message_array_list_size(UINT32 number_of_host_processors) 
    77  {
    78      return (UINT32) ALIGN_FORWARD(array_list_memory_size( NULL, sizeof(IPC_MESSAGE), 
    79                          ipc_get_max_pending_messages(number_of_host_processors), 
    80                          IPC_ALIGNMENT), IPC_ALIGNMENT);
    81  }
    82  
    83  static BOOLEAN ipc_hw_signal_nmi(IPC_DESTINATION dst)
    84  {
    85      return local_apic_send_ipi(dst.addr_shorthand, dst.addr, 
    86                         IPI_DESTINATION_MODE_PHYSICAL,
    87                         IPI_DELIVERY_MODE_NMI, 0, 
    88                         IPI_DELIVERY_LEVEL_ASSERT /* must be 1 */,
    89                         IPI_DELIVERY_TRIGGER_MODE_EDGE);
    90  }
    91  
    92  
    93  static BOOLEAN ipc_hw_signal_sipi(IPC_DESTINATION dst)
    94  {
    95      return local_apic_send_ipi(dst.addr_shorthand, dst.addr, 
    96                        IPI_DESTINATION_MODE_PHYSICAL,
    97                        IPI_DELIVERY_MODE_START_UP, 0xFF, IPI_DELIVERY_LEVEL_ASSERT,
    98                        IPI_DELIVERY_TRIGGER_MODE_EDGE);
    99  }
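
// Illustrative sketch (not part of the original file): how callers form a
// single-CPU destination before signaling, mirroring the pattern used in
// ipc_execute_send() below. Active CPUs are poked with an NMI; CPUs parked
// in Wait-for-SIPI can only be woken with a SIPI.
#ifdef INCLUDE_UNUSED_CODE
static BOOLEAN ipc_hw_signal_single_cpu_sketch(CPU_ID target, BOOLEAN target_is_active)
{
    IPC_DESTINATION single_dst;

    single_dst.addr_shorthand = IPI_DST_NO_SHORTHAND;  // Address exactly one CPU.
    single_dst.addr = (UINT8) target;                  // Physical destination id.
    return target_is_active ? ipc_hw_signal_nmi(single_dst)
                            : ipc_hw_signal_sipi(single_dst);
}
#endif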
   100  
   101  #ifdef INCLUDE_UNUSED_CODE
   102  static BOOLEAN ipc_is_nmi_owner_gcpu(GUEST_CPU_HANDLE gcpu)
   103  {
   104      const VIRTUAL_CPU_ID *vcpu = NULL;
   105  
   106      vcpu = guest_vcpu(gcpu);
   107      return (vcpu->guest_id == nmi_owner_guest_id);
   108  }
   109  #endif
   110  
   111  
   112  static BOOLEAN ipc_cpu_is_destination(IPC_DESTINATION dst, CPU_ID this_cpu_id, CPU_ID dst_cpu_id)
   113  {
   114      BOOLEAN retVal = FALSE;
   115  
   116      switch(dst.addr_shorthand) {
   117        case IPI_DST_SELF:
   118          retVal = (this_cpu_id == dst_cpu_id);
   119          break;
   120        case IPI_DST_ALL_INCLUDING_SELF:
   121          retVal = TRUE;
   122          break;
   123        case IPI_DST_ALL_EXCLUDING_SELF:
   124          retVal = (this_cpu_id != dst_cpu_id);
   125          break;
   126        case IPI_DST_NO_SHORTHAND:
   127          retVal = ((CPU_ID) dst.addr == dst_cpu_id);
   128          break;
   129        case IPI_DST_CORE_ID_BITMAP:
   130          retVal = (BITMAP_ARRAY64_GET(dst.CoreBitMap, dst_cpu_id) != 0);
   131          break;
   132      }
   133      return retVal;
   134  }
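
// Sketch (assumption-flagged): building an explicit-set destination for
// IPI_DST_CORE_ID_BITMAP. It assumes dst.CoreBitMap can be populated with the
// same BITMAP_ARRAY64_SET macro that ipc_execute_send() applies to its local
// UINT64 flag arrays; only the GET direction appears in this file.
#ifdef INCLUDE_UNUSED_CODE
static IPC_DESTINATION ipc_make_bitmap_destination_sketch(const CPU_ID *cpus, UINT32 count)
{
    IPC_DESTINATION dst;
    UINT32          i;

    vmm_memset(&dst, 0, sizeof(dst));
    dst.addr_shorthand = IPI_DST_CORE_ID_BITMAP;
    for (i = 0; i < count; i++)
        BITMAP_ARRAY64_SET(dst.CoreBitMap, cpus[i]);  // Mark each target CPU.
    return dst;
}
#endif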
   135  
   136  
   137  static void ipc_increment_ack(volatile UINT32 *ack)
   138  {
   139      if (NULL != ack)
   140      {
   141          hw_interlocked_increment((INT32 *) ack);
   142      }
   143  }
   144  
   145  // NOTE: Queue functions are not multi-thread safe; the caller must acquire the lock.
   146  // Add a message to the queue. Caller must acquire the lock before calling.
   147  // RETURN VALUE:    TRUE if the message was queued, FALSE if it could not be queued
   148  static BOOLEAN ipc_enqueue_message(IPC_CPU_CONTEXT *ipc, IPC_MESSAGE_TYPE type, IPC_HANDLER_FN handler, void* arg,
   149                                     volatile UINT32 *before_handler_ack, volatile UINT32 *after_handler_ack)
   150  {
   151      IPC_MESSAGE  msg;
   152      CPU_ID       cpu_id = IPC_CPU_ID();
   153  
   154      VMM_ASSERT(ipc != NULL);
   155      VMM_ASSERT(handler != NULL);
   156      msg.type = type;
   157      msg.from = cpu_id;
   158      msg.handler = handler;
   159      msg.arg = arg;
   160      msg.before_handler_ack = before_handler_ack;
   161      msg.after_handler_ack = after_handler_ack;
   162      return array_list_add(ipc->message_queue, &msg);
   163  }
   164  
   165  // Dequeue a message for processing and acknowledge the sender.
   166  // Caller must acquire the lock before calling.
   167  // RETURN VALUE:    pointer to the dequeued message, or NULL if the queue is empty
   168  static IPC_MESSAGE *ipc_dequeue_message(IPC_CPU_CONTEXT *ipc)
   169  {
   170      IPC_MESSAGE  *msg = NULL;
   171  
   172      VMM_ASSERT(ipc != NULL);
   173      msg = (IPC_MESSAGE *) array_list_first(ipc->message_queue, NULL);
   174      if (msg != NULL) {
   175          array_list_remove(ipc->message_queue, msg);
   176          ipc_increment_ack(msg->before_handler_ack);
   177          ipc->num_of_received_ipc_messages++;  // Count received IPC messages.
   178      }
   179      return msg;
   180  }
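
// The two ack pointers carried by a message implement the sender's two wait
// policies: before_handler_ack is incremented by ipc_dequeue_message() above
// (the handler is merely about to run), while after_handler_ack is incremented
// only once the handler has returned. A minimal sketch of the sender-side wait
// on a single ack slot, written against the helpers in this file:
#ifdef INCLUDE_UNUSED_CODE
static void ipc_wait_for_one_ack_sketch(volatile UINT32 *ack)
{
    while (*ack == 0) {
        // Drain our own queue while spinning, exactly as ipc_execute_send()
        // does, so two CPUs that message each other cannot deadlock.
        if (!ipc_process_one_ipc())
            hw_pause();
    }
}
#endif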
   181  
   182  #ifdef INCLUDE_UNUSED_CODE
   183  // Clear message queue without processing. Acknowledge the sender. Caller must acquire the lock before calling.
   184  static void ipc_clear_message_queue(IPC_CPU_CONTEXT *ipc)
   185  {
   186      IPC_MESSAGE *msg = NULL;
   187  
   188      do {
   189          msg = ipc_dequeue_message(ipc);
   190          if (msg != NULL) {
   191              ipc_increment_ack(msg->after_handler_ack);
   192          }
   193      } while(msg != NULL);
   194  }
   195  #endif
   196  
   197  
   198  // Send message to destination processors.
   199  // RETURN VALUE:    number of CPUs on which handler is about to execute
   200  UINT32 ipc_execute_send(IPC_DESTINATION dst, IPC_MESSAGE_TYPE type, 
   201                          IPC_HANDLER_FN handler,
   202                          void *arg, BOOLEAN wait_for_handler_finish)
   203  {
   204      CPU_ID                  i;
   205      CPU_ID                  sender_cpu_id = IPC_CPU_ID();
   206      IPC_CPU_CONTEXT         *ipc = NULL;
   207      volatile UINT32         num_received_acks = 0;
   208      UINT32                  num_required_acks = 0;
   209      volatile UINT32         *ack_array = &ipc_ack_array[sender_cpu_id * num_of_host_processors];
   210      BOOLEAN                 status;
   211      IPC_DESTINATION         single_dst;
   212      UINT32                  wait_count = 0;
   213      UINT64                  nmi_accounted_flag[CPU_BITMAP_MAX] = {0};
   214      UINT64                  enqueue_flag[CPU_BITMAP_MAX] = {0};
   215      UINT64                  next_send_tsc;
   216      (void)status;
   217      // Initialize the ack array.
   218      vmm_memset((void *) ack_array, 0, num_of_host_processors * sizeof(UINT32));
   219  
   220      for(i = 0; i < num_of_host_processors; i++) {
   221          if (i != sender_cpu_id) {                               // Exclude yourself.
   222              if (ipc_cpu_is_destination(dst, sender_cpu_id, i)) {
   223                  ipc = &ipc_cpu_contexts[i];
   224                  lock_acquire(&ipc->data_lock);
   225                  if (ipc_preprocess_message(ipc, i, type)) { // Preprocess IPC; check whether it must be enqueued.
   226                      BOOLEAN  empty_queue = (array_list_size(ipc->message_queue) == 0);
   227                      BITMAP_ARRAY64_SET(enqueue_flag, i);  // Mark CPU active.
   228                      num_required_acks++;
   229                      if (!wait_for_handler_finish)  // Don't wait for handlers to finish.
   230                          status = ipc_enqueue_message(ipc, type, handler, arg, &ack_array[i], NULL);
   231                      else   // Wait for handlers to finish.
   232                          status = ipc_enqueue_message(ipc, type, handler, arg, NULL, &ack_array[i]);
   233                      ipc->num_of_sent_ipc_messages++;  // Count sent IPC messages.
   234                      VMM_ASSERT(status);
   235                      // Check if IPC signal should be sent.
   236                      if (empty_queue) {
   237                          // Send IPC signal (NMI or SIPI)
   238                          single_dst.addr_shorthand = IPI_DST_NO_SHORTHAND;
   239                          single_dst.addr = (UINT8)i;
   240                          if (cpu_activity_state[i] == IPC_CPU_ACTIVE) {
   241                              BITMAP_ARRAY64_SET(nmi_accounted_flag, i);
   242                              ipc->num_of_sent_ipc_nmi_interrupts++;
   243                              ipc_hw_signal_nmi(single_dst);
   244                          }
   245                          else
   246                              ipc_hw_signal_sipi(single_dst);
   247                      }
   248                  }
   249                  lock_release(&ipc->data_lock);
   250              }
   251          }
   252      }
   253  
   254      if (num_required_acks > 0) {
   255          VMM_ASSERT(hw_get_tsc_ticks_per_second() != 0);
   256  
   257          // Calculate the next TSC tick (about one second ahead)
   258          // at which to resend the NMI.
   259          next_send_tsc = hw_rdtsc() + hw_get_tsc_ticks_per_second();
   260          // Signal and wait for acknowledgement.
   261          while (num_received_acks != num_required_acks) {
   262              // Check wait count and time.
   263              if (wait_count++ > 1000 && hw_rdtsc() > next_send_tsc) {
   264                  wait_count = 0;
   265                  next_send_tsc = hw_rdtsc() + hw_get_tsc_ticks_per_second();
   266  
   267                  for (i = 0, num_received_acks = 0; i < num_of_host_processors; i++) {
   268                      // Send additional IPC signal to stalled cores.
   269                      if (BITMAP_ARRAY64_GET(enqueue_flag, i) && !ack_array[i]) {
   270                          // Exclude yourself and non-active CPUs.
   271                          single_dst.addr_shorthand = IPI_DST_NO_SHORTHAND;
   272                          single_dst.addr = (UINT8) i;
   273                          // Check that CPU is still active.
   274                          VMM_ASSERT(cpu_activity_state[i] != IPC_CPU_NOT_ACTIVE);
   275                          if (!debug_not_resend) {
   276                              ipc = &ipc_cpu_contexts[i];
   277                              lock_acquire(&ipc->data_lock);
   278                              if (cpu_activity_state[i] == IPC_CPU_ACTIVE) {
   279                                  if (!BITMAP_ARRAY64_GET(nmi_accounted_flag, i)) {
   280                                      BITMAP_ARRAY64_SET(nmi_accounted_flag, i);
   281                                      ipc->num_of_sent_ipc_nmi_interrupts++;
   282                                  }
   283                                  ipc_hw_signal_nmi(single_dst);
   284                                  VMM_LOG(mask_anonymous, level_trace,
   285                                          "[%d] send additional NMI to %d\n", 
   286                                           (int) sender_cpu_id, (int) i);
   287                              }
   288                              else {
   289                                  ipc_hw_signal_sipi(single_dst);
   290                                  VMM_LOG(mask_anonymous, level_trace,
   291                                          "[%d] send additional SIPI to %d\n", 
   292                                          (int) sender_cpu_id, (int) i);
   293                              }
   294                              lock_release(&ipc->data_lock);
   295                          }
   296                      }
   297                  }
   298              }
   299              else {
   300                  // Try to process our own received messages, to prevent
   301                  // deadlock when two cores send messages to each other simultaneously.
   302                  if (!ipc_process_one_ipc())
   303                      hw_pause();
   304                  // Count received acks.
   305                  for (i = 0, num_received_acks = 0; i < num_of_host_processors; i++)
   306                      num_received_acks += ack_array[i];
   307              }
   308          }
   309      }
   310      return num_required_acks;
   311  }
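
// Design note on the send loop above: an NMI/SIPI is raised only when the
// destination queue was empty, because a non-empty queue implies a signal is
// already outstanding and the receiver drains its entire queue per signal.
// The one-second TSC deadline in the wait loop covers the case of a signal
// that was lost or swallowed before the receiver could act on it.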
   312  
   313  
   314  // Process all IPC from this CPU's message queue.
   315  void ipc_process_all_ipc_messages(IPC_CPU_CONTEXT  *ipc, BOOLEAN  nmi_flag)
   316  {
   317      IPC_MESSAGE      *msg = 0;
   318      IPC_HANDLER_FN   handler = NULL;
   319      void             *arg = NULL;
   320      volatile UINT32  *after_handler_ack = NULL;
   321      BOOLEAN          last_msg = FALSE;
   322  
   323      if (array_list_size(ipc->message_queue) == 0)
   324          return;
   325  
   326      // Process all IPC messages.
   327      lock_acquire(&ipc->data_lock);
   328  
   329      do {
   330          // Get an IPC message from the queue.
   331          msg = ipc_dequeue_message(ipc);
   332          VMM_ASSERT(msg != NULL);
   333          // Check for last message.
   334          if (array_list_size(ipc->message_queue) == 0) {
   335              last_msg = TRUE;
   336              // Adjust processed interrupt counters.
   337              if (nmi_flag) {
   338                  ipc->num_processed_nmi_interrupts++;
   339                  ipc->num_of_processed_ipc_nmi_interrupts++;
   340              }
   341          }
   342  
   343          // Process message.
   344          handler = msg->handler;
   345          arg = msg->arg;
   346          after_handler_ack = msg->after_handler_ack;
   347          lock_release(&ipc->data_lock);
   348          handler(IPC_CPU_ID(), arg);
   349          lock_acquire(&ipc->data_lock);
   350  
   351          // Postprocessing.
   352          ipc_increment_ack(after_handler_ack);
   353      } while (!last_msg);
   354      lock_release(&ipc->data_lock);
   355  }
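
// Note that the loop above releases the per-CPU data_lock around the callback,
// so a handler may itself send IPCs or take other locks. A minimal sketch of a
// well-behaved handler, assuming IPC_HANDLER_FN is "void (*)(CPU_ID, void *)"
// as the call sites in this file suggest (the first argument is the id of the
// CPU executing the handler):
#ifdef INCLUDE_UNUSED_CODE
static void ipc_completion_handler_sketch(CPU_ID this_cpu UNUSED, void *arg)
{
    volatile UINT32 *done_counter = (volatile UINT32 *) arg;

    // ... per-CPU work (e.g. flushing CPU-local state) would go here ...
    hw_interlocked_increment((INT32 *) done_counter);  // Report completion.
}
#endif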
   356  
   357  #ifdef ENABLE_VTD
   358  extern BOOLEAN vtd_handle_fault(void);
   359  #endif
   360  
   361  
   362  // Process all queued IPC messages, then decide whether an NMI must be injected into the guest.
   363  // RETURN: TRUE if an NMI is to be injected into the guest, FALSE otherwise
   364  static BOOLEAN ipc_dispatcher(IPC_CPU_CONTEXT *ipc, GUEST_CPU_HANDLE gcpu UNUSED)
   365  {
   366      BOOLEAN  nmi_injected_to_guest = FALSE;
   367  
   368      // Process all IPC messages.
   369      ipc_process_all_ipc_messages(ipc, TRUE);
   370      // Decide whether an NMI must be injected into the guest.
   371      lock_acquire(&ipc->data_lock);
   372      VMM_DEBUG_CODE(
   373      // Sanity check.
   374      if (ipc->num_received_nmi_interrupts < ipc->num_processed_nmi_interrupts ||
   375          ipc->num_of_sent_ipc_nmi_interrupts<ipc->num_of_processed_ipc_nmi_interrupts) {
   376          VMM_LOG(mask_anonymous, level_trace,"[%d] IPC Anomaly\n", IPC_CPU_ID());
   377          VMM_DEADLOOP();
   378      }
   379      )
   380  
   381      // Check if we have blocked guest NMIs.
   382      if (ipc->num_blocked_nmi_injections_to_guest > 0) {
   383          VMM_LOG(mask_anonymous, level_trace,"[%d] - %s: Blocked Injection counter = %d\n", IPC_CPU_ID(),
   384                  __FUNCTION__, ipc->num_blocked_nmi_injections_to_guest);
   385  
   386          nmi_injected_to_guest = TRUE;               // Set injection flag.
   387          ipc->num_blocked_nmi_injections_to_guest--; // Adjust blocked NMI counter.
   388      }
   389      else if (ipc->num_of_sent_ipc_nmi_interrupts!=ipc->num_received_nmi_interrupts &&
   390               NMIS_WAITING_FOR_PROCESSING(ipc) != IPC_NMIS_WAITING_FOR_PROCESSING(ipc)) {
   391          /*   
   392          VMM_LOG(mask_anonymous, level_trace,
   393             "[%d] - %s: NMI_RCVD = %d NMI_PROCESSED = %d, IPC_NMI_SENT = %d IPC_NMI_PROCESSED = %d\n",
   394                   IPC_CPU_ID(), __FUNCTION__,
   395                   ipc->num_received_nmi_interrupts, ipc->num_processed_nmi_interrupts,
   396                   ipc->num_of_sent_ipc_nmi_interrupts, ipc->num_of_processed_ipc_nmi_interrupts);
   397          */
   398          nmi_injected_to_guest = TRUE;     // Set injection flag.
   399          ipc->num_processed_nmi_interrupts++; // Adjust common NMI processed counter.
   400          nmi_raise_this();
   401      }
   402      lock_release(&ipc->data_lock);
   403      return nmi_injected_to_guest;
   404  }
   405  
   409  
   410  
   411  // ISR to handle NMI exception while in VMM (vector 2).
   412  // Enables the NMI window for all guests to defer handling to more
   413  // convenient conditions (e.g. stack, blocking, etc.)
   414  static void ipc_nmi_interrupt_handler(const ISR_PARAMETERS_ON_STACK  *p_stack UNUSED)
   415  {
   416      CPU_ID            cpu_id = IPC_CPU_ID();
   417      IPC_CPU_CONTEXT   *ipc = &ipc_cpu_contexts[cpu_id];
   418      GUEST_CPU_HANDLE  gcpu = NULL;
   419  
   420  #ifdef ENABLE_VTD
   421      if (vtd_handle_fault()) {
   422          return;
   423      }
   424  #endif // ENABLE_VTD
   425      hw_interlocked_increment64((INT64*)(&ipc->num_received_nmi_interrupts));
   426      // Request an NMI window for the current guest on this host CPU.
   427      gcpu = scheduler_current_gcpu();
   428      VMM_ASSERT(gcpu);
   429      vmcs_nmi_handler(gcpu_get_vmcs(gcpu));
   430  }
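
// How an IPC-originated NMI flows through the handlers in this file:
//   1. ipc_nmi_interrupt_handler() above only counts the NMI and requests an
//      NMI window for the current guest via vmcs_nmi_handler().
//   2. ipc_nmi_window_vmexit_handler() below then runs in ordinary VM-exit
//      context, where it is safe to drain the queue through ipc_dispatcher().
//   3. ipc_nmi_vmexit_handler() covers NMIs that arrive while executing the
//      guest and drains the queue directly.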
   431  
   432  
   433  // Handle Vm-Exit due to NMI Window -- handle pending IPC if any.
   434  // Decide on injecting NMIs to guest if required.
   435  BOOLEAN ipc_nmi_window_vmexit_handler(GUEST_CPU_HANDLE gcpu)
   436  {
   437      CPU_ID           cpu_id = IPC_CPU_ID();
   438      IPC_CPU_CONTEXT  *ipc = &ipc_cpu_contexts[cpu_id];
   439  
   440      VMM_ASSERT(gcpu != NULL);
   441      gcpu_set_pending_nmi(gcpu, 0);                      // disable nmi window
   442      // Handle queued IPCs.
   443      return !ipc_dispatcher(ipc, gcpu);
   444  }
   445  
   446  
   447  // Handle Vm-Exit due to NMI while in guest. Handle IPC if NMI was due to IPC.
   448  // Reflect NMI back to guest if it is hardware or guest initiated NMI.
   449  BOOLEAN ipc_nmi_vmexit_handler(GUEST_CPU_HANDLE gcpu)
   450  {
   451      CPU_ID           cpu_id = IPC_CPU_ID();
   452      IPC_CPU_CONTEXT  *ipc = &ipc_cpu_contexts[cpu_id];
   453  
   454  #ifdef ENABLE_VTD
   455      if(vtd_handle_fault()) {
   456          // Clear hardware NMI blocking (the IRET re-enables NMIs).
   457          hw_perform_asm_iret();
   458          return TRUE;
   459      }
   460  #endif //ENABLE_VTD
   461      hw_interlocked_increment64((INT64*)&ipc->num_received_nmi_interrupts);
   462      hw_perform_asm_iret();
   463      // Handle queued IPCs.
   464      return !ipc_dispatcher(ipc, gcpu);
   465  }
   466  
   467  
   468  // Handle IPC if SIPI was due to IPC.
   469  // RETURN : TRUE, if SIPI was due to IPC, FALSE otherwise.
   470  BOOLEAN ipc_sipi_vmexit_handler(GUEST_CPU_HANDLE gcpu)
   471  {
   472      CPU_ID                       cpu_id = IPC_CPU_ID();
   473      IPC_CPU_CONTEXT              *ipc = &ipc_cpu_contexts[cpu_id];
   474      VMCS_OBJECT                  *vmcs = gcpu_get_vmcs(gcpu);
   475      IA32_VMX_EXIT_QUALIFICATION  qualification;
   476      BOOLEAN                      ret_val = FALSE;
   477  
   478  #ifdef JLMDEBUG
   479      bprint("ipc_sipi_vmexit_handler, cpu: %d\n", cpu_id);
   480  #endif
   481      qualification.Uint64 = vmcs_read(vmcs, VMCS_EXIT_INFO_QUALIFICATION);
   482      // Check if this is an IPC SIPI signal (ipc_hw_signal_sipi sends vector 0xFF).
   483      if (qualification.Sipi.Vector == 0xFF) {
   484          // Process all IPC messages.
   485          ipc_process_all_ipc_messages(ipc, FALSE);
   486          // Clear all NMI counters.
   487          lock_acquire(&ipc->data_lock);
   488          ipc->num_received_nmi_interrupts = 0;
   489          ipc->num_processed_nmi_interrupts = 0;
   490          ipc->num_of_sent_ipc_nmi_interrupts = 0;
   491          ipc->num_of_processed_ipc_nmi_interrupts = 0;
   492          ipc->num_blocked_nmi_injections_to_guest = 0;
   493          lock_release(&ipc->data_lock);
   494          ret_val = TRUE;
   495      }
   496      return ret_val;
   497  }
   498  
   499  
   500  // Preprocess normal message. Caller must acquire the lock before calling.
   501  // RETURN VALUE: TRUE if message must be enqueued at destination CPU
   502  BOOLEAN ipc_preprocess_normal_message(IPC_CPU_CONTEXT *ipc UNUSED, CPU_ID dst)
   503  {
   504      BOOLEAN enqueue_to_dst;
   505  
   506      enqueue_to_dst = (cpu_activity_state[dst] != IPC_CPU_NOT_ACTIVE);
   507      return enqueue_to_dst;
   508  }
   509  
   510  
   511  // Preprocess START (CPU on) message. Caller must acquire the lock before calling.
   512  // RETURN VALUE: TRUE if message must be enqueued at destination CPU
   513  BOOLEAN ipc_preprocess_start_message(IPC_CPU_CONTEXT *ipc, CPU_ID dst UNUSED)
   514  {
   515      ipc->num_start_messages++;
   516      // never enqueue 'start' message
   517      return FALSE;
   518  }
   519  
   520  
   521  // Preprocess STOP (CPU off) message. Caller must acquire the lock before calling.
   522  // RETURN VALUE: TRUE if message must be enqueued at destination CPU
   523  BOOLEAN ipc_preprocess_stop_message(IPC_CPU_CONTEXT *ipc, CPU_ID dst)
   524  {
   525      BOOLEAN enqueue_to_dst;
   526  
   527      enqueue_to_dst = (cpu_activity_state[dst] != IPC_CPU_NOT_ACTIVE);
   528      ipc->num_stop_messages++;
   529      return enqueue_to_dst;
   530  }
   531  
   532  
   533  // Preprocess message. Caller must acquire the lock before calling.
   534  // RETURN VALUE: TRUE if message must be enqueued at destination CPU
   535  BOOLEAN ipc_preprocess_message(IPC_CPU_CONTEXT *ipc , CPU_ID dst, IPC_MESSAGE_TYPE  msg_type)
   536  {
   537      BOOLEAN enqueue_to_dst = FALSE;
   538  
   539      switch (msg_type) {
   540          case IPC_TYPE_NORMAL:
   541              enqueue_to_dst = ipc_preprocess_normal_message(ipc, dst);
   542              break;
   543          case IPC_TYPE_START:
   544              enqueue_to_dst = ipc_preprocess_start_message(ipc, dst);
   545              break;
   546          case IPC_TYPE_STOP:
   547              enqueue_to_dst = ipc_preprocess_stop_message(ipc, dst);
   548              break;
   549          case IPC_TYPE_SYNC:
   550          default:
   551              break;
   552      }
   553      return enqueue_to_dst;
   554  }
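
// Summary of the preprocessing policy implemented above:
//
//   IPC_TYPE_NORMAL   enqueue iff the destination CPU is not in Wait-for-SIPI
//   IPC_TYPE_START    never enqueued; only counted on the destination context
//   IPC_TYPE_STOP     counted, and enqueued iff the destination is not in Wait-for-SIPI
//   IPC_TYPE_SYNC     never enqueued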
   555  
   556  
   557  // Send IPC to destination CPUs. Returns just before handlers are about to execute.
   558  // RETURN VALUE:    number of CPUs on which handler is about to execute
   559  UINT32 ipc_send_message(IPC_DESTINATION dst, IPC_MESSAGE_TYPE type, IPC_HANDLER_FN handler, void* arg)
   560  {
   561      UINT32  num_of_receivers = 0;
   562  
   563      if ((int) type >= IPC_TYPE_NORMAL && (int) type < IPC_TYPE_LAST) {
   564          switch (dst.addr_shorthand) {
   565  //              case IPI_DST_SELF:
   566  //              case IPI_DST_ALL_INCLUDING_SELF:
   567                  case IPI_DST_ALL_EXCLUDING_SELF:
   568                  case IPI_DST_NO_SHORTHAND:
   569                  case IPI_DST_CORE_ID_BITMAP:
   570                  //      interruptible_lock_acquire(&send_lock);
   571                    num_of_receivers= ipc_execute_send(dst, type, handler, arg, FALSE);
   572                  //      lock_release(&send_lock);
   573                    break;
   574                  default:
   575                    VMM_LOG(mask_anonymous, level_trace,"ipc_send_message: Bad message destination shorthand 0x%X\r\n", dst.addr_shorthand);
   576                    break;
   577          }
   578      }
   579      else {
   580          VMM_LOG(mask_anonymous, level_trace,
   581                  "ipc_send_message: Bad message type %d\r\n", type);
   582      }
   583      return num_of_receivers;
   584  }
   585  
   586  
   587  // Send IPC to destination CPUs. Returns after handlers have finished executing.
   588  // RETURN VALUE:    number of CPUs on which the handler was executed
   589  UINT32 ipc_send_message_sync(IPC_DESTINATION dst, IPC_MESSAGE_TYPE type, IPC_HANDLER_FN handler, void* arg)
   590  {
   591      UINT32  num_of_receivers = 0;
   592  
   593      if ((int) type >= IPC_TYPE_NORMAL && (int) type < IPC_TYPE_LAST) {
   594          switch (dst.addr_shorthand) {
   595  //          case IPI_DST_SELF:
   596  //          case IPI_DST_ALL_INCLUDING_SELF:
   597              case IPI_DST_ALL_EXCLUDING_SELF:
   598              case IPI_DST_NO_SHORTHAND:
   599              case IPI_DST_CORE_ID_BITMAP:            
   600          //      interruptible_lock_acquire(&send_lock);
   601                  num_of_receivers = ipc_execute_send(dst, type, handler, arg, TRUE);
   602          //      lock_release(&send_lock);
   603                  break;
   604  
   605              default:
   606                  VMM_LOG(mask_anonymous, level_trace,"ipc_send_message_sync: Bad message destination shorthand 0x%X\r\n", dst.addr_shorthand);
   607                  break;
   608          }
   609      }
   610      else {
   611          VMM_LOG(mask_anonymous, level_trace,"ipc_send_message_sync: Bad message type %d\r\n", type);
   612      }
   613      return num_of_receivers;
   614  }
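
// Usage sketch (not part of the original file): broadcast a handler to every
// other CPU and block until all of them have run it. Assumes IPC_HANDLER_FN is
// "void (*)(CPU_ID, void *)" as the call sites in this file suggest.
#ifdef INCLUDE_UNUSED_CODE
static void ipc_broadcast_handler_sketch(CPU_ID this_cpu UNUSED, void *arg UNUSED)
{
    // Per-CPU work executed on every destination CPU goes here.
}

static UINT32 ipc_broadcast_sync_sketch(void)
{
    IPC_DESTINATION dst;

    vmm_memset(&dst, 0, sizeof(dst));
    dst.addr_shorthand = IPI_DST_ALL_EXCLUDING_SELF;
    // Returns only after the handler has finished on every destination CPU.
    return ipc_send_message_sync(dst, IPC_TYPE_NORMAL,
                                 ipc_broadcast_handler_sketch, NULL);
}
#endif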
   615  
   616  
   617  // Process one IPC from this CPU's message queue.
   618  // RETURN VALUE:    TRUE if IPC was processed, FALSE if there were no pending IPCs.
   619  BOOLEAN ipc_process_one_ipc(void)
   620  {
   621      CPU_ID           cpu_id = IPC_CPU_ID();
   622      IPC_CPU_CONTEXT  *ipc = &ipc_cpu_contexts[cpu_id];
   623      IPC_MESSAGE      *msg = 0;
   624      IPC_HANDLER_FN   handler = NULL;
   625      void             *arg = NULL;
   626      volatile UINT32  *after_handler_ack = NULL;
   627      BOOLEAN          process_ipc_msg = FALSE;
   628  
   629      if (array_list_size(ipc->message_queue) == 0)
   630          return process_ipc_msg;
   631  
   632      lock_acquire(&ipc->data_lock);
   633  
   634      msg = ipc_dequeue_message(ipc);
   635      process_ipc_msg = (msg != NULL);
   636  
   637      if (process_ipc_msg) {
   638          // Check for last message.
   639          if (array_list_size(ipc->message_queue) == 0 && cpu_activity_state[cpu_id] == IPC_CPU_ACTIVE) {
   640              // Adjust processed interrupt counters.
   641              ipc->num_processed_nmi_interrupts++;
   642              ipc->num_of_processed_ipc_nmi_interrupts++;
   643          }
   644  
   645          // Process a message.
   646          handler = msg->handler;
   647          arg = msg->arg;
   648          after_handler_ack = msg->after_handler_ack;
   649  
   650          lock_release(&ipc->data_lock);
   651          handler(IPC_CPU_ID(), arg);
   652          lock_acquire(&ipc->data_lock);
   653  
   654          // Postprocessing.
   655          ipc_increment_ack(after_handler_ack);
   656      }
   657  
   658      lock_release(&ipc->data_lock);
   659  
   660      return process_ipc_msg;
   661  }
   662  
   663  
   664  // Mark CPU as ready for IPC. Called when the CPU is no longer in the Wait-for-SIPI state.
   665  // The per-CPU data lock serializes the state change against concurrent senders.
   666  void ipc_change_state_to_active(GUEST_CPU_HANDLE gcpu UNUSED)
   667  {
   668      CPU_ID           cpu_id = IPC_CPU_ID();
   669      IPC_CPU_CONTEXT  *ipc = &ipc_cpu_contexts[cpu_id];
   670  
   671      if (cpu_activity_state[cpu_id] == IPC_CPU_ACTIVE)
   672          return;
   673  
   674      lock_acquire(&ipc->data_lock);
   675      cpu_activity_state[cpu_id] = IPC_CPU_ACTIVE;
   676      lock_release(&ipc->data_lock);
   677      VMM_LOG(mask_anonymous, level_trace,"CPU%d: IPC state changed to ACTIVE\n", cpu_id);
   678  }
   679  
   680  
   681  // Mark CPU as NOT ready for IPC.
   682  // Called when the CPU is about to enter the Wait-for-SIPI state.
   683  // Also clears any pending NMI-window request for the guest CPU.
   684  void ipc_change_state_to_sipi(GUEST_CPU_HANDLE gcpu)
   685  {
   686      CPU_ID           cpu_id = IPC_CPU_ID();
   687      IPC_CPU_CONTEXT  *ipc = &ipc_cpu_contexts[cpu_id];
   688  
   689      if (cpu_activity_state[cpu_id] == IPC_CPU_SIPI)
   690          return;
   691      lock_acquire(&ipc->data_lock);
   692      cpu_activity_state[cpu_id] = IPC_CPU_SIPI;
   693      gcpu_set_pending_nmi(gcpu, 0);
   694      lock_release(&ipc->data_lock);
   695      VMM_LOG(mask_anonymous, level_trace,"CPU%d: IPC state changed to SIPI\n", cpu_id);
   696  }
   697  
   698  // Called when NMI injection to the guest failed and should be retried later.
   699  // Adjusts the blocked-injection counter accordingly.
   700  void ipc_mni_injection_failed(void)
   701  {
   702      CPU_ID           cpu_id = IPC_CPU_ID();
   703      IPC_CPU_CONTEXT  *ipc = &ipc_cpu_contexts[cpu_id];
   704  
   705      // Count blocked NMI injection.
   706      hw_interlocked_increment64((INT64*)(&ipc->num_blocked_nmi_injections_to_guest));
   707  }
   708  
   709  
   710  BOOLEAN ipc_state_init(UINT16 number_of_host_processors)
   711  {
   712      UINT32   i = 0,
   713               ipc_cpu_context_size = 0,
   714               ipc_msg_array_size = 0,
   715               cpu_state_size = 0,
   716               ipc_ack_array_size = 0,
   717               ipc_data_size = 0,
   718               message_queue_offset = 0;
   719      IPC_CPU_CONTEXT  *ipc = 0;
   720  
   721      VMM_LOG(mask_anonymous, level_trace,"IPC state init: #host CPUs = %d\r\n", number_of_host_processors);
   722      num_of_host_processors = number_of_host_processors;
   723      nmi_owner_guest_id = INVALID_GUEST_ID;
   724      ipc_cpu_context_size = number_of_host_processors * ALIGN_FORWARD(sizeof(IPC_CPU_CONTEXT), IPC_ALIGNMENT);
   725      ipc_msg_array_size = number_of_host_processors * ipc_get_message_array_list_size(number_of_host_processors);
   726      cpu_state_size = (UINT32) ALIGN_FORWARD(num_of_host_processors * sizeof(IPC_CPU_ACTIVITY_STATE), IPC_ALIGNMENT);
   727      ipc_ack_array_size = number_of_host_processors * sizeof(UINT32) * number_of_host_processors;
   728      ipc_ack_array_size = (UINT32) ALIGN_FORWARD(ipc_ack_array_size, IPC_ALIGNMENT);
   729      ipc_data_size = ipc_cpu_context_size + ipc_msg_array_size + cpu_state_size + ipc_ack_array_size;
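    // The single allocation is carved into four IPC_ALIGNMENT-aligned regions,
    // in the order the sizes above are summed:
    //
    //   ipc_state_memory
    //     +-- ipc_cpu_contexts    num_cpus x IPC_CPU_CONTEXT
    //     +-- message queues      num_cpus x per-CPU ARRAY_LIST storage
    //     +-- cpu_activity_state  num_cpus x IPC_CPU_ACTIVITY_STATE
    //     +-- ipc_ack_array       num_cpus x num_cpus x UINT32
    //                             (row i = ack slots owned by sender CPU i)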
   730      ipc_state_memory = (char *) vmm_memory_alloc(ipc_data_size);
   731      if(ipc_state_memory == NULL) {
   732          return FALSE;
   733      }
   734  
   735      vmm_memset(ipc_state_memory, 0, ipc_data_size);
   736      ipc_cpu_contexts = (IPC_CPU_CONTEXT *) ipc_state_memory;
   737  
   738      for (i = 0; i < number_of_host_processors; i++) {
   739          ipc = &ipc_cpu_contexts[i];
   740  
   741          message_queue_offset = ipc_cpu_context_size + i * ipc_get_message_array_list_size(number_of_host_processors);
   742  
   743          ipc->message_queue = array_list_init(ipc_state_memory + message_queue_offset,
   744                                               ipc_get_message_array_list_size(number_of_host_processors), 
   745                                               sizeof(IPC_MESSAGE),
   746                                               ipc_get_max_pending_messages(number_of_host_processors), 
   747                                               IPC_ALIGNMENT);
   748          lock_initialize(&ipc->data_lock);
   749      }
   750  
   751      cpu_activity_state = (IPC_CPU_ACTIVITY_STATE *) (ipc_state_memory + ipc_cpu_context_size + ipc_msg_array_size);
   752      ipc_ack_array = (UINT32 *) ((char *) cpu_activity_state + cpu_state_size);
   753      lock_initialize(&send_lock);
   754      isr_register_handler((VMM_ISR_HANDLER) ipc_nmi_interrupt_handler, NMI_VECTOR);
   755      ipc_cli_register();
   756      return TRUE;
   757  }
   758  
   759  
   760  BOOLEAN ipc_guest_state_init(GUEST_ID guest_id)
   761  {
   762      if (guest_is_nmi_owner(guest_handle(guest_id))) {
   763          nmi_owner_guest_id = guest_id;
   764      }
   765      return TRUE;
   766  }
   767  #ifdef INCLUDE_UNUSED_CODE
   768  void ipc_finalize(void)
   769  {
   770      VMM_ASSERT(ipc_state_memory);
   771      vmm_memory_free(ipc_state_memory);
   772  }
   773  #endif
   774  
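// Despite its name, debug_not_resend is a nesting counter rather than a flag:
// every ipc_set_no_resend_flag(TRUE) must be balanced by a later FALSE call
// before resends are enabled again (see the "ipc resend" CLI command below).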
   775  void ipc_set_no_resend_flag(BOOLEAN  val)
   776  {
   777      if (val) {
   778          hw_interlocked_increment(&debug_not_resend);
   779      }
   780      else {
   781          hw_interlocked_decrement(&debug_not_resend);
   782      }
   783  }
   784  
   785  void ipc_print_cpu_context(CPU_ID cpu_id, BOOLEAN use_lock)
   786  {
   787      IPC_CPU_CONTEXT *ipc = &ipc_cpu_contexts[cpu_id];
   788  
   789      if (use_lock) {
   790          lock_acquire(&ipc->data_lock);
   791  
   792          VMM_LOG(mask_anonymous, level_trace,"IPC context on CPU %d:\r\n", cpu_id);
   793          VMM_LOG(mask_anonymous, level_trace,"    num_received_nmi_interrupts         = %d\r\n", ipc->num_received_nmi_interrupts);
   794          VMM_LOG(mask_anonymous, level_trace,"    num_processed_nmi_interrupts        = %d\r\n", ipc->num_processed_nmi_interrupts);
   795          VMM_LOG(mask_anonymous, level_trace,"    num_of_sent_ipc_nmi_interrupts      = %d\r\n", ipc->num_of_sent_ipc_nmi_interrupts);
   796          VMM_LOG(mask_anonymous, level_trace,"    num_of_processed_ipc_nmi_interrupts = %d\r\n", ipc->num_of_processed_ipc_nmi_interrupts);
   797          VMM_LOG(mask_anonymous, level_trace,"    num_of_sent_ipc_messages            = %d\r\n", ipc->num_of_sent_ipc_messages);
   798          VMM_LOG(mask_anonymous, level_trace,"    num_of_received_ipc_messages        = %d\r\n", ipc->num_of_received_ipc_messages);
   799          VMM_LOG(mask_anonymous, level_trace,"    num_start_messages                  = %d\r\n", ipc->num_start_messages);
   800          VMM_LOG(mask_anonymous, level_trace,"    num_stop_messages                   = %d\r\n", ipc->num_stop_messages);
   801          VMM_LOG(mask_anonymous, level_trace,"    num_blocked_nmi_injections_to_guest = %d\r\n", ipc->num_blocked_nmi_injections_to_guest);
   802          VMM_LOG(mask_anonymous, level_trace,"    Num of queued IPC messages          = %d\r\n", array_list_size(ipc->message_queue));
   803  
   804          lock_release(&ipc->data_lock);
   805      }
   806      else {
   807          VMM_LOG_NOLOCK("IPC context on CPU %d:\r\n", cpu_id);
   808          VMM_LOG_NOLOCK("    num_received_nmi_interrupts         = %d\r\n", ipc->num_received_nmi_interrupts);
   809          VMM_LOG_NOLOCK("    num_processed_nmi_interrupts        = %d\r\n", ipc->num_processed_nmi_interrupts);
   810          VMM_LOG_NOLOCK("    num_of_sent_ipc_nmi_interrupts      = %d\r\n", ipc->num_of_sent_ipc_nmi_interrupts);
   811          VMM_LOG_NOLOCK("    num_of_processed_ipc_nmi_interrupts = %d\r\n", ipc->num_of_processed_ipc_nmi_interrupts);
   812          VMM_LOG_NOLOCK("    num_of_sent_ipc_messages            = %d\r\n", ipc->num_of_sent_ipc_messages);
   813          VMM_LOG_NOLOCK("    num_of_received_ipc_messages        = %d\r\n", ipc->num_of_received_ipc_messages);
   814          VMM_LOG_NOLOCK("    num_start_messages                  = %d\r\n", ipc->num_start_messages);
   815          VMM_LOG_NOLOCK("    num_stop_messages                   = %d\r\n", ipc->num_stop_messages);
   816          VMM_LOG_NOLOCK("    num_blocked_nmi_injections_to_guest = %d\r\n", ipc->num_blocked_nmi_injections_to_guest);
   817          VMM_LOG_NOLOCK("    Num of queued IPC messages          = %d\r\n", array_list_size(ipc->message_queue));
   818      }
   819  }
   820  
   821  #ifdef CLI_INCLUDE
   822  static int cli_ipc_print(unsigned argc, char *argv[])
   823  {
   824      CPU_ID cpu_id;
   825  
   826      if (argc != 2)
   827          return -1;
   828  
   829      cpu_id = (CPU_ID) CLI_ATOL(argv[1]);
   830  
   831      if (cpu_id < 0 || cpu_id >= num_of_host_processors) {
   832          CLI_PRINT("CpuId must be in [0..%d] range\n", (int) num_of_host_processors - 1);
   833          return -1;
   834      }
   835      ipc_print_cpu_context(cpu_id, FALSE);
   836      return 0;
   837  }
   838  
   839  VMM_DEBUG_CODE(
   840  static int cli_ipc_resend(unsigned argc, char *argv[])
   841  {
   842      BOOLEAN  no_resend;
   843  
   844      if (argc < 2)
   845          return -1;
   846      if (!CLI_STRNCMP(argv[1], "start", sizeof("start")))
   847          no_resend = FALSE;
   848      else if (!CLI_STRNCMP(argv[1], "stop", sizeof("stop")))
   849          no_resend = TRUE;
   850      else if (!CLI_STRNCMP(argv[1], "state", sizeof("state"))) {
   851          CLI_PRINT("IPC resend disable state counter = %d\n", debug_not_resend);
   852          CLI_PRINT("IPC resend is %s\n", (debug_not_resend == 0) ? "ENABLED" : "DISABLED");
   853          return 0;
   854      }
   855      else {
   856          CLI_PRINT("Wrong command argument\n");
   857          return -1;
   858      }
   859      ipc_set_no_resend_flag(no_resend);
   860      CLI_PRINT("IPC resend disable state counter = %d\n", debug_not_resend);
   861      CLI_PRINT("IPC resend is %s\n", (debug_not_resend == 0) ? "ENABLED" : "DISABLED");
   862      return 0;
   863  }
   864  )
   865  
   866  
   867  static void ipc_cli_register(void)
   868  {
   869      VMM_DEBUG_CODE(
   870          CLI_AddCommand(cli_ipc_print, "ipc print",
   871               "Print internal IPC state for given CPU.", "<cpu id>", 
   872                CLI_ACCESS_LEVEL_SYSTEM)
   873          );
   874      VMM_DEBUG_CODE(
   875          CLI_AddCommand(cli_ipc_resend, "ipc resend",
   876               "Stop/Start resend IPC signal.", "stop | start | state", 
   877                CLI_ACCESS_LEVEL_SYSTEM)
   878          );
   879  }
   880  #else
   881  
   882  static void ipc_cli_register(void) {}
   883  
   884  #endif
   885