github.com/jlmucb/cloudproxy@v0.0.0-20170830161738-b5aa0b619bc4/cpvmm/vmm/libc/common_libc.c (about)

     1  /*
     2   * Copyright (c) 2013 Intel Corporation
     3   *
     4   * Licensed under the Apache License, Version 2.0 (the "License");
     5   * you may not use this file except in compliance with the License.
     6   * You may obtain a copy of the License at
     7   *     http://www.apache.org/licenses/LICENSE-2.0
     8   * Unless required by applicable law or agreed to in writing, software
     9   * distributed under the License is distributed on an "AS IS" BASIS,
    10   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    11   * See the License for the specific language governing permissions and
    12   * limitations under the License.
    13   */
    14  
    15  
    16  //   commonly used libc utilities
    17  
    18  
    19  #include "common_libc.h"
    20  
    21  extern void vmm_lock_xchg_byte(UINT8 *dst, UINT8 *src);
    22  
    23  #pragma optimize( "", off )
    24  void *  vmm_memset(void *dest, int filler, size_t count)
    25  {
    26      UINT8* p= (UINT8*) dest;
    27      while(count-->0)
    28  	*(p++)= (UINT8) filler;
    29      return dest;
    30  #if 0
    31      size_t i = 0, j, cnt_64bit;
    32      UINT64 filler_64;
    33      UINT64 *fill = &filler_64;
    34  
    35      cnt_64bit = count >> 3;
    36  
    37      if (cnt_64bit) {
    38          if (filler != 0) {
    39              *(UINT8*)fill = (UINT8)filler;
    40              *((UINT8*)fill + 1) = (UINT8)filler;
    41              *((UINT16*)fill + 1) = *(UINT16*)fill;
    42              *((UINT32*)fill + 1) = *(UINT32*)fill;
    43          }
    44  
    45          for (i = 0; i < cnt_64bit; i++) {
    46              if (filler == 0)
    47                  ((UINT64*) dest)[i] = 0;
    48              else    
    49                  ((UINT64*) dest)[i] = filler_64;
    50          }
    51          i = i << 3;
    52      }
    53  
    54      for (j = i; j < count; j++) {
    55          ((UINT8*) dest)[j] = (UINT8)filler;
    56      }
    57      return dest;
    58  #endif
    59  }
    60  #pragma optimize( "", on )
    61  
    62  void *  vmm_memcpy_ascending(void *dest, const void* src, size_t count)
    63  {
    64      size_t i = 0, j, cnt_64bit;
    65      UINT64 *d = (UINT64 *)dest;
    66      const UINT64 *s = (const UINT64*)src;
    67  
    68      cnt_64bit = count >> 3;
    69      if (cnt_64bit) {
    70          for(i = 0; i < cnt_64bit; i++)
    71              ((UINT64*) d)[i] = ((UINT64*) s)[i];
    72          i = i << 3;
    73      }
    74  
    75      for (j = i; j < count; j++) {
    76          ((UINT8*) dest)[j] = ((UINT8*)src)[j];
    77      }
    78      return dest;
    79  }
    80  
/*
 * Copy count bytes from src to dest, moving from the highest address
 * downward.  Safe for overlapping regions where dest > src.
 * Returns dest.
 */
void *  vmm_memcpy_descending(void *dest, const void* src, size_t count)
{
    size_t i, cnt, rem;
    VMM_LONG *d = (VMM_LONG*)dest;
    const VMM_LONG *s = (const VMM_LONG*)src;

    /* cnt = whole VMM_LONG words in count; rem = leftover tail bytes
     * (COUNT_32_64 / REMAINDER_32_64 come from common_libc.h). */
    cnt = COUNT_32_64(count);
    rem = REMAINDER_32_64(count);

    /* The tail bytes sit at the highest addresses, so for a true
     * descending copy they must be moved BEFORE the word loop. */
    for (i = 0; i < rem; i++) {
        ((UINT8*) d)[count - i - 1] = ((UINT8*)s)[count - i - 1];
    }

    /* Whole words, highest word first down to index 0. */
    if (cnt) {
        for(i = cnt; i > 0; i--)
            ((VMM_LONG*)d)[i - 1] = ((VMM_LONG*)s)[i - 1];
    }
    return dest;
}
   100  
   101  void *  vmm_memcpy(void *dest, const void* src, size_t count)
   102  {
   103      if (dest >= src) {
   104          return vmm_memcpy_descending(dest, src, count);
   105      }
   106      else {
   107          return vmm_memcpy_ascending(dest, src, count);
   108      }
   109  }
   110  
   111  void *  vmm_memmove(void *dest, const void* src, int count)
   112  {
   113      if (dest == src) {
   114          return dest;
   115      } 
   116      else if (dest >= src) {
   117          return vmm_memcpy_descending(dest, src, count);
   118      } 
   119      else {
   120          return vmm_memcpy_ascending(dest, src, count);
   121      }
   122  }
   123  
   124  size_t  vmm_strlen(const char* string)
   125  {
   126      size_t len = 0;
   127      const char* next = string;
   128  
   129      if (! string) {
   130          return SIZE_T_ALL_ONES;
   131      }
   132      for (; *next != 0; ++next) {
   133          ++len;
   134      }
   135      return len;
   136  }
   137  
   138  char*  vmm_strcpy(char* dst, const char* src)
   139  {
   140      if (! src || ! dst) {
   141          return NULL;
   142      }
   143  
   144      while ((*dst++ = *src++) != 0);
   145      return dst;
   146  }
   147  
   148  char*  vmm_strcpy_s(char* dst, size_t dst_length, const char* src)
   149  {
   150      size_t src_length = vmm_strlen(src);
   151      const char* s = src;
   152  
   153      if (! src || ! dst || ! dst_length || dst_length < src_length + 1) {
   154          return NULL;
   155      }
   156      while (*s != 0) {
   157          *dst++ = *s++;
   158      }
   159      *dst = '\0';
   160      return dst;
   161  }
   162  
   163  int vmm_strcmp(const char* string1, const char* string2)
   164  {
   165      const char* str1 = string1;
   166      const char* str2 = string2;
   167  
   168      if(str1 == str2) {
   169          return 0;
   170      }
   171      if(NULL == str1) {
   172          return -1;
   173      }
   174      if(NULL == str2) {
   175          return 1;
   176      }
   177      while(*str1 == *str2) {
   178          if('\0' == *str1) {
   179              break;
   180          }
   181          str1++;
   182          str2++;
   183      }
   184      return *str1 - *str2;
   185  }
   186  
   187  int vmm_memcmp(const void* mem1, const void* mem2, size_t count)
   188  {
   189      const char *m1 = mem1;
   190      const char *m2 = mem2;
   191  
   192      while (count) {
   193          count--;
   194          if (m1[count] != m2[count])
   195              break;
   196      }
   197      return (m1[count] - m2[count]);
   198  }
   199  
/*
 * Copy count bytes from src to dst using a single access of exactly
 * that width for the common device-register sizes (1, 2, 4, 8 bytes,
 * or two 8-byte accesses for 16).  For memory-mapped I/O the access
 * width is significant, so those sizes must not be decomposed into
 * byte moves; any other count falls back to the generic vmm_memcpy().
 */
void vmm_memcpy_assuming_mmio( UINT8 *dst, UINT8 *src, INT32 count)
{
    switch (count) {
    case 0:
        break;

    case 1:
        *dst = *src;
        break;

    case 2:
        /* single 16-bit access */
        *(UINT16 *) dst = *(UINT16 *) src;
        break;

    case 4:
        /* single 32-bit access */
        *(UINT32 *) dst = *(UINT32 *) src;
      break;

    case 8:
        /* single 64-bit access */
        *(UINT64 *) dst = *(UINT64 *) src;
        break;

    case 16:
        /* two back-to-back 64-bit accesses */
        *(UINT64 *) dst = *(UINT64 *) src;
        dst += sizeof(UINT64);
        src += sizeof(UINT64);
        *(UINT64 *) dst = *(UINT64 *) src;
        break;

    default:
        /* Not a register-like size: ordinary overlap-aware copy. */
        vmm_memcpy(dst, src, count);
        break;
    }
}
   234  
   235  /******************* Locked versions of functions ***********************/
   236  
   237  /*
   238   * NOTE: Use vmm_lock_memcpy with caution. Although it is a locked version of
   239   * memcpy, it locks only at the DWORD level. Users need to implement their
   240   * own MUTEX LOCK to ensure other processor cores don't get in the way.
   241   * This copy only ensures that at DWORD level there are no synchronization
   242   * issues.
   243   */
/*
 * Copy count bytes from src to dest, lowest address first, performing
 * each word/byte transfer through a locked exchange primitive so every
 * individual element move is atomic (see the DWORD-level caveat in the
 * section comment above).
 *
 * NOTE(review): vmm_lock_xchg_32_64_word / vmm_lock_xchg_byte take the
 * source by non-const pointer (const is cast away below) — presumably
 * an exchange that may also write back to src; confirm against their
 * implementations before treating src as read-only.
 */
void *  vmm_lock_memcpy_ascending(void *dest, const void* src, size_t count)
{
    size_t i = 0, j, cnt;
    VMM_LONG *d = (VMM_LONG *)dest;
    const VMM_LONG *s = (const VMM_LONG*)src;

    /* Whole VMM_LONG words first, low index upward. */
    cnt = COUNT_32_64(count);
    if (cnt) {
        for(i = 0; i < cnt; i++) {
            vmm_lock_xchg_32_64_word(&((VMM_LONG*) d)[i], &((VMM_LONG*) s)[i]);
        }
        i = SHL_32_64(i);   /* convert word count to byte offset */
    }
    /* Remaining tail bytes, one locked byte exchange each. */
    for (j = i; j < count; j++) {
        vmm_lock_xchg_byte(&((UINT8*) dest)[j], &((UINT8*)src)[j]);
    }
    return dest;
}
   262  
/*
 * Copy count bytes from src to dest, highest address first, performing
 * each word/byte transfer through a locked exchange primitive.  The
 * tail bytes (highest addresses) are moved before the word loop so the
 * copy truly proceeds from high to low — required when dest overlaps
 * src from above.
 *
 * NOTE(review): as in the ascending variant, const is cast away on the
 * source for the exchange primitives; verify they do not write src.
 */
void *  vmm_lock_memcpy_descending(void *dest, const void* src, size_t count)
{
    size_t i, cnt, rem;
    VMM_LONG *d = (VMM_LONG*)dest;
    const VMM_LONG *s = (const VMM_LONG*)src;

    /* cnt = whole VMM_LONG words; rem = leftover tail bytes. */
    cnt = COUNT_32_64(count);
    rem = REMAINDER_32_64(count);
    /* Tail bytes sit at the highest addresses: copy them first. */
    for (i = 0; i < rem; i++) {
        vmm_lock_xchg_byte(&((UINT8*)d)[count - i - 1],
                           &((UINT8*)s)[count - i - 1]);
    }
    /* Whole words, highest word down to index 0. */
    if (cnt) {
        for(i = cnt; i > 0; i--) {
            vmm_lock_xchg_32_64_word(&((VMM_LONG*)d)[i - 1],
                                     &((VMM_LONG*)s)[i - 1]);
        }
    }
    return dest;
}
   283  
   284  /*
   285   * NOTE: READ THE NOTE AT BEGINNING OF Locked Versions of functions.
   286   */
   287  void *  vmm_lock_memcpy(void *dest, const void* src, size_t count)
   288  {
   289      if (dest >= src) {
   290          return vmm_lock_memcpy_descending(dest, src, count);
   291      }
   292      else {
   293          return vmm_lock_memcpy_ascending(dest, src, count);
   294      }
   295  }
   296