github.com/bytedance/sonic@v1.11.7-0.20240517092252-d2edb31b167b/native/utils.h

/*
 * Copyright 2022 ByteDance Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <string.h>
/* stdint.h, stdbool.h and the SIMD intrinsics header are assumed to be
 * reachable through native.h upstream; the explicit includes below keep
 * this header self-contained. */
#include <stdint.h>
#include <stdbool.h>
#include <immintrin.h>
#include "native.h"

/* Returns true if a read of n bytes starting at p would cross a 4 KiB page
 * boundary, i.e. the over-read might touch an unmapped page. */
static always_inline bool vec_cross_page(const void * p, size_t n) {
#define PAGE_SIZE 4096
    return (((size_t)(p)) & (PAGE_SIZE - 1)) > (PAGE_SIZE - n);
#undef PAGE_SIZE
}

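/*
 * Illustrative sketch, not part of the upstream header: vec_cross_page is
 * typically used to decide whether an unaligned SIMD load past the end of a
 * short buffer is safe. Over-reading is harmless as long as the load stays
 * within the current page; otherwise the bytes are staged through a local
 * buffer first. The function name load16_guarded is hypothetical.
 */
static always_inline __m128i load16_guarded(const char *buf, size_t len) {
    if (len >= 16 || !vec_cross_page(buf, 16)) {
        /* the over-read stays inside a mapped page, so it cannot fault */
        return _mm_loadu_si128((const __m128i *)buf);
    }
    char tmp[16] = {0};
    memcpy(tmp, buf, len);  /* copy only the valid bytes, zero the rest */
    return _mm_loadu_si128((const __m128i *)tmp);
}
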
/* Fixed-size copies: each moves exactly N bytes with a single unaligned
 * load/store pair (scalar for 4 and 8 bytes, SSE for 16 bytes). */
static always_inline void memcpy4 (void *__restrict dp, const void *__restrict sp) {
    ((uint32_t *)dp)[0] = ((const uint32_t *)sp)[0];
}

static always_inline void memcpy8 (void *__restrict dp, const void *__restrict sp) {
    ((uint64_t *)dp)[0] = ((const uint64_t *)sp)[0];
}

static always_inline void memcpy16 (void *__restrict dp, const void *__restrict sp) {
    _mm_storeu_si128((__m128i *)(dp), _mm_loadu_si128((const __m128i *)(sp)));
}

static always_inline void memcpy32(void *__restrict dp, const void *__restrict sp) {
#if USE_AVX2
    /* one 256-bit unaligned load/store */
    _mm256_storeu_si256((__m256i *)dp,     _mm256_loadu_si256((const __m256i *)sp));
#else
    /* two 128-bit unaligned load/store pairs */
    _mm_storeu_si128((__m128i *)(dp),      _mm_loadu_si128((const __m128i *)(sp)));
    _mm_storeu_si128((__m128i *)(dp + 16), _mm_loadu_si128((const __m128i *)(sp + 16)));
#endif
}

static always_inline void memcpy64(void *__restrict dp, const void *__restrict sp) {
    memcpy32(dp, sp);
    memcpy32(dp + 32, sp + 32);
}

/* memcpy_pN: copy a partial block of nb < N bytes. Each level peels off at
 * most one chunk of half its size, then delegates the remainder downward. */
static always_inline void memcpy_p4(void *__restrict dp, const void *__restrict sp, size_t nb) {
    if (nb >= 2) { *(uint16_t *)dp = *(const uint16_t *)sp; sp += 2, dp += 2, nb -= 2; }
    if (nb >= 1) { *(uint8_t *)dp = *(const uint8_t *)sp; }
}

static always_inline void memcpy_p8(void *__restrict dp, const void *__restrict sp, size_t nb) {
    if (nb >= 4) { memcpy4(dp, sp); sp += 4, dp += 4, nb -= 4; }
    memcpy_p4(dp, sp, nb);
}

static always_inline void memcpy_p16(void *__restrict dp, const void *__restrict sp, size_t nb) {
    if (nb >= 8) { memcpy8(dp, sp); sp += 8, dp += 8, nb -= 8; }
    memcpy_p8(dp, sp, nb);
}

static always_inline void memcpy_p32(void *__restrict dp, const void *__restrict sp, size_t nb) {
    if (nb >= 16) { memcpy16(dp, sp); sp += 16, dp += 16, nb -= 16; }
    memcpy_p16(dp, sp, nb);
}

static always_inline void memcpy_p64(void *__restrict dp, const void *__restrict sp, size_t nb) {
    if (nb >= 32) { memcpy32(dp, sp); sp += 32, dp += 32, nb -= 32; }
    memcpy_p32(dp, sp, nb);
}
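
/*
 * Illustrative sketch, not part of the upstream header: because memcpy_p64
 * handles any tail of fewer than 64 bytes, it composes with memcpy64 into a
 * full variable-length copy. The name copy_unbounded is hypothetical.
 */
static always_inline void copy_unbounded(void *__restrict dp, const void *__restrict sp, size_t nb) {
    while (nb >= 64) {          /* bulk loop: 64-byte SIMD blocks   */
        memcpy64(dp, sp);
        dp += 64, sp += 64, nb -= 64;
    }
    memcpy_p64(dp, sp, nb);     /* tail: 0..63 remaining bytes      */
}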