github.com/Schaudge/grailbase@v0.0.0-20240223061707-44c758a471c0/compress/libdeflate/arm/crc32_impl.h

#ifndef GO_SRC_GITHUB_COM_GRAILBIO_BASE_COMPRESS_LIBDEFLATE_ARM_CRC32_IMPL_H_
#define GO_SRC_GITHUB_COM_GRAILBIO_BASE_COMPRESS_LIBDEFLATE_ARM_CRC32_IMPL_H_
/*
 * arm/crc32_impl.h
 *
 * Copyright 2017 Jun He <jun.he@linaro.org>
 * Copyright 2018 Eric Biggers
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "cpu_features.h"

/*
 * CRC-32 folding with the ARM Crypto extension (PMULL)
 *
 * This works the same way as the x86 PCLMUL version.
 * See x86/crc32_pclmul_template.h for an explanation.
 */
#undef DISPATCH_PMULL
#if (defined(__ARM_FEATURE_CRYPTO) || \
     (ARM_CPU_FEATURES_ENABLED && \
      COMPILER_SUPPORTS_PMULL_TARGET_INTRINSICS)) && \
	/* not yet tested on big endian, probably needs changes to work there */ \
	(defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) && \
	/* clang as of v5.0.1 doesn't allow pmull intrinsics in 32-bit mode, even
	 * when compiling with -mfpu=crypto-neon-fp-armv8 */ \
	!(defined(__clang__) && defined(__arm__))
#  define FUNCNAME		crc32_pmull
#  define FUNCNAME_ALIGNED	crc32_pmull_aligned
#  ifdef __ARM_FEATURE_CRYPTO
#    define ATTRIBUTES
#    define DEFAULT_IMPL	crc32_pmull
#  else
#    ifdef __arm__
#      define ATTRIBUTES	__attribute__((target("fpu=crypto-neon-fp-armv8")))
#    else
#      ifdef __clang__
#        define ATTRIBUTES	__attribute__((target("crypto")))
#      else
#        define ATTRIBUTES	__attribute__((target("+crypto")))
#      endif
#    endif
#    define DISPATCH		1
#    define DISPATCH_PMULL	1
#  endif

#include <arm_neon.h>

/* Carryless multiply of the low 64-bit halves of 'a' and 'b'. */
static forceinline ATTRIBUTES uint8x16_t
clmul_00(uint8x16_t a, uint8x16_t b)
{
	return (uint8x16_t)vmull_p64((poly64_t)vget_low_u8(a),
				     (poly64_t)vget_low_u8(b));
}

/* Carryless multiply of the low 64 bits of 'a' by the high 64 bits of 'b'. */
static forceinline ATTRIBUTES uint8x16_t
clmul_10(uint8x16_t a, uint8x16_t b)
{
	return (uint8x16_t)vmull_p64((poly64_t)vget_low_u8(a),
				     (poly64_t)vget_high_u8(b));
}

/* Carryless multiply of the high 64-bit halves of 'a' and 'b'. */
static forceinline ATTRIBUTES uint8x16_t
clmul_11(uint8x16_t a, uint8x16_t b)
{
	return (uint8x16_t)vmull_high_p64((poly64x2_t)a, (poly64x2_t)b);
}

/*
 * Fold the 128-bit vector 'src' into 'dst': multiply the low and high
 * 64-bit halves of 'src' by the corresponding halves of 'multipliers'
 * and XOR both products into 'dst'.
 */
static forceinline ATTRIBUTES uint8x16_t
fold_128b(uint8x16_t dst, uint8x16_t src, uint8x16_t multipliers)
{
	return dst ^ clmul_00(src, multipliers) ^ clmul_11(src, multipliers);
}
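/*
 * Aside, not part of the upstream file: a minimal scalar sketch of the
 * 64x64 -> 128-bit carryless multiplication that vmull_p64 performs in
 * hardware, to show what the clmul_* helpers above compute.  In GF(2)
 * polynomial arithmetic, addition is XOR, so the product is simply the
 * XOR of shifted copies of 'a', one per set bit of 'b' -- no carries
 * propagate between bit positions.  The name clmul64_ref and its
 * out-parameters are ours; the block is kept under "#if 0" so it is
 * never compiled.
 */
#if 0
static void
clmul64_ref(u64 a, u64 b, u64 *lo, u64 *hi)
{
	u64 l = 0, h = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (b & ((u64)1 << i)) {
			l ^= a << i;			/* bits 0..63 of (a << i) */
			if (i != 0)
				h ^= a >> (64 - i);	/* bits 64..127 */
		}
	}
	*lo = l;	/* low half of the 128-bit product */
	*hi = h;	/* high half; bit 127 is always 0 */
}
#endif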
static forceinline ATTRIBUTES u32
crc32_pmull_aligned(u32 remainder, const uint8x16_t *p, size_t nr_segs)
{
	/* Constants precomputed by gen_crc32_multipliers.c.  Do not edit! */
	const uint8x16_t multipliers_4 =
		(uint8x16_t)(uint64x2_t){ 0x8F352D95, 0x1D9513D7 };
	const uint8x16_t multipliers_1 =
		(uint8x16_t)(uint64x2_t){ 0xAE689191, 0xCCAA009E };
	const uint8x16_t final_multiplier =
		(uint8x16_t)(uint64x2_t){ 0xB8BC6765 };
	const uint8x16_t mask32 = (uint8x16_t)(uint32x4_t){ 0xFFFFFFFF };
	const uint8x16_t barrett_reduction_constants =
		(uint8x16_t)(uint64x2_t){ 0x00000001F7011641,
					  0x00000001DB710641 };
	const uint8x16_t zeroes = (uint8x16_t){ 0 };

	const uint8x16_t * const end = p + nr_segs;
	const uint8x16_t * const end512 = p + (nr_segs & ~3);
	uint8x16_t x0, x1, x2, x3;

	x0 = *p++ ^ (uint8x16_t)(uint32x4_t){ remainder };
	if (nr_segs >= 4) {
		x1 = *p++;
		x2 = *p++;
		x3 = *p++;

		/* Fold 512 bits at a time */
		while (p != end512) {
			x0 = fold_128b(*p++, x0, multipliers_4);
			x1 = fold_128b(*p++, x1, multipliers_4);
			x2 = fold_128b(*p++, x2, multipliers_4);
			x3 = fold_128b(*p++, x3, multipliers_4);
		}

		/* Fold 512 bits => 128 bits */
		x1 = fold_128b(x1, x0, multipliers_1);
		x2 = fold_128b(x2, x1, multipliers_1);
		x0 = fold_128b(x3, x2, multipliers_1);
	}

	/* Fold 128 bits at a time */
	while (p != end)
		x0 = fold_128b(*p++, x0, multipliers_1);

	/* Fold 128 => 96 bits, implicitly appending 32 zeroes */
	x0 = vextq_u8(x0, zeroes, 8) ^ clmul_10(x0, multipliers_1);

	/* Fold 96 => 64 bits */
	x0 = vextq_u8(x0, zeroes, 4) ^ clmul_00(x0 & mask32, final_multiplier);

	/* Reduce 64 => 32 bits using Barrett reduction */
	x1 = x0;
	x0 = clmul_00(x0 & mask32, barrett_reduction_constants);
	x0 = clmul_10(x0 & mask32, barrett_reduction_constants);
	return vgetq_lane_u32((uint32x4_t)(x0 ^ x1), 1);
}
#define IMPL_ALIGNMENT		16
#define IMPL_SEGMENT_SIZE	16
#include "../crc32_vec_template.h"
#endif /* PMULL implementation */

#ifdef DISPATCH
static inline crc32_func_t
arch_select_crc32_func(void)
{
	u32 features = get_cpu_features();

#ifdef DISPATCH_PMULL
	if (features & ARM_CPU_FEATURE_PMULL)
		return crc32_pmull;
#endif
	return NULL;
}
#endif /* DISPATCH */

#endif // GO_SRC_GITHUB_COM_GRAILBIO_BASE_COMPRESS_LIBDEFLATE_ARM_CRC32_IMPL_H_
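/*
 * Aside, not part of the upstream file: a sketch of how a caller might
 * consume arch_select_crc32_func() above.  The portable fallback name
 * crc32_generic is an assumption for illustration; libdeflate's real
 * dispatcher lives in crc32.c and caches the selected function pointer
 * rather than re-querying CPU features on every call.  Kept under
 * "#if 0" so it is never compiled.
 */
#if 0
static u32
crc32_dispatch(u32 remainder, const u8 *p, size_t size)
{
	crc32_func_t f = NULL;

#ifdef DISPATCH
	f = arch_select_crc32_func();	/* PMULL version, if the CPU has it */
#endif
	if (f == NULL)
		f = crc32_generic;	/* assumed portable fallback */
	return f(remainder, p, size);
}
#endif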