github.com/Bytom/bytom@v1.1.2-0.20210127130405-ae40204c0b09/mining/tensority/cgo_algorithm/lib/byte_order-allInOne.h

/* byte_order-allInOne.h */
#ifndef BYTE_ORDER_H
#define BYTE_ORDER_H
#include "ustd.h"
#include <stdlib.h>

#ifdef __GLIBC__
# include <endian.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

/* if x86 compatible cpu */
#if defined(i386) || defined(__i386__) || defined(__i486__) || \
    defined(__i586__) || defined(__i686__) || defined(__pentium__) || \
    defined(__pentiumpro__) || defined(__pentium4__) || \
    defined(__nocona__) || defined(prescott) || defined(__core2__) || \
    defined(__k6__) || defined(__k8__) || defined(__athlon__) || \
    defined(__amd64) || defined(__amd64__) || \
    defined(__x86_64) || defined(__x86_64__) || defined(_M_IX86) || \
    defined(_M_AMD64) || defined(_M_IA64) || defined(_M_X64)
/* detect if x86-64 instruction set is supported */
# if defined(_LP64) || defined(__LP64__) || defined(__x86_64) || \
    defined(__x86_64__) || defined(_M_AMD64) || defined(_M_X64)
#  define CPU_X64
# else
#  define CPU_IA32
# endif
#endif

/* detect CPU endianness */
#if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \
        __BYTE_ORDER == __LITTLE_ENDIAN) || \
    (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
        __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || \
    defined(CPU_IA32) || defined(CPU_X64) || \
    defined(__ia64) || defined(__ia64__) || defined(__alpha__) || defined(_M_ALPHA) || \
    defined(vax) || defined(MIPSEL) || defined(_ARM_) || defined(__arm__)
# define CPU_LITTLE_ENDIAN
# define IS_BIG_ENDIAN 0
# define IS_LITTLE_ENDIAN 1
#elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \
        __BYTE_ORDER == __BIG_ENDIAN) || \
    (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
        __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) || \
    defined(__sparc) || defined(__sparc__) || defined(sparc) || \
    defined(_ARCH_PPC) || defined(_ARCH_PPC64) || defined(_POWER) || \
    defined(__POWERPC__) || defined(POWERPC) || defined(__powerpc) || \
    defined(__powerpc__) || defined(__powerpc64__) || defined(__ppc__) || \
    defined(__hpux) || defined(_MIPSEB) || defined(mc68000) || \
    defined(__s390__) || defined(__s390x__) || defined(sel)
# define CPU_BIG_ENDIAN
# define IS_BIG_ENDIAN 1
# define IS_LITTLE_ENDIAN 0
#else
# error "Can't detect CPU architecture"
#endif

#ifndef __has_builtin
# define __has_builtin(x) 0
#endif

#define IS_ALIGNED_32(p) (0 == (3 & ((const char*)(p) - (const char*)0)))
#define IS_ALIGNED_64(p) (0 == (7 & ((const char*)(p) - (const char*)0)))

#if defined(_MSC_VER)
#define ALIGN_ATTR(n) __declspec(align(n))
#elif defined(__GNUC__)
#define ALIGN_ATTR(n) __attribute__((aligned (n)))
#else
#define ALIGN_ATTR(n) /* nothing */
#endif

#if defined(_MSC_VER) || defined(__BORLANDC__)
#define I64(x) x##ui64
#else
#define I64(x) x##ULL
#endif

#ifndef __STRICT_ANSI__
#define RHASH_INLINE inline
#elif defined(__GNUC__)
#define RHASH_INLINE __inline__
#else
#define RHASH_INLINE
#endif

/* define rhash_ctz - count trailing zero bits */
#if (defined(__GNUC__) && (__GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))) || \
    (defined(__clang__) && __has_builtin(__builtin_ctz))
/* GCC >= 3.4 or clang */
# define rhash_ctz(x) __builtin_ctz(x)
#else
unsigned rhash_ctz(unsigned); /* define as function */
#endif
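
/* Illustrative note (not part of the original header): rhash_ctz(x) yields
 * the zero-based index of the least significant set bit, e.g.
 *
 *   rhash_ctz(1)    == 0    // 0b0001
 *   rhash_ctz(12)   == 2    // 0b1100
 *   rhash_ctz(0x80) == 7
 *
 * The builtin branch maps to __builtin_ctz(), which is undefined for x == 0,
 * so callers should treat a zero argument as unsupported. */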

/* bswap definitions */
#if (defined(__GNUC__) && (__GNUC__ >= 4) && (__GNUC__ > 4 || __GNUC_MINOR__ >= 3)) || \
    (defined(__clang__) && __has_builtin(__builtin_bswap32) && __has_builtin(__builtin_bswap64))
/* GCC >= 4.3 or clang */
# define bswap_32(x) __builtin_bswap32(x)
# define bswap_64(x) __builtin_bswap64(x)
#elif (_MSC_VER > 1300) && (defined(CPU_IA32) || defined(CPU_X64)) /* MS VC */
# define bswap_32(x) _byteswap_ulong((unsigned long)x)
# define bswap_64(x) _byteswap_uint64((__int64)x)
#else
/* fallback to generic bswap definition */
static RHASH_INLINE uint32_t bswap_32(uint32_t x)
{
# if defined(__GNUC__) && defined(CPU_IA32) && !defined(__i386__) && !defined(RHASH_NO_ASM)
    __asm("bswap\t%0" : "=r" (x) : "0" (x)); /* gcc x86 version */
    return x;
# else
    x = ((x << 8) & 0xFF00FF00u) | ((x >> 8) & 0x00FF00FFu);
    return (x >> 16) | (x << 16);
# endif
}
static RHASH_INLINE uint64_t bswap_64(uint64_t x)
{
    union {
        uint64_t ll;
        uint32_t l[2];
    } w, r;
    w.ll = x;
    r.l[0] = bswap_32(w.l[1]);
    r.l[1] = bswap_32(w.l[0]);
    return r.ll;
}
#endif /* bswap definitions */

#ifdef CPU_BIG_ENDIAN
# define be2me_32(x) (x)
# define be2me_64(x) (x)
# define le2me_32(x) bswap_32(x)
# define le2me_64(x) bswap_64(x)

# define be32_copy(to, index, from, length) memcpy((to) + (index), (from), (length))
# define le32_copy(to, index, from, length) rhash_swap_copy_str_to_u32((to), (index), (from), (length))
# define be64_copy(to, index, from, length) memcpy((to) + (index), (from), (length))
# define le64_copy(to, index, from, length) rhash_swap_copy_str_to_u64((to), (index), (from), (length))
# define me64_to_be_str(to, from, length) memcpy((to), (from), (length))
# define me64_to_le_str(to, from, length) rhash_swap_copy_u64_to_str((to), (from), (length))

#else /* CPU_BIG_ENDIAN */
# define be2me_32(x) bswap_32(x)
# define be2me_64(x) bswap_64(x)
# define le2me_32(x) (x)
# define le2me_64(x) (x)

# define be32_copy(to, index, from, length) rhash_swap_copy_str_to_u32((to), (index), (from), (length))
# define le32_copy(to, index, from, length) memcpy((to) + (index), (from), (length))
# define be64_copy(to, index, from, length) rhash_swap_copy_str_to_u64((to), (index), (from), (length))
# define le64_copy(to, index, from, length) memcpy((to) + (index), (from), (length))
# define me64_to_be_str(to, from, length) rhash_swap_copy_u64_to_str((to), (from), (length))
# define me64_to_le_str(to, from, length) memcpy((to), (from), (length))
#endif /* CPU_BIG_ENDIAN */

/* ROTL/ROTR macros rotate a 32/64-bit word left/right by n bits */
#define ROTL32(dword, n) ((dword) << (n) ^ ((dword) >> (32 - (n))))
#define ROTR32(dword, n) ((dword) >> (n) ^ ((dword) << (32 - (n))))
#define ROTL64(qword, n) ((qword) << (n) ^ ((qword) >> (64 - (n))))
#define ROTR64(qword, n) ((qword) >> (n) ^ ((qword) << (64 - (n))))

#ifdef __cplusplus
} /* extern "C" */
#endif /* __cplusplus */

#endif /* BYTE_ORDER_H */
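
/* Illustrative usage sketch (not part of the original file): be2me_32() and
 * le2me_32() convert between a fixed on-wire byte order and the machine
 * (native) order, compiling to a no-op or a bswap depending on the detected
 * endianness. Reading a big-endian 32-bit field from a byte buffer might
 * look like this (buf is a hypothetical name):
 *
 *   uint32_t raw;
 *   memcpy(&raw, buf, sizeof(raw));  // buf holds 4 big-endian bytes
 *   uint32_t value = be2me_32(raw);  // native order on either endianness
 */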

// Adapted from byte_order.c
/* byte_order.c - byte order related platform dependent routines,
 *
 * Copyright: 2008-2012 Aleksey Kravchenko <rhash.admin@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. Use this program at your own risk!
 */

#ifndef rhash_ctz

# if _MSC_VER >= 1300 && (_M_IX86 || _M_AMD64 || _M_IA64) /* if MSVC++ >= 2002 on x86/x64 */
# include <intrin.h>
# pragma intrinsic(_BitScanForward)

/**
 * Returns the index of the trailing (least significant set) bit of x.
 *
 * @param x the number to process
 * @return zero-based index of the trailing bit
 */
inline unsigned rhash_ctz(unsigned x)
{
    unsigned long index;
    unsigned char isNonzero = _BitScanForward(&index, x); /* MSVC intrinsic */
    return (isNonzero ? (unsigned)index : 0);
}
# else /* _MSC_VER >= 1300... */

/**
 * Returns the index of the trailing (least significant set) bit of a 32-bit
 * number. This is a plain C equivalent of the GCC __builtin_ctz() bit scan.
 *
 * @param x the number to process
 * @return zero-based index of the trailing bit
 */
inline unsigned rhash_ctz(unsigned x)
{
    /* array for conversion to bit position */
    static unsigned char bit_pos[32] = {
        0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
        31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
    };

    /* The De Bruijn bit-scan was devised in 1997 by Martin Lauter, according
     * to Donald Knuth. The constant 0x077CB531UL is a De Bruijn sequence,
     * which produces a unique pattern in the high 5 bits for each possible
     * bit position it is multiplied against.
     * See http://graphics.stanford.edu/~seander/bithacks.html
     * and http://chessprogramming.wikispaces.com/BitScan */
    return (unsigned)bit_pos[((uint32_t)((x & -x) * 0x077CB531U)) >> 27];
}
# endif /* _MSC_VER >= 1300... */
#endif /* rhash_ctz */

/**
 * Copy a memory block while simultaneously exchanging the byte order.
 * The byte order is changed from little-endian 32-bit integers
 * to big-endian (or vice versa).
 *
 * @param to the pointer where to copy the memory block
 * @param index the index to start writing from
 * @param from the source block to copy
 * @param length length of the memory block in bytes
 */
inline void rhash_swap_copy_str_to_u32(void* to, int index, const void* from, size_t length)
{
    /* if all pointers and length are 32-bit aligned */
    if ( 0 == (( (int)((char*)to - (char*)0) | ((char*)from - (char*)0) | index | length ) & 3) ) {
        /* copy memory as 32-bit words */
        const uint32_t* src = (const uint32_t*)from;
        const uint32_t* end = (const uint32_t*)((const char*)src + length);
        uint32_t* dst = (uint32_t*)((char*)to + index);
        for (; src < end; dst++, src++)
            *dst = bswap_32(*src);
    } else {
        const char* src = (const char*)from;
        for (length += index; (size_t)index < length; index++)
            ((char*)to)[index ^ 3] = *(src++);
    }
}
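
/* Illustrative note (not part of the original file): in the unaligned
 * fallback above, writing to ((char*)to)[index ^ 3] reverses the bytes
 * within each 32-bit word, because XOR-ing the two low index bits maps
 * offsets 0,1,2,3 to 3,2,1,0 inside every aligned 4-byte group. For example,
 * source bytes {0x01,0x02,0x03,0x04} land at to[3],to[2],to[1],to[0], so a
 * word stored little-endian becomes big-endian (and vice versa). */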

/**
 * Copy a memory block with changed byte order.
 * The byte order is changed from little-endian 64-bit integers
 * to big-endian (or vice versa).
 *
 * @param to the pointer where to copy the memory block
 * @param index the index to start writing from
 * @param from the source block to copy
 * @param length length of the memory block in bytes
 */
inline void rhash_swap_copy_str_to_u64(void* to, int index, const void* from, size_t length)
{
    /* if all pointers and length are 64-bit aligned */
    if ( 0 == (( (int)((char*)to - (char*)0) | ((char*)from - (char*)0) | index | length ) & 7) ) {
        /* copy aligned memory block as 64-bit integers */
        const uint64_t* src = (const uint64_t*)from;
        const uint64_t* end = (const uint64_t*)((const char*)src + length);
        uint64_t* dst = (uint64_t*)((char*)to + index);
        while (src < end) *(dst++) = bswap_64( *(src++) );
    } else {
        const char* src = (const char*)from;
        for (length += index; (size_t)index < length; index++)
            ((char*)to)[index ^ 7] = *(src++);
    }
}

/**
 * Copy data from a sequence of 64-bit words to a binary string of the given
 * length, while changing the byte order.
 *
 * @param to the binary string to receive data
 * @param from the source sequence of 64-bit words
 * @param length the size in bytes of the data being copied
 */
inline void rhash_swap_copy_u64_to_str(void* to, const void* from, size_t length)
{
    /* if all pointers and length are 64-bit aligned */
    if ( 0 == (( (int)((char*)to - (char*)0) | ((char*)from - (char*)0) | length ) & 7) ) {
        /* copy aligned memory block as 64-bit integers */
        const uint64_t* src = (const uint64_t*)from;
        const uint64_t* end = (const uint64_t*)((const char*)src + length);
        uint64_t* dst = (uint64_t*)to;
        while (src < end) *(dst++) = bswap_64( *(src++) );
    } else {
        size_t index;
        char* dst = (char*)to;
        for (index = 0; index < length; index++)
            *(dst++) = ((char*)from)[index ^ 7];
    }
}

/**
 * Exchange the byte order in the given array of 32-bit integers.
 *
 * @param arr the array to process
 * @param length the array length (number of 32-bit words)
 */
inline void rhash_u32_mem_swap(unsigned *arr, int length)
{
    unsigned* end = arr + length;
    for (; arr < end; arr++) {
        *arr = bswap_32(*arr);
    }
}
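
/* Illustrative usage sketch (not part of the original file): serializing a
 * hash state kept as native 64-bit words into a big-endian digest string
 * (the names state and digest are hypothetical):
 *
 *   uint64_t state[4];
 *   unsigned char digest[32];
 *   me64_to_be_str(digest, state, sizeof(digest));
 *
 * On a little-endian CPU this expands to rhash_swap_copy_u64_to_str(), and
 * on a big-endian CPU to a plain memcpy(). */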