/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright Authors of Cilium */

#include "lib/common.h"

/* Manual slow versions, but doesn't matter for the sake of testing here.
 * Mainly to make sure we don't end up using the overridden builtin.
 */

/* Byte-wise compare of x and y over len bytes.
 * Returns 1 on the first differing byte, 0 if the regions are identical.
 * (Note: unlike memcmp(), this only signals inequality, not ordering.)
 */
static __always_inline __u32 __cmp_mem(const void *x, const void *y, __u32 len)
{
	const __u8 *x8 = x, *y8 = y;
	__u32 i;

	for (i = 0; i < len; i++) {
		if (x8[i] != y8[i])
			return 1;
	}

	return 0;
}

/* Byte-wise copy of len bytes from s to d (non-overlapping regions assumed). */
static __always_inline void __cpy_mem(void *d, void *s, __u32 len)
{
	__u8 *d8 = d, *s8 = s;
	__u32 i;

	for (i = 0; i < len; i++)
		d8[i] = s8[i];
}

/* Fill buff with len pseudo-random bytes (low byte of each random() call). */
static void __fill_rnd(void *buff, __u32 len)
{
	__u8 *dest = buff;
	__u32 i;

	for (i = 0; i < len; i++)
		dest[i] = random();
}

/* With ~50% probability, increment one randomly chosen byte of d.
 * Returns true iff the buffer was modified, so the caller knows the
 * expected outcome of a subsequent compare.
 * NOTE(review): assumes len > 0 — random() % len would divide by zero
 * otherwise; all call sites below pass sizeof of a non-empty array.
 */
static __always_inline bool __corrupt_mem(void *d, __u32 len)
{
	bool corrupted = random() % 2 == 1;
	__u32 pos = random() % len;
	__u8 *d8 = d;

	if (corrupted)
		d8[pos]++;
	return corrupted;
}

/* Fill buff with an incrementing byte pattern 0, 1, 2, ...
 * (wraps modulo 256 since dest is __u8), giving a deterministic,
 * position-dependent pattern for the overlap tests below.
 */
static void __fill_cnt(void *buff, __u32 len)
{
	__u8 *dest = buff;
	__u32 i, cnt = 0;

	for (i = 0; i < len; i++)
		dest[i] = cnt++;
}

/* Zero __x via the __bpf_memzero() under test and compare it against __y,
 * which was zeroed with the reference builtin. barrier_data() around the
 * tested call keeps the compiler from folding/eliding the buffer accesses.
 */
#define test___builtin_memzero_single(op, len)			\
	do {							\
		__u##op __x[len] __align_stack_8;		\
		__u##op __y[len] __align_stack_8;		\
		__bpf_memset_builtin(__y, 0, sizeof(__y));	\
		__fill_rnd(__x, sizeof(__x));			\
		barrier_data(__x);				\
		__bpf_memzero(__x, sizeof(__x));		\
		barrier_data(__x);				\
		barrier_data(__y);				\
		assert(!__cmp_mem(__x, __y, sizeof(__x)));	\
	} while (0)

static void test___builtin_memzero(void)
{
	/* ./builtin_gen memzero 768 > builtin_memzero.h */
	/* Generated header expands to test___builtin_memzero_single(op, len)
	 * invocations for a range of element widths/lengths.
	 */
#include "builtin_memzero.h"
}

/* Copy __y into __x via the __bpf_memcpy() under test; __z holds the
 * reference copy made with the builtin. __x passing the compare against
 * __z proves the tested copy matches the builtin byte for byte.
 */
#define test___builtin_memcpy_single(op, len)				\
	do {								\
		__u##op __x[len] __align_stack_8;			\
		__u##op __y[len] __align_stack_8;			\
		__u##op __z[len] __align_stack_8;			\
		__bpf_memset_builtin(__x, 0, sizeof(__x));		\
		__fill_rnd(__y, sizeof(__y));				\
		__bpf_memcpy_builtin(__z, __y, sizeof(__z));		\
		barrier_data(__x);					\
		barrier_data(__y);					\
		__bpf_memcpy(__x, __y, sizeof(__x));			\
		barrier_data(__x);					\
		barrier_data(__z);					\
		assert(!__cmp_mem(__x, __z, sizeof(__x)));		\
	} while (0)

static void test___builtin_memcpy(void)
{
	/* ./builtin_gen memcpy 768 > builtin_memcpy.h */
#include "builtin_memcpy.h"
}

/* Copy __x to __y, then maybe corrupt one byte of __y. __bpf_memcmp()
 * must report a mismatch exactly when the corruption happened (cor);
 * res is bool, so any nonzero mismatch result collapses to true.
 */
#define test___builtin_memcmp_single(op, len)			\
	do {							\
		bool res, cor;					\
		__u##op __x[len] __align_stack_8;		\
		__u##op __y[len] __align_stack_8;		\
		__fill_rnd(__x, sizeof(__x));			\
		__cpy_mem(__y, __x, sizeof(__x));		\
		cor = __corrupt_mem(__y, sizeof(__x));		\
		barrier_data(__x);				\
		barrier_data(__y);				\
		res = __bpf_memcmp(__x, __y, sizeof(__x));	\
		assert(cor == res);				\
	} while (0)

static void test___builtin_memcmp(void)
{
	int i;

	/* Repeat so the randomized corruption exercises both the
	 * equal and unequal paths.
	 */
	for (i = 0; i < 100; i++) {
		/* ./builtin_gen memcmp 256 > builtin_memcmp.h */
#include "builtin_memcmp.h"
	}
}

/* Same as test___builtin_memcpy_single(). */
#define test___builtin_memmove1_single(op, len)				\
	do {								\
		__u##op __x[len] __align_stack_8;			\
		__u##op __y[len] __align_stack_8;			\
		__u##op __z[len] __align_stack_8;			\
		__bpf_memset_builtin(__x, 0, sizeof(__x));		\
		__fill_rnd(__y, sizeof(__y));				\
		__bpf_memcpy_builtin(__z, __y, sizeof(__z));		\
		barrier_data(__x);					\
		barrier_data(__y);					\
		__bpf_memmove(__x, __y, sizeof(__x));			\
		barrier_data(__x);					\
		barrier_data(__z);					\
		assert(!__cmp_mem(__x, __z, sizeof(__x)));		\
	} while (0)

/* Overlapping with src == dst.
 * With off == 0 the tested __bpf_memmove() moves __x onto itself, which
 * must leave it unchanged; __y holds the expected image built with the
 * reference builtin. (The second builtin memcpy is a no-op at off == 0;
 * presumably kept for symmetry with the memmove3 variant — the same
 * template with a nonzero off.)
 */
#define test___builtin_memmove2_single(op, len)				\
	do {								\
		__u##op __x[len] __align_stack_8;			\
		__u##op __y[len] __align_stack_8;			\
		__u8 *__p_x = (__u8 *)__x;				\
		__u8 *__p_y = (__u8 *)__y;				\
		const __u32 off = 0;					\
		__fill_cnt(__x, sizeof(__x));				\
		__bpf_memcpy_builtin(__y, __x, sizeof(__x));		\
		__bpf_memcpy_builtin(__p_y + off, __x, sizeof(__x) - off); \
		barrier_data(__x);					\
		__bpf_memmove(__p_x + off, __x, sizeof(__x) - off);	\
		barrier_data(__x);					\
		barrier_data(__y);					\
		assert(!__cmp_mem(__x, __y, sizeof(__x)));		\
	} while (0)

/* Overlapping with src < dst. */
/* off is half the buffer size, rounded down to an even byte offset;
 * the move shifts the front of __x forward onto its own tail. The
 * expected result __y is precomputed with non-overlapping builtin
 * copies before __x is disturbed.
 */
#define test___builtin_memmove3_single(op, len)				\
	do {								\
		__u##op __x[len] __align_stack_8;			\
		__u##op __y[len] __align_stack_8;			\
		__u8 *__p_x = (__u8 *)__x;				\
		__u8 *__p_y = (__u8 *)__y;				\
		const __u32 off = (sizeof(__x[0]) * len / 2) & ~1U;	\
		__fill_cnt(__x, sizeof(__x));				\
		__bpf_memcpy_builtin(__y, __x, sizeof(__x));		\
		__bpf_memcpy_builtin(__p_y + off, __x, sizeof(__x) - off); \
		barrier_data(__x);					\
		__bpf_memmove(__p_x + off, __x, sizeof(__x) - off);	\
		barrier_data(__x);					\
		barrier_data(__y);					\
		assert(!__cmp_mem(__x, __y, sizeof(__x)));		\
	} while (0)

/* Overlapping with src > dst.
 * Mirror of memmove3: the tail of __x (starting at off) is moved back
 * onto the front of __x; __y's front holds the expected bytes, its tail
 * beyond the moved region stays equal to the original __x pattern.
 */
#define test___builtin_memmove4_single(op, len)				\
	do {								\
		__u##op __x[len] __align_stack_8;			\
		__u##op __y[len] __align_stack_8;			\
		__u8 *__p_x = (__u8 *)__x;				\
		const __u32 off = (sizeof(__x[0]) * len / 2) & ~1U;	\
		__fill_cnt(__x, sizeof(__x));				\
		__bpf_memcpy_builtin(__y, __x, sizeof(__x));		\
		__bpf_memcpy_builtin(__y, __p_x + off, sizeof(__x) - off); \
		barrier_data(__x);					\
		__bpf_memmove(__x, __p_x + off, sizeof(__x) - off);	\
		barrier_data(__x);					\
		barrier_data(__y);					\
		assert(!__cmp_mem(__x, __y, sizeof(__x)));		\
	} while (0)

static void test___builtin_memmove(void)
{
	/* ./builtin_gen memmove1 768 > builtin_memmove.h */
	/* ./builtin_gen memmove2 768 >> builtin_memmove.h */
	/* ./builtin_gen memmove3 768 >> builtin_memmove.h */
	/* ./builtin_gen memmove4 768 >> builtin_memmove.h */
#include "builtin_memmove.h"
}