github.com/zhyoulun/cilium@v1.6.12/contrib/codegen/hexgen.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __HEXGEN_H
#define __HEXGEN_H

#include <linux/bpf.h>

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc are used as argument mappings of function
 * calls in BPF_CALL instructions.
 */
#define BPF_REG_ARG1	BPF_REG_1
#define BPF_REG_ARG2	BPF_REG_2
#define BPF_REG_ARG3	BPF_REG_3
#define BPF_REG_ARG4	BPF_REG_4
#define BPF_REG_ARG5	BPF_REG_5
#define BPF_REG_CTX	BPF_REG_6
#define BPF_REG_FP	BPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A	BPF_REG_0
#define BPF_REG_X	BPF_REG_7
#define BPF_REG_TMP	BPF_REG_8

/* A BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK	512

/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

#define BPF_ENDIAN(TYPE, DST, LEN)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_END | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = LEN })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_MOV32_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_MOV32_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */

#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })
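/* Usage sketch (illustrative, not part of the upstream macro set): each of
 * the macros above expands to a struct bpf_insn compound literal, so a
 * program is written as a plain array initializer, one macro per insn:
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 1),		// r0 = 1
 *		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),	// r0 += 2
 *		BPF_EXIT_INSN(),			// return r0 (defined below)
 *	};
 */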
/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */

#define BPF_LD_IND(SIZE, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,	\
		.dst_reg = 0,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */

#define BPF_STX_XADD(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Unconditional jumps, goto pc + off16 */

#define BPF_JMP_A(OFF)						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_JA,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Function call */

#define BPF_EMIT_CALL(FUNC)					\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_CALL,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((FUNC) - BPF_FUNC_unspec) })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
	((struct bpf_insn) {					\
		.code  = CODE,					\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = IMM })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */

#define BPF_LD_IMM64(DST, IMM)					\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = (__u32) (IMM) }),			\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((__u64) (IMM)) >> 32 })

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */

#define BPF_LD_MAP_FD(DST, MAP_FD)				\
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
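/* Usage sketch (illustrative): unlike the other macros, BPF_LD_IMM64 and
 * BPF_LD_MAP_FD expand to *two* struct bpf_insn entries, since the 64-bit
 * immediate is split across an insn pair. With BPF_PSEUDO_MAP_FD in
 * src_reg, the kernel rewrites the fd into a map pointer at load time.
 * The map_fd below is an assumed variable, e.g. obtained from a prior
 * bpf(BPF_MAP_CREATE, ...) syscall:
 *
 *	struct bpf_insn prog[] = {
 *		BPF_LD_MAP_FD(BPF_REG_ARG1, map_fd),	// r1 = &map (2 insns)
 *		BPF_MOV64_IMM(BPF_REG_0, 0),		// r0 = 0
 *		BPF_EXIT_INSN(),			// return r0
 *	};
 */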
/* Relative call */

#define BPF_CALL_REL(TGT)					\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_CALL,			\
		.dst_reg = 0,					\
		.src_reg = BPF_PSEUDO_CALL,			\
		.off   = 0,					\
		.imm   = TGT })

/* Program exit */

#define BPF_EXIT_INSN()						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_EXIT,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = 0 })

#endif /* __HEXGEN_H */
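/* Usage sketch (illustrative): BPF_EMIT_CALL invokes a kernel helper by id,
 * while BPF_CALL_REL (src_reg = BPF_PSEUDO_CALL) is a bpf-to-bpf call whose
 * imm is the callee's insn offset relative to the insn after the call:
 *
 *	struct bpf_insn prog[] = {
 *		BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),	// 0: r0 = helper()
 *		BPF_CALL_REL(1),			// 1: call insn 2 + 1 = subprog
 *		BPF_EXIT_INSN(),			// 2: return r0
 *		// subprog:
 *		BPF_MOV64_IMM(BPF_REG_0, 0),		// 3: r0 = 0
 *		BPF_EXIT_INSN(),			// 4: return to caller
 *	};
 */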