/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Linux Socket Filter Data Structures
 */
#ifndef __TOOLS_LINUX_FILTER_H
#define __TOOLS_LINUX_FILTER_H

#include <linux/bpf.h>

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc are used as argument mappings of function
 * calls in BPF_CALL instruction.
 */
#define BPF_REG_ARG1	BPF_REG_1
#define BPF_REG_ARG2	BPF_REG_2
#define BPF_REG_ARG3	BPF_REG_3
#define BPF_REG_ARG4	BPF_REG_4
#define BPF_REG_ARG5	BPF_REG_5
#define BPF_REG_CTX	BPF_REG_6
#define BPF_REG_FP	BPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A	BPF_REG_0
#define BPF_REG_X	BPF_REG_7
#define BPF_REG_TMP	BPF_REG_8

/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK	512

/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

#define BPF_ENDIAN(TYPE, DST, LEN)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_END | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = LEN })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_MOV32_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_MOV32_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Short form of movsx, dst_reg = (s8,s16,s32)src_reg */

#define BPF_MOVSX64_REG(DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

#define BPF_MOVSX32_REG(DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */

#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })
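/* Usage sketch (illustrative example, not part of the original header):
 * these initializers compose into plain arrays of struct bpf_insn, e.g.
 * a minimal program that moves an immediate into R0 and returns it:
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 1),
 *		BPF_EXIT_INSN(),
 *	};
 *
 * BPF_EXIT_INSN() is defined at the end of this header; R0 carries the
 * program's return value.
 */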
/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */

#define BPF_LD_IND(SIZE, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,	\
		.dst_reg = 0,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/*
 * Atomic operations:
 *
 *   BPF_ADD                *(uint *) (dst_reg + off16) += src_reg
 *   BPF_AND                *(uint *) (dst_reg + off16) &= src_reg
 *   BPF_OR                 *(uint *) (dst_reg + off16) |= src_reg
 *   BPF_XOR                *(uint *) (dst_reg + off16) ^= src_reg
 *   BPF_ADD | BPF_FETCH    src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
 *   BPF_AND | BPF_FETCH    src_reg = atomic_fetch_and(dst_reg + off16, src_reg);
 *   BPF_OR | BPF_FETCH     src_reg = atomic_fetch_or(dst_reg + off16, src_reg);
 *   BPF_XOR | BPF_FETCH    src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
 *   BPF_XCHG               src_reg = atomic_xchg(dst_reg + off16, src_reg)
 *   BPF_CMPXCHG            r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
 */

#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = OP })

/* Legacy alias */
#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)
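/* Usage sketch (illustrative example, not part of the original header):
 * assuming R1 holds a valid pointer to naturally aligned memory, an
 * atomic 64-bit fetch-add of R2 into *(u64 *)(R1 + 0) that leaves the
 * old value in R2 could be encoded as:
 *
 *	BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_1, BPF_REG_2, 0)
 */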
/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_REG(OP, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_JMP32 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_IMM(OP, DST, IMM, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_JMP32 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Unconditional jumps, goto pc + off16 */

#define BPF_JMP_A(OFF)						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_JA,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Unconditional jumps, gotol pc + imm32 */

#define BPF_JMP32_A(IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_JMP32 | BPF_JA,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Function call */

#define BPF_EMIT_CALL(FUNC)					\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_CALL,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((FUNC) - BPF_FUNC_unspec) })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
	((struct bpf_insn) {					\
		.code  = CODE,					\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = IMM })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */

#define BPF_LD_IMM64(DST, IMM)					\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = (__u32) (IMM) }),			\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((__u64) (IMM)) >> 32 })

#define BPF_LD_IMM64_RAW_FULL(DST, SRC, OFF1, OFF2, IMM1, IMM2)	\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF1,					\
		.imm   = IMM1 }),				\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = OFF2,					\
		.imm   = IMM2 })

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */

#define BPF_LD_MAP_FD(DST, MAP_FD)				\
	BPF_LD_IMM64_RAW_FULL(DST, BPF_PSEUDO_MAP_FD, 0, 0,	\
			      MAP_FD, 0)

#define BPF_LD_MAP_VALUE(DST, MAP_FD, VALUE_OFF)		\
	BPF_LD_IMM64_RAW_FULL(DST, BPF_PSEUDO_MAP_VALUE, 0, 0,	\
			      MAP_FD, VALUE_OFF)

/* Relative call */

#define BPF_CALL_REL(TGT)					\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_CALL,			\
		.dst_reg = 0,					\
		.src_reg = BPF_PSEUDO_CALL,			\
		.off   = 0,					\
		.imm   = TGT })

/* Program exit */

#define BPF_EXIT_INSN()						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_EXIT,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = 0 })

#endif /* __TOOLS_LINUX_FILTER_H */