1 { 2 "calls: invalid kfunc call not eliminated", 3 .insns = { 4 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 5 BPF_MOV64_IMM(BPF_REG_0, 1), 6 BPF_EXIT_INSN(), 7 }, 8 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 9 .result = REJECT, 10 .errstr = "invalid kernel function call not eliminated in verifier pass", 11 }, 12 { 13 "calls: invalid kfunc call unreachable", 14 .insns = { 15 BPF_MOV64_IMM(BPF_REG_0, 1), 16 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 0, 2), 17 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 18 BPF_MOV64_IMM(BPF_REG_0, 1), 19 BPF_EXIT_INSN(), 20 }, 21 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 22 .result = ACCEPT, 23 }, 24 { 25 "calls: invalid kfunc call: ptr_to_mem to struct with non-scalar", 26 .insns = { 27 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 28 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 29 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 30 BPF_EXIT_INSN(), 31 }, 32 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 33 .result = REJECT, 34 .errstr = "arg#0 pointer type STRUCT prog_test_fail1 must point to scalar", 35 .fixup_kfunc_btf_id = { 36 { "bpf_kfunc_call_test_fail1", 2 }, 37 }, 38 }, 39 { 40 "calls: invalid kfunc call: ptr_to_mem to struct with nesting depth > 4", 41 .insns = { 42 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 43 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 44 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 45 BPF_EXIT_INSN(), 46 }, 47 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 48 .result = REJECT, 49 .errstr = "max struct nesting depth exceeded\narg#0 pointer type STRUCT prog_test_fail2", 50 .fixup_kfunc_btf_id = { 51 { "bpf_kfunc_call_test_fail2", 2 }, 52 }, 53 }, 54 { 55 "calls: invalid kfunc call: ptr_to_mem to struct with FAM", 56 .insns = { 57 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 58 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 59 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 60 BPF_EXIT_INSN(), 61 }, 62 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 63 .result = REJECT, 64 .errstr = "arg#0 
pointer type STRUCT prog_test_fail3 must point to scalar", 65 .fixup_kfunc_btf_id = { 66 { "bpf_kfunc_call_test_fail3", 2 }, 67 }, 68 }, 69 { 70 "calls: invalid kfunc call: reg->type != PTR_TO_CTX", 71 .insns = { 72 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 73 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 74 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 75 BPF_EXIT_INSN(), 76 }, 77 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 78 .result = REJECT, 79 .errstr = "arg#0 expected pointer to ctx, but got PTR", 80 .fixup_kfunc_btf_id = { 81 { "bpf_kfunc_call_test_pass_ctx", 2 }, 82 }, 83 }, 84 { 85 "calls: invalid kfunc call: void * not allowed in func proto without mem size arg", 86 .insns = { 87 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 88 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 89 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 90 BPF_EXIT_INSN(), 91 }, 92 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 93 .result = REJECT, 94 .errstr = "arg#0 pointer type UNKNOWN must point to scalar", 95 .fixup_kfunc_btf_id = { 96 { "bpf_kfunc_call_test_mem_len_fail1", 2 }, 97 }, 98 }, 99 { 100 "calls: trigger reg2btf_ids[reg->type] for reg->type > __BPF_REG_TYPE_MAX", 101 .insns = { 102 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 103 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 104 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0), 105 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 106 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 107 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 108 BPF_EXIT_INSN(), 109 }, 110 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 111 .result = REJECT, 112 .errstr = "arg#0 pointer type STRUCT prog_test_ref_kfunc must point", 113 .fixup_kfunc_btf_id = { 114 { "bpf_kfunc_call_test_acquire", 3 }, 115 { "bpf_kfunc_call_test_release", 5 }, 116 }, 117 }, 118 { 119 "calls: invalid kfunc call: reg->off must be zero when passed to release kfunc", 120 .insns = { 121 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 122 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 123 
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0), 124 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 125 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 126 BPF_EXIT_INSN(), 127 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 128 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 129 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 130 BPF_MOV64_IMM(BPF_REG_0, 0), 131 BPF_EXIT_INSN(), 132 }, 133 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 134 .result = REJECT, 135 .errstr = "R1 must have zero offset when passed to release func", 136 .fixup_kfunc_btf_id = { 137 { "bpf_kfunc_call_test_acquire", 3 }, 138 { "bpf_kfunc_call_memb_release", 8 }, 139 }, 140 }, 141 { 142 "calls: invalid kfunc call: PTR_TO_BTF_ID with negative offset", 143 .insns = { 144 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 145 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 146 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0), 147 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 148 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 149 BPF_EXIT_INSN(), 150 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 151 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 16), 152 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -4), 153 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 154 BPF_MOV64_IMM(BPF_REG_0, 0), 155 BPF_EXIT_INSN(), 156 }, 157 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 158 .fixup_kfunc_btf_id = { 159 { "bpf_kfunc_call_test_acquire", 3 }, 160 { "bpf_kfunc_call_test_release", 9 }, 161 }, 162 .result_unpriv = REJECT, 163 .result = REJECT, 164 .errstr = "negative offset ptr_ ptr R1 off=-4 disallowed", 165 }, 166 { 167 "calls: invalid kfunc call: PTR_TO_BTF_ID with variable offset", 168 .insns = { 169 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 170 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 171 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0), 172 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 173 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 174 BPF_EXIT_INSN(), 175 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 176 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4), 177 
BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 3), 178 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 179 BPF_MOV64_IMM(BPF_REG_0, 0), 180 BPF_EXIT_INSN(), 181 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 3), 182 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 183 BPF_MOV64_IMM(BPF_REG_0, 0), 184 BPF_EXIT_INSN(), 185 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), 186 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 187 BPF_MOV64_IMM(BPF_REG_0, 0), 188 BPF_EXIT_INSN(), 189 }, 190 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 191 .fixup_kfunc_btf_id = { 192 { "bpf_kfunc_call_test_acquire", 3 }, 193 { "bpf_kfunc_call_test_release", 9 }, 194 { "bpf_kfunc_call_test_release", 13 }, 195 { "bpf_kfunc_call_test_release", 17 }, 196 }, 197 .result_unpriv = REJECT, 198 .result = REJECT, 199 .errstr = "variable ptr_ access var_off=(0x0; 0x7) disallowed", 200 }, 201 { 202 "calls: basic sanity", 203 .insns = { 204 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 205 BPF_MOV64_IMM(BPF_REG_0, 1), 206 BPF_EXIT_INSN(), 207 BPF_MOV64_IMM(BPF_REG_0, 2), 208 BPF_EXIT_INSN(), 209 }, 210 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 211 .result = ACCEPT, 212 }, 213 { 214 "calls: not on unpriviledged", 215 .insns = { 216 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 217 BPF_MOV64_IMM(BPF_REG_0, 1), 218 BPF_EXIT_INSN(), 219 BPF_MOV64_IMM(BPF_REG_0, 2), 220 BPF_EXIT_INSN(), 221 }, 222 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for", 223 .result_unpriv = REJECT, 224 .result = ACCEPT, 225 .retval = 1, 226 }, 227 { 228 "calls: div by 0 in subprog", 229 .insns = { 230 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 231 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8), 232 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 233 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 234 offsetof(struct __sk_buff, data_end)), 235 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), 236 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), 237 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), 238 BPF_LDX_MEM(BPF_B, BPF_REG_0, 
BPF_REG_0, 0), 239 BPF_MOV64_IMM(BPF_REG_0, 1), 240 BPF_EXIT_INSN(), 241 BPF_MOV32_IMM(BPF_REG_2, 0), 242 BPF_MOV32_IMM(BPF_REG_3, 1), 243 BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2), 244 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 245 offsetof(struct __sk_buff, data)), 246 BPF_EXIT_INSN(), 247 }, 248 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 249 .result = ACCEPT, 250 .retval = 1, 251 }, 252 { 253 "calls: multiple ret types in subprog 1", 254 .insns = { 255 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 256 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8), 257 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 258 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 259 offsetof(struct __sk_buff, data_end)), 260 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), 261 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), 262 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), 263 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 264 BPF_MOV64_IMM(BPF_REG_0, 1), 265 BPF_EXIT_INSN(), 266 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 267 offsetof(struct __sk_buff, data)), 268 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 269 BPF_MOV32_IMM(BPF_REG_0, 42), 270 BPF_EXIT_INSN(), 271 }, 272 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 273 .result = REJECT, 274 .errstr = "R0 invalid mem access 'scalar'", 275 }, 276 { 277 "calls: multiple ret types in subprog 2", 278 .insns = { 279 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 280 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8), 281 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 282 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 283 offsetof(struct __sk_buff, data_end)), 284 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), 285 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), 286 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), 287 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 288 BPF_MOV64_IMM(BPF_REG_0, 1), 289 BPF_EXIT_INSN(), 290 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 291 offsetof(struct __sk_buff, data)), 292 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 293 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9), 294 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 295 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 296 
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 297 BPF_LD_MAP_FD(BPF_REG_1, 0), 298 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 299 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 300 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 301 offsetof(struct __sk_buff, data)), 302 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64), 303 BPF_EXIT_INSN(), 304 }, 305 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 306 .fixup_map_hash_8b = { 16 }, 307 .result = REJECT, 308 .errstr = "R0 min value is outside of the allowed memory range", 309 }, 310 { 311 "calls: overlapping caller/callee", 312 .insns = { 313 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0), 314 BPF_MOV64_IMM(BPF_REG_0, 1), 315 BPF_EXIT_INSN(), 316 }, 317 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 318 .errstr = "last insn is not an exit or jmp", 319 .result = REJECT, 320 }, 321 { 322 "calls: wrong recursive calls", 323 .insns = { 324 BPF_JMP_IMM(BPF_JA, 0, 0, 4), 325 BPF_JMP_IMM(BPF_JA, 0, 0, 4), 326 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2), 327 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2), 328 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2), 329 BPF_MOV64_IMM(BPF_REG_0, 1), 330 BPF_EXIT_INSN(), 331 }, 332 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 333 .errstr = "jump out of range", 334 .result = REJECT, 335 }, 336 { 337 "calls: wrong src reg", 338 .insns = { 339 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 3, 0, 0), 340 BPF_MOV64_IMM(BPF_REG_0, 1), 341 BPF_EXIT_INSN(), 342 }, 343 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 344 .errstr = "BPF_CALL uses reserved fields", 345 .result = REJECT, 346 }, 347 { 348 "calls: wrong off value", 349 .insns = { 350 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2), 351 BPF_MOV64_IMM(BPF_REG_0, 1), 352 BPF_EXIT_INSN(), 353 BPF_MOV64_IMM(BPF_REG_0, 2), 354 BPF_EXIT_INSN(), 355 }, 356 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 357 .errstr = "BPF_CALL uses reserved fields", 358 .result = REJECT, 359 }, 360 { 361 "calls: jump back loop", 362 .insns = { 363 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1), 364 BPF_MOV64_IMM(BPF_REG_0, 
1), 365 BPF_EXIT_INSN(), 366 }, 367 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 368 .errstr = "back-edge from insn 0 to 0", 369 .result = REJECT, 370 }, 371 { 372 "calls: conditional call", 373 .insns = { 374 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 375 offsetof(struct __sk_buff, mark)), 376 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 377 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 378 BPF_MOV64_IMM(BPF_REG_0, 1), 379 BPF_EXIT_INSN(), 380 BPF_MOV64_IMM(BPF_REG_0, 2), 381 BPF_EXIT_INSN(), 382 }, 383 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 384 .errstr = "jump out of range", 385 .result = REJECT, 386 }, 387 { 388 "calls: conditional call 2", 389 .insns = { 390 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 391 offsetof(struct __sk_buff, mark)), 392 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 393 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 394 BPF_MOV64_IMM(BPF_REG_0, 1), 395 BPF_EXIT_INSN(), 396 BPF_MOV64_IMM(BPF_REG_0, 2), 397 BPF_EXIT_INSN(), 398 BPF_MOV64_IMM(BPF_REG_0, 3), 399 BPF_EXIT_INSN(), 400 }, 401 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 402 .result = ACCEPT, 403 }, 404 { 405 "calls: conditional call 3", 406 .insns = { 407 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 408 offsetof(struct __sk_buff, mark)), 409 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 410 BPF_JMP_IMM(BPF_JA, 0, 0, 4), 411 BPF_MOV64_IMM(BPF_REG_0, 1), 412 BPF_EXIT_INSN(), 413 BPF_MOV64_IMM(BPF_REG_0, 1), 414 BPF_JMP_IMM(BPF_JA, 0, 0, -6), 415 BPF_MOV64_IMM(BPF_REG_0, 3), 416 BPF_JMP_IMM(BPF_JA, 0, 0, -6), 417 }, 418 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, 419 .errstr_unpriv = "back-edge from insn", 420 .result_unpriv = REJECT, 421 .result = ACCEPT, 422 .retval = 1, 423 }, 424 { 425 "calls: conditional call 4", 426 .insns = { 427 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 428 offsetof(struct __sk_buff, mark)), 429 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 430 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 431 BPF_MOV64_IMM(BPF_REG_0, 1), 432 BPF_EXIT_INSN(), 433 BPF_MOV64_IMM(BPF_REG_0, 1), 434 BPF_JMP_IMM(BPF_JA, 0, 0, 
-5), 435 BPF_MOV64_IMM(BPF_REG_0, 3), 436 BPF_EXIT_INSN(), 437 }, 438 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 439 .result = ACCEPT, 440 }, 441 { 442 "calls: conditional call 5", 443 .insns = { 444 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 445 offsetof(struct __sk_buff, mark)), 446 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 447 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 448 BPF_MOV64_IMM(BPF_REG_0, 1), 449 BPF_EXIT_INSN(), 450 BPF_MOV64_IMM(BPF_REG_0, 1), 451 BPF_JMP_IMM(BPF_JA, 0, 0, -6), 452 BPF_MOV64_IMM(BPF_REG_0, 3), 453 BPF_EXIT_INSN(), 454 }, 455 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 456 .result = ACCEPT, 457 .retval = 1, 458 }, 459 { 460 "calls: conditional call 6", 461 .insns = { 462 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 463 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 464 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 465 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3), 466 BPF_EXIT_INSN(), 467 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 468 offsetof(struct __sk_buff, mark)), 469 BPF_EXIT_INSN(), 470 }, 471 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 472 .errstr = "infinite loop detected", 473 .result = REJECT, 474 }, 475 { 476 "calls: using r0 returned by callee", 477 .insns = { 478 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 479 BPF_EXIT_INSN(), 480 BPF_MOV64_IMM(BPF_REG_0, 2), 481 BPF_EXIT_INSN(), 482 }, 483 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 484 .result = ACCEPT, 485 }, 486 { 487 "calls: using uninit r0 from callee", 488 .insns = { 489 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 490 BPF_EXIT_INSN(), 491 BPF_EXIT_INSN(), 492 }, 493 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 494 .errstr = "!read_ok", 495 .result = REJECT, 496 }, 497 { 498 "calls: callee is using r1", 499 .insns = { 500 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 501 BPF_EXIT_INSN(), 502 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 503 offsetof(struct __sk_buff, len)), 504 BPF_EXIT_INSN(), 505 }, 506 .prog_type = BPF_PROG_TYPE_SCHED_ACT, 507 .result = ACCEPT, 508 .retval = TEST_DATA_LEN, 509 }, 510 { 511 "calls: 
callee using args1", 512 .insns = { 513 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 514 BPF_EXIT_INSN(), 515 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), 516 BPF_EXIT_INSN(), 517 }, 518 .errstr_unpriv = "allowed for", 519 .result_unpriv = REJECT, 520 .result = ACCEPT, 521 .retval = POINTER_VALUE, 522 }, 523 { 524 "calls: callee using wrong args2", 525 .insns = { 526 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 527 BPF_EXIT_INSN(), 528 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 529 BPF_EXIT_INSN(), 530 }, 531 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 532 .errstr = "R2 !read_ok", 533 .result = REJECT, 534 }, 535 { 536 "calls: callee using two args", 537 .insns = { 538 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 539 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, 540 offsetof(struct __sk_buff, len)), 541 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6, 542 offsetof(struct __sk_buff, len)), 543 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 544 BPF_EXIT_INSN(), 545 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), 546 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), 547 BPF_EXIT_INSN(), 548 }, 549 .errstr_unpriv = "allowed for", 550 .result_unpriv = REJECT, 551 .result = ACCEPT, 552 .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN, 553 }, 554 { 555 "calls: callee changing pkt pointers", 556 .insns = { 557 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct xdp_md, data)), 558 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 559 offsetof(struct xdp_md, data_end)), 560 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6), 561 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8), 562 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2), 563 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 564 /* clear_all_pkt_pointers() has to walk all frames 565 * to make sure that pkt pointers in the caller 566 * are cleared when callee is calling a helper that 567 * adjusts packet size 568 */ 569 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 570 BPF_MOV32_IMM(BPF_REG_0, 0), 571 BPF_EXIT_INSN(), 572 BPF_MOV64_IMM(BPF_REG_2, 0), 573 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 
0, BPF_FUNC_xdp_adjust_head), 574 BPF_EXIT_INSN(), 575 }, 576 .result = REJECT, 577 .errstr = "R6 invalid mem access 'scalar'", 578 .prog_type = BPF_PROG_TYPE_XDP, 579 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 580 }, 581 { 582 "calls: ptr null check in subprog", 583 .insns = { 584 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 585 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 586 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 587 BPF_LD_MAP_FD(BPF_REG_1, 0), 588 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 589 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 590 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), 591 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 592 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 593 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0), 594 BPF_EXIT_INSN(), 595 BPF_MOV64_IMM(BPF_REG_0, 0), 596 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), 597 BPF_MOV64_IMM(BPF_REG_0, 1), 598 BPF_EXIT_INSN(), 599 }, 600 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for", 601 .fixup_map_hash_48b = { 3 }, 602 .result_unpriv = REJECT, 603 .result = ACCEPT, 604 .retval = 0, 605 }, 606 { 607 "calls: two calls with args", 608 .insns = { 609 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 610 BPF_EXIT_INSN(), 611 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 612 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6), 613 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), 614 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 615 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 616 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), 617 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), 618 BPF_EXIT_INSN(), 619 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 620 offsetof(struct __sk_buff, len)), 621 BPF_EXIT_INSN(), 622 }, 623 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 624 .result = ACCEPT, 625 .retval = TEST_DATA_LEN + TEST_DATA_LEN, 626 }, 627 { 628 "calls: calls with stack arith", 629 .insns = { 630 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 631 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64), 632 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 633 BPF_EXIT_INSN(), 634 BPF_ALU64_IMM(BPF_ADD, 
BPF_REG_2, -64), 635 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 636 BPF_EXIT_INSN(), 637 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64), 638 BPF_MOV64_IMM(BPF_REG_0, 42), 639 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), 640 BPF_EXIT_INSN(), 641 }, 642 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 643 .result = ACCEPT, 644 .retval = 42, 645 }, 646 { 647 "calls: calls with misaligned stack access", 648 .insns = { 649 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 650 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63), 651 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 652 BPF_EXIT_INSN(), 653 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61), 654 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 655 BPF_EXIT_INSN(), 656 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63), 657 BPF_MOV64_IMM(BPF_REG_0, 42), 658 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), 659 BPF_EXIT_INSN(), 660 }, 661 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 662 .flags = F_LOAD_WITH_STRICT_ALIGNMENT, 663 .errstr = "misaligned stack access", 664 .result = REJECT, 665 }, 666 { 667 "calls: calls control flow, jump test", 668 .insns = { 669 BPF_MOV64_IMM(BPF_REG_0, 42), 670 BPF_JMP_IMM(BPF_JA, 0, 0, 2), 671 BPF_MOV64_IMM(BPF_REG_0, 43), 672 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 673 BPF_JMP_IMM(BPF_JA, 0, 0, -3), 674 BPF_EXIT_INSN(), 675 }, 676 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 677 .result = ACCEPT, 678 .retval = 43, 679 }, 680 { 681 "calls: calls control flow, jump test 2", 682 .insns = { 683 BPF_MOV64_IMM(BPF_REG_0, 42), 684 BPF_JMP_IMM(BPF_JA, 0, 0, 2), 685 BPF_MOV64_IMM(BPF_REG_0, 43), 686 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 687 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3), 688 BPF_EXIT_INSN(), 689 }, 690 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 691 .errstr = "jump out of range from insn 1 to 4", 692 .result = REJECT, 693 }, 694 { 695 "calls: two calls with bad jump", 696 .insns = { 697 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 698 BPF_EXIT_INSN(), 699 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 700 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6), 701 BPF_MOV64_REG(BPF_REG_7, 
BPF_REG_0), 702 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 703 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 704 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), 705 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), 706 BPF_EXIT_INSN(), 707 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 708 offsetof(struct __sk_buff, len)), 709 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3), 710 BPF_EXIT_INSN(), 711 }, 712 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 713 .errstr = "jump out of range from insn 11 to 9", 714 .result = REJECT, 715 }, 716 { 717 "calls: recursive call. test1", 718 .insns = { 719 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 720 BPF_EXIT_INSN(), 721 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1), 722 BPF_EXIT_INSN(), 723 }, 724 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 725 .errstr = "back-edge", 726 .result = REJECT, 727 }, 728 { 729 "calls: recursive call. test2", 730 .insns = { 731 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 732 BPF_EXIT_INSN(), 733 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3), 734 BPF_EXIT_INSN(), 735 }, 736 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 737 .errstr = "back-edge", 738 .result = REJECT, 739 }, 740 { 741 "calls: unreachable code", 742 .insns = { 743 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 744 BPF_EXIT_INSN(), 745 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 746 BPF_EXIT_INSN(), 747 BPF_MOV64_IMM(BPF_REG_0, 0), 748 BPF_EXIT_INSN(), 749 BPF_MOV64_IMM(BPF_REG_0, 0), 750 BPF_EXIT_INSN(), 751 }, 752 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 753 .errstr = "unreachable insn 6", 754 .result = REJECT, 755 }, 756 { 757 "calls: invalid call", 758 .insns = { 759 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 760 BPF_EXIT_INSN(), 761 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4), 762 BPF_EXIT_INSN(), 763 }, 764 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 765 .errstr = "invalid destination", 766 .result = REJECT, 767 }, 768 { 769 "calls: invalid call 2", 770 .insns = { 771 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 772 BPF_EXIT_INSN(), 773 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 
0x7fffffff), 774 BPF_EXIT_INSN(), 775 }, 776 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 777 .errstr = "invalid destination", 778 .result = REJECT, 779 }, 780 { 781 "calls: jumping across function bodies. test1", 782 .insns = { 783 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 784 BPF_MOV64_IMM(BPF_REG_0, 0), 785 BPF_EXIT_INSN(), 786 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3), 787 BPF_EXIT_INSN(), 788 }, 789 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 790 .errstr = "jump out of range", 791 .result = REJECT, 792 }, 793 { 794 "calls: jumping across function bodies. test2", 795 .insns = { 796 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), 797 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 798 BPF_MOV64_IMM(BPF_REG_0, 0), 799 BPF_EXIT_INSN(), 800 BPF_EXIT_INSN(), 801 }, 802 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 803 .errstr = "jump out of range", 804 .result = REJECT, 805 }, 806 { 807 "calls: call without exit", 808 .insns = { 809 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 810 BPF_EXIT_INSN(), 811 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 812 BPF_EXIT_INSN(), 813 BPF_MOV64_IMM(BPF_REG_0, 0), 814 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2), 815 }, 816 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 817 .errstr = "not an exit", 818 .result = REJECT, 819 }, 820 { 821 "calls: call into middle of ld_imm64", 822 .insns = { 823 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 824 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 825 BPF_MOV64_IMM(BPF_REG_0, 0), 826 BPF_EXIT_INSN(), 827 BPF_LD_IMM64(BPF_REG_0, 0), 828 BPF_EXIT_INSN(), 829 }, 830 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 831 .errstr = "last insn", 832 .result = REJECT, 833 }, 834 { 835 "calls: call into middle of other call", 836 .insns = { 837 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 838 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 839 BPF_MOV64_IMM(BPF_REG_0, 0), 840 BPF_EXIT_INSN(), 841 BPF_MOV64_IMM(BPF_REG_0, 0), 842 BPF_MOV64_IMM(BPF_REG_0, 0), 843 BPF_EXIT_INSN(), 844 }, 845 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 846 .errstr = "last 
insn", 847 .result = REJECT, 848 }, 849 { 850 "calls: subprog call with ld_abs in main prog", 851 .insns = { 852 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 853 BPF_LD_ABS(BPF_B, 0), 854 BPF_LD_ABS(BPF_H, 0), 855 BPF_LD_ABS(BPF_W, 0), 856 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6), 857 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 858 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5), 859 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), 860 BPF_LD_ABS(BPF_B, 0), 861 BPF_LD_ABS(BPF_H, 0), 862 BPF_LD_ABS(BPF_W, 0), 863 BPF_EXIT_INSN(), 864 BPF_MOV64_IMM(BPF_REG_2, 1), 865 BPF_MOV64_IMM(BPF_REG_3, 2), 866 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push), 867 BPF_EXIT_INSN(), 868 }, 869 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 870 .result = ACCEPT, 871 }, 872 { 873 "calls: two calls with bad fallthrough", 874 .insns = { 875 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 876 BPF_EXIT_INSN(), 877 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 878 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6), 879 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), 880 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 881 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 882 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), 883 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), 884 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0), 885 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 886 offsetof(struct __sk_buff, len)), 887 BPF_EXIT_INSN(), 888 }, 889 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 890 .errstr = "not an exit", 891 .result = REJECT, 892 }, 893 { 894 "calls: two calls with stack read", 895 .insns = { 896 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 897 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 898 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 899 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 900 BPF_EXIT_INSN(), 901 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 902 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6), 903 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), 904 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 905 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 906 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), 907 
BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), 908 BPF_EXIT_INSN(), 909 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), 910 BPF_EXIT_INSN(), 911 }, 912 .prog_type = BPF_PROG_TYPE_XDP, 913 .result = ACCEPT, 914 }, 915 { 916 "calls: two calls with stack write", 917 .insns = { 918 /* main prog */ 919 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 920 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 921 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 922 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 923 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), 924 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 925 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16), 926 BPF_EXIT_INSN(), 927 928 /* subprog 1 */ 929 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 930 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 931 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7), 932 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), 933 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 934 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 935 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0), 936 BPF_MOV64_REG(BPF_REG_0, BPF_REG_8), 937 /* write into stack frame of main prog */ 938 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 939 BPF_EXIT_INSN(), 940 941 /* subprog 2 */ 942 /* read from stack frame of main prog */ 943 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), 944 BPF_EXIT_INSN(), 945 }, 946 .prog_type = BPF_PROG_TYPE_XDP, 947 .result = ACCEPT, 948 }, 949 { 950 "calls: stack overflow using two frames (pre-call access)", 951 .insns = { 952 /* prog 1 */ 953 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), 954 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), 955 BPF_EXIT_INSN(), 956 957 /* prog 2 */ 958 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), 959 BPF_MOV64_IMM(BPF_REG_0, 0), 960 BPF_EXIT_INSN(), 961 }, 962 .prog_type = BPF_PROG_TYPE_XDP, 963 .errstr = "combined stack size", 964 .result = REJECT, 965 }, 966 { 967 "calls: stack overflow using two frames (post-call access)", 968 .insns = { 969 /* prog 1 */ 970 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), 971 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), 972 BPF_EXIT_INSN(), 973 974 /* 
prog 2 */ 975 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), 976 BPF_MOV64_IMM(BPF_REG_0, 0), 977 BPF_EXIT_INSN(), 978 }, 979 .prog_type = BPF_PROG_TYPE_XDP, 980 .errstr = "combined stack size", 981 .result = REJECT, 982 }, 983 { 984 "calls: stack depth check using three frames. test1", 985 .insns = { 986 /* main */ 987 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */ 988 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */ 989 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0), 990 BPF_MOV64_IMM(BPF_REG_0, 0), 991 BPF_EXIT_INSN(), 992 /* A */ 993 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0), 994 BPF_EXIT_INSN(), 995 /* B */ 996 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */ 997 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0), 998 BPF_EXIT_INSN(), 999 }, 1000 .prog_type = BPF_PROG_TYPE_XDP, 1001 /* stack_main=32, stack_A=256, stack_B=64 1002 * and max(main+A, main+A+B) < 512 1003 */ 1004 .result = ACCEPT, 1005 }, 1006 { 1007 "calls: stack depth check using three frames. test2", 1008 .insns = { 1009 /* main */ 1010 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */ 1011 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */ 1012 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0), 1013 BPF_MOV64_IMM(BPF_REG_0, 0), 1014 BPF_EXIT_INSN(), 1015 /* A */ 1016 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0), 1017 BPF_EXIT_INSN(), 1018 /* B */ 1019 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */ 1020 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0), 1021 BPF_EXIT_INSN(), 1022 }, 1023 .prog_type = BPF_PROG_TYPE_XDP, 1024 /* stack_main=32, stack_A=64, stack_B=256 1025 * and max(main+A, main+A+B) < 512 1026 */ 1027 .result = ACCEPT, 1028 }, 1029 { 1030 "calls: stack depth check using three frames. 
test3", 1031 .insns = { 1032 /* main */ 1033 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1034 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */ 1035 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 1036 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */ 1037 BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1), 1038 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0), 1039 BPF_MOV64_IMM(BPF_REG_0, 0), 1040 BPF_EXIT_INSN(), 1041 /* A */ 1042 BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1), 1043 BPF_EXIT_INSN(), 1044 BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0), 1045 BPF_JMP_IMM(BPF_JA, 0, 0, -3), 1046 /* B */ 1047 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1), 1048 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */ 1049 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0), 1050 BPF_EXIT_INSN(), 1051 }, 1052 .prog_type = BPF_PROG_TYPE_XDP, 1053 /* stack_main=64, stack_A=224, stack_B=256 1054 * and max(main+A, main+A+B) > 512 1055 */ 1056 .errstr = "combined stack", 1057 .result = REJECT, 1058 }, 1059 { 1060 "calls: stack depth check using three frames. 
test4", 1061 /* void main(void) { 1062 * func1(0); 1063 * func1(1); 1064 * func2(1); 1065 * } 1066 * void func1(int alloc_or_recurse) { 1067 * if (alloc_or_recurse) { 1068 * frame_pointer[-300] = 1; 1069 * } else { 1070 * func2(alloc_or_recurse); 1071 * } 1072 * } 1073 * void func2(int alloc_or_recurse) { 1074 * if (alloc_or_recurse) { 1075 * frame_pointer[-300] = 1; 1076 * } 1077 * } 1078 */ 1079 .insns = { 1080 /* main */ 1081 BPF_MOV64_IMM(BPF_REG_1, 0), 1082 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */ 1083 BPF_MOV64_IMM(BPF_REG_1, 1), 1084 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */ 1085 BPF_MOV64_IMM(BPF_REG_1, 1), 1086 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */ 1087 BPF_MOV64_IMM(BPF_REG_0, 0), 1088 BPF_EXIT_INSN(), 1089 /* A */ 1090 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2), 1091 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), 1092 BPF_EXIT_INSN(), 1093 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */ 1094 BPF_EXIT_INSN(), 1095 /* B */ 1096 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), 1097 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), 1098 BPF_EXIT_INSN(), 1099 }, 1100 .prog_type = BPF_PROG_TYPE_XDP, 1101 .result = REJECT, 1102 .errstr = "combined stack", 1103 }, 1104 { 1105 "calls: stack depth check using three frames. 
test5", 1106 .insns = { 1107 /* main */ 1108 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */ 1109 BPF_EXIT_INSN(), 1110 /* A */ 1111 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */ 1112 BPF_EXIT_INSN(), 1113 /* B */ 1114 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */ 1115 BPF_EXIT_INSN(), 1116 /* C */ 1117 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */ 1118 BPF_EXIT_INSN(), 1119 /* D */ 1120 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */ 1121 BPF_EXIT_INSN(), 1122 /* E */ 1123 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */ 1124 BPF_EXIT_INSN(), 1125 /* F */ 1126 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */ 1127 BPF_EXIT_INSN(), 1128 /* G */ 1129 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */ 1130 BPF_EXIT_INSN(), 1131 /* H */ 1132 BPF_MOV64_IMM(BPF_REG_0, 0), 1133 BPF_EXIT_INSN(), 1134 }, 1135 .prog_type = BPF_PROG_TYPE_XDP, 1136 .errstr = "call stack", 1137 .result = REJECT, 1138 }, 1139 { 1140 "calls: stack depth check in dead code", 1141 .insns = { 1142 /* main */ 1143 BPF_MOV64_IMM(BPF_REG_1, 0), 1144 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */ 1145 BPF_EXIT_INSN(), 1146 /* A */ 1147 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), 1148 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */ 1149 BPF_MOV64_IMM(BPF_REG_0, 0), 1150 BPF_EXIT_INSN(), 1151 /* B */ 1152 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */ 1153 BPF_EXIT_INSN(), 1154 /* C */ 1155 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */ 1156 BPF_EXIT_INSN(), 1157 /* D */ 1158 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */ 1159 BPF_EXIT_INSN(), 1160 /* E */ 1161 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */ 1162 BPF_EXIT_INSN(), 1163 /* F */ 1164 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */ 1165 BPF_EXIT_INSN(), 1166 /* G */ 1167 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */ 1168 BPF_EXIT_INSN(), 1169 /* H */ 1170 BPF_MOV64_IMM(BPF_REG_0, 0), 1171 
BPF_EXIT_INSN(), 1172 }, 1173 .prog_type = BPF_PROG_TYPE_XDP, 1174 .errstr = "call stack", 1175 .result = REJECT, 1176 }, 1177 { 1178 "calls: spill into caller stack frame", 1179 .insns = { 1180 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1181 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1182 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1183 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 1184 BPF_EXIT_INSN(), 1185 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0), 1186 BPF_MOV64_IMM(BPF_REG_0, 0), 1187 BPF_EXIT_INSN(), 1188 }, 1189 .prog_type = BPF_PROG_TYPE_XDP, 1190 .errstr = "cannot spill", 1191 .result = REJECT, 1192 }, 1193 { 1194 "calls: write into caller stack frame", 1195 .insns = { 1196 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1197 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1198 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1199 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 1200 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 1201 BPF_EXIT_INSN(), 1202 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42), 1203 BPF_MOV64_IMM(BPF_REG_0, 0), 1204 BPF_EXIT_INSN(), 1205 }, 1206 .prog_type = BPF_PROG_TYPE_XDP, 1207 .result = ACCEPT, 1208 .retval = 42, 1209 }, 1210 { 1211 "calls: write into callee stack frame", 1212 .insns = { 1213 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 1214 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), 1215 BPF_EXIT_INSN(), 1216 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), 1217 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8), 1218 BPF_EXIT_INSN(), 1219 }, 1220 .prog_type = BPF_PROG_TYPE_XDP, 1221 .errstr = "cannot return stack pointer", 1222 .result = REJECT, 1223 }, 1224 { 1225 "calls: two calls with stack write and void return", 1226 .insns = { 1227 /* main prog */ 1228 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1229 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1230 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1231 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1232 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), 1233 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 1234 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16), 1235 BPF_EXIT_INSN(), 1236 1237 /* 
subprog 1 */ 1238 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1239 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 1240 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 1241 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), 1242 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 1243 BPF_EXIT_INSN(), 1244 1245 /* subprog 2 */ 1246 /* write into stack frame of main prog */ 1247 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0), 1248 BPF_EXIT_INSN(), /* void return */ 1249 }, 1250 .prog_type = BPF_PROG_TYPE_XDP, 1251 .result = ACCEPT, 1252 }, 1253 { 1254 "calls: ambiguous return value", 1255 .insns = { 1256 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1257 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5), 1258 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 1259 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 1260 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 1261 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 1262 BPF_EXIT_INSN(), 1263 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), 1264 BPF_MOV64_IMM(BPF_REG_0, 0), 1265 BPF_EXIT_INSN(), 1266 }, 1267 .errstr_unpriv = "allowed for", 1268 .result_unpriv = REJECT, 1269 .errstr = "R0 !read_ok", 1270 .result = REJECT, 1271 }, 1272 { 1273 "calls: two calls that return map_value", 1274 .insns = { 1275 /* main prog */ 1276 /* pass fp-16, fp-8 into a function */ 1277 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1278 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1279 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1280 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), 1281 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8), 1282 1283 /* fetch map_value_ptr from the stack of this function */ 1284 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), 1285 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 1286 /* write into map value */ 1287 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1288 /* fetch second map_value_ptr from the stack */ 1289 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16), 1290 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 1291 /* write into map value */ 1292 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1293 BPF_MOV64_IMM(BPF_REG_0, 0), 1294 BPF_EXIT_INSN(), 1295 1296 /* subprog 1 */ 1297 /* 
call 3rd function twice */ 1298 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1299 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 1300 /* first time with fp-8 */ 1301 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 1302 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), 1303 /* second time with fp-16 */ 1304 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 1305 BPF_EXIT_INSN(), 1306 1307 /* subprog 2 */ 1308 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1309 /* lookup from map */ 1310 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1311 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1312 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1313 BPF_LD_MAP_FD(BPF_REG_1, 0), 1314 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1315 /* write map_value_ptr into stack frame of main prog */ 1316 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), 1317 BPF_MOV64_IMM(BPF_REG_0, 0), 1318 BPF_EXIT_INSN(), /* return 0 */ 1319 }, 1320 .prog_type = BPF_PROG_TYPE_XDP, 1321 .fixup_map_hash_8b = { 23 }, 1322 .result = ACCEPT, 1323 }, 1324 { 1325 "calls: two calls that return map_value with bool condition", 1326 .insns = { 1327 /* main prog */ 1328 /* pass fp-16, fp-8 into a function */ 1329 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1330 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1331 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1332 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), 1333 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 1334 BPF_MOV64_IMM(BPF_REG_0, 0), 1335 BPF_EXIT_INSN(), 1336 1337 /* subprog 1 */ 1338 /* call 3rd function twice */ 1339 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1340 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 1341 /* first time with fp-8 */ 1342 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9), 1343 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), 1344 /* fetch map_value_ptr from the stack of this function */ 1345 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 1346 /* write into map value */ 1347 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1348 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), 1349 /* second time with fp-16 */ 1350 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 1351 
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), 1352 /* fetch second map_value_ptr from the stack */ 1353 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), 1354 /* write into map value */ 1355 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1356 BPF_EXIT_INSN(), 1357 1358 /* subprog 2 */ 1359 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1360 /* lookup from map */ 1361 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1362 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1363 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1364 BPF_LD_MAP_FD(BPF_REG_1, 0), 1365 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1366 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1367 BPF_MOV64_IMM(BPF_REG_0, 0), 1368 BPF_EXIT_INSN(), /* return 0 */ 1369 /* write map_value_ptr into stack frame of main prog */ 1370 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), 1371 BPF_MOV64_IMM(BPF_REG_0, 1), 1372 BPF_EXIT_INSN(), /* return 1 */ 1373 }, 1374 .prog_type = BPF_PROG_TYPE_XDP, 1375 .fixup_map_hash_8b = { 23 }, 1376 .result = ACCEPT, 1377 }, 1378 { 1379 "calls: two calls that return map_value with incorrect bool check", 1380 .insns = { 1381 /* main prog */ 1382 /* pass fp-16, fp-8 into a function */ 1383 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1384 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1385 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1386 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), 1387 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 1388 BPF_MOV64_IMM(BPF_REG_0, 0), 1389 BPF_EXIT_INSN(), 1390 1391 /* subprog 1 */ 1392 /* call 3rd function twice */ 1393 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1394 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 1395 /* first time with fp-8 */ 1396 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9), 1397 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), 1398 /* fetch map_value_ptr from the stack of this function */ 1399 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 1400 /* write into map value */ 1401 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1402 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), 1403 /* second time with fp-16 */ 1404 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 
0, 1, 0, 4), 1405 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1406 /* fetch second map_value_ptr from the stack */ 1407 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), 1408 /* write into map value */ 1409 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1410 BPF_EXIT_INSN(), 1411 1412 /* subprog 2 */ 1413 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1414 /* lookup from map */ 1415 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1416 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1417 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1418 BPF_LD_MAP_FD(BPF_REG_1, 0), 1419 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1420 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1421 BPF_MOV64_IMM(BPF_REG_0, 0), 1422 BPF_EXIT_INSN(), /* return 0 */ 1423 /* write map_value_ptr into stack frame of main prog */ 1424 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), 1425 BPF_MOV64_IMM(BPF_REG_0, 1), 1426 BPF_EXIT_INSN(), /* return 1 */ 1427 }, 1428 .prog_type = BPF_PROG_TYPE_XDP, 1429 .fixup_map_hash_8b = { 23 }, 1430 .result = REJECT, 1431 .errstr = "invalid read from stack R7 off=-16 size=8", 1432 }, 1433 { 1434 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. 
test1", 1435 .insns = { 1436 /* main prog */ 1437 /* pass fp-16, fp-8 into a function */ 1438 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1439 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1440 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1441 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), 1442 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 1443 BPF_MOV64_IMM(BPF_REG_0, 0), 1444 BPF_EXIT_INSN(), 1445 1446 /* subprog 1 */ 1447 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1448 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 1449 /* 1st lookup from map */ 1450 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1451 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1452 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1453 BPF_LD_MAP_FD(BPF_REG_1, 0), 1454 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1455 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1456 BPF_MOV64_IMM(BPF_REG_8, 0), 1457 BPF_JMP_IMM(BPF_JA, 0, 0, 2), 1458 /* write map_value_ptr into stack frame of main prog at fp-8 */ 1459 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), 1460 BPF_MOV64_IMM(BPF_REG_8, 1), 1461 1462 /* 2nd lookup from map */ 1463 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */ 1464 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1465 BPF_LD_MAP_FD(BPF_REG_1, 0), 1466 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */ 1467 BPF_FUNC_map_lookup_elem), 1468 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1469 BPF_MOV64_IMM(BPF_REG_9, 0), 1470 BPF_JMP_IMM(BPF_JA, 0, 0, 2), 1471 /* write map_value_ptr into stack frame of main prog at fp-16 */ 1472 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 1473 BPF_MOV64_IMM(BPF_REG_9, 1), 1474 1475 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */ 1476 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */ 1477 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8), 1478 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), 1479 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9), 1480 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */ 1481 BPF_EXIT_INSN(), 1482 1483 /* subprog 2 */ 1484 /* if arg2 == 1 do *arg1 = 0 */ 1485 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2), 1486 /* fetch map_value_ptr from the 
stack of this function */ 1487 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), 1488 /* write into map value */ 1489 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1490 1491 /* if arg4 == 1 do *arg3 = 0 */ 1492 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2), 1493 /* fetch map_value_ptr from the stack of this function */ 1494 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), 1495 /* write into map value */ 1496 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0), 1497 BPF_EXIT_INSN(), 1498 }, 1499 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1500 .fixup_map_hash_8b = { 12, 22 }, 1501 .result = REJECT, 1502 .errstr = "invalid access to map value, value_size=8 off=2 size=8", 1503 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 1504 }, 1505 { 1506 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2", 1507 .insns = { 1508 /* main prog */ 1509 /* pass fp-16, fp-8 into a function */ 1510 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1511 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1512 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1513 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), 1514 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 1515 BPF_MOV64_IMM(BPF_REG_0, 0), 1516 BPF_EXIT_INSN(), 1517 1518 /* subprog 1 */ 1519 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1520 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 1521 /* 1st lookup from map */ 1522 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1523 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1524 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1525 BPF_LD_MAP_FD(BPF_REG_1, 0), 1526 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1527 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1528 BPF_MOV64_IMM(BPF_REG_8, 0), 1529 BPF_JMP_IMM(BPF_JA, 0, 0, 2), 1530 /* write map_value_ptr into stack frame of main prog at fp-8 */ 1531 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), 1532 BPF_MOV64_IMM(BPF_REG_8, 1), 1533 1534 /* 2nd lookup from map */ 1535 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */ 1536 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1537 BPF_LD_MAP_FD(BPF_REG_1, 0), 1538 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 
0, /* 24 */ 1539 BPF_FUNC_map_lookup_elem), 1540 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1541 BPF_MOV64_IMM(BPF_REG_9, 0), 1542 BPF_JMP_IMM(BPF_JA, 0, 0, 2), 1543 /* write map_value_ptr into stack frame of main prog at fp-16 */ 1544 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 1545 BPF_MOV64_IMM(BPF_REG_9, 1), 1546 1547 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */ 1548 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */ 1549 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8), 1550 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), 1551 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9), 1552 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */ 1553 BPF_EXIT_INSN(), 1554 1555 /* subprog 2 */ 1556 /* if arg2 == 1 do *arg1 = 0 */ 1557 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2), 1558 /* fetch map_value_ptr from the stack of this function */ 1559 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), 1560 /* write into map value */ 1561 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1562 1563 /* if arg4 == 1 do *arg3 = 0 */ 1564 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2), 1565 /* fetch map_value_ptr from the stack of this function */ 1566 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), 1567 /* write into map value */ 1568 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1569 BPF_EXIT_INSN(), 1570 }, 1571 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1572 .fixup_map_hash_8b = { 12, 22 }, 1573 .result = ACCEPT, 1574 }, 1575 { 1576 "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. 
test3", 1577 .insns = { 1578 /* main prog */ 1579 /* pass fp-16, fp-8 into a function */ 1580 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1581 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1582 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1583 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), 1584 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), 1585 BPF_MOV64_IMM(BPF_REG_0, 0), 1586 BPF_EXIT_INSN(), 1587 1588 /* subprog 1 */ 1589 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1590 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 1591 /* 1st lookup from map */ 1592 BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0), 1593 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1594 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24), 1595 BPF_LD_MAP_FD(BPF_REG_1, 0), 1596 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1597 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1598 BPF_MOV64_IMM(BPF_REG_8, 0), 1599 BPF_JMP_IMM(BPF_JA, 0, 0, 2), 1600 /* write map_value_ptr into stack frame of main prog at fp-8 */ 1601 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), 1602 BPF_MOV64_IMM(BPF_REG_8, 1), 1603 1604 /* 2nd lookup from map */ 1605 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1606 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24), 1607 BPF_LD_MAP_FD(BPF_REG_1, 0), 1608 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1609 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1610 BPF_MOV64_IMM(BPF_REG_9, 0), // 26 1611 BPF_JMP_IMM(BPF_JA, 0, 0, 2), 1612 /* write map_value_ptr into stack frame of main prog at fp-16 */ 1613 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 1614 BPF_MOV64_IMM(BPF_REG_9, 1), 1615 1616 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */ 1617 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30 1618 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8), 1619 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), 1620 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9), 1621 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34 1622 BPF_JMP_IMM(BPF_JA, 0, 0, -30), 1623 1624 /* subprog 2 */ 1625 /* if arg2 == 1 do *arg1 = 0 */ 1626 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2), 1627 /* fetch map_value_ptr from the stack of this 
function */ 1628 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), 1629 /* write into map value */ 1630 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1631 1632 /* if arg4 == 1 do *arg3 = 0 */ 1633 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2), 1634 /* fetch map_value_ptr from the stack of this function */ 1635 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), 1636 /* write into map value */ 1637 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0), 1638 BPF_JMP_IMM(BPF_JA, 0, 0, -8), 1639 }, 1640 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1641 .fixup_map_hash_8b = { 12, 22 }, 1642 .result = REJECT, 1643 .errstr = "invalid access to map value, value_size=8 off=2 size=8", 1644 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 1645 }, 1646 { 1647 "calls: two calls that receive map_value_ptr_or_null via arg. test1", 1648 .insns = { 1649 /* main prog */ 1650 /* pass fp-16, fp-8 into a function */ 1651 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1652 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1653 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1654 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), 1655 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 1656 BPF_MOV64_IMM(BPF_REG_0, 0), 1657 BPF_EXIT_INSN(), 1658 1659 /* subprog 1 */ 1660 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1661 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 1662 /* 1st lookup from map */ 1663 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1664 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1665 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1666 BPF_LD_MAP_FD(BPF_REG_1, 0), 1667 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1668 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */ 1669 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), 1670 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1671 BPF_MOV64_IMM(BPF_REG_8, 0), 1672 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 1673 BPF_MOV64_IMM(BPF_REG_8, 1), 1674 1675 /* 2nd lookup from map */ 1676 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1677 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1678 BPF_LD_MAP_FD(BPF_REG_1, 0), 1679 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 
BPF_FUNC_map_lookup_elem), 1680 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */ 1681 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 1682 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1683 BPF_MOV64_IMM(BPF_REG_9, 0), 1684 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 1685 BPF_MOV64_IMM(BPF_REG_9, 1), 1686 1687 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */ 1688 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 1689 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8), 1690 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), 1691 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9), 1692 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 1693 BPF_EXIT_INSN(), 1694 1695 /* subprog 2 */ 1696 /* if arg2 == 1 do *arg1 = 0 */ 1697 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2), 1698 /* fetch map_value_ptr from the stack of this function */ 1699 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), 1700 /* write into map value */ 1701 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1702 1703 /* if arg4 == 1 do *arg3 = 0 */ 1704 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2), 1705 /* fetch map_value_ptr from the stack of this function */ 1706 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), 1707 /* write into map value */ 1708 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1709 BPF_EXIT_INSN(), 1710 }, 1711 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1712 .fixup_map_hash_8b = { 12, 22 }, 1713 .result = ACCEPT, 1714 }, 1715 { 1716 "calls: two calls that receive map_value_ptr_or_null via arg. 
test2", 1717 .insns = { 1718 /* main prog */ 1719 /* pass fp-16, fp-8 into a function */ 1720 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1721 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1722 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1723 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), 1724 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 1725 BPF_MOV64_IMM(BPF_REG_0, 0), 1726 BPF_EXIT_INSN(), 1727 1728 /* subprog 1 */ 1729 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1730 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 1731 /* 1st lookup from map */ 1732 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1733 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1734 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1735 BPF_LD_MAP_FD(BPF_REG_1, 0), 1736 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1737 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */ 1738 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), 1739 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1740 BPF_MOV64_IMM(BPF_REG_8, 0), 1741 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 1742 BPF_MOV64_IMM(BPF_REG_8, 1), 1743 1744 /* 2nd lookup from map */ 1745 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1746 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1747 BPF_LD_MAP_FD(BPF_REG_1, 0), 1748 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1749 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */ 1750 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 1751 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1752 BPF_MOV64_IMM(BPF_REG_9, 0), 1753 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 1754 BPF_MOV64_IMM(BPF_REG_9, 1), 1755 1756 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */ 1757 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 1758 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8), 1759 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), 1760 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9), 1761 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 1762 BPF_EXIT_INSN(), 1763 1764 /* subprog 2 */ 1765 /* if arg2 == 1 do *arg1 = 0 */ 1766 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2), 1767 /* fetch map_value_ptr from the stack of this function 
*/ 1768 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), 1769 /* write into map value */ 1770 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1771 1772 /* if arg4 == 0 do *arg3 = 0 */ 1773 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2), 1774 /* fetch map_value_ptr from the stack of this function */ 1775 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), 1776 /* write into map value */ 1777 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1778 BPF_EXIT_INSN(), 1779 }, 1780 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1781 .fixup_map_hash_8b = { 12, 22 }, 1782 .result = REJECT, 1783 .errstr = "R0 invalid mem access 'scalar'", 1784 }, 1785 { 1786 "calls: pkt_ptr spill into caller stack", 1787 .insns = { 1788 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), 1789 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), 1790 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 1791 BPF_EXIT_INSN(), 1792 1793 /* subprog 1 */ 1794 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 1795 offsetof(struct __sk_buff, data)), 1796 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 1797 offsetof(struct __sk_buff, data_end)), 1798 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 1799 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 1800 /* spill unchecked pkt_ptr into stack of caller */ 1801 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 1802 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), 1803 /* now the pkt range is verified, read pkt_ptr from stack */ 1804 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0), 1805 /* write 4 bytes into packet */ 1806 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), 1807 BPF_EXIT_INSN(), 1808 }, 1809 .result = ACCEPT, 1810 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1811 .retval = POINTER_VALUE, 1812 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 1813 }, 1814 { 1815 "calls: pkt_ptr spill into caller stack 2", 1816 .insns = { 1817 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), 1818 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), 1819 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 1820 /* Marking is still kept, but not in all cases safe. 
*/ 1821 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), 1822 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0), 1823 BPF_EXIT_INSN(), 1824 1825 /* subprog 1 */ 1826 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 1827 offsetof(struct __sk_buff, data)), 1828 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 1829 offsetof(struct __sk_buff, data_end)), 1830 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 1831 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 1832 /* spill unchecked pkt_ptr into stack of caller */ 1833 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 1834 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), 1835 /* now the pkt range is verified, read pkt_ptr from stack */ 1836 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0), 1837 /* write 4 bytes into packet */ 1838 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), 1839 BPF_EXIT_INSN(), 1840 }, 1841 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1842 .errstr = "invalid access to packet", 1843 .result = REJECT, 1844 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 1845 }, 1846 { 1847 "calls: pkt_ptr spill into caller stack 3", 1848 .insns = { 1849 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), 1850 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), 1851 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 1852 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), 1853 /* Marking is still kept and safe here. 
*/ 1854 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), 1855 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0), 1856 BPF_EXIT_INSN(), 1857 1858 /* subprog 1 */ 1859 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 1860 offsetof(struct __sk_buff, data)), 1861 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 1862 offsetof(struct __sk_buff, data_end)), 1863 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 1864 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 1865 /* spill unchecked pkt_ptr into stack of caller */ 1866 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 1867 BPF_MOV64_IMM(BPF_REG_5, 0), 1868 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), 1869 BPF_MOV64_IMM(BPF_REG_5, 1), 1870 /* now the pkt range is verified, read pkt_ptr from stack */ 1871 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0), 1872 /* write 4 bytes into packet */ 1873 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), 1874 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), 1875 BPF_EXIT_INSN(), 1876 }, 1877 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1878 .result = ACCEPT, 1879 .retval = 1, 1880 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 1881 }, 1882 { 1883 "calls: pkt_ptr spill into caller stack 4", 1884 .insns = { 1885 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), 1886 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), 1887 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 1888 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), 1889 /* Check marking propagated. 
*/ 1890 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), 1891 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0), 1892 BPF_EXIT_INSN(), 1893 1894 /* subprog 1 */ 1895 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 1896 offsetof(struct __sk_buff, data)), 1897 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 1898 offsetof(struct __sk_buff, data_end)), 1899 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 1900 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 1901 /* spill unchecked pkt_ptr into stack of caller */ 1902 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 1903 BPF_MOV64_IMM(BPF_REG_5, 0), 1904 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), 1905 BPF_MOV64_IMM(BPF_REG_5, 1), 1906 /* don't read back pkt_ptr from stack here */ 1907 /* write 4 bytes into packet */ 1908 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), 1909 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), 1910 BPF_EXIT_INSN(), 1911 }, 1912 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1913 .result = ACCEPT, 1914 .retval = 1, 1915 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 1916 }, 1917 { 1918 "calls: pkt_ptr spill into caller stack 5", 1919 .insns = { 1920 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), 1921 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), 1922 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0), 1923 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 1924 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), 1925 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), 1926 BPF_EXIT_INSN(), 1927 1928 /* subprog 1 */ 1929 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 1930 offsetof(struct __sk_buff, data)), 1931 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 1932 offsetof(struct __sk_buff, data_end)), 1933 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 1934 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 1935 BPF_MOV64_IMM(BPF_REG_5, 0), 1936 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), 1937 /* spill checked pkt_ptr into stack of caller */ 1938 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 1939 BPF_MOV64_IMM(BPF_REG_5, 1), 1940 /* don't read back pkt_ptr from stack here */ 1941 /* write 4 bytes into packet */ 1942 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 
0), 1943 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), 1944 BPF_EXIT_INSN(), 1945 }, 1946 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1947 .errstr = "same insn cannot be used with different", 1948 .result = REJECT, 1949 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 1950 }, 1951 { 1952 "calls: pkt_ptr spill into caller stack 6", 1953 .insns = { 1954 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 1955 offsetof(struct __sk_buff, data_end)), 1956 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), 1957 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), 1958 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 1959 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 1960 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), 1961 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), 1962 BPF_EXIT_INSN(), 1963 1964 /* subprog 1 */ 1965 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 1966 offsetof(struct __sk_buff, data)), 1967 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 1968 offsetof(struct __sk_buff, data_end)), 1969 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 1970 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 1971 BPF_MOV64_IMM(BPF_REG_5, 0), 1972 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), 1973 /* spill checked pkt_ptr into stack of caller */ 1974 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 1975 BPF_MOV64_IMM(BPF_REG_5, 1), 1976 /* don't read back pkt_ptr from stack here */ 1977 /* write 4 bytes into packet */ 1978 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), 1979 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), 1980 BPF_EXIT_INSN(), 1981 }, 1982 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1983 .errstr = "R4 invalid mem access", 1984 .result = REJECT, 1985 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 1986 }, 1987 { 1988 "calls: pkt_ptr spill into caller stack 7", 1989 .insns = { 1990 BPF_MOV64_IMM(BPF_REG_2, 0), 1991 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), 1992 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), 1993 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 1994 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 1995 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), 1996 BPF_LDX_MEM(BPF_W, BPF_REG_0, 
BPF_REG_4, 0), 1997 BPF_EXIT_INSN(), 1998 1999 /* subprog 1 */ 2000 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2001 offsetof(struct __sk_buff, data)), 2002 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2003 offsetof(struct __sk_buff, data_end)), 2004 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2005 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2006 BPF_MOV64_IMM(BPF_REG_5, 0), 2007 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), 2008 /* spill checked pkt_ptr into stack of caller */ 2009 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 2010 BPF_MOV64_IMM(BPF_REG_5, 1), 2011 /* don't read back pkt_ptr from stack here */ 2012 /* write 4 bytes into packet */ 2013 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), 2014 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), 2015 BPF_EXIT_INSN(), 2016 }, 2017 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2018 .errstr = "R4 invalid mem access", 2019 .result = REJECT, 2020 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 2021 }, 2022 { 2023 "calls: pkt_ptr spill into caller stack 8", 2024 .insns = { 2025 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2026 offsetof(struct __sk_buff, data)), 2027 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2028 offsetof(struct __sk_buff, data_end)), 2029 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2030 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2031 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1), 2032 BPF_EXIT_INSN(), 2033 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), 2034 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), 2035 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 2036 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 2037 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), 2038 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), 2039 BPF_EXIT_INSN(), 2040 2041 /* subprog 1 */ 2042 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2043 offsetof(struct __sk_buff, data)), 2044 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2045 offsetof(struct __sk_buff, data_end)), 2046 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2047 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2048 BPF_MOV64_IMM(BPF_REG_5, 0), 2049 BPF_JMP_REG(BPF_JGT, BPF_REG_0, 
BPF_REG_3, 3), 2050 /* spill checked pkt_ptr into stack of caller */ 2051 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 2052 BPF_MOV64_IMM(BPF_REG_5, 1), 2053 /* don't read back pkt_ptr from stack here */ 2054 /* write 4 bytes into packet */ 2055 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), 2056 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), 2057 BPF_EXIT_INSN(), 2058 }, 2059 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2060 .result = ACCEPT, 2061 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 2062 }, 2063 { 2064 "calls: pkt_ptr spill into caller stack 9", 2065 .insns = { 2066 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2067 offsetof(struct __sk_buff, data)), 2068 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2069 offsetof(struct __sk_buff, data_end)), 2070 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2071 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2072 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1), 2073 BPF_EXIT_INSN(), 2074 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), 2075 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), 2076 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 2077 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 2078 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), 2079 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), 2080 BPF_EXIT_INSN(), 2081 2082 /* subprog 1 */ 2083 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2084 offsetof(struct __sk_buff, data)), 2085 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2086 offsetof(struct __sk_buff, data_end)), 2087 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2088 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2089 BPF_MOV64_IMM(BPF_REG_5, 0), 2090 /* spill unchecked pkt_ptr into stack of caller */ 2091 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 2092 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), 2093 BPF_MOV64_IMM(BPF_REG_5, 1), 2094 /* don't read back pkt_ptr from stack here */ 2095 /* write 4 bytes into packet */ 2096 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), 2097 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), 2098 BPF_EXIT_INSN(), 2099 }, 2100 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2101 .errstr = "invalid access to 
packet", 2102 .result = REJECT, 2103 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 2104 }, 2105 { 2106 "calls: caller stack init to zero or map_value_or_null", 2107 .insns = { 2108 BPF_MOV64_IMM(BPF_REG_0, 0), 2109 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 2110 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 2112 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 2113 /* fetch map_value_or_null or const_zero from stack */ 2114 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), 2115 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 2116 /* store into map_value */ 2117 BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0), 2118 BPF_EXIT_INSN(), 2119 2120 /* subprog 1 */ 2121 /* if (ctx == 0) return; */ 2122 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8), 2123 /* else bpf_map_lookup() and *(fp - 8) = r0 */ 2124 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), 2125 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2126 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 2127 BPF_LD_MAP_FD(BPF_REG_1, 0), 2128 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 2129 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 2130 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */ 2131 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), 2132 BPF_EXIT_INSN(), 2133 }, 2134 .fixup_map_hash_8b = { 13 }, 2135 .result = ACCEPT, 2136 .prog_type = BPF_PROG_TYPE_XDP, 2137 }, 2138 { 2139 "calls: stack init to zero and pruning", 2140 .insns = { 2141 /* first make allocated_stack 16 byte */ 2142 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0), 2143 /* now fork the execution such that the false branch 2144 * of JGT insn will be verified second and it skips zero 2145 * init of fp-8 stack slot. 
If stack liveness marking 2146 * is missing live_read marks from call map_lookup 2147 * processing then pruning will incorrectly assume 2148 * that fp-8 stack slot was unused in the fall-through 2149 * branch and will accept the program incorrectly 2150 */ 2151 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2), 2152 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 2153 BPF_JMP_IMM(BPF_JA, 0, 0, 0), 2154 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2155 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 2156 BPF_LD_MAP_FD(BPF_REG_1, 0), 2157 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 2158 BPF_EXIT_INSN(), 2159 }, 2160 .fixup_map_hash_48b = { 6 }, 2161 .errstr = "invalid indirect read from stack R2 off -8+0 size 8", 2162 .result = REJECT, 2163 .prog_type = BPF_PROG_TYPE_XDP, 2164 }, 2165 { 2166 "calls: ctx read at start of subprog", 2167 .insns = { 2168 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 2169 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5), 2170 BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0), 2171 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 2172 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 2173 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 2174 BPF_EXIT_INSN(), 2175 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0), 2176 BPF_MOV64_IMM(BPF_REG_0, 0), 2177 BPF_EXIT_INSN(), 2178 }, 2179 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, 2180 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for", 2181 .result_unpriv = REJECT, 2182 .result = ACCEPT, 2183 }, 2184 { 2185 "calls: cross frame pruning", 2186 .insns = { 2187 /* r8 = !!random(); 2188 * call pruner() 2189 * if (r8) 2190 * do something bad; 2191 */ 2192 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), 2193 BPF_MOV64_IMM(BPF_REG_8, 0), 2194 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 2195 BPF_MOV64_IMM(BPF_REG_8, 1), 2196 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), 2197 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 2198 BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1), 2199 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0), 2200 
BPF_MOV64_IMM(BPF_REG_0, 0), 2201 BPF_EXIT_INSN(), 2202 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0), 2203 BPF_EXIT_INSN(), 2204 }, 2205 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, 2206 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for", 2207 .errstr = "!read_ok", 2208 .result = REJECT, 2209 }, 2210 { 2211 "calls: cross frame pruning - liveness propagation", 2212 .insns = { 2213 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), 2214 BPF_MOV64_IMM(BPF_REG_8, 0), 2215 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 2216 BPF_MOV64_IMM(BPF_REG_8, 1), 2217 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), 2218 BPF_MOV64_IMM(BPF_REG_9, 0), 2219 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 2220 BPF_MOV64_IMM(BPF_REG_9, 1), 2221 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 2222 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 2223 BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1), 2224 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0), 2225 BPF_MOV64_IMM(BPF_REG_0, 0), 2226 BPF_EXIT_INSN(), 2227 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0), 2228 BPF_EXIT_INSN(), 2229 }, 2230 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, 2231 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for", 2232 .errstr = "!read_ok", 2233 .result = REJECT, 2234 }, 2235