/*
 * BPF verifier test cases for subprogram calls ("calls: ..." group).
 *
 * Each element below is a struct bpf_test initializer consumed by the
 * selftests test_verifier harness: .insns is the raw BPF program,
 * .result/.result_unpriv state whether the verifier must ACCEPT or REJECT
 * it (privileged / unprivileged load), .errstr/.errstr_unpriv pin the
 * expected verifier message, and .retval pins the program's return value
 * when it is actually executed.  NOTE(review): this is a chunk of a larger
 * array — the enclosing declaration and the tail of the last entry are
 * outside this view.
 */
/* --- invalid/unreachable kfunc call handling --- */
{
	"calls: invalid kfunc call not eliminated",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.result  = REJECT,
	.errstr = "invalid kernel function call not eliminated in verifier pass",
},
{
	"calls: invalid kfunc call unreachable",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	/* R0 is known to be 1 here, so the kfunc call below is dead code */
	BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 0, 2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.result = ACCEPT,
},
/* --- kfunc argument-type checks (.fixup_kfunc_btf_id patches the call
 * to the named test kfunc at the given insn index) --- */
{
	"calls: invalid kfunc call: ptr_to_mem to struct with non-scalar",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "arg#0 pointer type STRUCT prog_test_fail1 must point to scalar",
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_fail1", 2 },
	},
},
{
	"calls: invalid kfunc call: ptr_to_mem to struct with nesting depth > 4",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "max struct nesting depth exceeded\narg#0 pointer type STRUCT prog_test_fail2",
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_fail2", 2 },
	},
},
{
	"calls: invalid kfunc call: ptr_to_mem to struct with FAM",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "arg#0 pointer type STRUCT prog_test_fail3 must point to scalar",
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_fail3", 2 },
	},
},
{
	"calls: invalid kfunc call: reg->type != PTR_TO_CTX",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "arg#0 expected pointer to ctx, but got PTR",
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_pass_ctx", 2 },
	},
},
{
	"calls: invalid kfunc call: void * not allowed in func proto without mem size arg",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "arg#0 pointer type UNKNOWN must point to scalar",
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_mem_len_fail1", 2 },
	},
},
/* --- bpf-to-bpf (pseudo) call basics: BPF_RAW_INSN(BPF_JMP | BPF_CALL,
 * 0, 1, 0, off) with src_reg==1 is a call to a subprog at relative
 * offset 'off' --- */
{
	"calls: basic sanity",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.result = ACCEPT,
},
{
	"calls: not on unpriviledged",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = 1,
},
{
	"calls: div by 0 in subprog",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	/* subprog: 32-bit divide by zero (expected to yield 0, not trap) */
	BPF_MOV32_IMM(BPF_REG_2, 0),
	BPF_MOV32_IMM(BPF_REG_3, 1),
	BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
},
{
	"calls: multiple ret types in subprog 1",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_MOV32_IMM(BPF_REG_0, 42),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "R0 invalid mem access 'inv'",
},
{
	"calls: multiple ret types in subprog 2",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
		    offsetof(struct __sk_buff, data)),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 16 },
	.result = REJECT,
	.errstr = "R0 min value is outside of the allowed memory range",
},
/* --- malformed call encodings and call-target layout errors --- */
{
	"calls: overlapping caller/callee",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "last insn is not an exit or jmp",
	.result = REJECT,
},
{
	"calls: wrong recursive calls",
	.insns = {
	BPF_JMP_IMM(BPF_JA, 0, 0, 4),
	BPF_JMP_IMM(BPF_JA, 0, 0, 4),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "jump out of range",
	.result = REJECT,
},
{
	"calls: wrong src reg",
	.insns = {
	/* src_reg 3 is neither 0 (helper call) nor 1 (pseudo call) */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 3, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "BPF_CALL uses reserved fields",
	.result = REJECT,
},
{
	"calls: wrong off value",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "BPF_CALL uses reserved fields",
	.result = REJECT,
},
{
	"calls: jump back loop",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "back-edge from insn 0 to 0",
	.result = REJECT,
},
/* --- calls mixed with conditional control flow --- */
{
	"calls: conditional call",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "jump out of range",
	.result = REJECT,
},
{
	"calls: conditional call 2",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 3),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.result = ACCEPT,
},
{
	"calls: conditional call 3",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
	BPF_JMP_IMM(BPF_JA, 0, 0, 4),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, -6),
	BPF_MOV64_IMM(BPF_REG_0, 3),
	BPF_JMP_IMM(BPF_JA, 0, 0, -6),
	},
	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	.errstr_unpriv = "back-edge from insn",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = 1,
},
{
	"calls: conditional call 4",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, -5),
	BPF_MOV64_IMM(BPF_REG_0, 3),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.result = ACCEPT,
},
{
	"calls: conditional call 5",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, -6),
	BPF_MOV64_IMM(BPF_REG_0, 3),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
},
{
	"calls: conditional call 6",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "infinite loop detected",
	.result = REJECT,
},
/* --- register/argument passing between caller and callee --- */
{
	"calls: using r0 returned by callee",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.result = ACCEPT,
},
{
	"calls: using uninit r0 from callee",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "!read_ok",
	.result = REJECT,
},
{
	"calls: callee is using r1",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, len)),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_ACT,
	.result = ACCEPT,
	.retval = TEST_DATA_LEN,
},
{
	"calls: callee using args1",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "allowed for",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = POINTER_VALUE,
},
{
	"calls: callee using wrong args2",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "R2 !read_ok",
	.result = REJECT,
},
{
	"calls: callee using two args",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
		    offsetof(struct __sk_buff, len)),
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
		    offsetof(struct __sk_buff, len)),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "allowed for",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
},
{
	"calls: callee changing pkt pointers",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	/* clear_all_pkt_pointers() has to walk all frames
	 * to make sure that pkt pointers in the caller
	 * are cleared when callee is calling a helper that
	 * adjusts packet size
	 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_MOV32_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_head),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "R6 invalid mem access 'inv'",
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: ptr null check in subprog",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
	.fixup_map_hash_48b = { 3 },
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = 0,
},
{
	"calls: two calls with args",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, len)),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = TEST_DATA_LEN + TEST_DATA_LEN,
},
{
	"calls: calls with stack arith",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 42,
},
{
	"calls: calls with misaligned stack access",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	.errstr = "misaligned stack access",
	.result = REJECT,
},
{
	"calls: calls control flow, jump test",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 43),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, -3),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 43,
},
{
	"calls: calls control flow, jump test 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 43),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "jump out of range from insn 1 to 4",
	.result = REJECT,
},
{
	"calls: two calls with bad jump",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, len)),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "jump out of range from insn 11 to 9",
	.result = REJECT,
},
{
	"calls: recursive call. test1",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "back-edge",
	.result = REJECT,
},
{
	"calls: recursive call. test2",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "back-edge",
	.result = REJECT,
},
{
	"calls: unreachable code",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "unreachable insn 6",
	.result = REJECT,
},
{
	"calls: invalid call",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "invalid destination",
	.result = REJECT,
},
{
	"calls: invalid call 2",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "invalid destination",
	.result = REJECT,
},
{
	"calls: jumping across function bodies. test1",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "jump out of range",
	.result = REJECT,
},
{
	"calls: jumping across function bodies. test2",
	.insns = {
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "jump out of range",
	.result = REJECT,
},
{
	"calls: call without exit",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "not an exit",
	.result = REJECT,
},
{
	"calls: call into middle of ld_imm64",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_LD_IMM64(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "last insn",
	.result = REJECT,
},
{
	"calls: call into middle of other call",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "last insn",
	.result = REJECT,
},
{
	"calls: subprog call with ld_abs in main prog",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_LD_ABS(BPF_B, 0),
	BPF_LD_ABS(BPF_H, 0),
	BPF_LD_ABS(BPF_W, 0),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
	BPF_LD_ABS(BPF_B, 0),
	BPF_LD_ABS(BPF_H, 0),
	BPF_LD_ABS(BPF_W, 0),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_2, 1),
	BPF_MOV64_IMM(BPF_REG_3, 2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
},
{
	"calls: two calls with bad fallthrough",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, len)),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "not an exit",
	.result = REJECT,
},
/* --- cross-frame stack access between caller and callee --- */
{
	"calls: two calls with stack read",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
},
{
	"calls: two calls with stack write",
	.insns = {
	/* main prog */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
	/* write into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* read from stack frame of main prog */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
},
/* --- combined stack-size accounting across call frames --- */
{
	"calls: stack overflow using two frames (pre-call access)",
	.insns = {
	/* prog 1 */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* prog 2 */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "combined stack size",
	.result = REJECT,
},
{
	"calls: stack overflow using two frames (post-call access)",
	.insns = {
	/* prog 1 */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_EXIT_INSN(),

	/* prog 2 */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "combined stack size",
	.result = REJECT,
},
{
	"calls: stack depth check using three frames. test1",
	.insns = {
	/* main */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	/* stack_main=32, stack_A=256, stack_B=64
	 * and max(main+A, main+A+B) < 512
	 */
	.result = ACCEPT,
},
{
	"calls: stack depth check using three frames. test2",
	.insns = {
	/* main */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	/* stack_main=32, stack_A=64, stack_B=256
	 * and max(main+A, main+A+B) < 512
	 */
	.result = ACCEPT,
},
{
	"calls: stack depth check using three frames. test3",
	.insns = {
	/* main */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
	BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
	BPF_EXIT_INSN(),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -3),
	/* B */
	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	/* stack_main=64, stack_A=224, stack_B=256
	 * and max(main+A, main+A+B) > 512
	 */
	.errstr = "combined stack",
	.result = REJECT,
},
{
	"calls: stack depth check using three frames. test4",
	/* void main(void) {
	 *	func1(0);
	 *	func1(1);
	 *	func2(1);
	 * }
	 * void func1(int alloc_or_recurse) {
	 *	if (alloc_or_recurse) {
	 *		frame_pointer[-300] = 1;
	 *	} else {
	 *		func2(alloc_or_recurse);
	 *	}
	 * }
	 * void func2(int alloc_or_recurse) {
	 *	if (alloc_or_recurse) {
	 *		frame_pointer[-300] = 1;
	 *	}
	 * }
	 */
	.insns = {
	/* main */
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
	BPF_MOV64_IMM(BPF_REG_1, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
	BPF_MOV64_IMM(BPF_REG_1, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_EXIT_INSN(),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
	BPF_EXIT_INSN(),
	/* B */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = REJECT,
	.errstr = "combined stack",
},
{
	"calls: stack depth check using three frames. test5",
	.insns = {
	/* main */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
	BPF_EXIT_INSN(),
	/* A */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
	BPF_EXIT_INSN(),
	/* C */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
	BPF_EXIT_INSN(),
	/* D */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
	BPF_EXIT_INSN(),
	/* E */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
	BPF_EXIT_INSN(),
	/* F */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
	BPF_EXIT_INSN(),
	/* G */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
	BPF_EXIT_INSN(),
	/* H */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "call stack",
	.result = REJECT,
},
{
	"calls: stack depth check in dead code",
	.insns = {
	/* main */
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
	BPF_EXIT_INSN(),
	/* A */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
	BPF_EXIT_INSN(),
	/* C */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
	BPF_EXIT_INSN(),
	/* D */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
	BPF_EXIT_INSN(),
	/* E */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
	BPF_EXIT_INSN(),
	/* F */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
	BPF_EXIT_INSN(),
	/* G */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
	BPF_EXIT_INSN(),
	/* H */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "call stack",
	.result = REJECT,
},
{
	"calls: spill into caller stack frame",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "cannot spill",
	.result = REJECT,
},
{
	"calls: write into caller stack frame",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_EXIT_INSN(),
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
	.retval = 42,
},
{
	"calls: write into callee stack frame",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "cannot return stack pointer",
	.result = REJECT,
},
{
	"calls: two calls with stack write and void return",
	.insns = {
	/* main prog */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* write into stack frame of main prog */
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
	BPF_EXIT_INSN(), /* void return */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
},
{
	"calls: ambiguous return value",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "allowed for",
	.result_unpriv = REJECT,
	.errstr = "R0 !read_ok",
	.result = REJECT,
},
{
	"calls: two calls that return map_value",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),

	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	/* fetch secound map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.fixup_map_hash_8b = { 23 },
	.result = ACCEPT,
},
{
	"calls: two calls that return map_value with bool condition",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	/* NOTE(review): entry truncated here in this chunk — remainder is
	 * outside the visible source */
1244 /* write into map value */ 1245 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1246 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), 1247 /* second time with fp-16 */ 1248 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 1249 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), 1250 /* fetch secound map_value_ptr from the stack */ 1251 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), 1252 /* write into map value */ 1253 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1254 BPF_EXIT_INSN(), 1255 1256 /* subprog 2 */ 1257 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1258 /* lookup from map */ 1259 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1260 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1261 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1262 BPF_LD_MAP_FD(BPF_REG_1, 0), 1263 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1264 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1265 BPF_MOV64_IMM(BPF_REG_0, 0), 1266 BPF_EXIT_INSN(), /* return 0 */ 1267 /* write map_value_ptr into stack frame of main prog */ 1268 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), 1269 BPF_MOV64_IMM(BPF_REG_0, 1), 1270 BPF_EXIT_INSN(), /* return 1 */ 1271 }, 1272 .prog_type = BPF_PROG_TYPE_XDP, 1273 .fixup_map_hash_8b = { 23 }, 1274 .result = ACCEPT, 1275 }, 1276 { 1277 "calls: two calls that return map_value with incorrect bool check", 1278 .insns = { 1279 /* main prog */ 1280 /* pass fp-16, fp-8 into a function */ 1281 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1282 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1283 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1284 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), 1285 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 1286 BPF_MOV64_IMM(BPF_REG_0, 0), 1287 BPF_EXIT_INSN(), 1288 1289 /* subprog 1 */ 1290 /* call 3rd function twice */ 1291 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1292 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 1293 /* first time with fp-8 */ 1294 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9), 1295 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), 1296 /* fetch map_value_ptr from the stack of this function */ 1297 BPF_LDX_MEM(BPF_DW, 
BPF_REG_0, BPF_REG_6, 0), 1298 /* write into map value */ 1299 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1300 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), 1301 /* second time with fp-16 */ 1302 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 1303 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1304 /* fetch secound map_value_ptr from the stack */ 1305 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), 1306 /* write into map value */ 1307 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1308 BPF_EXIT_INSN(), 1309 1310 /* subprog 2 */ 1311 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1312 /* lookup from map */ 1313 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1314 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1315 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1316 BPF_LD_MAP_FD(BPF_REG_1, 0), 1317 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1318 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1319 BPF_MOV64_IMM(BPF_REG_0, 0), 1320 BPF_EXIT_INSN(), /* return 0 */ 1321 /* write map_value_ptr into stack frame of main prog */ 1322 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), 1323 BPF_MOV64_IMM(BPF_REG_0, 1), 1324 BPF_EXIT_INSN(), /* return 1 */ 1325 }, 1326 .prog_type = BPF_PROG_TYPE_XDP, 1327 .fixup_map_hash_8b = { 23 }, 1328 .result = REJECT, 1329 .errstr = "invalid read from stack R7 off=-16 size=8", 1330 }, 1331 { 1332 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. 
test1", 1333 .insns = { 1334 /* main prog */ 1335 /* pass fp-16, fp-8 into a function */ 1336 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1337 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1338 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1339 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), 1340 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 1341 BPF_MOV64_IMM(BPF_REG_0, 0), 1342 BPF_EXIT_INSN(), 1343 1344 /* subprog 1 */ 1345 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1346 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 1347 /* 1st lookup from map */ 1348 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1349 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1350 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1351 BPF_LD_MAP_FD(BPF_REG_1, 0), 1352 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1353 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1354 BPF_MOV64_IMM(BPF_REG_8, 0), 1355 BPF_JMP_IMM(BPF_JA, 0, 0, 2), 1356 /* write map_value_ptr into stack frame of main prog at fp-8 */ 1357 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), 1358 BPF_MOV64_IMM(BPF_REG_8, 1), 1359 1360 /* 2nd lookup from map */ 1361 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */ 1362 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1363 BPF_LD_MAP_FD(BPF_REG_1, 0), 1364 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */ 1365 BPF_FUNC_map_lookup_elem), 1366 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1367 BPF_MOV64_IMM(BPF_REG_9, 0), 1368 BPF_JMP_IMM(BPF_JA, 0, 0, 2), 1369 /* write map_value_ptr into stack frame of main prog at fp-16 */ 1370 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 1371 BPF_MOV64_IMM(BPF_REG_9, 1), 1372 1373 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */ 1374 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */ 1375 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8), 1376 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), 1377 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9), 1378 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */ 1379 BPF_EXIT_INSN(), 1380 1381 /* subprog 2 */ 1382 /* if arg2 == 1 do *arg1 = 0 */ 1383 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2), 1384 /* fetch map_value_ptr from the 
stack of this function */ 1385 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), 1386 /* write into map value */ 1387 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1388 1389 /* if arg4 == 1 do *arg3 = 0 */ 1390 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2), 1391 /* fetch map_value_ptr from the stack of this function */ 1392 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), 1393 /* write into map value */ 1394 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0), 1395 BPF_EXIT_INSN(), 1396 }, 1397 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1398 .fixup_map_hash_8b = { 12, 22 }, 1399 .result = REJECT, 1400 .errstr = "invalid access to map value, value_size=8 off=2 size=8", 1401 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 1402 }, 1403 { 1404 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2", 1405 .insns = { 1406 /* main prog */ 1407 /* pass fp-16, fp-8 into a function */ 1408 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1409 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1410 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1411 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), 1412 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 1413 BPF_MOV64_IMM(BPF_REG_0, 0), 1414 BPF_EXIT_INSN(), 1415 1416 /* subprog 1 */ 1417 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1418 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 1419 /* 1st lookup from map */ 1420 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1421 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1422 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1423 BPF_LD_MAP_FD(BPF_REG_1, 0), 1424 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1425 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1426 BPF_MOV64_IMM(BPF_REG_8, 0), 1427 BPF_JMP_IMM(BPF_JA, 0, 0, 2), 1428 /* write map_value_ptr into stack frame of main prog at fp-8 */ 1429 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), 1430 BPF_MOV64_IMM(BPF_REG_8, 1), 1431 1432 /* 2nd lookup from map */ 1433 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */ 1434 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1435 BPF_LD_MAP_FD(BPF_REG_1, 0), 1436 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 
0, /* 24 */ 1437 BPF_FUNC_map_lookup_elem), 1438 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1439 BPF_MOV64_IMM(BPF_REG_9, 0), 1440 BPF_JMP_IMM(BPF_JA, 0, 0, 2), 1441 /* write map_value_ptr into stack frame of main prog at fp-16 */ 1442 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 1443 BPF_MOV64_IMM(BPF_REG_9, 1), 1444 1445 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */ 1446 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */ 1447 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8), 1448 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), 1449 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9), 1450 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */ 1451 BPF_EXIT_INSN(), 1452 1453 /* subprog 2 */ 1454 /* if arg2 == 1 do *arg1 = 0 */ 1455 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2), 1456 /* fetch map_value_ptr from the stack of this function */ 1457 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), 1458 /* write into map value */ 1459 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1460 1461 /* if arg4 == 1 do *arg3 = 0 */ 1462 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2), 1463 /* fetch map_value_ptr from the stack of this function */ 1464 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), 1465 /* write into map value */ 1466 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1467 BPF_EXIT_INSN(), 1468 }, 1469 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1470 .fixup_map_hash_8b = { 12, 22 }, 1471 .result = ACCEPT, 1472 }, 1473 { 1474 "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. 
test3", 1475 .insns = { 1476 /* main prog */ 1477 /* pass fp-16, fp-8 into a function */ 1478 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1479 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1480 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1481 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), 1482 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), 1483 BPF_MOV64_IMM(BPF_REG_0, 0), 1484 BPF_EXIT_INSN(), 1485 1486 /* subprog 1 */ 1487 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1488 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 1489 /* 1st lookup from map */ 1490 BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0), 1491 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1492 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24), 1493 BPF_LD_MAP_FD(BPF_REG_1, 0), 1494 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1495 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1496 BPF_MOV64_IMM(BPF_REG_8, 0), 1497 BPF_JMP_IMM(BPF_JA, 0, 0, 2), 1498 /* write map_value_ptr into stack frame of main prog at fp-8 */ 1499 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), 1500 BPF_MOV64_IMM(BPF_REG_8, 1), 1501 1502 /* 2nd lookup from map */ 1503 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1504 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24), 1505 BPF_LD_MAP_FD(BPF_REG_1, 0), 1506 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1507 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1508 BPF_MOV64_IMM(BPF_REG_9, 0), // 26 1509 BPF_JMP_IMM(BPF_JA, 0, 0, 2), 1510 /* write map_value_ptr into stack frame of main prog at fp-16 */ 1511 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 1512 BPF_MOV64_IMM(BPF_REG_9, 1), 1513 1514 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */ 1515 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30 1516 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8), 1517 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), 1518 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9), 1519 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34 1520 BPF_JMP_IMM(BPF_JA, 0, 0, -30), 1521 1522 /* subprog 2 */ 1523 /* if arg2 == 1 do *arg1 = 0 */ 1524 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2), 1525 /* fetch map_value_ptr from the stack of this 
function */ 1526 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), 1527 /* write into map value */ 1528 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1529 1530 /* if arg4 == 1 do *arg3 = 0 */ 1531 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2), 1532 /* fetch map_value_ptr from the stack of this function */ 1533 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), 1534 /* write into map value */ 1535 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0), 1536 BPF_JMP_IMM(BPF_JA, 0, 0, -8), 1537 }, 1538 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1539 .fixup_map_hash_8b = { 12, 22 }, 1540 .result = REJECT, 1541 .errstr = "invalid access to map value, value_size=8 off=2 size=8", 1542 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 1543 }, 1544 { 1545 "calls: two calls that receive map_value_ptr_or_null via arg. test1", 1546 .insns = { 1547 /* main prog */ 1548 /* pass fp-16, fp-8 into a function */ 1549 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1550 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1551 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1552 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), 1553 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 1554 BPF_MOV64_IMM(BPF_REG_0, 0), 1555 BPF_EXIT_INSN(), 1556 1557 /* subprog 1 */ 1558 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1559 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 1560 /* 1st lookup from map */ 1561 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1562 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1563 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1564 BPF_LD_MAP_FD(BPF_REG_1, 0), 1565 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1566 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */ 1567 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), 1568 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1569 BPF_MOV64_IMM(BPF_REG_8, 0), 1570 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 1571 BPF_MOV64_IMM(BPF_REG_8, 1), 1572 1573 /* 2nd lookup from map */ 1574 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1575 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1576 BPF_LD_MAP_FD(BPF_REG_1, 0), 1577 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 
BPF_FUNC_map_lookup_elem), 1578 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */ 1579 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 1580 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1581 BPF_MOV64_IMM(BPF_REG_9, 0), 1582 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 1583 BPF_MOV64_IMM(BPF_REG_9, 1), 1584 1585 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */ 1586 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 1587 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8), 1588 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), 1589 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9), 1590 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 1591 BPF_EXIT_INSN(), 1592 1593 /* subprog 2 */ 1594 /* if arg2 == 1 do *arg1 = 0 */ 1595 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2), 1596 /* fetch map_value_ptr from the stack of this function */ 1597 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), 1598 /* write into map value */ 1599 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1600 1601 /* if arg4 == 1 do *arg3 = 0 */ 1602 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2), 1603 /* fetch map_value_ptr from the stack of this function */ 1604 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), 1605 /* write into map value */ 1606 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1607 BPF_EXIT_INSN(), 1608 }, 1609 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1610 .fixup_map_hash_8b = { 12, 22 }, 1611 .result = ACCEPT, 1612 }, 1613 { 1614 "calls: two calls that receive map_value_ptr_or_null via arg. 
test2", 1615 .insns = { 1616 /* main prog */ 1617 /* pass fp-16, fp-8 into a function */ 1618 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1619 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1620 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1621 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), 1622 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 1623 BPF_MOV64_IMM(BPF_REG_0, 0), 1624 BPF_EXIT_INSN(), 1625 1626 /* subprog 1 */ 1627 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1628 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 1629 /* 1st lookup from map */ 1630 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1631 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1632 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1633 BPF_LD_MAP_FD(BPF_REG_1, 0), 1634 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1635 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */ 1636 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), 1637 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1638 BPF_MOV64_IMM(BPF_REG_8, 0), 1639 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 1640 BPF_MOV64_IMM(BPF_REG_8, 1), 1641 1642 /* 2nd lookup from map */ 1643 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1644 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1645 BPF_LD_MAP_FD(BPF_REG_1, 0), 1646 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1647 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */ 1648 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 1649 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 1650 BPF_MOV64_IMM(BPF_REG_9, 0), 1651 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 1652 BPF_MOV64_IMM(BPF_REG_9, 1), 1653 1654 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */ 1655 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 1656 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8), 1657 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), 1658 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9), 1659 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 1660 BPF_EXIT_INSN(), 1661 1662 /* subprog 2 */ 1663 /* if arg2 == 1 do *arg1 = 0 */ 1664 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2), 1665 /* fetch map_value_ptr from the stack of this function 
*/ 1666 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), 1667 /* write into map value */ 1668 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1669 1670 /* if arg4 == 0 do *arg3 = 0 */ 1671 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2), 1672 /* fetch map_value_ptr from the stack of this function */ 1673 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), 1674 /* write into map value */ 1675 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1676 BPF_EXIT_INSN(), 1677 }, 1678 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1679 .fixup_map_hash_8b = { 12, 22 }, 1680 .result = REJECT, 1681 .errstr = "R0 invalid mem access 'inv'", 1682 }, 1683 { 1684 "calls: pkt_ptr spill into caller stack", 1685 .insns = { 1686 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), 1687 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), 1688 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 1689 BPF_EXIT_INSN(), 1690 1691 /* subprog 1 */ 1692 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 1693 offsetof(struct __sk_buff, data)), 1694 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 1695 offsetof(struct __sk_buff, data_end)), 1696 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 1697 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 1698 /* spill unchecked pkt_ptr into stack of caller */ 1699 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 1700 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), 1701 /* now the pkt range is verified, read pkt_ptr from stack */ 1702 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0), 1703 /* write 4 bytes into packet */ 1704 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), 1705 BPF_EXIT_INSN(), 1706 }, 1707 .result = ACCEPT, 1708 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1709 .retval = POINTER_VALUE, 1710 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 1711 }, 1712 { 1713 "calls: pkt_ptr spill into caller stack 2", 1714 .insns = { 1715 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), 1716 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), 1717 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 1718 /* Marking is still kept, but not in all cases safe. 
*/ 1719 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), 1720 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0), 1721 BPF_EXIT_INSN(), 1722 1723 /* subprog 1 */ 1724 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 1725 offsetof(struct __sk_buff, data)), 1726 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 1727 offsetof(struct __sk_buff, data_end)), 1728 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 1729 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 1730 /* spill unchecked pkt_ptr into stack of caller */ 1731 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 1732 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), 1733 /* now the pkt range is verified, read pkt_ptr from stack */ 1734 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0), 1735 /* write 4 bytes into packet */ 1736 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), 1737 BPF_EXIT_INSN(), 1738 }, 1739 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1740 .errstr = "invalid access to packet", 1741 .result = REJECT, 1742 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 1743 }, 1744 { 1745 "calls: pkt_ptr spill into caller stack 3", 1746 .insns = { 1747 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), 1748 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), 1749 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 1750 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), 1751 /* Marking is still kept and safe here. 
*/ 1752 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), 1753 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0), 1754 BPF_EXIT_INSN(), 1755 1756 /* subprog 1 */ 1757 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 1758 offsetof(struct __sk_buff, data)), 1759 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 1760 offsetof(struct __sk_buff, data_end)), 1761 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 1762 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 1763 /* spill unchecked pkt_ptr into stack of caller */ 1764 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 1765 BPF_MOV64_IMM(BPF_REG_5, 0), 1766 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), 1767 BPF_MOV64_IMM(BPF_REG_5, 1), 1768 /* now the pkt range is verified, read pkt_ptr from stack */ 1769 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0), 1770 /* write 4 bytes into packet */ 1771 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), 1772 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), 1773 BPF_EXIT_INSN(), 1774 }, 1775 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1776 .result = ACCEPT, 1777 .retval = 1, 1778 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 1779 }, 1780 { 1781 "calls: pkt_ptr spill into caller stack 4", 1782 .insns = { 1783 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), 1784 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), 1785 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 1786 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), 1787 /* Check marking propagated. 
*/ 1788 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), 1789 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0), 1790 BPF_EXIT_INSN(), 1791 1792 /* subprog 1 */ 1793 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 1794 offsetof(struct __sk_buff, data)), 1795 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 1796 offsetof(struct __sk_buff, data_end)), 1797 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 1798 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 1799 /* spill unchecked pkt_ptr into stack of caller */ 1800 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 1801 BPF_MOV64_IMM(BPF_REG_5, 0), 1802 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), 1803 BPF_MOV64_IMM(BPF_REG_5, 1), 1804 /* don't read back pkt_ptr from stack here */ 1805 /* write 4 bytes into packet */ 1806 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), 1807 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), 1808 BPF_EXIT_INSN(), 1809 }, 1810 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1811 .result = ACCEPT, 1812 .retval = 1, 1813 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 1814 }, 1815 { 1816 "calls: pkt_ptr spill into caller stack 5", 1817 .insns = { 1818 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), 1819 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), 1820 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0), 1821 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 1822 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), 1823 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), 1824 BPF_EXIT_INSN(), 1825 1826 /* subprog 1 */ 1827 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 1828 offsetof(struct __sk_buff, data)), 1829 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 1830 offsetof(struct __sk_buff, data_end)), 1831 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 1832 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 1833 BPF_MOV64_IMM(BPF_REG_5, 0), 1834 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), 1835 /* spill checked pkt_ptr into stack of caller */ 1836 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 1837 BPF_MOV64_IMM(BPF_REG_5, 1), 1838 /* don't read back pkt_ptr from stack here */ 1839 /* write 4 bytes into packet */ 1840 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 
0), 1841 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), 1842 BPF_EXIT_INSN(), 1843 }, 1844 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1845 .errstr = "same insn cannot be used with different", 1846 .result = REJECT, 1847 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 1848 }, 1849 { 1850 "calls: pkt_ptr spill into caller stack 6", 1851 .insns = { 1852 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 1853 offsetof(struct __sk_buff, data_end)), 1854 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), 1855 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), 1856 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 1857 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 1858 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), 1859 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), 1860 BPF_EXIT_INSN(), 1861 1862 /* subprog 1 */ 1863 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 1864 offsetof(struct __sk_buff, data)), 1865 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 1866 offsetof(struct __sk_buff, data_end)), 1867 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 1868 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 1869 BPF_MOV64_IMM(BPF_REG_5, 0), 1870 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), 1871 /* spill checked pkt_ptr into stack of caller */ 1872 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 1873 BPF_MOV64_IMM(BPF_REG_5, 1), 1874 /* don't read back pkt_ptr from stack here */ 1875 /* write 4 bytes into packet */ 1876 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), 1877 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), 1878 BPF_EXIT_INSN(), 1879 }, 1880 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1881 .errstr = "R4 invalid mem access", 1882 .result = REJECT, 1883 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 1884 }, 1885 { 1886 "calls: pkt_ptr spill into caller stack 7", 1887 .insns = { 1888 BPF_MOV64_IMM(BPF_REG_2, 0), 1889 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), 1890 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), 1891 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 1892 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 1893 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), 1894 BPF_LDX_MEM(BPF_W, BPF_REG_0, 
BPF_REG_4, 0), 1895 BPF_EXIT_INSN(), 1896 1897 /* subprog 1 */ 1898 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 1899 offsetof(struct __sk_buff, data)), 1900 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 1901 offsetof(struct __sk_buff, data_end)), 1902 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 1903 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 1904 BPF_MOV64_IMM(BPF_REG_5, 0), 1905 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), 1906 /* spill checked pkt_ptr into stack of caller */ 1907 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 1908 BPF_MOV64_IMM(BPF_REG_5, 1), 1909 /* don't read back pkt_ptr from stack here */ 1910 /* write 4 bytes into packet */ 1911 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), 1912 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), 1913 BPF_EXIT_INSN(), 1914 }, 1915 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1916 .errstr = "R4 invalid mem access", 1917 .result = REJECT, 1918 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 1919 }, 1920 { 1921 "calls: pkt_ptr spill into caller stack 8", 1922 .insns = { 1923 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 1924 offsetof(struct __sk_buff, data)), 1925 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 1926 offsetof(struct __sk_buff, data_end)), 1927 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 1928 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 1929 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1), 1930 BPF_EXIT_INSN(), 1931 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), 1932 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), 1933 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 1934 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 1935 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), 1936 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), 1937 BPF_EXIT_INSN(), 1938 1939 /* subprog 1 */ 1940 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 1941 offsetof(struct __sk_buff, data)), 1942 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 1943 offsetof(struct __sk_buff, data_end)), 1944 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 1945 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 1946 BPF_MOV64_IMM(BPF_REG_5, 0), 1947 BPF_JMP_REG(BPF_JGT, BPF_REG_0, 
BPF_REG_3, 3), 1948 /* spill checked pkt_ptr into stack of caller */ 1949 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 1950 BPF_MOV64_IMM(BPF_REG_5, 1), 1951 /* don't read back pkt_ptr from stack here */ 1952 /* write 4 bytes into packet */ 1953 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), 1954 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), 1955 BPF_EXIT_INSN(), 1956 }, 1957 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1958 .result = ACCEPT, 1959 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 1960 }, 1961 { 1962 "calls: pkt_ptr spill into caller stack 9", 1963 .insns = { 1964 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 1965 offsetof(struct __sk_buff, data)), 1966 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 1967 offsetof(struct __sk_buff, data_end)), 1968 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 1969 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 1970 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1), 1971 BPF_EXIT_INSN(), 1972 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), 1973 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), 1974 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 1975 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 1976 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), 1977 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), 1978 BPF_EXIT_INSN(), 1979 1980 /* subprog 1 */ 1981 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 1982 offsetof(struct __sk_buff, data)), 1983 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 1984 offsetof(struct __sk_buff, data_end)), 1985 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 1986 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 1987 BPF_MOV64_IMM(BPF_REG_5, 0), 1988 /* spill unchecked pkt_ptr into stack of caller */ 1989 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 1990 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), 1991 BPF_MOV64_IMM(BPF_REG_5, 1), 1992 /* don't read back pkt_ptr from stack here */ 1993 /* write 4 bytes into packet */ 1994 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), 1995 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), 1996 BPF_EXIT_INSN(), 1997 }, 1998 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1999 .errstr = "invalid access to 
packet", 2000 .result = REJECT, 2001 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 2002 }, 2003 { 2004 "calls: caller stack init to zero or map_value_or_null", 2005 .insns = { 2006 BPF_MOV64_IMM(BPF_REG_0, 0), 2007 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 2008 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2009 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 2010 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 2011 /* fetch map_value_or_null or const_zero from stack */ 2012 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), 2013 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 2014 /* store into map_value */ 2015 BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0), 2016 BPF_EXIT_INSN(), 2017 2018 /* subprog 1 */ 2019 /* if (ctx == 0) return; */ 2020 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8), 2021 /* else bpf_map_lookup() and *(fp - 8) = r0 */ 2022 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), 2023 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2024 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 2025 BPF_LD_MAP_FD(BPF_REG_1, 0), 2026 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 2027 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 2028 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */ 2029 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), 2030 BPF_EXIT_INSN(), 2031 }, 2032 .fixup_map_hash_8b = { 13 }, 2033 .result = ACCEPT, 2034 .prog_type = BPF_PROG_TYPE_XDP, 2035 }, 2036 { 2037 "calls: stack init to zero and pruning", 2038 .insns = { 2039 /* first make allocated_stack 16 byte */ 2040 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0), 2041 /* now fork the execution such that the false branch 2042 * of JGT insn will be verified second and it skisp zero 2043 * init of fp-8 stack slot. 
If stack liveness marking 2044 * is missing live_read marks from call map_lookup 2045 * processing then pruning will incorrectly assume 2046 * that fp-8 stack slot was unused in the fall-through 2047 * branch and will accept the program incorrectly 2048 */ 2049 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2), 2050 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 2051 BPF_JMP_IMM(BPF_JA, 0, 0, 0), 2052 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2053 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 2054 BPF_LD_MAP_FD(BPF_REG_1, 0), 2055 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 2056 BPF_EXIT_INSN(), 2057 }, 2058 .fixup_map_hash_48b = { 6 }, 2059 .errstr = "invalid indirect read from stack R2 off -8+0 size 8", 2060 .result = REJECT, 2061 .prog_type = BPF_PROG_TYPE_XDP, 2062 }, 2063 { 2064 "calls: ctx read at start of subprog", 2065 .insns = { 2066 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 2067 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5), 2068 BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0), 2069 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 2070 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 2071 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 2072 BPF_EXIT_INSN(), 2073 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0), 2074 BPF_MOV64_IMM(BPF_REG_0, 0), 2075 BPF_EXIT_INSN(), 2076 }, 2077 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, 2078 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for", 2079 .result_unpriv = REJECT, 2080 .result = ACCEPT, 2081 }, 2082 { 2083 "calls: cross frame pruning", 2084 .insns = { 2085 /* r8 = !!random(); 2086 * call pruner() 2087 * if (r8) 2088 * do something bad; 2089 */ 2090 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), 2091 BPF_MOV64_IMM(BPF_REG_8, 0), 2092 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 2093 BPF_MOV64_IMM(BPF_REG_8, 1), 2094 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), 2095 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 2096 BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1), 2097 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0), 2098 
BPF_MOV64_IMM(BPF_REG_0, 0), 2099 BPF_EXIT_INSN(), 2100 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0), 2101 BPF_EXIT_INSN(), 2102 }, 2103 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, 2104 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for", 2105 .errstr = "!read_ok", 2106 .result = REJECT, 2107 }, 2108 { 2109 "calls: cross frame pruning - liveness propagation", 2110 .insns = { 2111 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), 2112 BPF_MOV64_IMM(BPF_REG_8, 0), 2113 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 2114 BPF_MOV64_IMM(BPF_REG_8, 1), 2115 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), 2116 BPF_MOV64_IMM(BPF_REG_9, 0), 2117 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 2118 BPF_MOV64_IMM(BPF_REG_9, 1), 2119 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 2120 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 2121 BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1), 2122 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0), 2123 BPF_MOV64_IMM(BPF_REG_0, 0), 2124 BPF_EXIT_INSN(), 2125 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0), 2126 BPF_EXIT_INSN(), 2127 }, 2128 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, 2129 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for", 2130 .errstr = "!read_ok", 2131 .result = REJECT, 2132 }, 2133