1 { 2 "calls: invalid kfunc call not eliminated", 3 .insns = { 4 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 5 BPF_MOV64_IMM(BPF_REG_0, 1), 6 BPF_EXIT_INSN(), 7 }, 8 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 9 .result = REJECT, 10 .errstr = "invalid kernel function call not eliminated in verifier pass", 11 }, 12 { 13 "calls: invalid kfunc call unreachable", 14 .insns = { 15 BPF_MOV64_IMM(BPF_REG_0, 1), 16 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 0, 2), 17 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 18 BPF_MOV64_IMM(BPF_REG_0, 1), 19 BPF_EXIT_INSN(), 20 }, 21 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 22 .result = ACCEPT, 23 }, 24 { 25 "calls: invalid kfunc call: ptr_to_mem to struct with non-scalar", 26 .insns = { 27 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 28 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 29 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 30 BPF_EXIT_INSN(), 31 }, 32 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 33 .result = REJECT, 34 .errstr = "arg#0 pointer type STRUCT prog_test_fail1 must point to scalar", 35 .fixup_kfunc_btf_id = { 36 { "bpf_kfunc_call_test_fail1", 2 }, 37 }, 38 }, 39 { 40 "calls: invalid kfunc call: ptr_to_mem to struct with nesting depth > 4", 41 .insns = { 42 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 43 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 44 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 45 BPF_EXIT_INSN(), 46 }, 47 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 48 .result = REJECT, 49 .errstr = "max struct nesting depth exceeded\narg#0 pointer type STRUCT prog_test_fail2", 50 .fixup_kfunc_btf_id = { 51 { "bpf_kfunc_call_test_fail2", 2 }, 52 }, 53 }, 54 { 55 "calls: invalid kfunc call: ptr_to_mem to struct with FAM", 56 .insns = { 57 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 58 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 59 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 60 BPF_EXIT_INSN(), 61 }, 62 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 63 .result = REJECT, 64 .errstr = "arg#0 
pointer type STRUCT prog_test_fail3 must point to scalar", 65 .fixup_kfunc_btf_id = { 66 { "bpf_kfunc_call_test_fail3", 2 }, 67 }, 68 }, 69 { 70 "calls: invalid kfunc call: reg->type != PTR_TO_CTX", 71 .insns = { 72 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 73 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 74 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 75 BPF_EXIT_INSN(), 76 }, 77 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 78 .result = REJECT, 79 .errstr = "arg#0 expected pointer to ctx, but got fp", 80 .fixup_kfunc_btf_id = { 81 { "bpf_kfunc_call_test_pass_ctx", 2 }, 82 }, 83 }, 84 { 85 "calls: invalid kfunc call: void * not allowed in func proto without mem size arg", 86 .insns = { 87 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 88 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 89 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 90 BPF_EXIT_INSN(), 91 }, 92 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 93 .result = REJECT, 94 .errstr = "arg#0 pointer type UNKNOWN must point to scalar", 95 .fixup_kfunc_btf_id = { 96 { "bpf_kfunc_call_test_mem_len_fail1", 2 }, 97 }, 98 }, 99 { 100 "calls: trigger reg2btf_ids[reg->type] for reg->type > __BPF_REG_TYPE_MAX", 101 .insns = { 102 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 103 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 104 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0), 105 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 106 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 107 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 108 BPF_EXIT_INSN(), 109 }, 110 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 111 .result = REJECT, 112 .errstr = "Possibly NULL pointer passed to trusted arg0", 113 .fixup_kfunc_btf_id = { 114 { "bpf_kfunc_call_test_acquire", 3 }, 115 { "bpf_kfunc_call_test_release", 5 }, 116 }, 117 }, 118 { 119 "calls: invalid kfunc call: reg->off must be zero when passed to release kfunc", 120 .insns = { 121 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 122 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 123 BPF_ST_MEM(BPF_DW, BPF_REG_1, 
0, 0), 124 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 125 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 126 BPF_EXIT_INSN(), 127 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 128 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 129 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 130 BPF_MOV64_IMM(BPF_REG_0, 0), 131 BPF_EXIT_INSN(), 132 }, 133 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 134 .result = REJECT, 135 .errstr = "R1 must have zero offset when passed to release func", 136 .fixup_kfunc_btf_id = { 137 { "bpf_kfunc_call_test_acquire", 3 }, 138 { "bpf_kfunc_call_memb_release", 8 }, 139 }, 140 }, 141 { 142 "calls: invalid kfunc call: don't match first member type when passed to release kfunc", 143 .insns = { 144 BPF_MOV64_IMM(BPF_REG_0, 0), 145 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 146 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 147 BPF_EXIT_INSN(), 148 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 149 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 150 BPF_MOV64_IMM(BPF_REG_0, 0), 151 BPF_EXIT_INSN(), 152 }, 153 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 154 .result = REJECT, 155 .errstr = "kernel function bpf_kfunc_call_memb1_release args#0 expected pointer", 156 .fixup_kfunc_btf_id = { 157 { "bpf_kfunc_call_memb_acquire", 1 }, 158 { "bpf_kfunc_call_memb1_release", 5 }, 159 }, 160 }, 161 { 162 "calls: invalid kfunc call: PTR_TO_BTF_ID with negative offset", 163 .insns = { 164 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 165 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 166 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0), 167 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 168 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), 169 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 170 BPF_EXIT_INSN(), 171 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 172 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -4), 173 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 174 BPF_MOV64_IMM(BPF_REG_0, 0), 175 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), 176 BPF_RAW_INSN(BPF_JMP | 
BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 177 BPF_MOV64_IMM(BPF_REG_0, 0), 178 BPF_EXIT_INSN(), 179 }, 180 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 181 .fixup_kfunc_btf_id = { 182 { "bpf_kfunc_call_test_acquire", 3 }, 183 { "bpf_kfunc_call_test_offset", 9 }, 184 { "bpf_kfunc_call_test_release", 12 }, 185 }, 186 .result_unpriv = REJECT, 187 .result = REJECT, 188 .errstr = "ptr R1 off=-4 disallowed", 189 }, 190 { 191 "calls: invalid kfunc call: PTR_TO_BTF_ID with variable offset", 192 .insns = { 193 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 194 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 195 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0), 196 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 197 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 198 BPF_EXIT_INSN(), 199 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 200 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4), 201 BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 3), 202 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 203 BPF_MOV64_IMM(BPF_REG_0, 0), 204 BPF_EXIT_INSN(), 205 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 3), 206 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 207 BPF_MOV64_IMM(BPF_REG_0, 0), 208 BPF_EXIT_INSN(), 209 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), 210 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 211 BPF_MOV64_IMM(BPF_REG_0, 0), 212 BPF_EXIT_INSN(), 213 }, 214 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 215 .fixup_kfunc_btf_id = { 216 { "bpf_kfunc_call_test_acquire", 3 }, 217 { "bpf_kfunc_call_test_release", 9 }, 218 { "bpf_kfunc_call_test_release", 13 }, 219 { "bpf_kfunc_call_test_release", 17 }, 220 }, 221 .result_unpriv = REJECT, 222 .result = REJECT, 223 .errstr = "R1 must have zero offset when passed to release func or trusted arg to kfunc", 224 }, 225 { 226 "calls: invalid kfunc call: referenced arg needs refcounted PTR_TO_BTF_ID", 227 .insns = { 228 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 229 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 230 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0), 231 
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 232 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 233 BPF_EXIT_INSN(), 234 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), 235 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 236 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 237 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 16), 238 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 239 BPF_MOV64_IMM(BPF_REG_0, 0), 240 BPF_EXIT_INSN(), 241 }, 242 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 243 .fixup_kfunc_btf_id = { 244 { "bpf_kfunc_call_test_acquire", 3 }, 245 { "bpf_kfunc_call_test_ref", 8 }, 246 { "bpf_kfunc_call_test_ref", 10 }, 247 }, 248 .result_unpriv = REJECT, 249 .result = REJECT, 250 .errstr = "R1 must be", 251 }, 252 { 253 "calls: valid kfunc call: referenced arg needs refcounted PTR_TO_BTF_ID", 254 .insns = { 255 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 256 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 257 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0), 258 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 259 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 260 BPF_EXIT_INSN(), 261 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), 262 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 263 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 264 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 265 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 266 BPF_MOV64_IMM(BPF_REG_0, 0), 267 BPF_EXIT_INSN(), 268 }, 269 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 270 .fixup_kfunc_btf_id = { 271 { "bpf_kfunc_call_test_acquire", 3 }, 272 { "bpf_kfunc_call_test_ref", 8 }, 273 { "bpf_kfunc_call_test_release", 10 }, 274 }, 275 .result_unpriv = REJECT, 276 .result = ACCEPT, 277 }, 278 { 279 "calls: invalid kfunc call: must provide (attach_prog_fd, btf_id) pair when freplace", 280 .insns = { 281 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), 282 BPF_EXIT_INSN(), 283 }, 284 .prog_type = BPF_PROG_TYPE_EXT, 285 .result = REJECT, 286 .errstr = "Tracing programs must provide 
btf_id", 287 .fixup_kfunc_btf_id = { 288 { "bpf_dynptr_from_skb", 0 }, 289 }, 290 }, 291 { 292 "calls: basic sanity", 293 .insns = { 294 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 295 BPF_MOV64_IMM(BPF_REG_0, 1), 296 BPF_EXIT_INSN(), 297 BPF_MOV64_IMM(BPF_REG_0, 2), 298 BPF_EXIT_INSN(), 299 }, 300 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 301 .result = ACCEPT, 302 }, 303 { 304 "calls: not on unprivileged", 305 .insns = { 306 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 307 BPF_MOV64_IMM(BPF_REG_0, 1), 308 BPF_EXIT_INSN(), 309 BPF_MOV64_IMM(BPF_REG_0, 2), 310 BPF_EXIT_INSN(), 311 }, 312 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for", 313 .result_unpriv = REJECT, 314 .result = ACCEPT, 315 .retval = 1, 316 }, 317 { 318 "calls: div by 0 in subprog", 319 .insns = { 320 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 321 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8), 322 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 323 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 324 offsetof(struct __sk_buff, data_end)), 325 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), 326 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), 327 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), 328 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 329 BPF_MOV64_IMM(BPF_REG_0, 1), 330 BPF_EXIT_INSN(), 331 BPF_MOV32_IMM(BPF_REG_2, 0), 332 BPF_MOV32_IMM(BPF_REG_3, 1), 333 BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2), 334 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 335 offsetof(struct __sk_buff, data)), 336 BPF_EXIT_INSN(), 337 }, 338 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 339 .result = ACCEPT, 340 .retval = 1, 341 }, 342 { 343 "calls: multiple ret types in subprog 1", 344 .insns = { 345 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 346 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8), 347 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 348 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 349 offsetof(struct __sk_buff, data_end)), 350 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), 351 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), 352 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), 
353 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 354 BPF_MOV64_IMM(BPF_REG_0, 1), 355 BPF_EXIT_INSN(), 356 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 357 offsetof(struct __sk_buff, data)), 358 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 359 BPF_MOV32_IMM(BPF_REG_0, 42), 360 BPF_EXIT_INSN(), 361 }, 362 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 363 .result = REJECT, 364 .errstr = "R0 invalid mem access 'scalar'", 365 }, 366 { 367 "calls: multiple ret types in subprog 2", 368 .insns = { 369 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 370 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8), 371 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 372 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 373 offsetof(struct __sk_buff, data_end)), 374 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), 375 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), 376 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), 377 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 378 BPF_MOV64_IMM(BPF_REG_0, 1), 379 BPF_EXIT_INSN(), 380 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 381 offsetof(struct __sk_buff, data)), 382 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 383 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9), 384 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 385 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 386 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 387 BPF_LD_MAP_FD(BPF_REG_1, 0), 388 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 389 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 390 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 391 offsetof(struct __sk_buff, data)), 392 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64), 393 BPF_EXIT_INSN(), 394 }, 395 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 396 .fixup_map_hash_8b = { 16 }, 397 .result = REJECT, 398 .errstr = "R0 min value is outside of the allowed memory range", 399 }, 400 { 401 "calls: overlapping caller/callee", 402 .insns = { 403 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0), 404 BPF_MOV64_IMM(BPF_REG_0, 1), 405 BPF_EXIT_INSN(), 406 }, 407 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 408 .errstr = "last insn is not an exit or jmp", 409 .result = REJECT, 410 
}, 411 { 412 "calls: wrong recursive calls", 413 .insns = { 414 BPF_JMP_IMM(BPF_JA, 0, 0, 4), 415 BPF_JMP_IMM(BPF_JA, 0, 0, 4), 416 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2), 417 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2), 418 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2), 419 BPF_MOV64_IMM(BPF_REG_0, 1), 420 BPF_EXIT_INSN(), 421 }, 422 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 423 .errstr = "jump out of range", 424 .result = REJECT, 425 }, 426 { 427 "calls: wrong src reg", 428 .insns = { 429 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 3, 0, 0), 430 BPF_MOV64_IMM(BPF_REG_0, 1), 431 BPF_EXIT_INSN(), 432 }, 433 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 434 .errstr = "BPF_CALL uses reserved fields", 435 .result = REJECT, 436 }, 437 { 438 "calls: wrong off value", 439 .insns = { 440 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2), 441 BPF_MOV64_IMM(BPF_REG_0, 1), 442 BPF_EXIT_INSN(), 443 BPF_MOV64_IMM(BPF_REG_0, 2), 444 BPF_EXIT_INSN(), 445 }, 446 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 447 .errstr = "BPF_CALL uses reserved fields", 448 .result = REJECT, 449 }, 450 { 451 "calls: jump back loop", 452 .insns = { 453 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1), 454 BPF_MOV64_IMM(BPF_REG_0, 1), 455 BPF_EXIT_INSN(), 456 }, 457 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 458 .errstr = "recursive call", 459 .result = REJECT, 460 }, 461 { 462 "calls: conditional call", 463 .insns = { 464 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 465 offsetof(struct __sk_buff, mark)), 466 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 467 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 468 BPF_MOV64_IMM(BPF_REG_0, 1), 469 BPF_EXIT_INSN(), 470 BPF_MOV64_IMM(BPF_REG_0, 2), 471 BPF_EXIT_INSN(), 472 }, 473 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 474 .errstr = "jump out of range", 475 .result = REJECT, 476 }, 477 { 478 "calls: conditional call 2", 479 .insns = { 480 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 481 offsetof(struct __sk_buff, mark)), 482 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 483 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 
0, 1, 0, 4), 484 BPF_MOV64_IMM(BPF_REG_0, 1), 485 BPF_EXIT_INSN(), 486 BPF_MOV64_IMM(BPF_REG_0, 2), 487 BPF_EXIT_INSN(), 488 BPF_MOV64_IMM(BPF_REG_0, 3), 489 BPF_EXIT_INSN(), 490 }, 491 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 492 .result = ACCEPT, 493 }, 494 { 495 "calls: conditional call 3", 496 .insns = { 497 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 498 offsetof(struct __sk_buff, mark)), 499 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 500 BPF_JMP_IMM(BPF_JA, 0, 0, 4), 501 BPF_MOV64_IMM(BPF_REG_0, 1), 502 BPF_EXIT_INSN(), 503 BPF_MOV64_IMM(BPF_REG_0, 1), 504 BPF_JMP_IMM(BPF_JA, 0, 0, -6), 505 BPF_MOV64_IMM(BPF_REG_0, 3), 506 BPF_JMP_IMM(BPF_JA, 0, 0, -6), 507 }, 508 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, 509 .errstr_unpriv = "back-edge from insn", 510 .result_unpriv = REJECT, 511 .result = ACCEPT, 512 .retval = 1, 513 }, 514 { 515 "calls: conditional call 4", 516 .insns = { 517 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 518 offsetof(struct __sk_buff, mark)), 519 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 520 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 521 BPF_MOV64_IMM(BPF_REG_0, 1), 522 BPF_EXIT_INSN(), 523 BPF_MOV64_IMM(BPF_REG_0, 1), 524 BPF_JMP_IMM(BPF_JA, 0, 0, -5), 525 BPF_MOV64_IMM(BPF_REG_0, 3), 526 BPF_EXIT_INSN(), 527 }, 528 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 529 .result = ACCEPT, 530 }, 531 { 532 "calls: conditional call 5", 533 .insns = { 534 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 535 offsetof(struct __sk_buff, mark)), 536 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 537 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 538 BPF_MOV64_IMM(BPF_REG_0, 1), 539 BPF_EXIT_INSN(), 540 BPF_MOV64_IMM(BPF_REG_0, 1), 541 BPF_JMP_IMM(BPF_JA, 0, 0, -6), 542 BPF_MOV64_IMM(BPF_REG_0, 3), 543 BPF_EXIT_INSN(), 544 }, 545 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 546 .result = ACCEPT, 547 .retval = 1, 548 }, 549 { 550 "calls: conditional call 6", 551 .insns = { 552 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 553 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 554 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 
0, 2), 555 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3), 556 BPF_EXIT_INSN(), 557 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 558 offsetof(struct __sk_buff, mark)), 559 BPF_EXIT_INSN(), 560 }, 561 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 562 .errstr = "infinite loop detected", 563 .result = REJECT, 564 }, 565 { 566 "calls: using r0 returned by callee", 567 .insns = { 568 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 569 BPF_EXIT_INSN(), 570 BPF_MOV64_IMM(BPF_REG_0, 2), 571 BPF_EXIT_INSN(), 572 }, 573 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 574 .result = ACCEPT, 575 }, 576 { 577 "calls: using uninit r0 from callee", 578 .insns = { 579 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 580 BPF_EXIT_INSN(), 581 BPF_EXIT_INSN(), 582 }, 583 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 584 .errstr = "!read_ok", 585 .result = REJECT, 586 }, 587 { 588 "calls: callee is using r1", 589 .insns = { 590 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 591 BPF_EXIT_INSN(), 592 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 593 offsetof(struct __sk_buff, len)), 594 BPF_EXIT_INSN(), 595 }, 596 .prog_type = BPF_PROG_TYPE_SCHED_ACT, 597 .result = ACCEPT, 598 .retval = TEST_DATA_LEN, 599 }, 600 { 601 "calls: callee using args1", 602 .insns = { 603 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 604 BPF_EXIT_INSN(), 605 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), 606 BPF_EXIT_INSN(), 607 }, 608 .errstr_unpriv = "allowed for", 609 .result_unpriv = REJECT, 610 .result = ACCEPT, 611 .retval = POINTER_VALUE, 612 }, 613 { 614 "calls: callee using wrong args2", 615 .insns = { 616 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 617 BPF_EXIT_INSN(), 618 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 619 BPF_EXIT_INSN(), 620 }, 621 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 622 .errstr = "R2 !read_ok", 623 .result = REJECT, 624 }, 625 { 626 "calls: callee using two args", 627 .insns = { 628 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 629 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, 630 offsetof(struct __sk_buff, len)), 631 BPF_LDX_MEM(BPF_W, BPF_REG_2, 
BPF_REG_6, 632 offsetof(struct __sk_buff, len)), 633 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 634 BPF_EXIT_INSN(), 635 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), 636 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), 637 BPF_EXIT_INSN(), 638 }, 639 .errstr_unpriv = "allowed for", 640 .result_unpriv = REJECT, 641 .result = ACCEPT, 642 .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN, 643 }, 644 { 645 "calls: callee changing pkt pointers", 646 .insns = { 647 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct xdp_md, data)), 648 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 649 offsetof(struct xdp_md, data_end)), 650 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6), 651 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8), 652 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2), 653 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 654 /* clear_all_pkt_pointers() has to walk all frames 655 * to make sure that pkt pointers in the caller 656 * are cleared when callee is calling a helper that 657 * adjusts packet size 658 */ 659 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 660 BPF_MOV32_IMM(BPF_REG_0, 0), 661 BPF_EXIT_INSN(), 662 BPF_MOV64_IMM(BPF_REG_2, 0), 663 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_head), 664 BPF_EXIT_INSN(), 665 }, 666 .result = REJECT, 667 .errstr = "R6 invalid mem access 'scalar'", 668 .prog_type = BPF_PROG_TYPE_XDP, 669 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 670 }, 671 { 672 "calls: ptr null check in subprog", 673 .insns = { 674 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 675 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 676 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 677 BPF_LD_MAP_FD(BPF_REG_1, 0), 678 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 679 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 680 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), 681 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 682 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 683 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0), 684 BPF_EXIT_INSN(), 685 BPF_MOV64_IMM(BPF_REG_0, 0), 686 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), 687 
BPF_MOV64_IMM(BPF_REG_0, 1), 688 BPF_EXIT_INSN(), 689 }, 690 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for", 691 .fixup_map_hash_48b = { 3 }, 692 .result_unpriv = REJECT, 693 .result = ACCEPT, 694 .retval = 0, 695 }, 696 { 697 "calls: two calls with args", 698 .insns = { 699 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 700 BPF_EXIT_INSN(), 701 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 702 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6), 703 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), 704 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 705 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 706 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), 707 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), 708 BPF_EXIT_INSN(), 709 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 710 offsetof(struct __sk_buff, len)), 711 BPF_EXIT_INSN(), 712 }, 713 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 714 .result = ACCEPT, 715 .retval = TEST_DATA_LEN + TEST_DATA_LEN, 716 }, 717 { 718 "calls: calls with stack arith", 719 .insns = { 720 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 721 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64), 722 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 723 BPF_EXIT_INSN(), 724 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64), 725 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 726 BPF_EXIT_INSN(), 727 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64), 728 BPF_MOV64_IMM(BPF_REG_0, 42), 729 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), 730 BPF_EXIT_INSN(), 731 }, 732 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 733 .result = ACCEPT, 734 .retval = 42, 735 }, 736 { 737 "calls: calls with misaligned stack access", 738 .insns = { 739 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 740 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63), 741 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 742 BPF_EXIT_INSN(), 743 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61), 744 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 745 BPF_EXIT_INSN(), 746 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63), 747 BPF_MOV64_IMM(BPF_REG_0, 42), 748 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), 749 
BPF_EXIT_INSN(), 750 }, 751 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 752 .flags = F_LOAD_WITH_STRICT_ALIGNMENT, 753 .errstr = "misaligned stack access", 754 .result = REJECT, 755 }, 756 { 757 "calls: calls control flow, jump test", 758 .insns = { 759 BPF_MOV64_IMM(BPF_REG_0, 42), 760 BPF_JMP_IMM(BPF_JA, 0, 0, 2), 761 BPF_MOV64_IMM(BPF_REG_0, 43), 762 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 763 BPF_JMP_IMM(BPF_JA, 0, 0, -3), 764 BPF_EXIT_INSN(), 765 }, 766 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 767 .result = ACCEPT, 768 .retval = 43, 769 }, 770 { 771 "calls: calls control flow, jump test 2", 772 .insns = { 773 BPF_MOV64_IMM(BPF_REG_0, 42), 774 BPF_JMP_IMM(BPF_JA, 0, 0, 2), 775 BPF_MOV64_IMM(BPF_REG_0, 43), 776 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 777 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3), 778 BPF_EXIT_INSN(), 779 }, 780 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 781 .errstr = "jump out of range from insn 1 to 4", 782 .result = REJECT, 783 }, 784 { 785 "calls: two calls with bad jump", 786 .insns = { 787 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 788 BPF_EXIT_INSN(), 789 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 790 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6), 791 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), 792 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 793 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 794 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), 795 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), 796 BPF_EXIT_INSN(), 797 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 798 offsetof(struct __sk_buff, len)), 799 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3), 800 BPF_EXIT_INSN(), 801 }, 802 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 803 .errstr = "jump out of range from insn 11 to 9", 804 .result = REJECT, 805 }, 806 { 807 "calls: recursive call. 
test1", 808 .insns = { 809 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 810 BPF_EXIT_INSN(), 811 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1), 812 BPF_EXIT_INSN(), 813 }, 814 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 815 .errstr = "recursive call", 816 .result = REJECT, 817 }, 818 { 819 "calls: recursive call. test2", 820 .insns = { 821 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 822 BPF_EXIT_INSN(), 823 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3), 824 BPF_EXIT_INSN(), 825 }, 826 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 827 .errstr = "recursive call", 828 .result = REJECT, 829 }, 830 { 831 "calls: unreachable code", 832 .insns = { 833 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 834 BPF_EXIT_INSN(), 835 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 836 BPF_EXIT_INSN(), 837 BPF_MOV64_IMM(BPF_REG_0, 0), 838 BPF_EXIT_INSN(), 839 BPF_MOV64_IMM(BPF_REG_0, 0), 840 BPF_EXIT_INSN(), 841 }, 842 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 843 .errstr = "unreachable insn 6", 844 .result = REJECT, 845 }, 846 { 847 "calls: invalid call", 848 .insns = { 849 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 850 BPF_EXIT_INSN(), 851 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4), 852 BPF_EXIT_INSN(), 853 }, 854 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 855 .errstr = "invalid destination", 856 .result = REJECT, 857 }, 858 { 859 "calls: invalid call 2", 860 .insns = { 861 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 862 BPF_EXIT_INSN(), 863 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff), 864 BPF_EXIT_INSN(), 865 }, 866 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 867 .errstr = "invalid destination", 868 .result = REJECT, 869 }, 870 { 871 "calls: jumping across function bodies. 
test1", 872 .insns = { 873 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 874 BPF_MOV64_IMM(BPF_REG_0, 0), 875 BPF_EXIT_INSN(), 876 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3), 877 BPF_EXIT_INSN(), 878 }, 879 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 880 .errstr = "jump out of range", 881 .result = REJECT, 882 }, 883 { 884 "calls: jumping across function bodies. test2", 885 .insns = { 886 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), 887 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 888 BPF_MOV64_IMM(BPF_REG_0, 0), 889 BPF_EXIT_INSN(), 890 BPF_EXIT_INSN(), 891 }, 892 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 893 .errstr = "jump out of range", 894 .result = REJECT, 895 }, 896 { 897 "calls: call without exit", 898 .insns = { 899 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 900 BPF_EXIT_INSN(), 901 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 902 BPF_EXIT_INSN(), 903 BPF_MOV64_IMM(BPF_REG_0, 0), 904 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2), 905 }, 906 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 907 .errstr = "not an exit", 908 .result = REJECT, 909 }, 910 { 911 "calls: call into middle of ld_imm64", 912 .insns = { 913 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 914 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 915 BPF_MOV64_IMM(BPF_REG_0, 0), 916 BPF_EXIT_INSN(), 917 BPF_LD_IMM64(BPF_REG_0, 0), 918 BPF_EXIT_INSN(), 919 }, 920 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 921 .errstr = "last insn", 922 .result = REJECT, 923 }, 924 { 925 "calls: call into middle of other call", 926 .insns = { 927 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 928 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 929 BPF_MOV64_IMM(BPF_REG_0, 0), 930 BPF_EXIT_INSN(), 931 BPF_MOV64_IMM(BPF_REG_0, 0), 932 BPF_MOV64_IMM(BPF_REG_0, 0), 933 BPF_EXIT_INSN(), 934 }, 935 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 936 .errstr = "last insn", 937 .result = REJECT, 938 }, 939 { 940 "calls: subprog call with ld_abs in main prog", 941 .insns = { 942 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 943 BPF_LD_ABS(BPF_B, 0), 944 BPF_LD_ABS(BPF_H, 0), 
945 BPF_LD_ABS(BPF_W, 0), 946 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6), 947 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 948 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5), 949 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), 950 BPF_LD_ABS(BPF_B, 0), 951 BPF_LD_ABS(BPF_H, 0), 952 BPF_LD_ABS(BPF_W, 0), 953 BPF_EXIT_INSN(), 954 BPF_MOV64_IMM(BPF_REG_2, 1), 955 BPF_MOV64_IMM(BPF_REG_3, 2), 956 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push), 957 BPF_EXIT_INSN(), 958 }, 959 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 960 .result = ACCEPT, 961 }, 962 { 963 "calls: two calls with bad fallthrough", 964 .insns = { 965 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 966 BPF_EXIT_INSN(), 967 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 968 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6), 969 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), 970 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 971 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 972 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), 973 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), 974 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0), 975 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 976 offsetof(struct __sk_buff, len)), 977 BPF_EXIT_INSN(), 978 }, 979 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 980 .errstr = "not an exit", 981 .result = REJECT, 982 }, 983 { 984 "calls: two calls with stack read", 985 .insns = { 986 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 987 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 988 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 989 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 990 BPF_EXIT_INSN(), 991 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 992 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6), 993 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), 994 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 995 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 996 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), 997 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), 998 BPF_EXIT_INSN(), 999 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), 1000 BPF_EXIT_INSN(), 1001 }, 1002 .prog_type = BPF_PROG_TYPE_XDP, 1003 .result = ACCEPT, 1004 }, 1005 { 1006 "calls: 
two calls with stack write", 1007 .insns = { 1008 /* main prog */ 1009 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1010 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1012 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1013 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), 1014 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 1015 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16), 1016 BPF_EXIT_INSN(), 1017 1018 /* subprog 1 */ 1019 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1020 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 1021 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7), 1022 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), 1023 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 1024 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 1025 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0), 1026 BPF_MOV64_REG(BPF_REG_0, BPF_REG_8), 1027 /* write into stack frame of main prog */ 1028 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 1029 BPF_EXIT_INSN(), 1030 1031 /* subprog 2 */ 1032 /* read from stack frame of main prog */ 1033 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), 1034 BPF_EXIT_INSN(), 1035 }, 1036 .prog_type = BPF_PROG_TYPE_XDP, 1037 .result = ACCEPT, 1038 }, 1039 { 1040 "calls: stack overflow using two frames (pre-call access)", 1041 .insns = { 1042 /* prog 1 */ 1043 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), 1044 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), 1045 BPF_EXIT_INSN(), 1046 1047 /* prog 2 */ 1048 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), 1049 BPF_MOV64_IMM(BPF_REG_0, 0), 1050 BPF_EXIT_INSN(), 1051 }, 1052 .prog_type = BPF_PROG_TYPE_XDP, 1053 .errstr = "combined stack size", 1054 .result = REJECT, 1055 }, 1056 { 1057 "calls: stack overflow using two frames (post-call access)", 1058 .insns = { 1059 /* prog 1 */ 1060 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), 1061 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), 1062 BPF_EXIT_INSN(), 1063 1064 /* prog 2 */ 1065 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), 1066 BPF_MOV64_IMM(BPF_REG_0, 0), 1067 BPF_EXIT_INSN(), 1068 }, 1069 .prog_type = BPF_PROG_TYPE_XDP, 1070 
	.errstr = "combined stack size",
	.result = REJECT,
},
{
	/* Accepted: deepest combined usage stays under the 512-byte limit
	 * (see the stack size comment below).
	 */
	"calls: stack depth check using three frames. test1",
	.insns = {
	/* main */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	/* stack_main=32, stack_A=256, stack_B=64
	 * and max(main+A, main+A+B) < 512
	 */
	.result = ACCEPT,
},
{
	/* Same shape as test1 with A and B stack sizes swapped; still
	 * under the limit, so accepted.
	 */
	"calls: stack depth check using three frames. test2",
	.insns = {
	/* main */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	/* stack_main=32, stack_A=64, stack_B=256
	 * and max(main+A, main+A+B) < 512
	 */
	.result = ACCEPT,
},
{
	/* Deepest chain main -> B -> A exceeds 512 bytes of combined
	 * stack; expect REJECT.
	 */
	"calls: stack depth check using three frames. test3",
	.insns = {
	/* main */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
	BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
	BPF_EXIT_INSN(),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -3),
	/* B */
	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	/* stack_main=64, stack_A=224, stack_B=256
	 * and max(main+A, main+A+B) > 512
	 */
	.errstr = "combined stack",
	.result = REJECT,
},
{
	"calls: stack depth check using three frames. test4",
	/* void main(void) {
	 *	func1(0);
	 *	func1(1);
	 *	func2(1);
	 * }
	 * void func1(int alloc_or_recurse) {
	 *	if (alloc_or_recurse) {
	 *		frame_pointer[-300] = 1;
	 *	} else {
	 *		func2(alloc_or_recurse);
	 *	}
	 * }
	 * void func2(int alloc_or_recurse) {
	 *	if (alloc_or_recurse) {
	 *		frame_pointer[-300] = 1;
	 *	}
	 * }
	 */
	.insns = {
	/* main */
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
	BPF_MOV64_IMM(BPF_REG_1, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
	BPF_MOV64_IMM(BPF_REG_1, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_EXIT_INSN(),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
	BPF_EXIT_INSN(),
	/* B */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = REJECT,
	.errstr = "combined stack",
},
{
	/* Call chain of depth 9 (main -> A -> ... -> H) exceeds the
	 * verifier's frame-depth limit; expect REJECT ("call stack").
	 */
	"calls: stack depth check using three frames. test5",
	.insns = {
	/* main */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
	BPF_EXIT_INSN(),
	/* A */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
	BPF_EXIT_INSN(),
	/* C */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
	BPF_EXIT_INSN(),
	/* D */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
	BPF_EXIT_INSN(),
	/* E */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
	BPF_EXIT_INSN(),
	/* F */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
	BPF_EXIT_INSN(),
	/* G */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
	BPF_EXIT_INSN(),
	/* H */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "call stack",
	.result = REJECT,
},
{
	/* The deep chain below A is only reachable through a branch on
	 * R1 == 0; depth is checked even for that (dead-at-runtime) path.
	 */
	"calls: stack depth check in dead code",
	.insns = {
	/* main */
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
	BPF_EXIT_INSN(),
	/* A */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
	BPF_EXIT_INSN(),
	/* C */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
	BPF_EXIT_INSN(),
	/* D */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
	BPF_EXIT_INSN(),
	/* E */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
	BPF_EXIT_INSN(),
	/* F */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
	BPF_EXIT_INSN(),
	/* G */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
	BPF_EXIT_INSN(),
	/* H */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "call stack",
	.result = REJECT,
},
{
	/* Callee stores a stack pointer through a pointer into the
	 * caller's frame; spilling pointers there is rejected.
	 */
	"calls: spill into caller stack frame",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "cannot spill",
	.result = REJECT,
},
{
	/* Callee writes a scalar (42) through a caller-frame pointer;
	 * this is allowed and the caller reads it back as the retval.
	 */
	"calls: write into caller stack frame",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_EXIT_INSN(),
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
	.retval = 42,
},
{
	/* Callee returns a pointer into its own (dead after return)
	 * stack frame; the caller must not be allowed to use it.
	 */
	"calls: write into callee stack frame",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "cannot return stack pointer",
	.result = REJECT,
},
{
	/* Leaf subprog writes into the main frame and "returns void"
	 * (exits without setting R0 for the caller's benefit); accepted.
	 */
	"calls: two calls with stack write and void return",
	.insns = {
	/* main prog */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* write into stack frame of main prog */
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
	BPF_EXIT_INSN(), /* void return */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
},
{
	/* The subprog exits without defining R0 on one path, then the
	 * caller branches on R0; expect "R0 !read_ok".
	 */
	"calls: ambiguous return value",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "allowed for",
	.result_unpriv = REJECT,
	.errstr = "R0 !read_ok",
	.result = REJECT,
},
{
	/* Subprogs store map_lookup results through caller-frame
	 * pointers; main NULL-checks each before writing.  Accepted.
	 */
	"calls: two calls that return map_value",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),

	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	/* fetch second map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.fixup_map_hash_8b = { 23 },
	.result = ACCEPT,
},
{
	/* Subprog 2 returns 1 only when the lookup succeeded; subprog 1
	 * dereferences the stored pointer only when retval == 1, so the
	 * flag correctly guards the NULL case.  Accepted.
	 */
	"calls: two calls that return map_value with bool condition",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch second map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(), /* return 1 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.fixup_map_hash_8b = { 23 },
	.result = ACCEPT,
},
{
	/* Like the test above, but the second check tests retval != 0
	 * instead of != 1, so the stored pointer may be read on the
	 * failed-lookup path where the stack slot was never written.
	 */
	"calls: two calls that return map_value with incorrect bool check",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	/* fetch second map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(), /* return 1 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.fixup_map_hash_8b = { 23 },
	.result = REJECT,
	.errstr = "R0 invalid mem access 'scalar'",
	.result_unpriv = REJECT,
	.errstr_unpriv = "invalid read from stack R7 off=-16 size=8",
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* Subprog 2 receives caller stack slots plus valid-flags; the
	 * second map-value write is at offset 2 with size 8, overrunning
	 * the 8-byte value — expect the "invalid access" rejection.
	 */
	"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
		     BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=8 off=2 size=8",
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* Same as test1 but both map-value writes are at offset 0, so
	 * everything stays in bounds; accepted.
	 */
	"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
		     BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = ACCEPT,
},
{
	/* Same data flow as test1 built from plain jumps instead of
	 * calls; the off=2 size=8 write into an 8-byte value must still
	 * be caught.
	 */
	"calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0), // 26
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
	BPF_JMP_IMM(BPF_JA, 0, 0, -30),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -8),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=8 off=2 size=8",
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* Possibly-NULL lookup results are stored to the caller's frame
	 * before the NULL check; the valid-flags guard the dereferences
	 * in subprog 2 consistently, so this is accepted.
	 */
	"calls: two calls that receive map_value_ptr_or_null via arg. test1",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = ACCEPT,
},
{
	/* Like test1, but subprog 2 dereferences arg3 when arg4 == 0,
	 * i.e. exactly when the second lookup returned NULL; the stored
	 * value is then a scalar/NULL, hence the rejection.
	 */
	"calls: two calls that receive map_value_ptr_or_null via arg. test2",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 0 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "R0 invalid mem access 'scalar'",
},
{
	/* Callee spills an unchecked pkt_ptr into the caller's frame,
	 * then reloads and uses it after the range check in the same
	 * frame; accepted.
	 */
	"calls: pkt_ptr spill into caller stack",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.retval = POINTER_VALUE,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* The caller uses the spilled pkt_ptr after the call returns,
	 * where the callee's range check no longer guards it; rejected.
	 */
	"calls: pkt_ptr spill into caller stack 2",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	/* Marking is still kept, but not in all cases safe. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "invalid access to packet",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* The callee returns 1 only on the range-checked path and the
	 * caller dereferences the spilled pkt_ptr only then; accepted.
	 */
	"calls: pkt_ptr spill into caller stack 3",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	/* Marking is still kept and safe here. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* Variant of test 3: the callee writes the packet directly on
	 * its checked path instead of reloading from the stack, and the
	 * caller reads the spill only when retval != 0; accepted.
	 */
	"calls: pkt_ptr spill into caller stack 4",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	/* Check marking propagated. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: pkt_ptr spill into caller stack 5",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	/* spill checked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 
0), 2036 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), 2037 BPF_EXIT_INSN(), 2038 }, 2039 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2040 .errstr = "same insn cannot be used with different", 2041 .result = REJECT, 2042 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 2043 }, 2044 { 2045 "calls: pkt_ptr spill into caller stack 6", 2046 .insns = { 2047 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2048 offsetof(struct __sk_buff, data_end)), 2049 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), 2050 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), 2051 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 2052 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 2053 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), 2054 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), 2055 BPF_EXIT_INSN(), 2056 2057 /* subprog 1 */ 2058 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2059 offsetof(struct __sk_buff, data)), 2060 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2061 offsetof(struct __sk_buff, data_end)), 2062 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2063 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2064 BPF_MOV64_IMM(BPF_REG_5, 0), 2065 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), 2066 /* spill checked pkt_ptr into stack of caller */ 2067 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 2068 BPF_MOV64_IMM(BPF_REG_5, 1), 2069 /* don't read back pkt_ptr from stack here */ 2070 /* write 4 bytes into packet */ 2071 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), 2072 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), 2073 BPF_EXIT_INSN(), 2074 }, 2075 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2076 .errstr = "R4 invalid mem access", 2077 .result = REJECT, 2078 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 2079 }, 2080 { 2081 "calls: pkt_ptr spill into caller stack 7", 2082 .insns = { 2083 BPF_MOV64_IMM(BPF_REG_2, 0), 2084 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), 2085 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), 2086 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 2087 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 2088 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), 2089 BPF_LDX_MEM(BPF_W, BPF_REG_0, 
BPF_REG_4, 0), 2090 BPF_EXIT_INSN(), 2091 2092 /* subprog 1 */ 2093 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2094 offsetof(struct __sk_buff, data)), 2095 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2096 offsetof(struct __sk_buff, data_end)), 2097 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2098 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2099 BPF_MOV64_IMM(BPF_REG_5, 0), 2100 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), 2101 /* spill checked pkt_ptr into stack of caller */ 2102 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 2103 BPF_MOV64_IMM(BPF_REG_5, 1), 2104 /* don't read back pkt_ptr from stack here */ 2105 /* write 4 bytes into packet */ 2106 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), 2107 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), 2108 BPF_EXIT_INSN(), 2109 }, 2110 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2111 .errstr = "R4 invalid mem access", 2112 .result = REJECT, 2113 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 2114 }, 2115 { 2116 "calls: pkt_ptr spill into caller stack 8", 2117 .insns = { 2118 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2119 offsetof(struct __sk_buff, data)), 2120 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2121 offsetof(struct __sk_buff, data_end)), 2122 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2123 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2124 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1), 2125 BPF_EXIT_INSN(), 2126 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), 2127 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), 2128 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 2129 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 2130 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), 2131 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), 2132 BPF_EXIT_INSN(), 2133 2134 /* subprog 1 */ 2135 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2136 offsetof(struct __sk_buff, data)), 2137 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2138 offsetof(struct __sk_buff, data_end)), 2139 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2140 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2141 BPF_MOV64_IMM(BPF_REG_5, 0), 2142 BPF_JMP_REG(BPF_JGT, BPF_REG_0, 
BPF_REG_3, 3), 2143 /* spill checked pkt_ptr into stack of caller */ 2144 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 2145 BPF_MOV64_IMM(BPF_REG_5, 1), 2146 /* don't read back pkt_ptr from stack here */ 2147 /* write 4 bytes into packet */ 2148 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), 2149 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), 2150 BPF_EXIT_INSN(), 2151 }, 2152 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2153 .result = ACCEPT, 2154 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 2155 }, 2156 { 2157 "calls: pkt_ptr spill into caller stack 9", 2158 .insns = { 2159 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2160 offsetof(struct __sk_buff, data)), 2161 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2162 offsetof(struct __sk_buff, data_end)), 2163 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2164 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2165 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1), 2166 BPF_EXIT_INSN(), 2167 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), 2168 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), 2169 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 2170 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 2171 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), 2172 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), 2173 BPF_EXIT_INSN(), 2174 2175 /* subprog 1 */ 2176 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2177 offsetof(struct __sk_buff, data)), 2178 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2179 offsetof(struct __sk_buff, data_end)), 2180 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2181 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2182 BPF_MOV64_IMM(BPF_REG_5, 0), 2183 /* spill unchecked pkt_ptr into stack of caller */ 2184 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 2185 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), 2186 BPF_MOV64_IMM(BPF_REG_5, 1), 2187 /* don't read back pkt_ptr from stack here */ 2188 /* write 4 bytes into packet */ 2189 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), 2190 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), 2191 BPF_EXIT_INSN(), 2192 }, 2193 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2194 .errstr = "invalid access to 
packet", 2195 .result = REJECT, 2196 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 2197 }, 2198 { 2199 "calls: caller stack init to zero or map_value_or_null", 2200 .insns = { 2201 BPF_MOV64_IMM(BPF_REG_0, 0), 2202 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 2203 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2204 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 2205 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 2206 /* fetch map_value_or_null or const_zero from stack */ 2207 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), 2208 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 2209 /* store into map_value */ 2210 BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0), 2211 BPF_EXIT_INSN(), 2212 2213 /* subprog 1 */ 2214 /* if (ctx == 0) return; */ 2215 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8), 2216 /* else bpf_map_lookup() and *(fp - 8) = r0 */ 2217 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), 2218 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2219 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 2220 BPF_LD_MAP_FD(BPF_REG_1, 0), 2221 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 2222 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 2223 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */ 2224 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), 2225 BPF_EXIT_INSN(), 2226 }, 2227 .fixup_map_hash_8b = { 13 }, 2228 .result = ACCEPT, 2229 .prog_type = BPF_PROG_TYPE_XDP, 2230 }, 2231 { 2232 "calls: stack init to zero and pruning", 2233 .insns = { 2234 /* first make allocated_stack 16 byte */ 2235 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0), 2236 /* now fork the execution such that the false branch 2237 * of JGT insn will be verified second and it skisp zero 2238 * init of fp-8 stack slot. 
If stack liveness marking 2239 * is missing live_read marks from call map_lookup 2240 * processing then pruning will incorrectly assume 2241 * that fp-8 stack slot was unused in the fall-through 2242 * branch and will accept the program incorrectly 2243 */ 2244 BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), 2245 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 2, 2), 2246 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 2247 BPF_JMP_IMM(BPF_JA, 0, 0, 0), 2248 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2249 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 2250 BPF_LD_MAP_FD(BPF_REG_1, 0), 2251 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 2252 BPF_MOV64_IMM(BPF_REG_0, 0), 2253 BPF_EXIT_INSN(), 2254 }, 2255 .fixup_map_hash_48b = { 7 }, 2256 .errstr_unpriv = "invalid read from stack R2 off -8+0 size 8", 2257 .result_unpriv = REJECT, 2258 /* in privileged mode reads from uninitialized stack locations are permitted */ 2259 .result = ACCEPT, 2260 }, 2261 { 2262 "calls: ctx read at start of subprog", 2263 .insns = { 2264 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 2265 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5), 2266 BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0), 2267 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 2268 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 2269 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 2270 BPF_EXIT_INSN(), 2271 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0), 2272 BPF_MOV64_IMM(BPF_REG_0, 0), 2273 BPF_EXIT_INSN(), 2274 }, 2275 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, 2276 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for", 2277 .result_unpriv = REJECT, 2278 .result = ACCEPT, 2279 }, 2280 { 2281 "calls: cross frame pruning", 2282 .insns = { 2283 /* r8 = !!random(); 2284 * call pruner() 2285 * if (r8) 2286 * do something bad; 2287 */ 2288 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), 2289 BPF_MOV64_IMM(BPF_REG_8, 0), 2290 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 2291 BPF_MOV64_IMM(BPF_REG_8, 1), 2292 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), 2293 
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 2294 BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1), 2295 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0), 2296 BPF_MOV64_IMM(BPF_REG_0, 0), 2297 BPF_EXIT_INSN(), 2298 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0), 2299 BPF_EXIT_INSN(), 2300 }, 2301 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, 2302 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for", 2303 .errstr = "!read_ok", 2304 .result = REJECT, 2305 }, 2306 { 2307 "calls: cross frame pruning - liveness propagation", 2308 .insns = { 2309 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), 2310 BPF_MOV64_IMM(BPF_REG_8, 0), 2311 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 2312 BPF_MOV64_IMM(BPF_REG_8, 1), 2313 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), 2314 BPF_MOV64_IMM(BPF_REG_9, 0), 2315 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 2316 BPF_MOV64_IMM(BPF_REG_9, 1), 2317 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 2318 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 2319 BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1), 2320 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0), 2321 BPF_MOV64_IMM(BPF_REG_0, 0), 2322 BPF_EXIT_INSN(), 2323 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0), 2324 BPF_EXIT_INSN(), 2325 }, 2326 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, 2327 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for", 2328 .errstr = "!read_ok", 2329 .result = REJECT, 2330 }, 2331 /* Make sure that verifier.c:states_equal() considers IDs from all 2332 * frames when building 'idmap' for check_ids(). 2333 */ 2334 { 2335 "calls: check_ids() across call boundary", 2336 .insns = { 2337 /* Function main() */ 2338 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 2339 /* fp[-24] = map_lookup_elem(...) 
; get a MAP_VALUE_PTR_OR_NULL with some ID */ 2340 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2341 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 2342 BPF_LD_MAP_FD(BPF_REG_1, 2343 0), 2344 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 2345 BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -24), 2346 /* fp[-32] = map_lookup_elem(...) ; get a MAP_VALUE_PTR_OR_NULL with some ID */ 2347 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2348 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 2349 BPF_LD_MAP_FD(BPF_REG_1, 2350 0), 2351 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 2352 BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -32), 2353 /* call foo(&fp[-24], &fp[-32]) ; both arguments have IDs in the current 2354 * ; stack frame 2355 */ 2356 BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP), 2357 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -24), 2358 BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP), 2359 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32), 2360 BPF_CALL_REL(2), 2361 /* exit 0 */ 2362 BPF_MOV64_IMM(BPF_REG_0, 0), 2363 BPF_EXIT_INSN(), 2364 /* Function foo() 2365 * 2366 * r9 = &frame[0].fp[-24] ; save arguments in the callee saved registers, 2367 * r8 = &frame[0].fp[-32] ; arguments are pointers to pointers to map value 2368 */ 2369 BPF_MOV64_REG(BPF_REG_9, BPF_REG_1), 2370 BPF_MOV64_REG(BPF_REG_8, BPF_REG_2), 2371 /* r7 = ktime_get_ns() */ 2372 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), 2373 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), 2374 /* r6 = ktime_get_ns() */ 2375 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), 2376 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), 2377 /* if r6 > r7 goto +1 ; no new information about the state is derived from 2378 * ; this check, thus produced verifier states differ 2379 * ; only in 'insn_idx' 2380 * r9 = r8 2381 */ 2382 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1), 2383 BPF_MOV64_REG(BPF_REG_9, BPF_REG_8), 2384 /* r9 = *r9 ; verifier gets to this point via two paths: 2385 * ; (I) one including r9 = r8, verified first; 2386 * ; (II) one excluding r9 = r8, verified next. 2387 * ; After load of *r9 to r9 the frame[0].fp[-24].id == r9.id. 
2388 * ; Suppose that checkpoint is created here via path (I). 2389 * ; When verifying via (II) the r9.id must be compared against 2390 * ; frame[0].fp[-24].id, otherwise (I) and (II) would be 2391 * ; incorrectly deemed equivalent. 2392 * if r9 == 0 goto <exit> 2393 */ 2394 BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_9, 0), 2395 BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 0, 1), 2396 /* r8 = *r8 ; read map value via r8, this is not safe 2397 * r0 = *r8 ; because r8 might be not equal to r9. 2398 */ 2399 BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_8, 0), 2400 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_8, 0), 2401 /* exit 0 */ 2402 BPF_MOV64_IMM(BPF_REG_0, 0), 2403 BPF_EXIT_INSN(), 2404 }, 2405 .flags = BPF_F_TEST_STATE_FREQ, 2406 .fixup_map_hash_8b = { 3, 9 }, 2407 .result = REJECT, 2408 .errstr = "R8 invalid mem access 'map_value_or_null'", 2409 .result_unpriv = REJECT, 2410 .errstr_unpriv = "", 2411 .prog_type = BPF_PROG_TYPE_CGROUP_SKB, 2412 }, 2413 { 2414 "calls: several args with ref_obj_id", 2415 .insns = { 2416 /* Reserve at least sizeof(struct iphdr) bytes in the ring buffer. 2417 * With a smaller size, the verifier would reject the call to 2418 * bpf_tcp_raw_gen_syncookie_ipv4 before we can reach the 2419 * ref_obj_id error. 2420 */ 2421 BPF_MOV64_IMM(BPF_REG_2, 20), 2422 BPF_MOV64_IMM(BPF_REG_3, 0), 2423 BPF_LD_MAP_FD(BPF_REG_1, 0), 2424 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve), 2425 /* if r0 == 0 goto <exit> */ 2426 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 2427 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 2428 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), 2429 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tcp_raw_gen_syncookie_ipv4), 2430 BPF_EXIT_INSN(), 2431 }, 2432 .fixup_map_ringbuf = { 2 }, 2433 .result = REJECT, 2434 .errstr = "more than one arg with ref_obj_id", 2435 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2436 }, 2437