verifier.c (ac94be498f84f7327533b62faca4c3da64434904) verifier.c (fec56f5890d93fc2ed74166c397dc186b1c25951)
1// SPDX-License-Identifier: GPL-2.0-only
2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3 * Copyright (c) 2016 Facebook
4 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
5 */
6#include <uapi/linux/btf.h>
7#include <linux/kernel.h>
8#include <linux/types.h>

--- 191 unchanged lines hidden (view full) ---

200 bool raw_mode;
201 bool pkt_access;
202 int regno;
203 int access_size;
204 s64 msize_smax_value;
205 u64 msize_umax_value;
206 int ref_obj_id;
207 int func_id;
1// SPDX-License-Identifier: GPL-2.0-only
2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3 * Copyright (c) 2016 Facebook
4 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
5 */
6#include <uapi/linux/btf.h>
7#include <linux/kernel.h>
8#include <linux/types.h>

--- 191 unchanged lines hidden (view full) ---

200 bool raw_mode;
201 bool pkt_access;
202 int regno;
203 int access_size;
204 s64 msize_smax_value;
205 u64 msize_umax_value;
206 int ref_obj_id;
207 int func_id;
208 u32 btf_id;
208};
209
209};
210
211struct btf *btf_vmlinux;
212
210static DEFINE_MUTEX(bpf_verifier_lock);
211
212static const struct bpf_line_info *
213find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
214{
215 const struct bpf_line_info *linfo;
216 const struct bpf_prog *prog;
217 u32 i, nr_linfo;

--- 20 unchanged lines hidden (view full) ---

238 n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
239
240 WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
241 "verifier log line truncated - local buffer too short\n");
242
243 n = min(log->len_total - log->len_used - 1, n);
244 log->kbuf[n] = '\0';
245
213static DEFINE_MUTEX(bpf_verifier_lock);
214
215static const struct bpf_line_info *
216find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
217{
218 const struct bpf_line_info *linfo;
219 const struct bpf_prog *prog;
220 u32 i, nr_linfo;

--- 20 unchanged lines hidden (view full) ---

241 n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
242
243 WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
244 "verifier log line truncated - local buffer too short\n");
245
246 n = min(log->len_total - log->len_used - 1, n);
247 log->kbuf[n] = '\0';
248
249 if (log->level == BPF_LOG_KERNEL) {
250 pr_err("BPF:%s\n", log->kbuf);
251 return;
252 }
246 if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
247 log->len_used += n;
248 else
249 log->ubuf = NULL;
250}
251
252/* log_level controls verbosity level of eBPF verifier.
253 * bpf_verifier_log_write() is used to dump the verification trace to the log,

--- 21 unchanged lines hidden (view full) ---

275 if (!bpf_verifier_log_needed(&env->log))
276 return;
277
278 va_start(args, fmt);
279 bpf_verifier_vlog(&env->log, fmt, args);
280 va_end(args);
281}
282
253 if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
254 log->len_used += n;
255 else
256 log->ubuf = NULL;
257}
258
259/* log_level controls verbosity level of eBPF verifier.
260 * bpf_verifier_log_write() is used to dump the verification trace to the log,

--- 21 unchanged lines hidden (view full) ---

282 if (!bpf_verifier_log_needed(&env->log))
283 return;
284
285 va_start(args, fmt);
286 bpf_verifier_vlog(&env->log, fmt, args);
287 va_end(args);
288}
289
290__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
291 const char *fmt, ...)
292{
293 va_list args;
294
295 if (!bpf_verifier_log_needed(log))
296 return;
297
298 va_start(args, fmt);
299 bpf_verifier_vlog(log, fmt, args);
300 va_end(args);
301}
302
283static const char *ltrim(const char *s)
284{
285 while (isspace(*s))
286 s++;
287
288 return s;
289}
290

--- 104 unchanged lines hidden (view full) ---

395 [PTR_TO_SOCKET] = "sock",
396 [PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
397 [PTR_TO_SOCK_COMMON] = "sock_common",
398 [PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null",
399 [PTR_TO_TCP_SOCK] = "tcp_sock",
400 [PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null",
401 [PTR_TO_TP_BUFFER] = "tp_buffer",
402 [PTR_TO_XDP_SOCK] = "xdp_sock",
/* Return a pointer to the first non-whitespace character in @s. */
static const char *ltrim(const char *s)
{
	for (; isspace(*s); s++)
		;

	return s;
}
310

--- 104 unchanged lines hidden (view full) ---

415 [PTR_TO_SOCKET] = "sock",
416 [PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
417 [PTR_TO_SOCK_COMMON] = "sock_common",
418 [PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null",
419 [PTR_TO_TCP_SOCK] = "tcp_sock",
420 [PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null",
421 [PTR_TO_TP_BUFFER] = "tp_buffer",
422 [PTR_TO_XDP_SOCK] = "xdp_sock",
423 [PTR_TO_BTF_ID] = "ptr_",
403};
404
405static char slot_type_char[] = {
406 [STACK_INVALID] = '?',
407 [STACK_SPILL] = 'r',
408 [STACK_MISC] = 'm',
409 [STACK_ZERO] = '0',
410};

--- 14 unchanged lines hidden (view full) ---

425static struct bpf_func_state *func(struct bpf_verifier_env *env,
426 const struct bpf_reg_state *reg)
427{
428 struct bpf_verifier_state *cur = env->cur_state;
429
430 return cur->frame[reg->frameno];
431}
432
424};
425
426static char slot_type_char[] = {
427 [STACK_INVALID] = '?',
428 [STACK_SPILL] = 'r',
429 [STACK_MISC] = 'm',
430 [STACK_ZERO] = '0',
431};

--- 14 unchanged lines hidden (view full) ---

446static struct bpf_func_state *func(struct bpf_verifier_env *env,
447 const struct bpf_reg_state *reg)
448{
449 struct bpf_verifier_state *cur = env->cur_state;
450
451 return cur->frame[reg->frameno];
452}
453
454const char *kernel_type_name(u32 id)
455{
456 return btf_name_by_offset(btf_vmlinux,
457 btf_type_by_id(btf_vmlinux, id)->name_off);
458}
459
433static void print_verifier_state(struct bpf_verifier_env *env,
434 const struct bpf_func_state *state)
435{
436 const struct bpf_reg_state *reg;
437 enum bpf_reg_type t;
438 int i;
439
440 if (state->frameno)

--- 8 unchanged lines hidden (view full) ---

449 verbose(env, "=%s", reg_type_str[t]);
450 if (t == SCALAR_VALUE && reg->precise)
451 verbose(env, "P");
452 if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
453 tnum_is_const(reg->var_off)) {
454 /* reg->off should be 0 for SCALAR_VALUE */
455 verbose(env, "%lld", reg->var_off.value + reg->off);
456 } else {
460static void print_verifier_state(struct bpf_verifier_env *env,
461 const struct bpf_func_state *state)
462{
463 const struct bpf_reg_state *reg;
464 enum bpf_reg_type t;
465 int i;
466
467 if (state->frameno)

--- 8 unchanged lines hidden (view full) ---

476 verbose(env, "=%s", reg_type_str[t]);
477 if (t == SCALAR_VALUE && reg->precise)
478 verbose(env, "P");
479 if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
480 tnum_is_const(reg->var_off)) {
481 /* reg->off should be 0 for SCALAR_VALUE */
482 verbose(env, "%lld", reg->var_off.value + reg->off);
483 } else {
484 if (t == PTR_TO_BTF_ID)
485 verbose(env, "%s", kernel_type_name(reg->btf_id));
457 verbose(env, "(id=%d", reg->id);
458 if (reg_type_may_be_refcounted_or_null(t))
459 verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
460 if (t != SCALAR_VALUE)
461 verbose(env, ",off=%d", reg->off);
462 if (type_is_pkt_pointer(t))
463 verbose(env, ",r=%d", reg->range);
464 else if (t == CONST_PTR_TO_MAP ||

--- 1861 unchanged lines hidden (view full) ---

2326 max_t(u32, env->prog->aux->max_pkt_offset,
2327 off + reg->umax_value + size - 1);
2328
2329 return err;
2330}
2331
2332/* check access to 'struct bpf_context' fields. Supports fixed offsets only */
2333static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
486 verbose(env, "(id=%d", reg->id);
487 if (reg_type_may_be_refcounted_or_null(t))
488 verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
489 if (t != SCALAR_VALUE)
490 verbose(env, ",off=%d", reg->off);
491 if (type_is_pkt_pointer(t))
492 verbose(env, ",r=%d", reg->range);
493 else if (t == CONST_PTR_TO_MAP ||

--- 1861 unchanged lines hidden (view full) ---

2355 max_t(u32, env->prog->aux->max_pkt_offset,
2356 off + reg->umax_value + size - 1);
2357
2358 return err;
2359}
2360
2361/* check access to 'struct bpf_context' fields. Supports fixed offsets only */
2362static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
2334 enum bpf_access_type t, enum bpf_reg_type *reg_type)
2363 enum bpf_access_type t, enum bpf_reg_type *reg_type,
2364 u32 *btf_id)
2335{
2336 struct bpf_insn_access_aux info = {
2337 .reg_type = *reg_type,
2365{
2366 struct bpf_insn_access_aux info = {
2367 .reg_type = *reg_type,
2368 .log = &env->log,
2338 };
2339
2340 if (env->ops->is_valid_access &&
2341 env->ops->is_valid_access(off, size, t, env->prog, &info)) {
2342 /* A non zero info.ctx_field_size indicates that this field is a
2343 * candidate for later verifier transformation to load the whole
2344 * field and then apply a mask when accessed with a narrower
2345 * access than actual ctx access size. A zero info.ctx_field_size
2346 * will only allow for whole field access and rejects any other
2347 * type of narrower access.
2348 */
2349 *reg_type = info.reg_type;
2350
2369 };
2370
2371 if (env->ops->is_valid_access &&
2372 env->ops->is_valid_access(off, size, t, env->prog, &info)) {
2373 /* A non zero info.ctx_field_size indicates that this field is a
2374 * candidate for later verifier transformation to load the whole
2375 * field and then apply a mask when accessed with a narrower
2376 * access than actual ctx access size. A zero info.ctx_field_size
2377 * will only allow for whole field access and rejects any other
2378 * type of narrower access.
2379 */
2380 *reg_type = info.reg_type;
2381
2351 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
2382 if (*reg_type == PTR_TO_BTF_ID)
2383 *btf_id = info.btf_id;
2384 else
2385 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
2352 /* remember the offset of last byte accessed in ctx */
2353 if (env->prog->aux->max_ctx_offset < off + size)
2354 env->prog->aux->max_ctx_offset = off + size;
2355 return 0;
2356 }
2357
2358 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
2359 return -EACCES;

--- 374 unchanged lines hidden (view full) ---

2734 } else {
2735 reg->umin_value = 0;
2736 reg->umax_value = mask;
2737 }
2738 reg->smin_value = reg->umin_value;
2739 reg->smax_value = reg->umax_value;
2740}
2741
2386 /* remember the offset of last byte accessed in ctx */
2387 if (env->prog->aux->max_ctx_offset < off + size)
2388 env->prog->aux->max_ctx_offset = off + size;
2389 return 0;
2390 }
2391
2392 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
2393 return -EACCES;

--- 374 unchanged lines hidden (view full) ---

2768 } else {
2769 reg->umin_value = 0;
2770 reg->umax_value = mask;
2771 }
2772 reg->smin_value = reg->umin_value;
2773 reg->smax_value = reg->umax_value;
2774}
2775
2776static bool bpf_map_is_rdonly(const struct bpf_map *map)
2777{
2778 return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen;
2779}
2780
2781static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
2782{
2783 void *ptr;
2784 u64 addr;
2785 int err;
2786
2787 err = map->ops->map_direct_value_addr(map, &addr, off);
2788 if (err)
2789 return err;
2790 ptr = (void *)(long)addr + off;
2791
2792 switch (size) {
2793 case sizeof(u8):
2794 *val = (u64)*(u8 *)ptr;
2795 break;
2796 case sizeof(u16):
2797 *val = (u64)*(u16 *)ptr;
2798 break;
2799 case sizeof(u32):
2800 *val = (u64)*(u32 *)ptr;
2801 break;
2802 case sizeof(u64):
2803 *val = *(u64 *)ptr;
2804 break;
2805 default:
2806 return -EINVAL;
2807 }
2808 return 0;
2809}
2810
2811static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
2812 struct bpf_reg_state *regs,
2813 int regno, int off, int size,
2814 enum bpf_access_type atype,
2815 int value_regno)
2816{
2817 struct bpf_reg_state *reg = regs + regno;
2818 const struct btf_type *t = btf_type_by_id(btf_vmlinux, reg->btf_id);
2819 const char *tname = btf_name_by_offset(btf_vmlinux, t->name_off);
2820 u32 btf_id;
2821 int ret;
2822
2823 if (atype != BPF_READ) {
2824 verbose(env, "only read is supported\n");
2825 return -EACCES;
2826 }
2827
2828 if (off < 0) {
2829 verbose(env,
2830 "R%d is ptr_%s invalid negative access: off=%d\n",
2831 regno, tname, off);
2832 return -EACCES;
2833 }
2834 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
2835 char tn_buf[48];
2836
2837 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2838 verbose(env,
2839 "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
2840 regno, tname, off, tn_buf);
2841 return -EACCES;
2842 }
2843
2844 ret = btf_struct_access(&env->log, t, off, size, atype, &btf_id);
2845 if (ret < 0)
2846 return ret;
2847
2848 if (ret == SCALAR_VALUE) {
2849 mark_reg_unknown(env, regs, value_regno);
2850 return 0;
2851 }
2852 mark_reg_known_zero(env, regs, value_regno);
2853 regs[value_regno].type = PTR_TO_BTF_ID;
2854 regs[value_regno].btf_id = btf_id;
2855 return 0;
2856}
2857
2742/* check whether memory at (regno + off) is accessible for t = (read | write)
2743 * if t==write, value_regno is a register which value is stored into memory
2744 * if t==read, value_regno is a register which will receive the value from memory
2745 * if t==write && value_regno==-1, some unknown value is stored into memory
2746 * if t==read && value_regno==-1, don't care what we read from memory
2747 */
2748static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
2749 int off, int bpf_size, enum bpf_access_type t,

--- 21 unchanged lines hidden (view full) ---

2771 is_pointer_value(env, value_regno)) {
2772 verbose(env, "R%d leaks addr into map\n", value_regno);
2773 return -EACCES;
2774 }
2775 err = check_map_access_type(env, regno, off, size, t);
2776 if (err)
2777 return err;
2778 err = check_map_access(env, regno, off, size, false);
2858/* check whether memory at (regno + off) is accessible for t = (read | write)
2859 * if t==write, value_regno is a register which value is stored into memory
2860 * if t==read, value_regno is a register which will receive the value from memory
2861 * if t==write && value_regno==-1, some unknown value is stored into memory
2862 * if t==read && value_regno==-1, don't care what we read from memory
2863 */
2864static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
2865 int off, int bpf_size, enum bpf_access_type t,

--- 21 unchanged lines hidden (view full) ---

2887 is_pointer_value(env, value_regno)) {
2888 verbose(env, "R%d leaks addr into map\n", value_regno);
2889 return -EACCES;
2890 }
2891 err = check_map_access_type(env, regno, off, size, t);
2892 if (err)
2893 return err;
2894 err = check_map_access(env, regno, off, size, false);
2779 if (!err && t == BPF_READ && value_regno >= 0)
2780 mark_reg_unknown(env, regs, value_regno);
2895 if (!err && t == BPF_READ && value_regno >= 0) {
2896 struct bpf_map *map = reg->map_ptr;
2781
2897
2898 /* if map is read-only, track its contents as scalars */
2899 if (tnum_is_const(reg->var_off) &&
2900 bpf_map_is_rdonly(map) &&
2901 map->ops->map_direct_value_addr) {
2902 int map_off = off + reg->var_off.value;
2903 u64 val = 0;
2904
2905 err = bpf_map_direct_read(map, map_off, size,
2906 &val);
2907 if (err)
2908 return err;
2909
2910 regs[value_regno].type = SCALAR_VALUE;
2911 __mark_reg_known(&regs[value_regno], val);
2912 } else {
2913 mark_reg_unknown(env, regs, value_regno);
2914 }
2915 }
2782 } else if (reg->type == PTR_TO_CTX) {
2783 enum bpf_reg_type reg_type = SCALAR_VALUE;
2916 } else if (reg->type == PTR_TO_CTX) {
2917 enum bpf_reg_type reg_type = SCALAR_VALUE;
2918 u32 btf_id = 0;
2784
2785 if (t == BPF_WRITE && value_regno >= 0 &&
2786 is_pointer_value(env, value_regno)) {
2787 verbose(env, "R%d leaks addr into ctx\n", value_regno);
2788 return -EACCES;
2789 }
2790
2791 err = check_ctx_reg(env, reg, regno);
2792 if (err < 0)
2793 return err;
2794
2919
2920 if (t == BPF_WRITE && value_regno >= 0 &&
2921 is_pointer_value(env, value_regno)) {
2922 verbose(env, "R%d leaks addr into ctx\n", value_regno);
2923 return -EACCES;
2924 }
2925
2926 err = check_ctx_reg(env, reg, regno);
2927 if (err < 0)
2928 return err;
2929
2795 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
2930 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf_id);
2931 if (err)
2932 verbose_linfo(env, insn_idx, "; ");
2796 if (!err && t == BPF_READ && value_regno >= 0) {
2797 /* ctx access returns either a scalar, or a
2798 * PTR_TO_PACKET[_META,_END]. In the latter
2799 * case, we know the offset is zero.
2800 */
2801 if (reg_type == SCALAR_VALUE) {
2802 mark_reg_unknown(env, regs, value_regno);
2803 } else {
2804 mark_reg_known_zero(env, regs,
2805 value_regno);
2806 if (reg_type_may_be_null(reg_type))
2807 regs[value_regno].id = ++env->id_gen;
2808 /* A load of ctx field could have different
2809 * actual load size with the one encoded in the
2810 * insn. When the dst is PTR, it is for sure not
2811 * a sub-register.
2812 */
2813 regs[value_regno].subreg_def = DEF_NOT_SUBREG;
2933 if (!err && t == BPF_READ && value_regno >= 0) {
2934 /* ctx access returns either a scalar, or a
2935 * PTR_TO_PACKET[_META,_END]. In the latter
2936 * case, we know the offset is zero.
2937 */
2938 if (reg_type == SCALAR_VALUE) {
2939 mark_reg_unknown(env, regs, value_regno);
2940 } else {
2941 mark_reg_known_zero(env, regs,
2942 value_regno);
2943 if (reg_type_may_be_null(reg_type))
2944 regs[value_regno].id = ++env->id_gen;
2945 /* A load of ctx field could have different
2946 * actual load size with the one encoded in the
2947 * insn. When the dst is PTR, it is for sure not
2948 * a sub-register.
2949 */
2950 regs[value_regno].subreg_def = DEF_NOT_SUBREG;
2951 if (reg_type == PTR_TO_BTF_ID)
2952 regs[value_regno].btf_id = btf_id;
2814 }
2815 regs[value_regno].type = reg_type;
2816 }
2817
2818 } else if (reg->type == PTR_TO_STACK) {
2819 off += reg->var_off.value;
2820 err = check_stack_access(env, reg, off, size);
2821 if (err)

--- 43 unchanged lines hidden (view full) ---

2865 }
2866 err = check_sock_access(env, insn_idx, regno, off, size, t);
2867 if (!err && value_regno >= 0)
2868 mark_reg_unknown(env, regs, value_regno);
2869 } else if (reg->type == PTR_TO_TP_BUFFER) {
2870 err = check_tp_buffer_access(env, reg, regno, off, size);
2871 if (!err && t == BPF_READ && value_regno >= 0)
2872 mark_reg_unknown(env, regs, value_regno);
2953 }
2954 regs[value_regno].type = reg_type;
2955 }
2956
2957 } else if (reg->type == PTR_TO_STACK) {
2958 off += reg->var_off.value;
2959 err = check_stack_access(env, reg, off, size);
2960 if (err)

--- 43 unchanged lines hidden (view full) ---

3004 }
3005 err = check_sock_access(env, insn_idx, regno, off, size, t);
3006 if (!err && value_regno >= 0)
3007 mark_reg_unknown(env, regs, value_regno);
3008 } else if (reg->type == PTR_TO_TP_BUFFER) {
3009 err = check_tp_buffer_access(env, reg, regno, off, size);
3010 if (!err && t == BPF_READ && value_regno >= 0)
3011 mark_reg_unknown(env, regs, value_regno);
3012 } else if (reg->type == PTR_TO_BTF_ID) {
3013 err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
3014 value_regno);
2873 } else {
2874 verbose(env, "R%d invalid mem access '%s'\n", regno,
2875 reg_type_str[reg->type]);
2876 return -EACCES;
2877 }
2878
2879 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
2880 regs[value_regno].type == SCALAR_VALUE) {

--- 412 unchanged lines hidden (view full) ---

3293 return -EFAULT;
3294 }
3295 meta->ref_obj_id = reg->ref_obj_id;
3296 }
3297 } else if (arg_type == ARG_PTR_TO_SOCKET) {
3298 expected_type = PTR_TO_SOCKET;
3299 if (type != expected_type)
3300 goto err_type;
3015 } else {
3016 verbose(env, "R%d invalid mem access '%s'\n", regno,
3017 reg_type_str[reg->type]);
3018 return -EACCES;
3019 }
3020
3021 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
3022 regs[value_regno].type == SCALAR_VALUE) {

--- 412 unchanged lines hidden (view full) ---

3435 return -EFAULT;
3436 }
3437 meta->ref_obj_id = reg->ref_obj_id;
3438 }
3439 } else if (arg_type == ARG_PTR_TO_SOCKET) {
3440 expected_type = PTR_TO_SOCKET;
3441 if (type != expected_type)
3442 goto err_type;
3443 } else if (arg_type == ARG_PTR_TO_BTF_ID) {
3444 expected_type = PTR_TO_BTF_ID;
3445 if (type != expected_type)
3446 goto err_type;
3447 if (reg->btf_id != meta->btf_id) {
3448 verbose(env, "Helper has type %s got %s in R%d\n",
3449 kernel_type_name(meta->btf_id),
3450 kernel_type_name(reg->btf_id), regno);
3451
3452 return -EACCES;
3453 }
3454 if (!tnum_is_const(reg->var_off) || reg->var_off.value || reg->off) {
3455 verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n",
3456 regno);
3457 return -EACCES;
3458 }
3301 } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
3302 if (meta->func_id == BPF_FUNC_spin_lock) {
3303 if (process_spin_lock(env, regno, true))
3304 return -EACCES;
3305 } else if (meta->func_id == BPF_FUNC_spin_unlock) {
3306 if (process_spin_lock(env, regno, false))
3307 return -EACCES;
3308 } else {

--- 131 unchanged lines hidden (view full) ---

3440 switch (map->map_type) {
3441 case BPF_MAP_TYPE_PROG_ARRAY:
3442 if (func_id != BPF_FUNC_tail_call)
3443 goto error;
3444 break;
3445 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
3446 if (func_id != BPF_FUNC_perf_event_read &&
3447 func_id != BPF_FUNC_perf_event_output &&
3459 } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
3460 if (meta->func_id == BPF_FUNC_spin_lock) {
3461 if (process_spin_lock(env, regno, true))
3462 return -EACCES;
3463 } else if (meta->func_id == BPF_FUNC_spin_unlock) {
3464 if (process_spin_lock(env, regno, false))
3465 return -EACCES;
3466 } else {

--- 131 unchanged lines hidden (view full) ---

3598 switch (map->map_type) {
3599 case BPF_MAP_TYPE_PROG_ARRAY:
3600 if (func_id != BPF_FUNC_tail_call)
3601 goto error;
3602 break;
3603 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
3604 if (func_id != BPF_FUNC_perf_event_read &&
3605 func_id != BPF_FUNC_perf_event_output &&
3606 func_id != BPF_FUNC_skb_output &&
3448 func_id != BPF_FUNC_perf_event_read_value)
3449 goto error;
3450 break;
3451 case BPF_MAP_TYPE_STACK_TRACE:
3452 if (func_id != BPF_FUNC_get_stackid)
3453 goto error;
3454 break;
3455 case BPF_MAP_TYPE_CGROUP_ARRAY:

--- 71 unchanged lines hidden (view full) ---

3527 if (env->subprog_cnt > 1) {
3528 verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
3529 return -EINVAL;
3530 }
3531 break;
3532 case BPF_FUNC_perf_event_read:
3533 case BPF_FUNC_perf_event_output:
3534 case BPF_FUNC_perf_event_read_value:
3607 func_id != BPF_FUNC_perf_event_read_value)
3608 goto error;
3609 break;
3610 case BPF_MAP_TYPE_STACK_TRACE:
3611 if (func_id != BPF_FUNC_get_stackid)
3612 goto error;
3613 break;
3614 case BPF_MAP_TYPE_CGROUP_ARRAY:

--- 71 unchanged lines hidden (view full) ---

3686 if (env->subprog_cnt > 1) {
3687 verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
3688 return -EINVAL;
3689 }
3690 break;
3691 case BPF_FUNC_perf_event_read:
3692 case BPF_FUNC_perf_event_output:
3693 case BPF_FUNC_perf_event_read_value:
3694 case BPF_FUNC_skb_output:
3535 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
3536 goto error;
3537 break;
3538 case BPF_FUNC_get_stackid:
3539 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
3540 goto error;
3541 break;
3542 case BPF_FUNC_current_task_under_cgroup:

--- 438 unchanged lines hidden (view full) ---

3981 if (err) {
3982 verbose(env, "kernel subsystem misconfigured func %s#%d\n",
3983 func_id_name(func_id), func_id);
3984 return err;
3985 }
3986
3987 meta.func_id = func_id;
3988 /* check args */
3695 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
3696 goto error;
3697 break;
3698 case BPF_FUNC_get_stackid:
3699 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
3700 goto error;
3701 break;
3702 case BPF_FUNC_current_task_under_cgroup:

--- 438 unchanged lines hidden (view full) ---

4141 if (err) {
4142 verbose(env, "kernel subsystem misconfigured func %s#%d\n",
4143 func_id_name(func_id), func_id);
4144 return err;
4145 }
4146
4147 meta.func_id = func_id;
4148 /* check args */
3989 err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
3990 if (err)
3991 return err;
3992 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
3993 if (err)
3994 return err;
3995 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
3996 if (err)
3997 return err;
3998 err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
3999 if (err)
4000 return err;
4001 err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
4002 if (err)
4003 return err;
4149 for (i = 0; i < 5; i++) {
4150 if (fn->arg_type[i] == ARG_PTR_TO_BTF_ID) {
4151 if (!fn->btf_id[i])
4152 fn->btf_id[i] = btf_resolve_helper_id(&env->log, fn->func, i);
4153 meta.btf_id = fn->btf_id[i];
4154 }
4155 err = check_func_arg(env, BPF_REG_1 + i, fn->arg_type[i], &meta);
4156 if (err)
4157 return err;
4158 }
4004
4005 err = record_func_map(env, &meta, func_id, insn_idx);
4006 if (err)
4007 return err;
4008
4009 /* Mark slots with STACK_MISC in case of raw mode, stack offset
4010 * is inferred from register state.
4011 */

--- 2107 unchanged lines hidden (view full) ---

6119 }
6120 break;
6121 case BPF_PROG_TYPE_CGROUP_SOCK:
6122 case BPF_PROG_TYPE_SOCK_OPS:
6123 case BPF_PROG_TYPE_CGROUP_DEVICE:
6124 case BPF_PROG_TYPE_CGROUP_SYSCTL:
6125 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
6126 break;
4159
4160 err = record_func_map(env, &meta, func_id, insn_idx);
4161 if (err)
4162 return err;
4163
4164 /* Mark slots with STACK_MISC in case of raw mode, stack offset
4165 * is inferred from register state.
4166 */

--- 2107 unchanged lines hidden (view full) ---

6274 }
6275 break;
6276 case BPF_PROG_TYPE_CGROUP_SOCK:
6277 case BPF_PROG_TYPE_SOCK_OPS:
6278 case BPF_PROG_TYPE_CGROUP_DEVICE:
6279 case BPF_PROG_TYPE_CGROUP_SYSCTL:
6280 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
6281 break;
6282 case BPF_PROG_TYPE_RAW_TRACEPOINT:
6283 if (!env->prog->aux->attach_btf_id)
6284 return 0;
6285 range = tnum_const(0);
6286 break;
6127 default:
6128 return 0;
6129 }
6130
6131 reg = cur_regs(env) + BPF_REG_0;
6132 if (reg->type != SCALAR_VALUE) {
6133 verbose(env, "At program exit the register R0 is not a known value (%s)\n",
6134 reg_type_str[reg->type]);

--- 1300 unchanged lines hidden (view full) ---

7435 case PTR_TO_CTX:
7436 case PTR_TO_SOCKET:
7437 case PTR_TO_SOCKET_OR_NULL:
7438 case PTR_TO_SOCK_COMMON:
7439 case PTR_TO_SOCK_COMMON_OR_NULL:
7440 case PTR_TO_TCP_SOCK:
7441 case PTR_TO_TCP_SOCK_OR_NULL:
7442 case PTR_TO_XDP_SOCK:
6287 default:
6288 return 0;
6289 }
6290
6291 reg = cur_regs(env) + BPF_REG_0;
6292 if (reg->type != SCALAR_VALUE) {
6293 verbose(env, "At program exit the register R0 is not a known value (%s)\n",
6294 reg_type_str[reg->type]);

--- 1300 unchanged lines hidden (view full) ---

7595 case PTR_TO_CTX:
7596 case PTR_TO_SOCKET:
7597 case PTR_TO_SOCKET_OR_NULL:
7598 case PTR_TO_SOCK_COMMON:
7599 case PTR_TO_SOCK_COMMON_OR_NULL:
7600 case PTR_TO_TCP_SOCK:
7601 case PTR_TO_TCP_SOCK_OR_NULL:
7602 case PTR_TO_XDP_SOCK:
7603 case PTR_TO_BTF_ID:
7443 return false;
7444 default:
7445 return true;
7446 }
7447}
7448
7449/* If an instruction was previously used with particular pointer types, then we
7450 * need to be careful to avoid cases such as the below, where it may be ok

--- 1125 unchanged lines hidden (view full) ---

8576 convert_ctx_access = bpf_sock_convert_ctx_access;
8577 break;
8578 case PTR_TO_TCP_SOCK:
8579 convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
8580 break;
8581 case PTR_TO_XDP_SOCK:
8582 convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
8583 break;
7604 return false;
7605 default:
7606 return true;
7607 }
7608}
7609
7610/* If an instruction was previously used with particular pointer types, then we
7611 * need to be careful to avoid cases such as the below, where it may be ok

--- 1125 unchanged lines hidden (view full) ---

8737 convert_ctx_access = bpf_sock_convert_ctx_access;
8738 break;
8739 case PTR_TO_TCP_SOCK:
8740 convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
8741 break;
8742 case PTR_TO_XDP_SOCK:
8743 convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
8744 break;
8745 case PTR_TO_BTF_ID:
8746 if (type == BPF_WRITE) {
8747 verbose(env, "Writes through BTF pointers are not allowed\n");
8748 return -EINVAL;
8749 }
8750 insn->code = BPF_LDX | BPF_PROBE_MEM | BPF_SIZE((insn)->code);
8751 env->prog->aux->num_exentries++;
8752 continue;
8584 default:
8585 continue;
8586 }
8587
8588 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
8589 size = BPF_LDST_BYTES(insn);
8590
8591 /* If the read access is a narrower load of the field,

--- 611 unchanged lines hidden (view full) ---

9203 }
9204 verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
9205 "total_states %d peak_states %d mark_read %d\n",
9206 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
9207 env->max_states_per_insn, env->total_states,
9208 env->peak_states, env->longest_mark_read_walk);
9209}
9210
8753 default:
8754 continue;
8755 }
8756
8757 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
8758 size = BPF_LDST_BYTES(insn);
8759
8760 /* If the read access is a narrower load of the field,

--- 611 unchanged lines hidden (view full) ---

9372 }
9373 verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
9374 "total_states %d peak_states %d mark_read %d\n",
9375 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
9376 env->max_states_per_insn, env->total_states,
9377 env->peak_states, env->longest_mark_read_walk);
9378}
9379
/* Validate prog->aux->attach_btf_id for BPF_PROG_TYPE_TRACING programs
 * and resolve the kernel object the program will attach to.
 *
 * For BPF_TRACE_RAW_TP the id must name a "btf_trace_" typedef; the
 * underlying func_proto and name are cached in prog->aux (both live in
 * read-only vmlinux BTF, so they stay valid for the kernel's lifetime).
 *
 * For BPF_TRACE_FENTRY/FEXIT the id must name a kernel function; a
 * trampoline keyed by btf_id is looked up (or created) and its target
 * address/function model filled in on first use.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int check_attach_btf_id(struct bpf_verifier_env *env)
{
	struct bpf_prog *prog = env->prog;
	u32 btf_id = prog->aux->attach_btf_id;
	const char prefix[] = "btf_trace_";
	struct bpf_trampoline *tr;
	const struct btf_type *t;
	const char *tname;
	int ret = 0;
	long addr;

	/* Only tracing programs carry an attach_btf_id to validate. */
	if (prog->type != BPF_PROG_TYPE_TRACING)
		return 0;

	if (!btf_id) {
		verbose(env, "Tracing programs must provide btf_id\n");
		return -EINVAL;
	}
	t = btf_type_by_id(btf_vmlinux, btf_id);
	if (!t) {
		verbose(env, "attach_btf_id %u is invalid\n", btf_id);
		return -EINVAL;
	}
	tname = btf_name_by_offset(btf_vmlinux, t->name_off);
	if (!tname) {
		verbose(env, "attach_btf_id %u doesn't have a name\n", btf_id);
		return -EINVAL;
	}

	switch (prog->expected_attach_type) {
	case BPF_TRACE_RAW_TP:
		/* Raw tracepoints are described by a "btf_trace_<name>"
		 * typedef pointing at the tracepoint's func_proto.
		 */
		if (!btf_type_is_typedef(t)) {
			verbose(env, "attach_btf_id %u is not a typedef\n",
				btf_id);
			return -EINVAL;
		}
		if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
			verbose(env, "attach_btf_id %u points to wrong type name %s\n",
				btf_id, tname);
			return -EINVAL;
		}
		/* Strip the "btf_trace_" prefix to get the tracepoint name. */
		tname += sizeof(prefix) - 1;
		t = btf_type_by_id(btf_vmlinux, t->type);
		if (!btf_type_is_ptr(t))
			/* should never happen in valid vmlinux build */
			return -EINVAL;
		t = btf_type_by_id(btf_vmlinux, t->type);
		if (!btf_type_is_func_proto(t))
			/* should never happen in valid vmlinux build */
			return -EINVAL;

		/* remember two read only pointers that are valid for
		 * the life time of the kernel
		 */
		prog->aux->attach_func_name = tname;
		prog->aux->attach_func_proto = t;
		prog->aux->attach_btf_trace = true;
		return 0;
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (!btf_type_is_func(t)) {
			verbose(env, "attach_btf_id %u is not a function\n",
				btf_id);
			return -EINVAL;
		}
		t = btf_type_by_id(btf_vmlinux, t->type);
		if (!btf_type_is_func_proto(t))
			return -EINVAL;
		/* Trampolines are shared per btf_id; lookup takes a
		 * reference which is dropped below on failure.
		 */
		tr = bpf_trampoline_lookup(btf_id);
		if (!tr)
			return -ENOMEM;
		prog->aux->attach_func_name = tname;
		prog->aux->attach_func_proto = t;
		mutex_lock(&tr->mutex);
		/* Already resolved by an earlier program: reuse as-is. */
		if (tr->func.addr) {
			prog->aux->trampoline = tr;
			goto out;
		}
		ret = btf_distill_func_proto(&env->log, btf_vmlinux, t,
					     tname, &tr->func.model);
		if (ret < 0)
			goto out;
		addr = kallsyms_lookup_name(tname);
		if (!addr) {
			verbose(env,
				"The address of function %s cannot be found\n",
				tname);
			ret = -ENOENT;
			goto out;
		}
		tr->func.addr = (void *)addr;
		prog->aux->trampoline = tr;
out:
		mutex_unlock(&tr->mutex);
		/* On error, drop the reference taken by the lookup above. */
		if (ret)
			bpf_trampoline_put(tr);
		return ret;
	default:
		return -EINVAL;
	}
}
9481
9211int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
9212 union bpf_attr __user *uattr)
9213{
9214 u64 start_time = ktime_get_ns();
9215 struct bpf_verifier_env *env;
9216 struct bpf_verifier_log *log;
9217 int i, len, ret = -EINVAL;
9218 bool is_priv;

--- 17 unchanged lines hidden (view full) ---

9236 if (!env->insn_aux_data)
9237 goto err_free_env;
9238 for (i = 0; i < len; i++)
9239 env->insn_aux_data[i].orig_idx = i;
9240 env->prog = *prog;
9241 env->ops = bpf_verifier_ops[env->prog->type];
9242 is_priv = capable(CAP_SYS_ADMIN);
9243
9482int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
9483 union bpf_attr __user *uattr)
9484{
9485 u64 start_time = ktime_get_ns();
9486 struct bpf_verifier_env *env;
9487 struct bpf_verifier_log *log;
9488 int i, len, ret = -EINVAL;
9489 bool is_priv;

--- 17 unchanged lines hidden (view full) ---

9507 if (!env->insn_aux_data)
9508 goto err_free_env;
9509 for (i = 0; i < len; i++)
9510 env->insn_aux_data[i].orig_idx = i;
9511 env->prog = *prog;
9512 env->ops = bpf_verifier_ops[env->prog->type];
9513 is_priv = capable(CAP_SYS_ADMIN);
9514
9515 if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
9516 mutex_lock(&bpf_verifier_lock);
9517 if (!btf_vmlinux)
9518 btf_vmlinux = btf_parse_vmlinux();
9519 mutex_unlock(&bpf_verifier_lock);
9520 }
9521
9244 /* grab the mutex to protect few globals used by verifier */
9245 if (!is_priv)
9246 mutex_lock(&bpf_verifier_lock);
9247
9248 if (attr->log_level || attr->log_buf || attr->log_size) {
9249 /* user requested verbose verifier output
9250 * and supplied buffer to store the verification trace
9251 */
9252 log->level = attr->log_level;
9253 log->ubuf = (char __user *) (unsigned long) attr->log_buf;
9254 log->len_total = attr->log_size;
9255
9256 ret = -EINVAL;
9257 /* log attributes have to be sane */
9258 if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 ||
9259 !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK)
9260 goto err_unlock;
9261 }
9262
9522 /* grab the mutex to protect few globals used by verifier */
9523 if (!is_priv)
9524 mutex_lock(&bpf_verifier_lock);
9525
9526 if (attr->log_level || attr->log_buf || attr->log_size) {
9527 /* user requested verbose verifier output
9528 * and supplied buffer to store the verification trace
9529 */
9530 log->level = attr->log_level;
9531 log->ubuf = (char __user *) (unsigned long) attr->log_buf;
9532 log->len_total = attr->log_size;
9533
9534 ret = -EINVAL;
9535 /* log attributes have to be sane */
9536 if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 ||
9537 !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK)
9538 goto err_unlock;
9539 }
9540
9541 if (IS_ERR(btf_vmlinux)) {
9542 /* Either gcc or pahole or kernel are broken. */
9543 verbose(env, "in-kernel BTF is malformed\n");
9544 ret = PTR_ERR(btf_vmlinux);
9545 goto skip_full_check;
9546 }
9547
9548 ret = check_attach_btf_id(env);
9549 if (ret)
9550 goto skip_full_check;
9551
9263 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
9264 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
9265 env->strict_alignment = true;
9266 if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
9267 env->strict_alignment = false;
9268
9269 env->allow_ptr_leaks = is_priv;
9270

--- 129 unchanged lines hidden ---
9552 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
9553 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
9554 env->strict_alignment = true;
9555 if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
9556 env->strict_alignment = false;
9557
9558 env->allow_ptr_leaks = is_priv;
9559

--- 129 unchanged lines hidden ---