xref: /linux/include/linux/bpf.h (revision d9104cec3e8fe4b458b74709853231385779001f)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  */
4 #ifndef _LINUX_BPF_H
5 #define _LINUX_BPF_H 1
6 
7 #include <uapi/linux/bpf.h>
8 #include <uapi/linux/filter.h>
9 
10 #include <linux/workqueue.h>
11 #include <linux/file.h>
12 #include <linux/percpu.h>
13 #include <linux/err.h>
14 #include <linux/rbtree_latch.h>
15 #include <linux/numa.h>
16 #include <linux/mm_types.h>
17 #include <linux/wait.h>
18 #include <linux/refcount.h>
19 #include <linux/mutex.h>
20 #include <linux/module.h>
21 #include <linux/kallsyms.h>
22 #include <linux/capability.h>
23 #include <linux/sched/mm.h>
24 #include <linux/slab.h>
25 #include <linux/percpu-refcount.h>
26 #include <linux/stddef.h>
27 #include <linux/bpfptr.h>
28 #include <linux/btf.h>
29 #include <linux/rcupdate_trace.h>
30 #include <linux/static_call.h>
31 #include <linux/memcontrol.h>
32 #include <linux/cfi.h>
33 #include <asm/rqspinlock.h>
34 
35 struct bpf_verifier_env;
36 struct bpf_verifier_log;
37 struct perf_event;
38 struct bpf_prog;
39 struct bpf_prog_aux;
40 struct bpf_map;
41 struct bpf_arena;
42 struct sock;
43 struct seq_file;
44 struct btf;
45 struct btf_type;
46 struct exception_table_entry;
47 struct seq_operations;
48 struct bpf_iter_aux_info;
49 struct bpf_local_storage;
50 struct bpf_local_storage_map;
51 struct kobject;
52 struct mem_cgroup;
53 struct module;
54 struct bpf_func_state;
55 struct ftrace_ops;
56 struct cgroup;
57 struct bpf_token;
58 struct user_namespace;
59 struct super_block;
60 struct inode;
61 
62 extern struct idr btf_idr;
63 extern spinlock_t btf_idr_lock;
64 extern struct kobject *btf_kobj;
65 extern struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma;
66 extern bool bpf_global_ma_set;
67 
68 typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
69 typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
70 					struct bpf_iter_aux_info *aux);
71 typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
72 typedef unsigned int (*bpf_func_t)(const void *,
73 				   const struct bpf_insn *);
74 struct bpf_iter_seq_info {
75 	const struct seq_operations *seq_ops;
76 	bpf_iter_init_seq_priv_t init_seq_private;
77 	bpf_iter_fini_seq_priv_t fini_seq_private;
78 	u32 seq_priv_size;
79 };
80 
81 /* map is generic key/value storage optionally accessible by eBPF programs */
82 struct bpf_map_ops {
83 	/* funcs callable from userspace (via syscall) */
84 	int (*map_alloc_check)(union bpf_attr *attr);
85 	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
86 	void (*map_release)(struct bpf_map *map, struct file *map_file);
87 	void (*map_free)(struct bpf_map *map);
88 	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
89 	void (*map_release_uref)(struct bpf_map *map);
90 	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
91 	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
92 				union bpf_attr __user *uattr);
93 	int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
94 					  void *value, u64 flags);
95 	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
96 					   const union bpf_attr *attr,
97 					   union bpf_attr __user *uattr);
98 	int (*map_update_batch)(struct bpf_map *map, struct file *map_file,
99 				const union bpf_attr *attr,
100 				union bpf_attr __user *uattr);
101 	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
102 				union bpf_attr __user *uattr);
103 
104 	/* funcs callable from userspace and from eBPF programs */
105 	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
106 	long (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
107 	long (*map_delete_elem)(struct bpf_map *map, void *key);
108 	long (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
109 	long (*map_pop_elem)(struct bpf_map *map, void *value);
110 	long (*map_peek_elem)(struct bpf_map *map, void *value);
111 	void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);
112 
113 	/* funcs called by prog_array and perf_event_array map */
114 	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
115 				int fd);
116 	/* If need_defer is true, the implementation should guarantee that
117 	 * the to-be-put element is still alive before the bpf program, which
118 	 * may manipulate it, exits.
119 	 */
120 	void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer);
121 	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
122 	u32 (*map_fd_sys_lookup_elem)(void *ptr);
123 	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
124 				  struct seq_file *m);
125 	int (*map_check_btf)(const struct bpf_map *map,
126 			     const struct btf *btf,
127 			     const struct btf_type *key_type,
128 			     const struct btf_type *value_type);
129 
130 	/* Prog poke tracking helpers. */
131 	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
132 	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
133 	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
134 			     struct bpf_prog *new);
135 
136 	/* Direct value access helpers. */
137 	int (*map_direct_value_addr)(const struct bpf_map *map,
138 				     u64 *imm, u32 off);
139 	int (*map_direct_value_meta)(const struct bpf_map *map,
140 				     u64 imm, u32 *off);
141 	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
142 	__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
143 			     struct poll_table_struct *pts);
144 	unsigned long (*map_get_unmapped_area)(struct file *filep, unsigned long addr,
145 					       unsigned long len, unsigned long pgoff,
146 					       unsigned long flags);
147 
148 	/* Functions called by bpf_local_storage maps */
149 	int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
150 					void *owner, u32 size);
151 	void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
152 					   void *owner, u32 size);
153 	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);
154 
155 	/* Misc helpers. */
156 	long (*map_redirect)(struct bpf_map *map, u64 key, u64 flags);
157 
158 	/* map_meta_equal must be implemented for maps that can be
159 	 * used as an inner map.  It is a runtime check to ensure
160 	 * an inner map can be inserted to an outer map.
161 	 *
162 	 * Some properties of the inner map have already been used during
163 	 * verification.  When inserting an inner map at runtime,
164 	 * map_meta_equal has to ensure the map being inserted has the same
165 	 * properties that the verifier used earlier.
166 	 */
167 	bool (*map_meta_equal)(const struct bpf_map *meta0,
168 			       const struct bpf_map *meta1);
169 
170 
171 	int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
172 					      struct bpf_func_state *caller,
173 					      struct bpf_func_state *callee);
174 	long (*map_for_each_callback)(struct bpf_map *map,
175 				     bpf_callback_t callback_fn,
176 				     void *callback_ctx, u64 flags);
177 
178 	u64 (*map_mem_usage)(const struct bpf_map *map);
179 
180 	/* BTF id of struct allocated by map_alloc */
181 	int *map_btf_id;
182 
183 	/* bpf_iter info used to open a seq_file */
184 	const struct bpf_iter_seq_info *iter_seq_info;
185 };
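/* Example (sketch): a map implementation registers itself by filling in a
 * bpf_map_ops table. A minimal, hypothetical map type could look roughly
 * like this; the my_map_* callbacks are assumed to be defined elsewhere:
 *
 *	const struct bpf_map_ops my_map_ops = {
 *		.map_meta_equal   = bpf_map_meta_equal,
 *		.map_alloc_check  = my_map_alloc_check,
 *		.map_alloc        = my_map_alloc,
 *		.map_free         = my_map_free,
 *		.map_get_next_key = my_map_get_next_key,
 *		.map_lookup_elem  = my_map_lookup_elem,
 *		.map_update_elem  = my_map_update_elem,
 *		.map_delete_elem  = my_map_delete_elem,
 *		.map_mem_usage    = my_map_mem_usage,
 *		.map_btf_id       = &my_map_btf_ids[0],
 *	};
 */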
186 
187 enum {
188 	/* Support at most 11 fields in a BTF type */
189 	BTF_FIELDS_MAX	   = 11,
190 };
191 
192 enum btf_field_type {
193 	BPF_SPIN_LOCK  = (1 << 0),
194 	BPF_TIMER      = (1 << 1),
195 	BPF_KPTR_UNREF = (1 << 2),
196 	BPF_KPTR_REF   = (1 << 3),
197 	BPF_KPTR_PERCPU = (1 << 4),
198 	BPF_KPTR       = BPF_KPTR_UNREF | BPF_KPTR_REF | BPF_KPTR_PERCPU,
199 	BPF_LIST_HEAD  = (1 << 5),
200 	BPF_LIST_NODE  = (1 << 6),
201 	BPF_RB_ROOT    = (1 << 7),
202 	BPF_RB_NODE    = (1 << 8),
203 	BPF_GRAPH_NODE = BPF_RB_NODE | BPF_LIST_NODE,
204 	BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD,
205 	BPF_REFCOUNT   = (1 << 9),
206 	BPF_WORKQUEUE  = (1 << 10),
207 	BPF_UPTR       = (1 << 11),
208 	BPF_RES_SPIN_LOCK = (1 << 12),
209 };
210 
211 typedef void (*btf_dtor_kfunc_t)(void *);
212 
213 struct btf_field_kptr {
214 	struct btf *btf;
215 	struct module *module;
216 	/* dtor used if btf_is_kernel(btf), otherwise the type is
217 	 * program-allocated, dtor is NULL, and __bpf_obj_drop_impl is used
218 	 */
219 	btf_dtor_kfunc_t dtor;
220 	u32 btf_id;
221 };
222 
223 struct btf_field_graph_root {
224 	struct btf *btf;
225 	u32 value_btf_id;
226 	u32 node_offset;
227 	struct btf_record *value_rec;
228 };
229 
230 struct btf_field {
231 	u32 offset;
232 	u32 size;
233 	enum btf_field_type type;
234 	union {
235 		struct btf_field_kptr kptr;
236 		struct btf_field_graph_root graph_root;
237 	};
238 };
239 
240 struct btf_record {
241 	u32 cnt;
242 	u32 field_mask;
243 	int spin_lock_off;
244 	int res_spin_lock_off;
245 	int timer_off;
246 	int wq_off;
247 	int refcount_off;
248 	struct btf_field fields[];
249 };
250 
251 /* Non-opaque version of bpf_rb_node in uapi/linux/bpf.h */
252 struct bpf_rb_node_kern {
253 	struct rb_node rb_node;
254 	void *owner;
255 } __attribute__((aligned(8)));
256 
257 /* Non-opaque version of bpf_list_node in uapi/linux/bpf.h */
258 struct bpf_list_node_kern {
259 	struct list_head list_head;
260 	void *owner;
261 } __attribute__((aligned(8)));
262 
263 struct bpf_map {
264 	const struct bpf_map_ops *ops;
265 	struct bpf_map *inner_map_meta;
266 #ifdef CONFIG_SECURITY
267 	void *security;
268 #endif
269 	enum bpf_map_type map_type;
270 	u32 key_size;
271 	u32 value_size;
272 	u32 max_entries;
273 	u64 map_extra; /* any per-map-type extra fields */
274 	u32 map_flags;
275 	u32 id;
276 	struct btf_record *record;
277 	int numa_node;
278 	u32 btf_key_type_id;
279 	u32 btf_value_type_id;
280 	u32 btf_vmlinux_value_type_id;
281 	struct btf *btf;
282 #ifdef CONFIG_MEMCG
283 	struct obj_cgroup *objcg;
284 #endif
285 	char name[BPF_OBJ_NAME_LEN];
286 	struct mutex freeze_mutex;
287 	atomic64_t refcnt;
288 	atomic64_t usercnt;
289 	/* rcu is used before freeing and work is only used during freeing */
290 	union {
291 		struct work_struct work;
292 		struct rcu_head rcu;
293 	};
294 	atomic64_t writecnt;
295 	/* 'Ownership' of program-containing map is claimed by the first program
296 	 * that is going to use this map or by the first program whose FD is
297 	 * stored in the map to make sure that all callers and callees have the
298 	 * same prog type, JITed flag and xdp_has_frags flag.
299 	 */
300 	struct {
301 		const struct btf_type *attach_func_proto;
302 		spinlock_t lock;
303 		enum bpf_prog_type type;
304 		bool jited;
305 		bool xdp_has_frags;
306 	} owner;
307 	bool bypass_spec_v1;
308 	bool frozen; /* write-once; write-protected by freeze_mutex */
309 	bool free_after_mult_rcu_gp;
310 	bool free_after_rcu_gp;
311 	atomic64_t sleepable_refcnt;
312 	s64 __percpu *elem_count;
313 };
314 
315 static inline const char *btf_field_type_name(enum btf_field_type type)
316 {
317 	switch (type) {
318 	case BPF_SPIN_LOCK:
319 		return "bpf_spin_lock";
320 	case BPF_RES_SPIN_LOCK:
321 		return "bpf_res_spin_lock";
322 	case BPF_TIMER:
323 		return "bpf_timer";
324 	case BPF_WORKQUEUE:
325 		return "bpf_wq";
326 	case BPF_KPTR_UNREF:
327 	case BPF_KPTR_REF:
328 		return "kptr";
329 	case BPF_KPTR_PERCPU:
330 		return "percpu_kptr";
331 	case BPF_UPTR:
332 		return "uptr";
333 	case BPF_LIST_HEAD:
334 		return "bpf_list_head";
335 	case BPF_LIST_NODE:
336 		return "bpf_list_node";
337 	case BPF_RB_ROOT:
338 		return "bpf_rb_root";
339 	case BPF_RB_NODE:
340 		return "bpf_rb_node";
341 	case BPF_REFCOUNT:
342 		return "bpf_refcount";
343 	default:
344 		WARN_ON_ONCE(1);
345 		return "unknown";
346 	}
347 }
348 
349 #if IS_ENABLED(CONFIG_DEBUG_KERNEL)
350 #define BPF_WARN_ONCE(cond, format...) WARN_ONCE(cond, format)
351 #else
352 #define BPF_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
353 #endif
354 
355 static inline u32 btf_field_type_size(enum btf_field_type type)
356 {
357 	switch (type) {
358 	case BPF_SPIN_LOCK:
359 		return sizeof(struct bpf_spin_lock);
360 	case BPF_RES_SPIN_LOCK:
361 		return sizeof(struct bpf_res_spin_lock);
362 	case BPF_TIMER:
363 		return sizeof(struct bpf_timer);
364 	case BPF_WORKQUEUE:
365 		return sizeof(struct bpf_wq);
366 	case BPF_KPTR_UNREF:
367 	case BPF_KPTR_REF:
368 	case BPF_KPTR_PERCPU:
369 	case BPF_UPTR:
370 		return sizeof(u64);
371 	case BPF_LIST_HEAD:
372 		return sizeof(struct bpf_list_head);
373 	case BPF_LIST_NODE:
374 		return sizeof(struct bpf_list_node);
375 	case BPF_RB_ROOT:
376 		return sizeof(struct bpf_rb_root);
377 	case BPF_RB_NODE:
378 		return sizeof(struct bpf_rb_node);
379 	case BPF_REFCOUNT:
380 		return sizeof(struct bpf_refcount);
381 	default:
382 		WARN_ON_ONCE(1);
383 		return 0;
384 	}
385 }
386 
387 static inline u32 btf_field_type_align(enum btf_field_type type)
388 {
389 	switch (type) {
390 	case BPF_SPIN_LOCK:
391 		return __alignof__(struct bpf_spin_lock);
392 	case BPF_RES_SPIN_LOCK:
393 		return __alignof__(struct bpf_res_spin_lock);
394 	case BPF_TIMER:
395 		return __alignof__(struct bpf_timer);
396 	case BPF_WORKQUEUE:
397 		return __alignof__(struct bpf_wq);
398 	case BPF_KPTR_UNREF:
399 	case BPF_KPTR_REF:
400 	case BPF_KPTR_PERCPU:
401 	case BPF_UPTR:
402 		return __alignof__(u64);
403 	case BPF_LIST_HEAD:
404 		return __alignof__(struct bpf_list_head);
405 	case BPF_LIST_NODE:
406 		return __alignof__(struct bpf_list_node);
407 	case BPF_RB_ROOT:
408 		return __alignof__(struct bpf_rb_root);
409 	case BPF_RB_NODE:
410 		return __alignof__(struct bpf_rb_node);
411 	case BPF_REFCOUNT:
412 		return __alignof__(struct bpf_refcount);
413 	default:
414 		WARN_ON_ONCE(1);
415 		return 0;
416 	}
417 }
418 
419 static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
420 {
421 	memset(addr, 0, field->size);
422 
423 	switch (field->type) {
424 	case BPF_REFCOUNT:
425 		refcount_set((refcount_t *)addr, 1);
426 		break;
427 	case BPF_RB_NODE:
428 		RB_CLEAR_NODE((struct rb_node *)addr);
429 		break;
430 	case BPF_LIST_HEAD:
431 	case BPF_LIST_NODE:
432 		INIT_LIST_HEAD((struct list_head *)addr);
433 		break;
434 	case BPF_RB_ROOT:
435 		/* RB_ROOT_CACHED 0-inits, no need to do anything after memset */
436 	case BPF_SPIN_LOCK:
437 	case BPF_RES_SPIN_LOCK:
438 	case BPF_TIMER:
439 	case BPF_WORKQUEUE:
440 	case BPF_KPTR_UNREF:
441 	case BPF_KPTR_REF:
442 	case BPF_KPTR_PERCPU:
443 	case BPF_UPTR:
444 		break;
445 	default:
446 		WARN_ON_ONCE(1);
447 		return;
448 	}
449 }
450 
451 static inline bool btf_record_has_field(const struct btf_record *rec, enum btf_field_type type)
452 {
453 	if (IS_ERR_OR_NULL(rec))
454 		return false;
455 	return rec->field_mask & type;
456 }
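/* Example (sketch): because btf_field_type is a bitmask, a btf_record can be
 * queried for one field type or for a whole group at once, e.g. BPF_KPTR
 * covers the unreferenced, referenced and percpu kptr flavors:
 *
 *	if (btf_record_has_field(map->record, BPF_SPIN_LOCK))
 *		;	// value embeds a struct bpf_spin_lock
 *	if (btf_record_has_field(map->record, BPF_KPTR))
 *		;	// value embeds at least one kptr field
 */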
457 
458 static inline void bpf_obj_init(const struct btf_record *rec, void *obj)
459 {
460 	int i;
461 
462 	if (IS_ERR_OR_NULL(rec))
463 		return;
464 	for (i = 0; i < rec->cnt; i++)
465 		bpf_obj_init_field(&rec->fields[i], obj + rec->fields[i].offset);
466 }
467 
468 /* 'dst' must be a temporary buffer and should not point to memory that is being
469  * used in parallel by a bpf program or bpf syscall, otherwise the access from
470  * the bpf program or bpf syscall may be corrupted by the reinitialization,
471  * leading to weird problems. Even if 'dst' is newly allocated from the bpf
472  * memory allocator, it is still possible for 'dst' to be used in parallel
473  * by a bpf program or bpf syscall.
474  */
475 static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
476 {
477 	bpf_obj_init(map->record, dst);
478 }
479 
480 /* memcpy that is used with 8-byte aligned pointers, a size that is a multiple
481  * of 8, and forced to use 'long' read/writes to try to atomically copy long
482  * counters. Best-effort only.  No barriers here, since it _will_ race with
483  * concurrent updates from BPF programs. Called from the bpf syscall and mostly
484  * used with size 8 or 16 bytes, so ask the compiler to inline it.
485  */
486 static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
487 {
488 	const long *lsrc = src;
489 	long *ldst = dst;
490 
491 	size /= sizeof(long);
492 	while (size--)
493 		data_race(*ldst++ = *lsrc++);
494 }
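/* Example (sketch): for a 16-byte pair of counters, the copy above degrades
 * to two long-sized data_race() accesses on 64-bit, so each u64 is copied
 * atomically even though the pair as a whole may be torn:
 *
 *	struct counters { u64 packets; u64 bytes; } dst, src;
 *
 *	bpf_long_memcpy(&dst, &src, sizeof(src));	// two 8-byte copies
 */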
495 
496 /* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */
497 static inline void bpf_obj_memcpy(struct btf_record *rec,
498 				  void *dst, void *src, u32 size,
499 				  bool long_memcpy)
500 {
501 	u32 curr_off = 0;
502 	int i;
503 
504 	if (IS_ERR_OR_NULL(rec)) {
505 		if (long_memcpy)
506 			bpf_long_memcpy(dst, src, round_up(size, 8));
507 		else
508 			memcpy(dst, src, size);
509 		return;
510 	}
511 
512 	for (i = 0; i < rec->cnt; i++) {
513 		u32 next_off = rec->fields[i].offset;
514 		u32 sz = next_off - curr_off;
515 
516 		memcpy(dst + curr_off, src + curr_off, sz);
517 		curr_off += rec->fields[i].size + sz;
518 	}
519 	memcpy(dst + curr_off, src + curr_off, size - curr_off);
520 }
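/* Worked example: for a 24-byte map value with a struct bpf_spin_lock at
 * offset 16, the loop above copies [0, 16), advances curr_off past the
 * 4-byte lock to 20, and the trailing memcpy copies [20, 24); the lock word
 * itself is never touched.
 */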
521 
522 static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
523 {
524 	bpf_obj_memcpy(map->record, dst, src, map->value_size, false);
525 }
526 
527 static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src)
528 {
529 	bpf_obj_memcpy(map->record, dst, src, map->value_size, true);
530 }
531 
532 static inline void bpf_obj_swap_uptrs(const struct btf_record *rec, void *dst, void *src)
533 {
534 	unsigned long *src_uptr, *dst_uptr;
535 	const struct btf_field *field;
536 	int i;
537 
538 	if (!btf_record_has_field(rec, BPF_UPTR))
539 		return;
540 
541 	for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) {
542 		if (field->type != BPF_UPTR)
543 			continue;
544 
545 		src_uptr = src + field->offset;
546 		dst_uptr = dst + field->offset;
547 		swap(*src_uptr, *dst_uptr);
548 	}
549 }
550 
551 static inline void bpf_obj_memzero(struct btf_record *rec, void *dst, u32 size)
552 {
553 	u32 curr_off = 0;
554 	int i;
555 
556 	if (IS_ERR_OR_NULL(rec)) {
557 		memset(dst, 0, size);
558 		return;
559 	}
560 
561 	for (i = 0; i < rec->cnt; i++) {
562 		u32 next_off = rec->fields[i].offset;
563 		u32 sz = next_off - curr_off;
564 
565 		memset(dst + curr_off, 0, sz);
566 		curr_off += rec->fields[i].size + sz;
567 	}
568 	memset(dst + curr_off, 0, size - curr_off);
569 }
570 
571 static inline void zero_map_value(struct bpf_map *map, void *dst)
572 {
573 	bpf_obj_memzero(map->record, dst, map->value_size);
574 }
575 
576 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
577 			   bool lock_src);
578 void bpf_timer_cancel_and_free(void *timer);
579 void bpf_wq_cancel_and_free(void *timer);
580 void bpf_list_head_free(const struct btf_field *field, void *list_head,
581 			struct bpf_spin_lock *spin_lock);
582 void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
583 		      struct bpf_spin_lock *spin_lock);
584 u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena);
585 u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena);
586 int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);
587 
588 struct bpf_offload_dev;
589 struct bpf_offloaded_map;
590 
591 struct bpf_map_dev_ops {
592 	int (*map_get_next_key)(struct bpf_offloaded_map *map,
593 				void *key, void *next_key);
594 	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
595 			       void *key, void *value);
596 	int (*map_update_elem)(struct bpf_offloaded_map *map,
597 			       void *key, void *value, u64 flags);
598 	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
599 };
600 
601 struct bpf_offloaded_map {
602 	struct bpf_map map;
603 	struct net_device *netdev;
604 	const struct bpf_map_dev_ops *dev_ops;
605 	void *dev_priv;
606 	struct list_head offloads;
607 };
608 
609 static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
610 {
611 	return container_of(map, struct bpf_offloaded_map, map);
612 }
613 
614 static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
615 {
616 	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
617 }
618 
619 static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
620 {
621 	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
622 		map->ops->map_seq_show_elem;
623 }
624 
625 int map_check_no_btf(const struct bpf_map *map,
626 		     const struct btf *btf,
627 		     const struct btf_type *key_type,
628 		     const struct btf_type *value_type);
629 
630 bool bpf_map_meta_equal(const struct bpf_map *meta0,
631 			const struct bpf_map *meta1);
632 
633 extern const struct bpf_map_ops bpf_map_offload_ops;
634 
635 /* bpf_type_flag contains a set of flags that are applicable to the values of
636  * arg_type, ret_type and reg_type. For example, a pointer value may be null,
637  * or a memory region may be read-only. We classify types into two categories:
638  * base types and extended types. Extended types are base types combined with a type flag.
639  *
640  * Currently there are no more than 32 base types in arg_type, ret_type and
641  * reg_type.
642  */
643 #define BPF_BASE_TYPE_BITS	8
644 
645 enum bpf_type_flag {
646 	/* PTR may be NULL. */
647 	PTR_MAYBE_NULL		= BIT(0 + BPF_BASE_TYPE_BITS),
648 
649 	/* MEM is read-only. When applied on bpf_arg, it indicates the arg is
650 	 * compatible with both mutable and immutable memory.
651 	 */
652 	MEM_RDONLY		= BIT(1 + BPF_BASE_TYPE_BITS),
653 
654 	/* MEM points to BPF ring buffer reservation. */
655 	MEM_RINGBUF		= BIT(2 + BPF_BASE_TYPE_BITS),
656 
657 	/* MEM is in user address space. */
658 	MEM_USER		= BIT(3 + BPF_BASE_TYPE_BITS),
659 
660 	/* MEM is percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged
661 	 * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In
662 	 * order to drop this tag, it must be passed into bpf_per_cpu_ptr()
663 	 * or bpf_this_cpu_ptr(), which will return the pointer corresponding
664 	 * to the specified cpu.
665 	 */
666 	MEM_PERCPU		= BIT(4 + BPF_BASE_TYPE_BITS),
667 
668 	/* Indicates that the argument will be released. */
669 	OBJ_RELEASE		= BIT(5 + BPF_BASE_TYPE_BITS),
670 
671 	/* PTR is not trusted. This is only used with PTR_TO_BTF_ID, to mark
672 	 * unreferenced and referenced kptrs loaded from a map value using a load
673 	 * instruction, so that they can only be dereferenced but not escape the
674 	 * BPF program into the kernel (i.e. cannot be passed as arguments to
675 	 * kfunc or bpf helpers).
676 	 */
677 	PTR_UNTRUSTED		= BIT(6 + BPF_BASE_TYPE_BITS),
678 
679 	/* MEM can be uninitialized. */
680 	MEM_UNINIT		= BIT(7 + BPF_BASE_TYPE_BITS),
681 
682 	/* DYNPTR points to memory local to the bpf program. */
683 	DYNPTR_TYPE_LOCAL	= BIT(8 + BPF_BASE_TYPE_BITS),
684 
685 	/* DYNPTR points to a kernel-produced ringbuf record. */
686 	DYNPTR_TYPE_RINGBUF	= BIT(9 + BPF_BASE_TYPE_BITS),
687 
688 	/* Size is known at compile time. */
689 	MEM_FIXED_SIZE		= BIT(10 + BPF_BASE_TYPE_BITS),
690 
691 	/* MEM is of an allocated object of type in program BTF. This is used to
692 	 * tag PTR_TO_BTF_ID allocated using bpf_obj_new.
693 	 */
694 	MEM_ALLOC		= BIT(11 + BPF_BASE_TYPE_BITS),
695 
696 	/* PTR was passed from the kernel in a trusted context, and may be
697 	 * passed to KF_TRUSTED_ARGS kfuncs or BPF helper functions.
698 	 * Confusingly, this is _not_ the opposite of PTR_UNTRUSTED above.
699 	 * PTR_UNTRUSTED refers to a kptr that was read directly from a map
700 	 * without invoking bpf_kptr_xchg(). What we really need to know is
701 	 * whether a pointer is safe to pass to a kfunc or BPF helper function.
702 	 * While PTR_UNTRUSTED pointers are unsafe to pass to kfuncs and BPF
703 	 * helpers, they do not cover all possible instances of unsafe
704 	 * pointers. For example, a pointer that was obtained from walking a
705 	 * struct will _not_ get the PTR_UNTRUSTED type modifier, despite the
706 	 * fact that it may be NULL, invalid, etc. This is due to backwards
707 	 * compatibility requirements, as this was the behavior that was first
708 	 * introduced when kptrs were added. The behavior is now considered
709 	 * deprecated, and PTR_UNTRUSTED will eventually be removed.
710 	 *
711 	 * PTR_TRUSTED, on the other hand, is a pointer that the kernel
712 	 * guarantees to be valid and safe to pass to kfuncs and BPF helpers.
713 	 * For example, pointers passed to tracepoint arguments are considered
714 	 * PTR_TRUSTED, as are pointers that are passed to struct_ops
715 	 * callbacks. As alluded to above, pointers that are obtained from
716 	 * walking PTR_TRUSTED pointers are _not_ trusted. For example, if a
717 	 * struct task_struct *task is PTR_TRUSTED, then accessing
718 	 * task->last_wakee will lose the PTR_TRUSTED modifier when it's stored
719 	 * in a BPF register. Similarly, pointers passed to certain program
720 	 * types such as kretprobes are not guaranteed to be valid, as they may
721 	 * for example contain an object that was recently freed.
722 	 */
723 	PTR_TRUSTED		= BIT(12 + BPF_BASE_TYPE_BITS),
724 
725 	/* MEM is tagged with rcu and memory access needs rcu_read_lock protection. */
726 	MEM_RCU			= BIT(13 + BPF_BASE_TYPE_BITS),
727 
728 	/* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning.
729 	 * Currently only valid for linked-list and rbtree nodes. If the nodes
730 	 * have a bpf_refcount_field, they must be tagged MEM_RCU as well.
731 	 */
732 	NON_OWN_REF		= BIT(14 + BPF_BASE_TYPE_BITS),
733 
734 	/* DYNPTR points to sk_buff */
735 	DYNPTR_TYPE_SKB		= BIT(15 + BPF_BASE_TYPE_BITS),
736 
737 	/* DYNPTR points to xdp_buff */
738 	DYNPTR_TYPE_XDP		= BIT(16 + BPF_BASE_TYPE_BITS),
739 
740 	/* Memory must be aligned on some architectures, used in combination with
741 	 * MEM_FIXED_SIZE.
742 	 */
743 	MEM_ALIGNED		= BIT(17 + BPF_BASE_TYPE_BITS),
744 
745 	/* MEM is being written to, often combined with MEM_UNINIT. Absence of
746 	 * MEM_WRITE means that MEM is only being read. MEM_WRITE without
747 	 * MEM_UNINIT means that the memory needs to be initialized, since it is
748 	 * also read.
749 	 */
750 	MEM_WRITE		= BIT(18 + BPF_BASE_TYPE_BITS),
751 
752 	__BPF_TYPE_FLAG_MAX,
753 	__BPF_TYPE_LAST_FLAG	= __BPF_TYPE_FLAG_MAX - 1,
754 };
755 
756 #define DYNPTR_TYPE_FLAG_MASK	(DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF | DYNPTR_TYPE_SKB \
757 				 | DYNPTR_TYPE_XDP)
758 
759 /* Max number of base types. */
760 #define BPF_BASE_TYPE_LIMIT	(1UL << BPF_BASE_TYPE_BITS)
761 
762 /* Max number of all types. */
763 #define BPF_TYPE_LIMIT		(__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1))
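/* Example (sketch): an extended type is taken apart by masking on
 * BPF_BASE_TYPE_BITS; the masks below are spelled out for illustration (the
 * verifier has helpers for exactly this decomposition):
 *
 *	u32 t = PTR_TO_MAP_VALUE | PTR_MAYBE_NULL;
 *
 *	t & ((1U << BPF_BASE_TYPE_BITS) - 1);	// base type: PTR_TO_MAP_VALUE
 *	t & ~((1U << BPF_BASE_TYPE_BITS) - 1);	// flags: PTR_MAYBE_NULL
 */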
764 
765 /* function argument constraints */
766 enum bpf_arg_type {
767 	ARG_DONTCARE = 0,	/* unused argument in helper function */
768 
769 	/* the following constraints used to prototype
770 	 * bpf_map_lookup/update/delete_elem() functions
771 	 */
772 	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
773 	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
774 	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */
775 
776 	/* Used to prototype bpf_memcmp() and other functions that access data
777 	 * on eBPF program stack
778 	 */
779 	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
780 	ARG_PTR_TO_ARENA,
781 
782 	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
783 	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */
784 
785 	ARG_PTR_TO_CTX,		/* pointer to context */
786 	ARG_ANYTHING,		/* any (initialized) argument is ok */
787 	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
788 	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
789 	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
790 	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
791 	ARG_PTR_TO_RINGBUF_MEM,	/* pointer to dynamically reserved ringbuf memory */
792 	ARG_CONST_ALLOC_SIZE_OR_ZERO,	/* number of allocated bytes requested */
793 	ARG_PTR_TO_BTF_ID_SOCK_COMMON,	/* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
794 	ARG_PTR_TO_PERCPU_BTF_ID,	/* pointer to in-kernel percpu type */
795 	ARG_PTR_TO_FUNC,	/* pointer to a bpf program function */
796 	ARG_PTR_TO_STACK,	/* pointer to stack */
797 	ARG_PTR_TO_CONST_STR,	/* pointer to a null terminated read-only string */
798 	ARG_PTR_TO_TIMER,	/* pointer to bpf_timer */
799 	ARG_KPTR_XCHG_DEST,	/* pointer to destination that kptrs are bpf_kptr_xchg'd into */
800 	ARG_PTR_TO_DYNPTR,      /* pointer to bpf_dynptr. See bpf_type_flag for dynptr type */
801 	__BPF_ARG_TYPE_MAX,
802 
803 	/* Extended arg_types. */
804 	ARG_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
805 	ARG_PTR_TO_MEM_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_MEM,
806 	ARG_PTR_TO_CTX_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_CTX,
807 	ARG_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
808 	ARG_PTR_TO_STACK_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_STACK,
809 	ARG_PTR_TO_BTF_ID_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
810 	/* Pointer to memory does not need to be initialized, since helper function
811 	 * fills all bytes or clears them in error case.
812 	 */
813 	ARG_PTR_TO_UNINIT_MEM		= MEM_UNINIT | MEM_WRITE | ARG_PTR_TO_MEM,
814 	/* Pointer to valid memory of size known at compile time. */
815 	ARG_PTR_TO_FIXED_SIZE_MEM	= MEM_FIXED_SIZE | ARG_PTR_TO_MEM,
816 
817 	/* This must be the last entry. Its purpose is to ensure the enum is
818 	 * wide enough to hold the higher bits reserved for bpf_type_flag.
819 	 */
820 	__BPF_ARG_TYPE_LIMIT	= BPF_TYPE_LIMIT,
821 };
822 static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
823 
824 /* type of values returned from helper functions */
825 enum bpf_return_type {
826 	RET_INTEGER,			/* function returns integer */
827 	RET_VOID,			/* function doesn't return anything */
828 	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
829 	RET_PTR_TO_SOCKET,		/* returns a pointer to a socket */
830 	RET_PTR_TO_TCP_SOCK,		/* returns a pointer to a tcp_sock */
831 	RET_PTR_TO_SOCK_COMMON,		/* returns a pointer to a sock_common */
832 	RET_PTR_TO_MEM,			/* returns a pointer to memory */
833 	RET_PTR_TO_MEM_OR_BTF_ID,	/* returns a pointer to a valid memory or a btf_id */
834 	RET_PTR_TO_BTF_ID,		/* returns a pointer to a btf_id */
835 	__BPF_RET_TYPE_MAX,
836 
837 	/* Extended ret_types. */
838 	RET_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
839 	RET_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
840 	RET_PTR_TO_TCP_SOCK_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
841 	RET_PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
842 	RET_PTR_TO_RINGBUF_MEM_OR_NULL	= PTR_MAYBE_NULL | MEM_RINGBUF | RET_PTR_TO_MEM,
843 	RET_PTR_TO_DYNPTR_MEM_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_MEM,
844 	RET_PTR_TO_BTF_ID_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,
845 	RET_PTR_TO_BTF_ID_TRUSTED	= PTR_TRUSTED	 | RET_PTR_TO_BTF_ID,
846 
847 	/* This must be the last entry. Its purpose is to ensure the enum is
848 	 * wide enough to hold the higher bits reserved for bpf_type_flag.
849 	 */
850 	__BPF_RET_TYPE_LIMIT	= BPF_TYPE_LIMIT,
851 };
852 static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
853 
854 /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
855  * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
856  * instructions after verifying
857  */
858 struct bpf_func_proto {
859 	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
860 	bool gpl_only;
861 	bool pkt_access;
862 	bool might_sleep;
863 	/* set to true if helper follows contract for llvm
864 	 * attribute bpf_fastcall:
865 	 * - void functions do not scratch r0
866 	 * - functions taking N arguments scratch only registers r1-rN
867 	 */
868 	bool allow_fastcall;
869 	enum bpf_return_type ret_type;
870 	union {
871 		struct {
872 			enum bpf_arg_type arg1_type;
873 			enum bpf_arg_type arg2_type;
874 			enum bpf_arg_type arg3_type;
875 			enum bpf_arg_type arg4_type;
876 			enum bpf_arg_type arg5_type;
877 		};
878 		enum bpf_arg_type arg_type[5];
879 	};
880 	union {
881 		struct {
882 			u32 *arg1_btf_id;
883 			u32 *arg2_btf_id;
884 			u32 *arg3_btf_id;
885 			u32 *arg4_btf_id;
886 			u32 *arg5_btf_id;
887 		};
888 		u32 *arg_btf_id[5];
889 		struct {
890 			size_t arg1_size;
891 			size_t arg2_size;
892 			size_t arg3_size;
893 			size_t arg4_size;
894 			size_t arg5_size;
895 		};
896 		size_t arg_size[5];
897 	};
898 	int *ret_btf_id; /* return value btf_id */
899 	bool (*allowed)(const struct bpf_prog *prog);
900 };
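/* Example (sketch): this is roughly how kernel/bpf/helpers.c describes the
 * bpf_map_lookup_elem() helper to the verifier (abridged here for
 * illustration):
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.pkt_access	= true,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */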
901 
902 /* bpf_context is an intentionally undefined structure. A pointer to
903  * bpf_context is the first argument to eBPF programs.
904  * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
905  */
906 struct bpf_context;
907 
908 enum bpf_access_type {
909 	BPF_READ = 1,
910 	BPF_WRITE = 2
911 };
912 
913 /* types of values stored in eBPF registers */
914 /* Pointer types represent:
915  * pointer
916  * pointer + imm
917  * pointer + (u16) var
918  * pointer + (u16) var + imm
919  * if (range > 0) then [ptr, ptr + range - off) is safe to access
920  * if (id > 0) means that some 'var' was added
921  * if (off > 0) means that 'imm' was added
922  */
923 enum bpf_reg_type {
924 	NOT_INIT = 0,		 /* nothing was written into register */
925 	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
926 	PTR_TO_CTX,		 /* reg points to bpf_context */
927 	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
928 	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
929 	PTR_TO_MAP_KEY,		 /* reg points to a map element key */
930 	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
931 	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
932 	PTR_TO_PACKET,		 /* reg points to skb->data */
933 	PTR_TO_PACKET_END,	 /* skb->data + headlen */
934 	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
935 	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
936 	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
937 	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
938 	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
939 	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
940 	/* PTR_TO_BTF_ID points to a kernel struct that does not need
941 	 * to be null checked by the BPF program. This does not imply the
942 	 * pointer is _not_ null and in practice this can easily be a null
943 	 * pointer when reading pointer chains. The assumption is program
944 	 * context will handle null pointer dereference typically via fault
945 	 * handling. The verifier must keep this in mind and can make no
946 	 * assumptions about null or non-null when doing branch analysis.
947 	 * Further, when passed into helpers the helpers can not, without
948 	 * additional context, assume the value is non-null.
949 	 */
950 	PTR_TO_BTF_ID,
951 	PTR_TO_MEM,		 /* reg points to valid memory region */
952 	PTR_TO_ARENA,
953 	PTR_TO_BUF,		 /* reg points to a read/write buffer */
954 	PTR_TO_FUNC,		 /* reg points to a bpf program function */
955 	CONST_PTR_TO_DYNPTR,	 /* reg points to a const struct bpf_dynptr */
956 	__BPF_REG_TYPE_MAX,
957 
958 	/* Extended reg_types. */
959 	PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
960 	PTR_TO_SOCKET_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_SOCKET,
961 	PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
962 	PTR_TO_TCP_SOCK_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
963 	/* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
964 	 * been checked for null. Used primarily to inform the verifier
965 	 * an explicit null check is required for this struct.
966 	 */
967 	PTR_TO_BTF_ID_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_BTF_ID,
968 
969 	/* This must be the last entry. Its purpose is to ensure the enum is
970 	 * wide enough to hold the higher bits reserved for bpf_type_flag.
971 	 */
972 	__BPF_REG_TYPE_LIMIT	= BPF_TYPE_LIMIT,
973 };
974 static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
975 
976 /* The information passed from prog-specific *_is_valid_access
977  * back to the verifier.
978  */
979 struct bpf_insn_access_aux {
980 	enum bpf_reg_type reg_type;
981 	bool is_ldsx;
982 	union {
983 		int ctx_field_size;
984 		struct {
985 			struct btf *btf;
986 			u32 btf_id;
987 			u32 ref_obj_id;
988 		};
989 	};
990 	struct bpf_verifier_log *log; /* for verbose logs */
991 	bool is_retval; /* is accessing function return value? */
992 };
993 
994 static inline void
995 bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
996 {
997 	aux->ctx_field_size = size;
998 }
999 
1000 static inline bool bpf_is_ldimm64(const struct bpf_insn *insn)
1001 {
1002 	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
1003 }
1004 
bpf_pseudo_func(const struct bpf_insn * insn)1005 static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
1006 {
1007 	return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
1008 }
1009 
1010 /* Given a BPF_ATOMIC instruction @atomic_insn, return true if it is an
1011  * atomic load or store, and false if it is a read-modify-write instruction.
1012  */
1013 static inline bool
1014 bpf_atomic_is_load_store(const struct bpf_insn *atomic_insn)
1015 {
1016 	switch (atomic_insn->imm) {
1017 	case BPF_LOAD_ACQ:
1018 	case BPF_STORE_REL:
1019 		return true;
1020 	default:
1021 		return false;
1022 	}
1023 }
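/* Example (sketch): the imm field of a BPF_ATOMIC instruction selects the
 * operation, so per the helper above:
 *
 *	insn->imm == BPF_LOAD_ACQ;		// acquire load -> true
 *	insn->imm == (BPF_ADD | BPF_FETCH);	// atomic fetch-add (RMW) -> false
 */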
1024 
1025 struct bpf_prog_ops {
1026 	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
1027 			union bpf_attr __user *uattr);
1028 };
1029 
1030 struct bpf_reg_state;
1031 struct bpf_verifier_ops {
1032 	/* return eBPF function prototype for verification */
1033 	const struct bpf_func_proto *
1034 	(*get_func_proto)(enum bpf_func_id func_id,
1035 			  const struct bpf_prog *prog);
1036 
1037 	/* return true if 'size' wide access at offset 'off' within bpf_context
1038 	 * with 'type' (read or write) is allowed
1039 	 */
1040 	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
1041 				const struct bpf_prog *prog,
1042 				struct bpf_insn_access_aux *info);
1043 	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
1044 			    const struct bpf_prog *prog);
1045 	int (*gen_epilogue)(struct bpf_insn *insn, const struct bpf_prog *prog,
1046 			    s16 ctx_stack_off);
1047 	int (*gen_ld_abs)(const struct bpf_insn *orig,
1048 			  struct bpf_insn *insn_buf);
1049 	u32 (*convert_ctx_access)(enum bpf_access_type type,
1050 				  const struct bpf_insn *src,
1051 				  struct bpf_insn *dst,
1052 				  struct bpf_prog *prog, u32 *target_size);
1053 	int (*btf_struct_access)(struct bpf_verifier_log *log,
1054 				 const struct bpf_reg_state *reg,
1055 				 int off, int size);
1056 };
1057 
1058 struct bpf_prog_offload_ops {
1059 	/* verifier basic callbacks */
1060 	int (*insn_hook)(struct bpf_verifier_env *env,
1061 			 int insn_idx, int prev_insn_idx);
1062 	int (*finalize)(struct bpf_verifier_env *env);
1063 	/* verifier optimization callbacks (called after .finalize) */
1064 	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
1065 			    struct bpf_insn *insn);
1066 	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
1067 	/* program management callbacks */
1068 	int (*prepare)(struct bpf_prog *prog);
1069 	int (*translate)(struct bpf_prog *prog);
1070 	void (*destroy)(struct bpf_prog *prog);
1071 };
1072 
1073 struct bpf_prog_offload {
1074 	struct bpf_prog		*prog;
1075 	struct net_device	*netdev;
1076 	struct bpf_offload_dev	*offdev;
1077 	void			*dev_priv;
1078 	struct list_head	offloads;
1079 	bool			dev_state;
1080 	bool			opt_failed;
1081 	void			*jited_image;
1082 	u32			jited_len;
1083 };
1084 
1085 enum bpf_cgroup_storage_type {
1086 	BPF_CGROUP_STORAGE_SHARED,
1087 	BPF_CGROUP_STORAGE_PERCPU,
1088 	__BPF_CGROUP_STORAGE_MAX
1089 };
1090 
1091 #define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
1092 
1093 /* The longest tracepoint has 12 args.
1094  * See include/trace/bpf_probe.h
1095  */
1096 #define MAX_BPF_FUNC_ARGS 12
1097 
1098 /* The maximum number of arguments passed through registers
1099  * a single function may have.
1100  */
1101 #define MAX_BPF_FUNC_REG_ARGS 5
1102 
1103 /* The argument is a structure. */
1104 #define BTF_FMODEL_STRUCT_ARG		BIT(0)
1105 
1106 /* The argument is signed. */
1107 #define BTF_FMODEL_SIGNED_ARG		BIT(1)
1108 
1109 struct btf_func_model {
1110 	u8 ret_size;
1111 	u8 ret_flags;
1112 	u8 nr_args;
1113 	u8 arg_size[MAX_BPF_FUNC_ARGS];
1114 	u8 arg_flags[MAX_BPF_FUNC_ARGS];
1115 };
1116 
1117 /* Restore arguments before returning from trampoline to let original function
1118  * continue executing. This flag is used for fentry progs when there are no
1119  * fexit progs.
1120  */
1121 #define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
1122 /* Call original function after fentry progs, but before fexit progs.
1123  * Makes sense for fentry/fexit, normal calls and indirect calls.
1124  */
1125 #define BPF_TRAMP_F_CALL_ORIG		BIT(1)
1126 /* Skip current frame and return to parent.  Makes sense for fentry/fexit
1127  * programs only. Should not be used with normal calls and indirect calls.
1128  */
1129 #define BPF_TRAMP_F_SKIP_FRAME		BIT(2)
1130 /* Store IP address of the caller on the trampoline stack,
1131  * so it's available for trampoline's programs.
1132  */
1133 #define BPF_TRAMP_F_IP_ARG		BIT(3)
1134 /* Return the return value of fentry prog. Only used by bpf_struct_ops. */
1135 #define BPF_TRAMP_F_RET_FENTRY_RET	BIT(4)
1136 
1137 /* Get original function from stack instead of from provided direct address.
1138  * Makes sense for trampolines with fexit or fmod_ret programs.
1139  */
1140 #define BPF_TRAMP_F_ORIG_STACK		BIT(5)
1141 
1142 /* This trampoline is on a function with another ftrace_ops with IPMODIFY,
1143  * e.g., a live patch. This flag is set and cleared by ftrace callbacks.
1144  */
1145 #define BPF_TRAMP_F_SHARE_IPMODIFY	BIT(6)
1146 
1147 /* Indicate that the current trampoline is in a tail call context. It then has
1148  * to cache and restore tail_call_cnt to avoid an infinite tail call loop.
1149  */
1150 #define BPF_TRAMP_F_TAIL_CALL_CTX	BIT(7)
1151 
1152 /*
1153  * Indicate the trampoline should be suitable to receive indirect calls;
1154  * without this indirectly calling the generated code can result in #UD/#CP,
1155  * depending on the CFI options.
1156  *
1157  * Used by bpf_struct_ops.
1158  *
1159  * Incompatible with FENTRY usage, overloads @func_addr argument.
1160  */
1161 #define BPF_TRAMP_F_INDIRECT		BIT(8)
1162 
1163 /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
1164  * bytes on x86.
1165  */
1166 enum {
1167 #if defined(__s390x__)
1168 	BPF_MAX_TRAMP_LINKS = 27,
1169 #else
1170 	BPF_MAX_TRAMP_LINKS = 38,
1171 #endif
1172 };
1173 
1174 struct bpf_tramp_links {
1175 	struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
1176 	int nr_links;
1177 };
1178 
1179 struct bpf_tramp_run_ctx;
1180 
1181 /* Different use cases for BPF trampoline:
1182  * 1. replace nop at the function entry (kprobe equivalent)
1183  *    flags = BPF_TRAMP_F_RESTORE_REGS
1184  *    fentry = a set of programs to run before returning from trampoline
1185  *
1186  * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
1187  *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
1188  *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
1189  *    fentry = a set of programs to run before calling the original function
1190  *    fexit = a set of programs to run after the original function
1191  *
1192  * 3. replace direct call instruction anywhere in the function body
1193  *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
1194  *    With flags = 0
1195  *      fentry = a set of programs to run before returning from trampoline
1196  *    With flags = BPF_TRAMP_F_CALL_ORIG
1197  *      orig_call = original callback addr or direct function addr
1198  *      fentry = a set of programs to run before calling the original function
1199  *      fexit = a set of programs to run after the original function
1200  */
1201 struct bpf_tramp_image;
1202 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
1203 				const struct btf_func_model *m, u32 flags,
1204 				struct bpf_tramp_links *tlinks,
1205 				void *func_addr);
1206 void *arch_alloc_bpf_trampoline(unsigned int size);
1207 void arch_free_bpf_trampoline(void *image, unsigned int size);
1208 int __must_check arch_protect_bpf_trampoline(void *image, unsigned int size);
1209 int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
1210 			     struct bpf_tramp_links *tlinks, void *func_addr);
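/* Example (sketch): for use case 2 above (fentry + fexit on a kernel
 * function), the arch API is driven roughly as follows; error handling is
 * omitted and the variables are assumed to be set up by the caller:
 *
 *	u32 flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
 *	int size;
 *
 *	size = arch_bpf_trampoline_size(m, flags, tlinks, func_addr);
 *	image = arch_alloc_bpf_trampoline(size);
 *	arch_prepare_bpf_trampoline(im, image, image + size, m, flags,
 *				    tlinks, func_addr);
 *	arch_protect_bpf_trampoline(image, size);
 */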
1211 
1212 u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
1213 					     struct bpf_tramp_run_ctx *run_ctx);
1214 void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
1215 					     struct bpf_tramp_run_ctx *run_ctx);
1216 void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
1217 void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
1218 typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *prog,
1219 				      struct bpf_tramp_run_ctx *run_ctx);
1220 typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *prog, u64 start,
1221 				      struct bpf_tramp_run_ctx *run_ctx);
1222 bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog);
1223 bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog);
1224 
1225 struct bpf_ksym {
1226 	unsigned long		 start;
1227 	unsigned long		 end;
1228 	char			 name[KSYM_NAME_LEN];
1229 	struct list_head	 lnode;
1230 	struct latch_tree_node	 tnode;
1231 	bool			 prog;
1232 };
1233 
1234 enum bpf_tramp_prog_type {
1235 	BPF_TRAMP_FENTRY,
1236 	BPF_TRAMP_FEXIT,
1237 	BPF_TRAMP_MODIFY_RETURN,
1238 	BPF_TRAMP_MAX,
1239 	BPF_TRAMP_REPLACE, /* more than MAX */
1240 };
1241 
1242 struct bpf_tramp_image {
1243 	void *image;
1244 	int size;
1245 	struct bpf_ksym ksym;
1246 	struct percpu_ref pcref;
1247 	void *ip_after_call;
1248 	void *ip_epilogue;
1249 	union {
1250 		struct rcu_head rcu;
1251 		struct work_struct work;
1252 	};
1253 };
1254 
1255 struct bpf_trampoline {
1256 	/* hlist for trampoline_table */
1257 	struct hlist_node hlist;
1258 	struct ftrace_ops *fops;
1259 	/* serializes access to fields of this trampoline */
1260 	struct mutex mutex;
1261 	refcount_t refcnt;
1262 	u32 flags;
1263 	u64 key;
1264 	struct {
1265 		struct btf_func_model model;
1266 		void *addr;
1267 		bool ftrace_managed;
1268 	} func;
1269 	/* if !NULL this is a BPF_PROG_TYPE_EXT program that extends another BPF
1270 	 * program by replacing one of its functions. func.addr is the address
1271 	 * of the function it replaced.
1272 	 */
1273 	struct bpf_prog *extension_prog;
1274 	/* list of BPF programs using this trampoline */
1275 	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
1276 	/* Number of attached programs. A counter per kind. */
1277 	int progs_cnt[BPF_TRAMP_MAX];
1278 	/* Executable image of trampoline */
1279 	struct bpf_tramp_image *cur_image;
1280 };
1281 
1282 struct bpf_attach_target_info {
1283 	struct btf_func_model fmodel;
1284 	long tgt_addr;
1285 	struct module *tgt_mod;
1286 	const char *tgt_name;
1287 	const struct btf_type *tgt_type;
1288 };
1289 
1290 #define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */
1291 
1292 struct bpf_dispatcher_prog {
1293 	struct bpf_prog *prog;
1294 	refcount_t users;
1295 };
1296 
1297 struct bpf_dispatcher {
1298 	/* dispatcher mutex */
1299 	struct mutex mutex;
1300 	void *func;
1301 	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
1302 	int num_progs;
1303 	void *image;
1304 	void *rw_image;
1305 	u32 image_off;
1306 	struct bpf_ksym ksym;
1307 #ifdef CONFIG_HAVE_STATIC_CALL
1308 	struct static_call_key *sc_key;
1309 	void *sc_tramp;
1310 #endif
1311 };
1312 
1313 #ifndef __bpfcall
1314 #define __bpfcall __nocfi
1315 #endif
1316 
1317 static __always_inline __bpfcall unsigned int bpf_dispatcher_nop_func(
1318 	const void *ctx,
1319 	const struct bpf_insn *insnsi,
1320 	bpf_func_t bpf_func)
1321 {
1322 	return bpf_func(ctx, insnsi);
1323 }
1324 
1325 /* the implementation of the opaque uapi struct bpf_dynptr */
1326 struct bpf_dynptr_kern {
1327 	void *data;
1328 	/* Size represents the number of usable bytes of dynptr data.
1329 	 * If for example the offset is at 4 for a local dynptr whose data is
1330 	 * of type u64, the number of usable bytes is 4.
1331 	 *
1332 	 * The upper 8 bits are reserved and are laid out as follows:
1333 	 * Bits 0 - 23 = size
1334 	 * Bits 24 - 30 = dynptr type
1335 	 * Bit 31 = whether dynptr is read-only
1336 	 */
1337 	u32 size;
1338 	u32 offset;
1339 } __aligned(8);
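/* Example (sketch): per the layout described above, the metadata packed into
 * 'size' can be unpacked with masks and shifts; the constants here just
 * spell out the bit ranges for illustration:
 *
 *	u32 usable  = ptr->size & 0x00ffffff;		// bits 0-23
 *	u32 type    = (ptr->size >> 24) & 0x7f;		// bits 24-30
 *	bool rdonly = ptr->size & (1U << 31);		// bit 31
 */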
1340 
1341 enum bpf_dynptr_type {
1342 	BPF_DYNPTR_TYPE_INVALID,
1343 	/* Points to memory that is local to the bpf program */
1344 	BPF_DYNPTR_TYPE_LOCAL,
1345 	/* Underlying data is a ringbuf record */
1346 	BPF_DYNPTR_TYPE_RINGBUF,
1347 	/* Underlying data is a sk_buff */
1348 	BPF_DYNPTR_TYPE_SKB,
1349 	/* Underlying data is a xdp_buff */
1350 	BPF_DYNPTR_TYPE_XDP,
1351 };
1352 
1353 int bpf_dynptr_check_size(u32 size);
1354 u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
1355 const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len);
1356 void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len);
1357 bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr);
1358 int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u32 offset,
1359 		       void *src, u32 len, u64 flags);
1360 void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset,
1361 			    void *buffer__opt, u32 buffer__szk);
1362 
1363 static inline int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len)
1364 {
1365 	u32 size = __bpf_dynptr_size(ptr);
1366 
1367 	if (len > size || offset > size - len)
1368 		return -E2BIG;
1369 
1370 	return 0;
1371 }
1372 
1373 #ifdef CONFIG_BPF_JIT
1374 int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
1375 			     struct bpf_trampoline *tr,
1376 			     struct bpf_prog *tgt_prog);
1377 int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
1378 			       struct bpf_trampoline *tr,
1379 			       struct bpf_prog *tgt_prog);
1380 struct bpf_trampoline *bpf_trampoline_get(u64 key,
1381 					  struct bpf_attach_target_info *tgt_info);
1382 void bpf_trampoline_put(struct bpf_trampoline *tr);
1383 int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);
1384 
1385 /*
1386  * When the architecture supports STATIC_CALL, replace the bpf_dispatcher_fn
1387  * indirection with a direct call to the bpf program. If the architecture does
1388  * not have STATIC_CALL, avoid a double-indirection.
1389  */
1390 #ifdef CONFIG_HAVE_STATIC_CALL
1391 
1392 #define __BPF_DISPATCHER_SC_INIT(_name)				\
1393 	.sc_key = &STATIC_CALL_KEY(_name),			\
1394 	.sc_tramp = STATIC_CALL_TRAMP_ADDR(_name),
1395 
1396 #define __BPF_DISPATCHER_SC(name)				\
1397 	DEFINE_STATIC_CALL(bpf_dispatcher_##name##_call, bpf_dispatcher_nop_func)
1398 
1399 #define __BPF_DISPATCHER_CALL(name)				\
1400 	static_call(bpf_dispatcher_##name##_call)(ctx, insnsi, bpf_func)
1401 
1402 #define __BPF_DISPATCHER_UPDATE(_d, _new)			\
1403 	__static_call_update((_d)->sc_key, (_d)->sc_tramp, (_new))
1404 
1405 #else
1406 #define __BPF_DISPATCHER_SC_INIT(name)
1407 #define __BPF_DISPATCHER_SC(name)
1408 #define __BPF_DISPATCHER_CALL(name)		bpf_func(ctx, insnsi)
1409 #define __BPF_DISPATCHER_UPDATE(_d, _new)
1410 #endif
1411 
1412 #define BPF_DISPATCHER_INIT(_name) {				\
1413 	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
1414 	.func = &_name##_func,					\
1415 	.progs = {},						\
1416 	.num_progs = 0,						\
1417 	.image = NULL,						\
1418 	.image_off = 0,						\
1419 	.ksym = {						\
1420 		.name  = #_name,				\
1421 		.lnode = LIST_HEAD_INIT(_name.ksym.lnode),	\
1422 	},							\
1423 	__BPF_DISPATCHER_SC_INIT(_name##_call)			\
1424 }
1425 
1426 #define DEFINE_BPF_DISPATCHER(name)					\
1427 	__BPF_DISPATCHER_SC(name);					\
1428 	noinline __bpfcall unsigned int bpf_dispatcher_##name##_func(	\
1429 		const void *ctx,					\
1430 		const struct bpf_insn *insnsi,				\
1431 		bpf_func_t bpf_func)					\
1432 	{								\
1433 		return __BPF_DISPATCHER_CALL(name);			\
1434 	}								\
1435 	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);			\
1436 	struct bpf_dispatcher bpf_dispatcher_##name =			\
1437 		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
1438 
1439 #define DECLARE_BPF_DISPATCHER(name)					\
1440 	unsigned int bpf_dispatcher_##name##_func(			\
1441 		const void *ctx,					\
1442 		const struct bpf_insn *insnsi,				\
1443 		bpf_func_t bpf_func);					\
1444 	extern struct bpf_dispatcher bpf_dispatcher_##name;
1445 
1446 #define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
1447 #define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
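/* Example (sketch): net/core/filter.c defines the XDP dispatcher with
 * DEFINE_BPF_DISPATCHER(xdp); running a program through it and retargeting
 * the static call on attach then looks roughly like:
 *
 *	DEFINE_BPF_DISPATCHER(xdp)
 *
 *	ret = BPF_DISPATCHER_FUNC(xdp)(ctx, prog->insnsi, prog->bpf_func);
 *	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), old_prog, new_prog);
 */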
1448 void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
1449 				struct bpf_prog *to);
1450 /* Called only from JIT-enabled code, so there's no need for stubs. */
1451 void bpf_image_ksym_init(void *data, unsigned int size, struct bpf_ksym *ksym);
1452 void bpf_image_ksym_add(struct bpf_ksym *ksym);
1453 void bpf_image_ksym_del(struct bpf_ksym *ksym);
1454 void bpf_ksym_add(struct bpf_ksym *ksym);
1455 void bpf_ksym_del(struct bpf_ksym *ksym);
1456 int bpf_jit_charge_modmem(u32 size);
1457 void bpf_jit_uncharge_modmem(u32 size);
1458 bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
1459 #else
1460 static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
1461 					   struct bpf_trampoline *tr,
1462 					   struct bpf_prog *tgt_prog)
1463 {
1464 	return -ENOTSUPP;
1465 }
1466 static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
1467 					     struct bpf_trampoline *tr,
1468 					     struct bpf_prog *tgt_prog)
1469 {
1470 	return -ENOTSUPP;
1471 }
1472 static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
1473 							struct bpf_attach_target_info *tgt_info)
1474 {
1475 	return NULL;
1476 }
1477 static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
1478 #define DEFINE_BPF_DISPATCHER(name)
1479 #define DECLARE_BPF_DISPATCHER(name)
1480 #define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
1481 #define BPF_DISPATCHER_PTR(name) NULL
1482 static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
1483 					      struct bpf_prog *from,
1484 					      struct bpf_prog *to) {}
1485 static inline bool is_bpf_image_address(unsigned long address)
1486 {
1487 	return false;
1488 }
1489 static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
1490 {
1491 	return false;
1492 }
1493 #endif
1494 
1495 struct bpf_func_info_aux {
1496 	u16 linkage;
1497 	bool unreliable;
1498 	bool called : 1;
1499 	bool verified : 1;
1500 };
1501 
1502 enum bpf_jit_poke_reason {
1503 	BPF_POKE_REASON_TAIL_CALL,
1504 };
1505 
1506 /* Descriptor of pokes pointing /into/ the JITed image. */
1507 struct bpf_jit_poke_descriptor {
1508 	void *tailcall_target;
1509 	void *tailcall_bypass;
1510 	void *bypass_addr;
1511 	void *aux;
1512 	union {
1513 		struct {
1514 			struct bpf_map *map;
1515 			u32 key;
1516 		} tail_call;
1517 	};
1518 	bool tailcall_target_stable;
1519 	u8 adj_off;
1520 	u16 reason;
1521 	u32 insn_idx;
1522 };
1523 
1524 /* reg_type info for ctx arguments */
1525 struct bpf_ctx_arg_aux {
1526 	u32 offset;
1527 	enum bpf_reg_type reg_type;
1528 	struct btf *btf;
1529 	u32 btf_id;
1530 	u32 ref_obj_id;
1531 	bool refcounted;
1532 };
1533 
1534 struct btf_mod_pair {
1535 	struct btf *btf;
1536 	struct module *module;
1537 };
1538 
1539 struct bpf_kfunc_desc_tab;
1540 
1541 enum bpf_stream_id {
1542 	BPF_STDOUT = 1,
1543 	BPF_STDERR = 2,
1544 };
1545 
1546 struct bpf_stream_elem {
1547 	struct llist_node node;
1548 	int total_len;
1549 	int consumed_len;
1550 	char str[];
1551 };
1552 
1553 enum {
1554 	/* 100k bytes */
1555 	BPF_STREAM_MAX_CAPACITY = 100000ULL,
1556 };
1557 
1558 struct bpf_stream {
1559 	atomic_t capacity;
1560 	struct llist_head log;	/* list of in-flight stream elements in LIFO order */
1561 
1562 	struct mutex lock;  /* lock protecting backlog_{head,tail} */
1563 	struct llist_node *backlog_head; /* list of in-flight stream elements in FIFO order */
1564 	struct llist_node *backlog_tail; /* tail of the list above */
1565 };
1566 
1567 struct bpf_stream_stage {
1568 	struct llist_head log;
1569 	int len;
1570 };
1571 
1572 struct bpf_prog_aux {
1573 	atomic64_t refcnt;
1574 	u32 used_map_cnt;
1575 	u32 used_btf_cnt;
1576 	u32 max_ctx_offset;
1577 	u32 max_pkt_offset;
1578 	u32 max_tp_access;
1579 	u32 stack_depth;
1580 	u32 id;
1581 	u32 func_cnt; /* used by non-func prog as the number of func progs */
1582 	u32 real_func_cnt; /* includes hidden progs, only used for JIT and freeing progs */
1583 	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
1584 	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
1585 	u32 attach_st_ops_member_off;
1586 	u32 ctx_arg_info_size;
1587 	u32 max_rdonly_access;
1588 	u32 max_rdwr_access;
1589 	struct btf *attach_btf;
1590 	struct bpf_ctx_arg_aux *ctx_arg_info;
1591 	void __percpu *priv_stack_ptr;
1592 	struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
1593 	struct bpf_prog *dst_prog;
1594 	struct bpf_trampoline *dst_trampoline;
1595 	enum bpf_prog_type saved_dst_prog_type;
1596 	enum bpf_attach_type saved_dst_attach_type;
1597 	bool verifier_zext; /* Zero extensions have been inserted by the verifier. */
1598 	bool dev_bound; /* Program is bound to the netdev. */
1599 	bool offload_requested; /* Program is bound and offloaded to the netdev. */
1600 	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
1601 	bool attach_tracing_prog; /* true if tracing another tracing program */
1602 	bool func_proto_unreliable;
1603 	bool tail_call_reachable;
1604 	bool xdp_has_frags;
1605 	bool exception_cb;
1606 	bool exception_boundary;
1607 	bool is_extended; /* true if extended by freplace program */
1608 	bool jits_use_priv_stack;
1609 	bool priv_stack_requested;
1610 	bool changes_pkt_data;
1611 	bool might_sleep;
1612 	u64 prog_array_member_cnt; /* counts how many times the prog is a member of a prog_array */
1613 	struct mutex ext_mutex; /* mutex for is_extended and prog_array_member_cnt */
1614 	struct bpf_arena *arena;
1615 	void (*recursion_detected)(struct bpf_prog *prog); /* callback if recursion is detected */
1616 	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
1617 	const struct btf_type *attach_func_proto;
1618 	/* function name for valid attach_btf_id */
1619 	const char *attach_func_name;
1620 	struct bpf_prog **func;
1621 	void *jit_data; /* JIT specific data. arch dependent */
1622 	struct bpf_jit_poke_descriptor *poke_tab;
1623 	struct bpf_kfunc_desc_tab *kfunc_tab;
1624 	struct bpf_kfunc_btf_tab *kfunc_btf_tab;
1625 	u32 size_poke_tab;
1626 #ifdef CONFIG_FINEIBT
1627 	struct bpf_ksym ksym_prefix;
1628 #endif
1629 	struct bpf_ksym ksym;
1630 	const struct bpf_prog_ops *ops;
1631 	const struct bpf_struct_ops *st_ops;
1632 	struct bpf_map **used_maps;
1633 	struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
1634 	struct btf_mod_pair *used_btfs;
1635 	struct bpf_prog *prog;
1636 	struct user_struct *user;
1637 	u64 load_time; /* ns since boottime */
1638 	u32 verified_insns;
1639 	int cgroup_atype; /* enum cgroup_bpf_attach_type */
1640 	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
1641 	char name[BPF_OBJ_NAME_LEN];
1642 	u64 (*bpf_exception_cb)(u64 cookie, u64 sp, u64 bp, u64, u64);
1643 #ifdef CONFIG_SECURITY
1644 	void *security;
1645 #endif
1646 	struct bpf_token *token;
1647 	struct bpf_prog_offload *offload;
1648 	struct btf *btf;
1649 	struct bpf_func_info *func_info;
1650 	struct bpf_func_info_aux *func_info_aux;
1651 	/* bpf_line_info loaded from userspace.  linfo->insn_off
1652 	 * has the xlated insn offset.
1653 	 * Both the main and sub prog share the same linfo.
1654 	 * The subprog can access its first linfo by
1655 	 * using the linfo_idx.
1656 	 */
1657 	struct bpf_line_info *linfo;
1658 	/* jited_linfo is the jited addr of the linfo.  It has a
1659 	 * one to one mapping to linfo:
1660 	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
1661 	 * Both the main and sub prog share the same jited_linfo.
1662 	 * The subprog can access its first jited_linfo by
1663 	 * using the linfo_idx.
1664 	 */
1665 	void **jited_linfo;
1666 	u32 func_info_cnt;
1667 	u32 nr_linfo;
1668 	/* subprog can use linfo_idx to access its first linfo and
1669 	 * jited_linfo.
1670 	 * main prog always has linfo_idx == 0
1671 	 */
1672 	u32 linfo_idx;
1673 	struct module *mod;
1674 	u32 num_exentries;
1675 	struct exception_table_entry *extable;
1676 	union {
1677 		struct work_struct work;
1678 		struct rcu_head	rcu;
1679 	};
1680 	struct bpf_stream stream[2];
1681 };
1682 
1683 struct bpf_prog {
1684 	u16			pages;		/* Number of allocated pages */
1685 	u16			jited:1,	/* Is our filter JIT'ed? */
1686 				jit_requested:1,/* archs need to JIT the prog */
1687 				gpl_compatible:1, /* Is filter GPL compatible? */
1688 				cb_access:1,	/* Is control block accessed? */
1689 				dst_needed:1,	/* Do we need dst entry? */
1690 				blinding_requested:1, /* needs constant blinding */
1691 				blinded:1,	/* Was blinded */
1692 				is_func:1,	/* program is a bpf function */
1693 				kprobe_override:1, /* Do we override a kprobe? */
1694 				has_callchain_buf:1, /* callchain buffer allocated? */
1695 				enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
1696 				call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
1697 				call_get_func_ip:1, /* Do we call get_func_ip() */
1698 				tstamp_type_access:1, /* Accessed __sk_buff->tstamp_type */
1699 				sleepable:1;	/* BPF program is sleepable */
1700 	enum bpf_prog_type	type;		/* Type of BPF program */
1701 	enum bpf_attach_type	expected_attach_type; /* For some prog types */
1702 	u32			len;		/* Number of filter blocks */
1703 	u32			jited_len;	/* Size of jited insns in bytes */
1704 	u8			tag[BPF_TAG_SIZE];
1705 	struct bpf_prog_stats __percpu *stats;
1706 	int __percpu		*active;
1707 	unsigned int		(*bpf_func)(const void *ctx,
1708 					    const struct bpf_insn *insn);
1709 	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
1710 	struct sock_fprog_kern	*orig_prog;	/* Original BPF program */
1711 	/* Instructions for interpreter */
1712 	union {
1713 		DECLARE_FLEX_ARRAY(struct sock_filter, insns);
1714 		DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi);
1715 	};
1716 };
1717 
1718 struct bpf_array_aux {
1719 	/* Programs with direct jumps into programs part of this array. */
1720 	struct list_head poke_progs;
1721 	struct bpf_map *map;
1722 	struct mutex poke_mutex;
1723 	struct work_struct work;
1724 };
1725 
1726 struct bpf_link {
1727 	atomic64_t refcnt;
1728 	u32 id;
1729 	enum bpf_link_type type;
1730 	const struct bpf_link_ops *ops;
1731 	struct bpf_prog *prog;
1732 
1733 	u32 flags;
1734 	enum bpf_attach_type attach_type;
1735 
1736 	/* rcu is used before freeing; work can be used to schedule that
1737 	 * RCU-based freeing beforehand, so the two never overlap
1738 	 */
1739 	union {
1740 		struct rcu_head rcu;
1741 		struct work_struct work;
1742 	};
1743 	/* whether the BPF link itself has "sleepable" semantics, which can
1744 	 * differ from the underlying BPF program's "sleepable" semantics, as
1745 	 * the link's semantics is determined by the target attach hook
1746 	 */
1747 	bool sleepable;
1748 };
1749 
1750 struct bpf_link_ops {
1751 	void (*release)(struct bpf_link *link);
1752 	/* deallocate link resources callback, called without RCU grace period
1753 	 * waiting
1754 	 */
1755 	void (*dealloc)(struct bpf_link *link);
1756 	/* deallocate link resources callback, called after RCU grace period;
1757 	 * if either the underlying BPF program is sleepable or BPF link's
1758 	 * target hook is sleepable, we'll go through tasks trace RCU GP and
1759 	 * then "classic" RCU GP; this need for chaining tasks trace and
1760  * classic RCU GPs is indicated by the bpf_link->sleepable flag
1761 	 */
1762 	void (*dealloc_deferred)(struct bpf_link *link);
1763 	int (*detach)(struct bpf_link *link);
1764 	int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
1765 			   struct bpf_prog *old_prog);
1766 	void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
1767 	int (*fill_link_info)(const struct bpf_link *link,
1768 			      struct bpf_link_info *info);
1769 	int (*update_map)(struct bpf_link *link, struct bpf_map *new_map,
1770 			  struct bpf_map *old_map);
1771 	__poll_t (*poll)(struct file *file, struct poll_table_struct *pts);
1772 };
1773 
1774 struct bpf_tramp_link {
1775 	struct bpf_link link;
1776 	struct hlist_node tramp_hlist;
1777 	u64 cookie;
1778 };
1779 
1780 struct bpf_shim_tramp_link {
1781 	struct bpf_tramp_link link;
1782 	struct bpf_trampoline *trampoline;
1783 };
1784 
1785 struct bpf_tracing_link {
1786 	struct bpf_tramp_link link;
1787 	struct bpf_trampoline *trampoline;
1788 	struct bpf_prog *tgt_prog;
1789 };
1790 
1791 struct bpf_raw_tp_link {
1792 	struct bpf_link link;
1793 	struct bpf_raw_event_map *btp;
1794 	u64 cookie;
1795 };
1796 
1797 struct bpf_link_primer {
1798 	struct bpf_link *link;
1799 	struct file *file;
1800 	int fd;
1801 	u32 id;
1802 };
1803 
1804 struct bpf_mount_opts {
1805 	kuid_t uid;
1806 	kgid_t gid;
1807 	umode_t mode;
1808 
1809 	/* BPF token-related delegation options */
1810 	u64 delegate_cmds;
1811 	u64 delegate_maps;
1812 	u64 delegate_progs;
1813 	u64 delegate_attachs;
1814 };
1815 
1816 struct bpf_token {
1817 	struct work_struct work;
1818 	atomic64_t refcnt;
1819 	struct user_namespace *userns;
1820 	u64 allowed_cmds;
1821 	u64 allowed_maps;
1822 	u64 allowed_progs;
1823 	u64 allowed_attachs;
1824 #ifdef CONFIG_SECURITY
1825 	void *security;
1826 #endif
1827 };
1828 
1829 struct bpf_struct_ops_value;
1830 struct btf_member;
1831 
1832 #define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
1833 /**
1834  * struct bpf_struct_ops - A structure of callbacks allowing a subsystem to
1835  *			   define a BPF_MAP_TYPE_STRUCT_OPS map type composed
1836  *			   of BPF_PROG_TYPE_STRUCT_OPS progs.
1837  * @verifier_ops: A structure of callbacks that are invoked by the verifier
1838  *		  when determining whether the struct_ops progs in the
1839  *		  struct_ops map are valid.
1840  * @init: A callback that is invoked a single time, and before any other
1841  *	  callback, to initialize the structure. A nonzero return value means
1842  *	  the subsystem could not be initialized.
1843  * @check_member: When defined, a callback invoked by the verifier to allow
1844  *		  the subsystem to determine if an entry in the struct_ops map
1845  *		  is valid. A nonzero return value means that the map is
1846  *		  invalid and should be rejected by the verifier.
1847  * @init_member: A callback that is invoked for each member of the struct_ops
1848  *		 map to allow the subsystem to initialize the member. A nonzero
1849  *		 value means the member could not be initialized. This callback
1850  *		 is exclusive with the @type, @type_id, @value_type, and
1851  *		 @value_id fields.
1852  * @reg: A callback that is invoked when the struct_ops map has been
1853  *	 initialized and is being attached to. Zero means the struct_ops map
1854  *	 has been successfully registered and is live. A nonzero return value
1855  *	 means the struct_ops map could not be registered.
1856  * @unreg: A callback that is invoked when the struct_ops map should be
1857  *	   unregistered.
1858  * @update: A callback that is invoked when the live struct_ops map is being
1859  *	    updated to contain new values. This callback is only invoked when
1860  *	    the struct_ops map is loaded with BPF_F_LINK. If not defined,
1861  *	    it is assumed that the struct_ops map cannot be updated.
1862  * @validate: A callback that is invoked after all of the members have been
1863  *	      initialized. This callback should perform static checks on the
1864  *	      map, meaning that it should either fail or succeed
1865  *	      deterministically. A struct_ops map that has been validated may
1866  *	      not necessarily succeed in being registered if the call to @reg
1867  *	      fails. For example, a valid struct_ops map may be loaded, but
1868  *	      then fail to be registered because another struct_ops map is
1869  *	      already active in the subsystem. For this
1870  *	      reason, if this callback is not defined, the check is skipped as
1871  *	      the struct_ops map will have final verification performed in
1872  *	      @reg.
1873  * @type: BTF type.
1874  * @value_type: Value type.
1875  * @name: The name of the struct bpf_struct_ops object.
1876  * @func_models: Func models
1877  * @type_id: BTF type id.
1878  * @value_id: BTF value id.
1879  */
1880 struct bpf_struct_ops {
1881 	const struct bpf_verifier_ops *verifier_ops;
1882 	int (*init)(struct btf *btf);
1883 	int (*check_member)(const struct btf_type *t,
1884 			    const struct btf_member *member,
1885 			    const struct bpf_prog *prog);
1886 	int (*init_member)(const struct btf_type *t,
1887 			   const struct btf_member *member,
1888 			   void *kdata, const void *udata);
1889 	int (*reg)(void *kdata, struct bpf_link *link);
1890 	void (*unreg)(void *kdata, struct bpf_link *link);
1891 	int (*update)(void *kdata, void *old_kdata, struct bpf_link *link);
1892 	int (*validate)(void *kdata);
1893 	void *cfi_stubs;
1894 	struct module *owner;
1895 	const char *name;
1896 	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
1897 };
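/* Illustrative sketch (hypothetical 'struct my_ops' subsystem; these
 * names are not defined in this header): a subsystem exposing its ops to
 * BPF fills in one of these:
 *
 *	static struct bpf_struct_ops bpf_my_ops = {
 *		.verifier_ops	= &bpf_my_ops_verifier_ops,
 *		.init		= bpf_my_ops_init,
 *		.init_member	= bpf_my_ops_init_member,
 *		.reg		= bpf_my_ops_reg,
 *		.unreg		= bpf_my_ops_unreg,
 *		.cfi_stubs	= &bpf_my_ops_cfi_stubs,
 *		.name		= "my_ops",
 *		.owner		= THIS_MODULE,
 *	};
 */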
1898 
1899 /* Every member of a struct_ops type has an instance even if the member is
1900  * not an operator (function pointer). The "info" field will be assigned to
1901  * prog->aux->ctx_arg_info of BPF struct_ops programs to provide the
1902  * argument information required by the verifier to verify the program.
1903  *
1904  * btf_ctx_access() will look up prog->aux->ctx_arg_info to find the
1905  * corresponding entry for a given argument.
1906  */
1907 struct bpf_struct_ops_arg_info {
1908 	struct bpf_ctx_arg_aux *info;
1909 	u32 cnt;
1910 };
1911 
1912 struct bpf_struct_ops_desc {
1913 	struct bpf_struct_ops *st_ops;
1914 
1915 	const struct btf_type *type;
1916 	const struct btf_type *value_type;
1917 	u32 type_id;
1918 	u32 value_id;
1919 
1920 	/* Collection of argument information for each member */
1921 	struct bpf_struct_ops_arg_info *arg_info;
1922 };
1923 
1924 enum bpf_struct_ops_state {
1925 	BPF_STRUCT_OPS_STATE_INIT,
1926 	BPF_STRUCT_OPS_STATE_INUSE,
1927 	BPF_STRUCT_OPS_STATE_TOBEFREE,
1928 	BPF_STRUCT_OPS_STATE_READY,
1929 };
1930 
1931 struct bpf_struct_ops_common_value {
1932 	refcount_t refcnt;
1933 	enum bpf_struct_ops_state state;
1934 };
1935 
1936 #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
1937 /* This macro helps developers register a struct_ops type and generate
1938  * type information correctly. Developers should use this macro to register
1939  * a struct_ops type instead of calling __register_bpf_struct_ops() directly.
1940  */
1941 #define register_bpf_struct_ops(st_ops, type)				\
1942 	({								\
1943 		struct bpf_struct_ops_##type {				\
1944 			struct bpf_struct_ops_common_value common;	\
1945 			struct type data ____cacheline_aligned_in_smp;	\
1946 		};							\
1947 		BTF_TYPE_EMIT(struct bpf_struct_ops_##type);		\
1948 		__register_bpf_struct_ops(st_ops);			\
1949 	})
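/* Example (sketch, continuing the hypothetical 'my_ops' above):
 *
 *	static int __init bpf_my_ops_register(void)
 *	{
 *		return register_bpf_struct_ops(&bpf_my_ops, my_ops);
 *	}
 *
 * The macro emits BTF for the generated struct bpf_struct_ops_my_ops
 * wrapper (common value header plus the subsystem struct) before calling
 * __register_bpf_struct_ops().
 */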
1950 #define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
1951 bool bpf_struct_ops_get(const void *kdata);
1952 void bpf_struct_ops_put(const void *kdata);
1953 int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff);
1954 int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
1955 				       void *value);
1956 int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
1957 				      struct bpf_tramp_link *link,
1958 				      const struct btf_func_model *model,
1959 				      void *stub_func,
1960 				      void **image, u32 *image_off,
1961 				      bool allow_alloc);
1962 void bpf_struct_ops_image_free(void *image);
1963 static inline bool bpf_try_module_get(const void *data, struct module *owner)
1964 {
1965 	if (owner == BPF_MODULE_OWNER)
1966 		return bpf_struct_ops_get(data);
1967 	else
1968 		return try_module_get(owner);
1969 }
1970 static inline void bpf_module_put(const void *data, struct module *owner)
1971 {
1972 	if (owner == BPF_MODULE_OWNER)
1973 		bpf_struct_ops_put(data);
1974 	else
1975 		module_put(owner);
1976 }
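/* Usage sketch: callers pair these around any use of struct_ops kdata.
 * Passing BPF_MODULE_OWNER as the owner routes the refcount to the
 * struct_ops map itself instead of a kernel module:
 *
 *	if (!bpf_try_module_get(kdata, owner))
 *		return -ENODEV;
 *	...use kdata...
 *	bpf_module_put(kdata, owner);
 */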
1977 int bpf_struct_ops_link_create(union bpf_attr *attr);
1978 
1979 #ifdef CONFIG_NET
1980 /* Define it here to avoid the use of forward declaration */
1981 struct bpf_dummy_ops_state {
1982 	int val;
1983 };
1984 
1985 struct bpf_dummy_ops {
1986 	int (*test_1)(struct bpf_dummy_ops_state *cb);
1987 	int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
1988 		      char a3, unsigned long a4);
1989 	int (*test_sleepable)(struct bpf_dummy_ops_state *cb);
1990 };
1991 
1992 int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
1993 			    union bpf_attr __user *uattr);
1994 #endif
1995 int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
1996 			     struct btf *btf,
1997 			     struct bpf_verifier_log *log);
1998 void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map);
1999 void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc);
2000 #else
2001 #define register_bpf_struct_ops(st_ops, type) ({ (void *)(st_ops); 0; })
2002 static inline bool bpf_try_module_get(const void *data, struct module *owner)
2003 {
2004 	return try_module_get(owner);
2005 }
2006 static inline void bpf_module_put(const void *data, struct module *owner)
2007 {
2008 	module_put(owner);
2009 }
2010 static inline int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff)
2011 {
2012 	return -ENOTSUPP;
2013 }
2014 static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
2015 						     void *key,
2016 						     void *value)
2017 {
2018 	return -EINVAL;
2019 }
2020 static inline int bpf_struct_ops_link_create(union bpf_attr *attr)
2021 {
2022 	return -EOPNOTSUPP;
2023 }
2024 static inline void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
2025 {
2026 }
2027 
2028 static inline void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc)
2029 {
2030 }
2031 
2032 #endif
2033 
2034 int bpf_prog_ctx_arg_info_init(struct bpf_prog *prog,
2035 			       const struct bpf_ctx_arg_aux *info, u32 cnt);
2036 
2037 #if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
2038 int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
2039 				    int cgroup_atype,
2040 				    enum bpf_attach_type attach_type);
2041 void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog);
2042 #else
2043 static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
2044 						  int cgroup_atype,
2045 						  enum bpf_attach_type attach_type)
2046 {
2047 	return -EOPNOTSUPP;
2048 }
2049 static inline void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog)
2050 {
2051 }
2052 #endif
2053 
2054 struct bpf_array {
2055 	struct bpf_map map;
2056 	u32 elem_size;
2057 	u32 index_mask;
2058 	struct bpf_array_aux *aux;
2059 	union {
2060 		DECLARE_FLEX_ARRAY(char, value) __aligned(8);
2061 		DECLARE_FLEX_ARRAY(void *, ptrs) __aligned(8);
2062 		DECLARE_FLEX_ARRAY(void __percpu *, pptrs) __aligned(8);
2063 	};
2064 };
2065 
2066 #define BPF_COMPLEXITY_LIMIT_INSNS      1000000 /* yes. 1M insns */
2067 #define MAX_TAIL_CALL_CNT 33
2068 
2069 /* Maximum number of loops for bpf_loop and bpf_iter_num.
2070  * It's an enum to expose it (and thus make it discoverable) through BTF.
2071  */
2072 enum {
2073 	BPF_MAX_LOOPS = 8 * 1024 * 1024,
2074 	BPF_MAX_TIMED_LOOPS = 0xffff,
2075 };
2076 
2077 #define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
2078 				 BPF_F_RDONLY_PROG |	\
2079 				 BPF_F_WRONLY |		\
2080 				 BPF_F_WRONLY_PROG)
2081 
2082 #define BPF_MAP_CAN_READ	BIT(0)
2083 #define BPF_MAP_CAN_WRITE	BIT(1)
2084 
2085 /* Maximum number of user-producer ring buffer samples that can be drained in
2086  * a call to bpf_user_ringbuf_drain().
2087  */
2088 #define BPF_MAX_USER_RINGBUF_SAMPLES (128 * 1024)
2089 
2090 static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
2091 {
2092 	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
2093 
2094 	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
2095 	 * not possible.
2096 	 */
2097 	if (access_flags & BPF_F_RDONLY_PROG)
2098 		return BPF_MAP_CAN_READ;
2099 	else if (access_flags & BPF_F_WRONLY_PROG)
2100 		return BPF_MAP_CAN_WRITE;
2101 	else
2102 		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
2103 }
2104 
2105 static inline bool bpf_map_flags_access_ok(u32 access_flags)
2106 {
2107 	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
2108 	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
2109 }
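/* Worked example (sketch): a verifier-side map access check reduces to
 *
 *	u32 cap = bpf_map_flags_to_cap(map);
 *
 *	if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE))
 *		return -EACCES;
 *
 * which is why the contradictory BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG
 * combination must be rejected up front via bpf_map_flags_access_ok().
 */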
2110 
2111 struct bpf_event_entry {
2112 	struct perf_event *event;
2113 	struct file *perf_file;
2114 	struct file *map_file;
2115 	struct rcu_head rcu;
2116 };
2117 
2118 static inline bool map_type_contains_progs(struct bpf_map *map)
2119 {
2120 	return map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
2121 	       map->map_type == BPF_MAP_TYPE_DEVMAP ||
2122 	       map->map_type == BPF_MAP_TYPE_CPUMAP;
2123 }
2124 
2125 bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp);
2126 int bpf_prog_calc_tag(struct bpf_prog *fp);
2127 
2128 const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
2129 const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);
2130 
2131 const struct bpf_func_proto *bpf_get_perf_event_read_value_proto(void);
2132 
2133 typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
2134 					unsigned long off, unsigned long len);
2135 typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
2136 					const struct bpf_insn *src,
2137 					struct bpf_insn *dst,
2138 					struct bpf_prog *prog,
2139 					u32 *target_size);
2140 
2141 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2142 		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
2143 
2144 /* an array of programs to be executed under rcu_lock.
2145  *
2146  * Typical usage:
2147  * ret = bpf_prog_run_array(rcu_dereference(&bpf_prog_array), ctx, bpf_prog_run);
2148  *
2149  * the structure returned by bpf_prog_array_alloc() should be populated
2150  * with program pointers and the last pointer must be NULL.
2151  * The user has to keep refcnt on the program and make sure the program
2152  * is removed from the array before bpf_prog_put().
2153  * The 'struct bpf_prog_array *' should only be replaced with xchg()
2154  * since other cpus are walking the array of pointers in parallel.
2155  */
2156 struct bpf_prog_array_item {
2157 	struct bpf_prog *prog;
2158 	union {
2159 		struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
2160 		u64 bpf_cookie;
2161 	};
2162 };
2163 
2164 struct bpf_prog_array {
2165 	struct rcu_head rcu;
2166 	struct bpf_prog_array_item items[];
2167 };
2168 
2169 struct bpf_empty_prog_array {
2170 	struct bpf_prog_array hdr;
2171 	struct bpf_prog *null_prog;
2172 };
2173 
2174 /* To avoid allocating an empty bpf_prog_array for cgroups that
2175  * don't have a bpf program attached, use the one global
2176  * 'bpf_empty_prog_array'. It will not be modified by the caller of
2177  * bpf_prog_array_alloc() (since the caller requested prog_cnt == 0);
2178  * that pointer should still be 'freed' by bpf_prog_array_free().
2179  */
2180 extern struct bpf_empty_prog_array bpf_empty_prog_array;
2181 
2182 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
2183 void bpf_prog_array_free(struct bpf_prog_array *progs);
2184 /* Use when traversal over the bpf_prog_array uses tasks_trace rcu */
2185 void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs);
2186 int bpf_prog_array_length(struct bpf_prog_array *progs);
2187 bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
2188 int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
2189 				__u32 __user *prog_ids, u32 cnt);
2190 
2191 void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
2192 				struct bpf_prog *old_prog);
2193 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
2194 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2195 			     struct bpf_prog *prog);
2196 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2197 			     u32 *prog_ids, u32 request_cnt,
2198 			     u32 *prog_cnt);
2199 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2200 			struct bpf_prog *exclude_prog,
2201 			struct bpf_prog *include_prog,
2202 			u64 bpf_cookie,
2203 			struct bpf_prog_array **new_array);
2204 
2205 struct bpf_run_ctx {};
2206 
2207 struct bpf_cg_run_ctx {
2208 	struct bpf_run_ctx run_ctx;
2209 	const struct bpf_prog_array_item *prog_item;
2210 	int retval;
2211 };
2212 
2213 struct bpf_trace_run_ctx {
2214 	struct bpf_run_ctx run_ctx;
2215 	u64 bpf_cookie;
2216 	bool is_uprobe;
2217 };
2218 
2219 struct bpf_tramp_run_ctx {
2220 	struct bpf_run_ctx run_ctx;
2221 	u64 bpf_cookie;
2222 	struct bpf_run_ctx *saved_run_ctx;
2223 };
2224 
2225 static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
2226 {
2227 	struct bpf_run_ctx *old_ctx = NULL;
2228 
2229 #ifdef CONFIG_BPF_SYSCALL
2230 	old_ctx = current->bpf_ctx;
2231 	current->bpf_ctx = new_ctx;
2232 #endif
2233 	return old_ctx;
2234 }
2235 
2236 static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
2237 {
2238 #ifdef CONFIG_BPF_SYSCALL
2239 	current->bpf_ctx = old_ctx;
2240 #endif
2241 }
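/* Save/restore sketch: program runners install their run_ctx for the
 * duration of execution and restore the previous one afterwards:
 *
 *	struct bpf_trace_run_ctx run_ctx;
 *	struct bpf_run_ctx *old_ctx;
 *
 *	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
 *	...run the program(s)...
 *	bpf_reset_run_ctx(old_ctx);
 *
 * Helpers such as bpf_get_attach_cookie() then locate per-run data via
 * current->bpf_ctx.
 */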
2242 
2243 /* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
2244 #define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE			(1 << 0)
2245 /* BPF program asks to set CN on the packet. */
2246 #define BPF_RET_SET_CN						(1 << 0)
2247 
2248 typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);
2249 
2250 static __always_inline u32
2251 bpf_prog_run_array(const struct bpf_prog_array *array,
2252 		   const void *ctx, bpf_prog_run_fn run_prog)
2253 {
2254 	const struct bpf_prog_array_item *item;
2255 	const struct bpf_prog *prog;
2256 	struct bpf_run_ctx *old_run_ctx;
2257 	struct bpf_trace_run_ctx run_ctx;
2258 	u32 ret = 1;
2259 
2260 	RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");
2261 
2262 	if (unlikely(!array))
2263 		return ret;
2264 
2265 	run_ctx.is_uprobe = false;
2266 
2267 	migrate_disable();
2268 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
2269 	item = &array->items[0];
2270 	while ((prog = READ_ONCE(item->prog))) {
2271 		run_ctx.bpf_cookie = item->bpf_cookie;
2272 		ret &= run_prog(prog, ctx);
2273 		item++;
2274 	}
2275 	bpf_reset_run_ctx(old_run_ctx);
2276 	migrate_enable();
2277 	return ret;
2278 }
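/* Caller sketch (assumed call-site shape; 'event->prog_array' is purely
 * illustrative): users hold the RCU read lock around the traversal:
 *
 *	rcu_read_lock();
 *	ret = bpf_prog_run_array(rcu_dereference(event->prog_array),
 *				 ctx, bpf_prog_run);
 *	rcu_read_unlock();
 */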
2279 
2280 /* Notes on RCU design for bpf_prog_arrays containing sleepable programs:
2281  *
2282  * We use the tasks_trace rcu flavor read section to protect the bpf_prog_array
2283  * overall. As a result, we must use the bpf_prog_array_free_sleepable
2284  * in order to use the tasks_trace rcu grace period.
2285  *
2286  * When a non-sleepable program is inside the array, we take the rcu read
2287  * section and disable preemption for that program alone, so it can access
2288  * rcu-protected dynamically sized maps.
2289  */
2290 static __always_inline u32
2291 bpf_prog_run_array_uprobe(const struct bpf_prog_array *array,
2292 			  const void *ctx, bpf_prog_run_fn run_prog)
2293 {
2294 	const struct bpf_prog_array_item *item;
2295 	const struct bpf_prog *prog;
2296 	struct bpf_run_ctx *old_run_ctx;
2297 	struct bpf_trace_run_ctx run_ctx;
2298 	u32 ret = 1;
2299 
2300 	might_fault();
2301 	RCU_LOCKDEP_WARN(!rcu_read_lock_trace_held(), "no rcu lock held");
2302 
2303 	if (unlikely(!array))
2304 		return ret;
2305 
2306 	migrate_disable();
2307 
2308 	run_ctx.is_uprobe = true;
2309 
2310 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
2311 	item = &array->items[0];
2312 	while ((prog = READ_ONCE(item->prog))) {
2313 		if (!prog->sleepable)
2314 			rcu_read_lock();
2315 
2316 		run_ctx.bpf_cookie = item->bpf_cookie;
2317 		ret &= run_prog(prog, ctx);
2318 		item++;
2319 
2320 		if (!prog->sleepable)
2321 			rcu_read_unlock();
2322 	}
2323 	bpf_reset_run_ctx(old_run_ctx);
2324 	migrate_enable();
2325 	return ret;
2326 }
2327 
2328 bool bpf_jit_bypass_spec_v1(void);
2329 bool bpf_jit_bypass_spec_v4(void);
2330 
2331 #ifdef CONFIG_BPF_SYSCALL
2332 DECLARE_PER_CPU(int, bpf_prog_active);
2333 extern struct mutex bpf_stats_enabled_mutex;
2334 
2335 /*
2336  * Block execution of BPF programs attached to instrumentation (perf,
2337  * kprobes, tracepoints) to prevent deadlocks on map operations as any of
2338  * these events can happen inside a region which holds a map bucket lock
2339  * and can deadlock on it.
2340  */
2341 static inline void bpf_disable_instrumentation(void)
2342 {
2343 	migrate_disable();
2344 	this_cpu_inc(bpf_prog_active);
2345 }
2346 
2347 static inline void bpf_enable_instrumentation(void)
2348 {
2349 	this_cpu_dec(bpf_prog_active);
2350 	migrate_enable();
2351 }
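/* Pairing sketch: map code that must not recurse into BPF wraps its
 * critical section with these helpers:
 *
 *	bpf_disable_instrumentation();
 *	...operate on the map bucket under its lock...
 *	bpf_enable_instrumentation();
 *
 * Instrumentation-attached programs test bpf_prog_active and skip
 * execution while it is nonzero, avoiding the deadlock described above.
 */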
2352 
2353 extern const struct super_operations bpf_super_ops;
2354 extern const struct file_operations bpf_map_fops;
2355 extern const struct file_operations bpf_prog_fops;
2356 extern const struct file_operations bpf_iter_fops;
2357 extern const struct file_operations bpf_token_fops;
2358 
2359 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
2360 	extern const struct bpf_prog_ops _name ## _prog_ops; \
2361 	extern const struct bpf_verifier_ops _name ## _verifier_ops;
2362 #define BPF_MAP_TYPE(_id, _ops) \
2363 	extern const struct bpf_map_ops _ops;
2364 #define BPF_LINK_TYPE(_id, _name)
2365 #include <linux/bpf_types.h>
2366 #undef BPF_PROG_TYPE
2367 #undef BPF_MAP_TYPE
2368 #undef BPF_LINK_TYPE
2369 
2370 extern const struct bpf_prog_ops bpf_offload_prog_ops;
2371 extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
2372 extern const struct bpf_verifier_ops xdp_analyzer_ops;
2373 
2374 struct bpf_prog *bpf_prog_get(u32 ufd);
2375 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
2376 				       bool attach_drv);
2377 void bpf_prog_add(struct bpf_prog *prog, int i);
2378 void bpf_prog_sub(struct bpf_prog *prog, int i);
2379 void bpf_prog_inc(struct bpf_prog *prog);
2380 struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
2381 void bpf_prog_put(struct bpf_prog *prog);
2382 
2383 void bpf_prog_free_id(struct bpf_prog *prog);
2384 void bpf_map_free_id(struct bpf_map *map);
2385 
2386 struct btf_field *btf_record_find(const struct btf_record *rec,
2387 				  u32 offset, u32 field_mask);
2388 void btf_record_free(struct btf_record *rec);
2389 void bpf_map_free_record(struct bpf_map *map);
2390 struct btf_record *btf_record_dup(const struct btf_record *rec);
2391 bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b);
2392 void bpf_obj_free_timer(const struct btf_record *rec, void *obj);
2393 void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj);
2394 void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
2395 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
2396 
2397 struct bpf_map *bpf_map_get(u32 ufd);
2398 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
2399 
2400 /*
2401  * The __bpf_map_get() and __btf_get_by_fd() functions parse a file
2402  * descriptor and return a corresponding map or btf object.
2403  * Their names are double underscored to emphasize the fact that they
2404  * do not increase refcnt. To also increase refcnt, use the corresponding
2405  * bpf_map_get() and btf_get_by_fd() functions.
2406  */
2407 
2408 static inline struct bpf_map *__bpf_map_get(struct fd f)
2409 {
2410 	if (fd_empty(f))
2411 		return ERR_PTR(-EBADF);
2412 	if (unlikely(fd_file(f)->f_op != &bpf_map_fops))
2413 		return ERR_PTR(-EINVAL);
2414 	return fd_file(f)->private_data;
2415 }
2416 
2417 static inline struct btf *__btf_get_by_fd(struct fd f)
2418 {
2419 	if (fd_empty(f))
2420 		return ERR_PTR(-EBADF);
2421 	if (unlikely(fd_file(f)->f_op != &btf_fops))
2422 		return ERR_PTR(-EINVAL);
2423 	return fd_file(f)->private_data;
2424 }
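/* Usage sketch (assumed caller shape): the caller owns the struct fd and
 * takes its own reference only if the object must outlive the fd scope:
 *
 *	CLASS(fd, f)(ufd);
 *	struct bpf_map *map = __bpf_map_get(f);
 *
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *	bpf_map_inc(map);
 */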
2425 
2426 void bpf_map_inc(struct bpf_map *map);
2427 void bpf_map_inc_with_uref(struct bpf_map *map);
2428 struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref);
2429 struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
2430 void bpf_map_put_with_uref(struct bpf_map *map);
2431 void bpf_map_put(struct bpf_map *map);
2432 void *bpf_map_area_alloc(u64 size, int numa_node);
2433 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
2434 void bpf_map_area_free(void *base);
2435 bool bpf_map_write_active(const struct bpf_map *map);
2436 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
2437 int  generic_map_lookup_batch(struct bpf_map *map,
2438 			      const union bpf_attr *attr,
2439 			      union bpf_attr __user *uattr);
2440 int  generic_map_update_batch(struct bpf_map *map, struct file *map_file,
2441 			      const union bpf_attr *attr,
2442 			      union bpf_attr __user *uattr);
2443 int  generic_map_delete_batch(struct bpf_map *map,
2444 			      const union bpf_attr *attr,
2445 			      union bpf_attr __user *uattr);
2446 struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
2447 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
2448 
2449 
2450 int bpf_map_alloc_pages(const struct bpf_map *map, int nid,
2451 			unsigned long nr_pages, struct page **page_array);
2452 #ifdef CONFIG_MEMCG
2453 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
2454 			   int node);
2455 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
2456 void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
2457 		       gfp_t flags);
2458 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
2459 				    size_t align, gfp_t flags);
2460 #else
2461 /*
2462  * These specialized allocators have to be macros for their allocations to be
2463  * accounted separately (to have separate alloc_tag).
2464  */
2465 #define bpf_map_kmalloc_node(_map, _size, _flags, _node)	\
2466 		kmalloc_node(_size, _flags, _node)
2467 #define bpf_map_kzalloc(_map, _size, _flags)			\
2468 		kzalloc(_size, _flags)
2469 #define bpf_map_kvcalloc(_map, _n, _size, _flags)		\
2470 		kvcalloc(_n, _size, _flags)
2471 #define bpf_map_alloc_percpu(_map, _size, _align, _flags)	\
2472 		__alloc_percpu_gfp(_size, _align, _flags)
2473 #endif
2474 
2475 static inline int
2476 bpf_map_init_elem_count(struct bpf_map *map)
2477 {
2478 	size_t size = sizeof(*map->elem_count), align = size;
2479 	gfp_t flags = GFP_USER | __GFP_NOWARN;
2480 
2481 	map->elem_count = bpf_map_alloc_percpu(map, size, align, flags);
2482 	if (!map->elem_count)
2483 		return -ENOMEM;
2484 
2485 	return 0;
2486 }
2487 
2488 static inline void
2489 bpf_map_free_elem_count(struct bpf_map *map)
2490 {
2491 	free_percpu(map->elem_count);
2492 }
2493 
2494 static inline void bpf_map_inc_elem_count(struct bpf_map *map)
2495 {
2496 	this_cpu_inc(*map->elem_count);
2497 }
2498 
2499 static inline void bpf_map_dec_elem_count(struct bpf_map *map)
2500 {
2501 	this_cpu_dec(*map->elem_count);
2502 }
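/* Lifecycle sketch for the optional per-cpu element counter:
 *
 *	bpf_map_init_elem_count(map);	in ->map_alloc(), may fail
 *	bpf_map_inc_elem_count(map);	on element insert
 *	bpf_map_dec_elem_count(map);	on element delete
 *	bpf_map_free_elem_count(map);	in ->map_free()
 */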
2503 
2504 extern int sysctl_unprivileged_bpf_disabled;
2505 
2506 bool bpf_token_capable(const struct bpf_token *token, int cap);
2507 
2508 static inline bool bpf_allow_ptr_leaks(const struct bpf_token *token)
2509 {
2510 	return bpf_token_capable(token, CAP_PERFMON);
2511 }
2512 
2513 static inline bool bpf_allow_uninit_stack(const struct bpf_token *token)
2514 {
2515 	return bpf_token_capable(token, CAP_PERFMON);
2516 }
2517 
2518 static inline bool bpf_bypass_spec_v1(const struct bpf_token *token)
2519 {
2520 	return bpf_jit_bypass_spec_v1() ||
2521 		cpu_mitigations_off() ||
2522 		bpf_token_capable(token, CAP_PERFMON);
2523 }
2524 
2525 static inline bool bpf_bypass_spec_v4(const struct bpf_token *token)
2526 {
2527 	return bpf_jit_bypass_spec_v4() ||
2528 		cpu_mitigations_off() ||
2529 		bpf_token_capable(token, CAP_PERFMON);
2530 }
2531 
2532 int bpf_map_new_fd(struct bpf_map *map, int flags);
2533 int bpf_prog_new_fd(struct bpf_prog *prog);
2534 
2535 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2536 		   const struct bpf_link_ops *ops, struct bpf_prog *prog,
2537 		   enum bpf_attach_type attach_type);
2538 void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type,
2539 			     const struct bpf_link_ops *ops, struct bpf_prog *prog,
2540 			     enum bpf_attach_type attach_type, bool sleepable);
2541 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
2542 int bpf_link_settle(struct bpf_link_primer *primer);
2543 void bpf_link_cleanup(struct bpf_link_primer *primer);
2544 void bpf_link_inc(struct bpf_link *link);
2545 struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link);
2546 void bpf_link_put(struct bpf_link *link);
2547 int bpf_link_new_fd(struct bpf_link *link);
2548 struct bpf_link *bpf_link_get_from_fd(u32 ufd);
2549 struct bpf_link *bpf_link_get_curr_or_next(u32 *id);
2550 
2551 void bpf_token_inc(struct bpf_token *token);
2552 void bpf_token_put(struct bpf_token *token);
2553 int bpf_token_create(union bpf_attr *attr);
2554 struct bpf_token *bpf_token_get_from_fd(u32 ufd);
2555 int bpf_token_get_info_by_fd(struct bpf_token *token,
2556 			     const union bpf_attr *attr,
2557 			     union bpf_attr __user *uattr);
2558 
2559 bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd);
2560 bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type);
2561 bool bpf_token_allow_prog_type(const struct bpf_token *token,
2562 			       enum bpf_prog_type prog_type,
2563 			       enum bpf_attach_type attach_type);
2564 
2565 int bpf_obj_pin_user(u32 ufd, int path_fd, const char __user *pathname);
2566 int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags);
2567 struct inode *bpf_get_inode(struct super_block *sb, const struct inode *dir,
2568 			    umode_t mode);
2569 
2570 #define BPF_ITER_FUNC_PREFIX "bpf_iter_"
2571 #define DEFINE_BPF_ITER_FUNC(target, args...)			\
2572 	extern int bpf_iter_ ## target(args);			\
2573 	int __init bpf_iter_ ## target(args) { return 0; }
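/* Expansion sketch: the task iterator, for instance, is declared as
 *
 *	DEFINE_BPF_ITER_FUNC(task, struct bpf_iter_meta *meta,
 *			     struct task_struct *task)
 *
 * which emits a dummy __init bpf_iter_task() whose BTF-visible signature
 * tells the verifier the ctx layout seen by "task" iterator programs.
 */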
2574 
2575 /*
2576  * The task type of iterators.
2577  *
2578  * BPF task iterators can be parameterized with various
2579  * parameters to visit only a subset of tasks.
2580  *
2581  * BPF_TASK_ITER_ALL (default)
2582  *	Iterate over resources of every task.
2583  *
2584  * BPF_TASK_ITER_TID
2585  *	Iterate over resources of a task/tid.
2586  *
2587  * BPF_TASK_ITER_TGID
2588  *	Iterate over resources of every task of a process / task group.
2589  */
2590 enum bpf_iter_task_type {
2591 	BPF_TASK_ITER_ALL = 0,
2592 	BPF_TASK_ITER_TID,
2593 	BPF_TASK_ITER_TGID,
2594 };
2595 
2596 struct bpf_iter_aux_info {
2597 	/* for map_elem iter */
2598 	struct bpf_map *map;
2599 
2600 	/* for cgroup iter */
2601 	struct {
2602 		struct cgroup *start; /* starting cgroup */
2603 		enum bpf_cgroup_iter_order order;
2604 	} cgroup;
2605 	struct {
2606 		enum bpf_iter_task_type	type;
2607 		u32 pid;
2608 	} task;
2609 };
2610 
2611 typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
2612 					union bpf_iter_link_info *linfo,
2613 					struct bpf_iter_aux_info *aux);
2614 typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
2615 typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
2616 					struct seq_file *seq);
2617 typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
2618 					 struct bpf_link_info *info);
2619 typedef const struct bpf_func_proto *
2620 (*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
2621 			     const struct bpf_prog *prog);
2622 
2623 enum bpf_iter_feature {
2624 	BPF_ITER_RESCHED	= BIT(0),
2625 };
2626 
2627 #define BPF_ITER_CTX_ARG_MAX 2
2628 struct bpf_iter_reg {
2629 	const char *target;
2630 	bpf_iter_attach_target_t attach_target;
2631 	bpf_iter_detach_target_t detach_target;
2632 	bpf_iter_show_fdinfo_t show_fdinfo;
2633 	bpf_iter_fill_link_info_t fill_link_info;
2634 	bpf_iter_get_func_proto_t get_func_proto;
2635 	u32 ctx_arg_info_size;
2636 	u32 feature;
2637 	struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
2638 	const struct bpf_iter_seq_info *seq_info;
2639 };
2640 
2641 struct bpf_iter_meta {
2642 	__bpf_md_ptr(struct seq_file *, seq);
2643 	u64 session_id;
2644 	u64 seq_num;
2645 };
2646 
2647 struct bpf_iter__bpf_map_elem {
2648 	__bpf_md_ptr(struct bpf_iter_meta *, meta);
2649 	__bpf_md_ptr(struct bpf_map *, map);
2650 	__bpf_md_ptr(void *, key);
2651 	__bpf_md_ptr(void *, value);
2652 };
2653 
2654 int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
2655 void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
2656 int bpf_iter_prog_supported(struct bpf_prog *prog);
2657 const struct bpf_func_proto *
2658 bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
2659 int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
2660 int bpf_iter_new_fd(struct bpf_link *link);
2661 bool bpf_link_is_iter(struct bpf_link *link);
2662 struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
2663 int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
2664 void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
2665 			      struct seq_file *seq);
2666 int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
2667 				struct bpf_link_info *info);
2668 
2669 int map_set_for_each_callback_args(struct bpf_verifier_env *env,
2670 				   struct bpf_func_state *caller,
2671 				   struct bpf_func_state *callee);
2672 
2673 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
2674 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
2675 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
2676 			   u64 flags);
2677 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
2678 			    u64 flags);
2679 
2680 int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
2681 
2682 int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
2683 				 void *key, void *value, u64 map_flags);
2684 int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
2685 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
2686 				void *key, void *value, u64 map_flags);
2687 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
2688 
2689 int bpf_get_file_flag(int flags);
2690 int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
2691 			     size_t actual_size);
2692 
2693 /* verify correctness of eBPF program */
2694 int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size);
2695 
2696 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
2697 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
2698 #endif
2699 
2700 struct btf *bpf_get_btf_vmlinux(void);
2701 
2702 /* Map specifics */
2703 struct xdp_frame;
2704 struct sk_buff;
2705 struct bpf_dtab_netdev;
2706 struct bpf_cpu_map_entry;
2707 
2708 void __dev_flush(struct list_head *flush_list);
2709 int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
2710 		    struct net_device *dev_rx);
2711 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
2712 		    struct net_device *dev_rx);
2713 int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
2714 			  struct bpf_map *map, bool exclude_ingress);
2715 int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
2716 			     const struct bpf_prog *xdp_prog);
2717 int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
2718 			   const struct bpf_prog *xdp_prog,
2719 			   struct bpf_map *map, bool exclude_ingress);
2720 
2721 void __cpu_map_flush(struct list_head *flush_list);
2722 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
2723 		    struct net_device *dev_rx);
2724 int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
2725 			     struct sk_buff *skb);
2726 
2727 /* Return the map's NUMA node as specified by userspace */
2728 static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
2729 {
2730 	return (attr->map_flags & BPF_F_NUMA_NODE) ?
2731 		attr->numa_node : NUMA_NO_NODE;
2732 }
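/* e.g. (sketch) a ->map_alloc() implementation typically does:
 *
 *	int numa_node = bpf_map_attr_numa_node(attr);
 *	void *area = bpf_map_area_alloc(size, numa_node);
 */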
2733 
2734 struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
2735 int array_map_alloc_check(union bpf_attr *attr);
2736 
2737 int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
2738 			  union bpf_attr __user *uattr);
2739 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
2740 			  union bpf_attr __user *uattr);
2741 int bpf_prog_test_run_tracing(struct bpf_prog *prog,
2742 			      const union bpf_attr *kattr,
2743 			      union bpf_attr __user *uattr);
2744 int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
2745 				     const union bpf_attr *kattr,
2746 				     union bpf_attr __user *uattr);
2747 int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
2748 			     const union bpf_attr *kattr,
2749 			     union bpf_attr __user *uattr);
2750 int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
2751 				const union bpf_attr *kattr,
2752 				union bpf_attr __user *uattr);
2753 int bpf_prog_test_run_nf(struct bpf_prog *prog,
2754 			 const union bpf_attr *kattr,
2755 			 union bpf_attr __user *uattr);
2756 bool btf_ctx_access(int off, int size, enum bpf_access_type type,
2757 		    const struct bpf_prog *prog,
2758 		    struct bpf_insn_access_aux *info);
2759 
2760 static inline bool bpf_tracing_ctx_access(int off, int size,
2761 					  enum bpf_access_type type)
2762 {
2763 	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
2764 		return false;
2765 	if (type != BPF_READ)
2766 		return false;
2767 	if (off % size != 0)
2768 		return false;
2769 	return true;
2770 }
2771 
2772 static inline bool bpf_tracing_btf_ctx_access(int off, int size,
2773 					      enum bpf_access_type type,
2774 					      const struct bpf_prog *prog,
2775 					      struct bpf_insn_access_aux *info)
2776 {
2777 	if (!bpf_tracing_ctx_access(off, size, type))
2778 		return false;
2779 	return btf_ctx_access(off, size, type, prog, info);
2780 }
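/* Usage sketch (hypothetical verifier_ops callback, not defined here):
 * tracing-style program types can wire this directly as their
 * is_valid_access():
 *
 *	static bool my_is_valid_access(int off, int size,
 *				       enum bpf_access_type type,
 *				       const struct bpf_prog *prog,
 *				       struct bpf_insn_access_aux *info)
 *	{
 *		return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
 *	}
 */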
2781 
2782 int btf_struct_access(struct bpf_verifier_log *log,
2783 		      const struct bpf_reg_state *reg,
2784 		      int off, int size, enum bpf_access_type atype,
2785 		      u32 *next_btf_id, enum bpf_type_flag *flag, const char **field_name);
2786 bool btf_struct_ids_match(struct bpf_verifier_log *log,
2787 			  const struct btf *btf, u32 id, int off,
2788 			  const struct btf *need_btf, u32 need_type_id,
2789 			  bool strict);
2790 
2791 int btf_distill_func_proto(struct bpf_verifier_log *log,
2792 			   struct btf *btf,
2793 			   const struct btf_type *func_proto,
2794 			   const char *func_name,
2795 			   struct btf_func_model *m);
2796 
2797 struct bpf_reg_state;
2798 int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog);
2799 int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
2800 			 struct btf *btf, const struct btf_type *t);
2801 const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
2802 				    int comp_idx, const char *tag_key);
2803 int btf_find_next_decl_tag(const struct btf *btf, const struct btf_type *pt,
2804 			   int comp_idx, const char *tag_key, int last_id);
2805 
2806 struct bpf_prog *bpf_prog_by_id(u32 id);
2807 struct bpf_link *bpf_link_by_id(u32 id);
2808 
2809 const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id,
2810 						 const struct bpf_prog *prog);
2811 void bpf_task_storage_free(struct task_struct *task);
2812 void bpf_cgrp_storage_free(struct cgroup *cgroup);
2813 bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
2814 const struct btf_func_model *
2815 bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
2816 			 const struct bpf_insn *insn);
2817 int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
2818 		       u16 btf_fd_idx, u8 **func_addr);
2819 
2820 struct bpf_core_ctx {
2821 	struct bpf_verifier_log *log;
2822 	const struct btf *btf;
2823 };
2824 
2825 bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
2826 				const struct bpf_reg_state *reg,
2827 				const char *field_name, u32 btf_id, const char *suffix);
2828 
2829 bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log,
2830 			       const struct btf *reg_btf, u32 reg_id,
2831 			       const struct btf *arg_btf, u32 arg_id);
2832 
2833 int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
2834 		   int relo_idx, void *insn);
2835 
2836 static inline bool unprivileged_ebpf_enabled(void)
2837 {
2838 	return !sysctl_unprivileged_bpf_disabled;
2839 }
2840 
2841 /* Not all bpf prog types have a bpf_ctx.
2842  * For prog types that do initialize bpf_ctx,
2843  * this function can be used to decide whether a kernel function
2844  * is being called by a bpf program.
2845  */
2846 static inline bool has_current_bpf_ctx(void)
2847 {
2848 	return !!current->bpf_ctx;
2849 }
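/* e.g. (sketch) a helper shared between BPF and regular kernel callers
 * can branch on the execution context:
 *
 *	if (has_current_bpf_ctx())
 *		...called from a BPF program...
 */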
2850 
2851 void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog);
2852 
2853 void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
2854 		     enum bpf_dynptr_type type, u32 offset, u32 size);
2855 void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
2856 void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr);
2857 
2858 #else /* !CONFIG_BPF_SYSCALL */
2859 static inline struct bpf_prog *bpf_prog_get(u32 ufd)
2860 {
2861 	return ERR_PTR(-EOPNOTSUPP);
2862 }
2863 
2864 static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
2865 						     enum bpf_prog_type type,
2866 						     bool attach_drv)
2867 {
2868 	return ERR_PTR(-EOPNOTSUPP);
2869 }
2870 
2871 static inline void bpf_prog_add(struct bpf_prog *prog, int i)
2872 {
2873 }
2874 
2875 static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
2876 {
2877 }
2878 
2879 static inline void bpf_prog_put(struct bpf_prog *prog)
2880 {
2881 }
2882 
2883 static inline void bpf_prog_inc(struct bpf_prog *prog)
2884 {
2885 }
2886 
2887 static inline struct bpf_prog *__must_check
2888 bpf_prog_inc_not_zero(struct bpf_prog *prog)
2889 {
2890 	return ERR_PTR(-EOPNOTSUPP);
2891 }
2892 
2893 static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2894 				 const struct bpf_link_ops *ops,
2895 				 struct bpf_prog *prog, enum bpf_attach_type attach_type)
2896 {
2897 }
2898 
2899 static inline void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type,
2900 					   const struct bpf_link_ops *ops, struct bpf_prog *prog,
2901 					   enum bpf_attach_type attach_type, bool sleepable)
2902 {
2903 }
2904 
2905 static inline int bpf_link_prime(struct bpf_link *link,
2906 				 struct bpf_link_primer *primer)
2907 {
2908 	return -EOPNOTSUPP;
2909 }
2910 
2911 static inline int bpf_link_settle(struct bpf_link_primer *primer)
2912 {
2913 	return -EOPNOTSUPP;
2914 }
2915 
2916 static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
2917 {
2918 }
2919 
2920 static inline void bpf_link_inc(struct bpf_link *link)
2921 {
2922 }
2923 
2924 static inline struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
2925 {
2926 	return NULL;
2927 }
2928 
2929 static inline void bpf_link_put(struct bpf_link *link)
2930 {
2931 }
2932 
2933 static inline int bpf_obj_get_user(const char __user *pathname, int flags)
2934 {
2935 	return -EOPNOTSUPP;
2936 }
2937 
2938 static inline bool bpf_token_capable(const struct bpf_token *token, int cap)
2939 {
2940 	return capable(cap) || (cap != CAP_SYS_ADMIN && capable(CAP_SYS_ADMIN));
2941 }
2942 
2943 static inline void bpf_token_inc(struct bpf_token *token)
2944 {
2945 }
2946 
2947 static inline void bpf_token_put(struct bpf_token *token)
2948 {
2949 }
2950 
2951 static inline struct bpf_token *bpf_token_get_from_fd(u32 ufd)
2952 {
2953 	return ERR_PTR(-EOPNOTSUPP);
2954 }
2955 
2956 static inline int bpf_token_get_info_by_fd(struct bpf_token *token,
2957 					   const union bpf_attr *attr,
2958 					   union bpf_attr __user *uattr)
2959 {
2960 	return -EOPNOTSUPP;
2961 }
2962 
2963 static inline void __dev_flush(struct list_head *flush_list)
2964 {
2965 }
2966 
2967 struct xdp_frame;
2968 struct bpf_dtab_netdev;
2969 struct bpf_cpu_map_entry;
2970 
2971 static inline
2972 int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
2973 		    struct net_device *dev_rx)
2974 {
2975 	return 0;
2976 }
2977 
2978 static inline
2979 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
2980 		    struct net_device *dev_rx)
2981 {
2982 	return 0;
2983 }
2984 
2985 static inline
2986 int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
2987 			  struct bpf_map *map, bool exclude_ingress)
2988 {
2989 	return 0;
2990 }
2991 
2992 struct sk_buff;
2993 
2994 static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
2995 					   struct sk_buff *skb,
2996 					   const struct bpf_prog *xdp_prog)
2997 {
2998 	return 0;
2999 }
3000 
3001 static inline
3002 int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
3003 			   const struct bpf_prog *xdp_prog,
3004 			   struct bpf_map *map, bool exclude_ingress)
3005 {
3006 	return 0;
3007 }
3008 
3009 static inline void __cpu_map_flush(struct list_head *flush_list)
3010 {
3011 }
3012 
3013 static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
3014 				  struct xdp_frame *xdpf,
3015 				  struct net_device *dev_rx)
3016 {
3017 	return 0;
3018 }
3019 
3020 static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
3021 					   struct sk_buff *skb)
3022 {
3023 	return -EOPNOTSUPP;
3024 }
3025 
3026 static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
3027 				enum bpf_prog_type type)
3028 {
3029 	return ERR_PTR(-EOPNOTSUPP);
3030 }
3031 
3032 static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
3033 					const union bpf_attr *kattr,
3034 					union bpf_attr __user *uattr)
3035 {
3036 	return -ENOTSUPP;
3037 }
3038 
3039 static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
3040 					const union bpf_attr *kattr,
3041 					union bpf_attr __user *uattr)
3042 {
3043 	return -ENOTSUPP;
3044 }
3045 
3046 static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
3047 					    const union bpf_attr *kattr,
3048 					    union bpf_attr __user *uattr)
3049 {
3050 	return -ENOTSUPP;
3051 }
3052 
3053 static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
3054 						   const union bpf_attr *kattr,
3055 						   union bpf_attr __user *uattr)
3056 {
3057 	return -ENOTSUPP;
3058 }
3059 
3060 static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
3061 					      const union bpf_attr *kattr,
3062 					      union bpf_attr __user *uattr)
3063 {
3064 	return -ENOTSUPP;
3065 }
3066 
3067 static inline void bpf_map_put(struct bpf_map *map)
3068 {
3069 }
3070 
3071 static inline struct bpf_prog *bpf_prog_by_id(u32 id)
3072 {
3073 	return ERR_PTR(-ENOTSUPP);
3074 }
3075 
3076 static inline int btf_struct_access(struct bpf_verifier_log *log,
3077 				    const struct bpf_reg_state *reg,
3078 				    int off, int size, enum bpf_access_type atype,
3079 				    u32 *next_btf_id, enum bpf_type_flag *flag,
3080 				    const char **field_name)
3081 {
3082 	return -EACCES;
3083 }
3084 
3085 static inline const struct bpf_func_proto *
3086 bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
3087 {
3088 	return NULL;
3089 }
3090 
3091 static inline void bpf_task_storage_free(struct task_struct *task)
3092 {
3093 }
3094 
3095 static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
3096 {
3097 	return false;
3098 }
3099 
3100 static inline const struct btf_func_model *
3101 bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
3102 			 const struct bpf_insn *insn)
3103 {
3104 	return NULL;
3105 }
3106 
3107 static inline int
3108 bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
3109 		   u16 btf_fd_idx, u8 **func_addr)
3110 {
3111 	return -ENOTSUPP;
3112 }
3113 
3114 static inline bool unprivileged_ebpf_enabled(void)
3115 {
3116 	return false;
3117 }
3118 
3119 static inline bool has_current_bpf_ctx(void)
3120 {
3121 	return false;
3122 }
3123 
3124 static inline void bpf_prog_inc_misses_counter(struct bpf_prog *prog)
3125 {
3126 }
3127 
3128 static inline void bpf_cgrp_storage_free(struct cgroup *cgroup)
3129 {
3130 }
3131 
3132 static inline void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
3133 				   enum bpf_dynptr_type type, u32 offset, u32 size)
3134 {
3135 }
3136 
3137 static inline void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
3138 {
3139 }
3140 
3141 static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
3142 {
3143 }
3144 #endif /* CONFIG_BPF_SYSCALL */
3145 
3146 static __always_inline int
3147 bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
3148 {
3149 	int ret = -EFAULT;
3150 
3151 	if (IS_ENABLED(CONFIG_BPF_EVENTS))
3152 		ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
3153 	if (unlikely(ret < 0))
3154 		memset(dst, 0, size);
3155 	return ret;
3156 }
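
/* Illustrative sketch, not part of this header: thanks to the zero-fill on
 * failure above, @dst is always fully initialized, so callers may treat the
 * destination as valid data even when the probe read faults.
 * read_comm_safely() is a hypothetical helper.
 */
static inline void read_comm_safely(char *buf, struct task_struct *tsk)
{
	/* On -EFAULT, buf was zeroed, so it is still a NUL-terminated string. */
	bpf_probe_read_kernel_common(buf, TASK_COMM_LEN, tsk->comm);
	buf[TASK_COMM_LEN - 1] = '\0';
}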
3157 
3158 void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len);
3159 
3160 static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
3161 						 enum bpf_prog_type type)
3162 {
3163 	return bpf_prog_get_type_dev(ufd, type, false);
3164 }
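
/* Illustrative sketch, not part of this header: bpf_prog_get_type() takes a
 * reference on the program behind a user-supplied fd (attach_drv == false,
 * so device-bound programs are not matched); the reference must be dropped
 * with bpf_prog_put(). attach_sk_filter_prog() is a hypothetical caller.
 */
static inline int attach_sk_filter_prog(u32 ufd)
{
	struct bpf_prog *prog;

	prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(prog))
		return PTR_ERR(prog);
	/* ... attach or run the program ... */
	bpf_prog_put(prog);
	return 0;
}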
3165 
3166 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
3167 			  struct bpf_map **used_maps, u32 len);
3168 
3169 bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
3170 
3171 int bpf_prog_offload_compile(struct bpf_prog *prog);
3172 void bpf_prog_dev_bound_destroy(struct bpf_prog *prog);
3173 int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
3174 			       struct bpf_prog *prog);
3175 
3176 int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);
3177 
3178 int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
3179 int bpf_map_offload_update_elem(struct bpf_map *map,
3180 				void *key, void *value, u64 flags);
3181 int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
3182 int bpf_map_offload_get_next_key(struct bpf_map *map,
3183 				 void *key, void *next_key);
3184 
3185 bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
3186 
3187 struct bpf_offload_dev *
3188 bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
3189 void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
3190 void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
3191 int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
3192 				    struct net_device *netdev);
3193 void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
3194 				       struct net_device *netdev);
3195 bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
3196 
3197 void unpriv_ebpf_notify(int new_state);
3198 
3199 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
3200 int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
3201 			      struct bpf_prog_aux *prog_aux);
3202 void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id);
3203 int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr);
3204 int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog);
3205 void bpf_dev_bound_netdev_unregister(struct net_device *dev);
3206 
3207 static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
3208 {
3209 	return aux->dev_bound;
3210 }
3211 
3212 static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux)
3213 {
3214 	return aux->offload_requested;
3215 }
3216 
3217 bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs);
3218 
3219 static inline bool bpf_map_is_offloaded(struct bpf_map *map)
3220 {
3221 	return unlikely(map->ops == &bpf_map_offload_ops);
3222 }
3223 
3224 struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
3225 void bpf_map_offload_map_free(struct bpf_map *map);
3226 u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map);
3227 int bpf_prog_test_run_syscall(struct bpf_prog *prog,
3228 			      const union bpf_attr *kattr,
3229 			      union bpf_attr __user *uattr);
3230 
3231 int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
3232 int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
3233 int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
3234 int sock_map_bpf_prog_query(const union bpf_attr *attr,
3235 			    union bpf_attr __user *uattr);
3236 int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog);
3237 
3238 void sock_map_unhash(struct sock *sk);
3239 void sock_map_destroy(struct sock *sk);
3240 void sock_map_close(struct sock *sk, long timeout);
3241 #else
3242 static inline int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
3243 					    struct bpf_prog_aux *prog_aux)
3244 {
3245 	return -EOPNOTSUPP;
3246 }
3247 
3248 static inline void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog,
3249 						u32 func_id)
3250 {
3251 	return NULL;
3252 }
3253 
3254 static inline int bpf_prog_dev_bound_init(struct bpf_prog *prog,
3255 					  union bpf_attr *attr)
3256 {
3257 	return -EOPNOTSUPP;
3258 }
3259 
3260 static inline int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog,
3261 					     struct bpf_prog *old_prog)
3262 {
3263 	return -EOPNOTSUPP;
3264 }
3265 
3266 static inline void bpf_dev_bound_netdev_unregister(struct net_device *dev)
3267 {
3268 }
3269 
3270 static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
3271 {
3272 	return false;
3273 }
3274 
3275 static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux)
3276 {
3277 	return false;
3278 }
3279 
3280 static inline bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs)
3281 {
3282 	return false;
3283 }
3284 
3285 static inline bool bpf_map_is_offloaded(struct bpf_map *map)
3286 {
3287 	return false;
3288 }
3289 
3290 static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
3291 {
3292 	return ERR_PTR(-EOPNOTSUPP);
3293 }
3294 
3295 static inline void bpf_map_offload_map_free(struct bpf_map *map)
3296 {
3297 }
3298 
3299 static inline u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map)
3300 {
3301 	return 0;
3302 }
3303 
3304 static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
3305 					    const union bpf_attr *kattr,
3306 					    union bpf_attr __user *uattr)
3307 {
3308 	return -ENOTSUPP;
3309 }
3310 
3311 #ifdef CONFIG_BPF_SYSCALL
3312 static inline int sock_map_get_from_fd(const union bpf_attr *attr,
3313 				       struct bpf_prog *prog)
3314 {
3315 	return -EINVAL;
3316 }
3317 
3318 static inline int sock_map_prog_detach(const union bpf_attr *attr,
3319 				       enum bpf_prog_type ptype)
3320 {
3321 	return -EOPNOTSUPP;
3322 }
3323 
3324 static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
3325 					   u64 flags)
3326 {
3327 	return -EOPNOTSUPP;
3328 }
3329 
3330 static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
3331 					  union bpf_attr __user *uattr)
3332 {
3333 	return -EINVAL;
3334 }
3335 
3336 static inline int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog)
3337 {
3338 	return -EOPNOTSUPP;
3339 }
3340 #endif /* CONFIG_BPF_SYSCALL */
3341 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
3342 
3343 static __always_inline void
3344 bpf_prog_inc_misses_counters(const struct bpf_prog_array *array)
3345 {
3346 	const struct bpf_prog_array_item *item;
3347 	struct bpf_prog *prog;
3348 
3349 	if (unlikely(!array))
3350 		return;
3351 
3352 	item = &array->items[0];
3353 	while ((prog = READ_ONCE(item->prog))) {
3354 		bpf_prog_inc_misses_counter(prog);
3355 		item++;
3356 	}
3357 }
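
/* Illustrative sketch, not part of this header: a bpf_prog_array is
 * terminated by a NULL prog pointer, so the walk used above generalizes to
 * any per-program bookkeeping. count_array_progs() is hypothetical.
 */
static __always_inline u32 count_array_progs(const struct bpf_prog_array *array)
{
	const struct bpf_prog_array_item *item;
	u32 cnt = 0;

	if (unlikely(!array))
		return 0;
	for (item = &array->items[0]; READ_ONCE(item->prog); item++)
		cnt++;
	return cnt;
}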
3358 
3359 #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
3360 void bpf_sk_reuseport_detach(struct sock *sk);
3361 int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
3362 				       void *value);
3363 int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
3364 				       void *value, u64 map_flags);
3365 #else
3366 static inline void bpf_sk_reuseport_detach(struct sock *sk)
3367 {
3368 }
3369 
3370 #ifdef CONFIG_BPF_SYSCALL
3371 static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
3372 						     void *key, void *value)
3373 {
3374 	return -EOPNOTSUPP;
3375 }
3376 
3377 static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
3378 						     void *key, void *value,
3379 						     u64 map_flags)
3380 {
3381 	return -EOPNOTSUPP;
3382 }
3383 #endif /* CONFIG_BPF_SYSCALL */
3384 #endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */
3385 
3386 /* verifier prototypes for helper functions called from eBPF programs */
3387 extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
3388 extern const struct bpf_func_proto bpf_map_update_elem_proto;
3389 extern const struct bpf_func_proto bpf_map_delete_elem_proto;
3390 extern const struct bpf_func_proto bpf_map_push_elem_proto;
3391 extern const struct bpf_func_proto bpf_map_pop_elem_proto;
3392 extern const struct bpf_func_proto bpf_map_peek_elem_proto;
3393 extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto;
3394 
3395 extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
3396 extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
3397 extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
3398 extern const struct bpf_func_proto bpf_tail_call_proto;
3399 extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
3400 extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
3401 extern const struct bpf_func_proto bpf_ktime_get_tai_ns_proto;
3402 extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
3403 extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
3404 extern const struct bpf_func_proto bpf_get_current_comm_proto;
3405 extern const struct bpf_func_proto bpf_get_stackid_proto;
3406 extern const struct bpf_func_proto bpf_get_stack_proto;
3407 extern const struct bpf_func_proto bpf_get_stack_sleepable_proto;
3408 extern const struct bpf_func_proto bpf_get_task_stack_proto;
3409 extern const struct bpf_func_proto bpf_get_task_stack_sleepable_proto;
3410 extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
3411 extern const struct bpf_func_proto bpf_get_stack_proto_pe;
3412 extern const struct bpf_func_proto bpf_sock_map_update_proto;
3413 extern const struct bpf_func_proto bpf_sock_hash_update_proto;
3414 extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
3415 extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
3416 extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto;
3417 extern const struct bpf_func_proto bpf_current_task_under_cgroup_proto;
3418 extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
3419 extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
3420 extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
3421 extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
3422 extern const struct bpf_func_proto bpf_spin_lock_proto;
3423 extern const struct bpf_func_proto bpf_spin_unlock_proto;
3424 extern const struct bpf_func_proto bpf_get_local_storage_proto;
3425 extern const struct bpf_func_proto bpf_strtol_proto;
3426 extern const struct bpf_func_proto bpf_strtoul_proto;
3427 extern const struct bpf_func_proto bpf_tcp_sock_proto;
3428 extern const struct bpf_func_proto bpf_jiffies64_proto;
3429 extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
3430 extern const struct bpf_func_proto bpf_event_output_data_proto;
3431 extern const struct bpf_func_proto bpf_ringbuf_output_proto;
3432 extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
3433 extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
3434 extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
3435 extern const struct bpf_func_proto bpf_ringbuf_query_proto;
3436 extern const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto;
3437 extern const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto;
3438 extern const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto;
3439 extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
3440 extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
3441 extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
3442 extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
3443 extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
3444 extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto;
3445 extern const struct bpf_func_proto bpf_skc_to_mptcp_sock_proto;
3446 extern const struct bpf_func_proto bpf_copy_from_user_proto;
3447 extern const struct bpf_func_proto bpf_snprintf_btf_proto;
3448 extern const struct bpf_func_proto bpf_snprintf_proto;
3449 extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
3450 extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
3451 extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
3452 extern const struct bpf_func_proto bpf_sock_from_file_proto;
3453 extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
3454 extern const struct bpf_func_proto bpf_task_storage_get_recur_proto;
3455 extern const struct bpf_func_proto bpf_task_storage_get_proto;
3456 extern const struct bpf_func_proto bpf_task_storage_delete_recur_proto;
3457 extern const struct bpf_func_proto bpf_task_storage_delete_proto;
3458 extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
3459 extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
3460 extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
3461 extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
3462 extern const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto;
3463 extern const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto;
3464 extern const struct bpf_func_proto bpf_find_vma_proto;
3465 extern const struct bpf_func_proto bpf_loop_proto;
3466 extern const struct bpf_func_proto bpf_copy_from_user_task_proto;
3467 extern const struct bpf_func_proto bpf_set_retval_proto;
3468 extern const struct bpf_func_proto bpf_get_retval_proto;
3469 extern const struct bpf_func_proto bpf_user_ringbuf_drain_proto;
3470 extern const struct bpf_func_proto bpf_cgrp_storage_get_proto;
3471 extern const struct bpf_func_proto bpf_cgrp_storage_delete_proto;
3472 
3473 const struct bpf_func_proto *tracing_prog_func_proto(
3474   enum bpf_func_id func_id, const struct bpf_prog *prog);
3475 
3476 /* Shared helpers among cBPF and eBPF. */
3477 void bpf_user_rnd_init_once(void);
3478 u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
3479 u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
3480 
3481 #if defined(CONFIG_NET)
3482 bool bpf_sock_common_is_valid_access(int off, int size,
3483 				     enum bpf_access_type type,
3484 				     struct bpf_insn_access_aux *info);
3485 bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
3486 			      struct bpf_insn_access_aux *info);
3487 u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
3488 				const struct bpf_insn *si,
3489 				struct bpf_insn *insn_buf,
3490 				struct bpf_prog *prog,
3491 				u32 *target_size);
3492 int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
3493 			       struct bpf_dynptr *ptr);
3494 #else
3495 static inline bool bpf_sock_common_is_valid_access(int off, int size,
3496 						   enum bpf_access_type type,
3497 						   struct bpf_insn_access_aux *info)
3498 {
3499 	return false;
3500 }
3501 static inline bool bpf_sock_is_valid_access(int off, int size,
3502 					    enum bpf_access_type type,
3503 					    struct bpf_insn_access_aux *info)
3504 {
3505 	return false;
3506 }
3507 static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
3508 					      const struct bpf_insn *si,
3509 					      struct bpf_insn *insn_buf,
3510 					      struct bpf_prog *prog,
3511 					      u32 *target_size)
3512 {
3513 	return 0;
3514 }
3515 static inline int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
3516 					     struct bpf_dynptr *ptr)
3517 {
3518 	return -EOPNOTSUPP;
3519 }
3520 #endif
3521 
3522 #ifdef CONFIG_INET
3523 struct sk_reuseport_kern {
3524 	struct sk_buff *skb;
3525 	struct sock *sk;
3526 	struct sock *selected_sk;
3527 	struct sock *migrating_sk;
3528 	void *data_end;
3529 	u32 hash;
3530 	u32 reuseport_id;
3531 	bool bind_inany;
3532 };
3533 bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
3534 				  struct bpf_insn_access_aux *info);
3535 
3536 u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
3537 				    const struct bpf_insn *si,
3538 				    struct bpf_insn *insn_buf,
3539 				    struct bpf_prog *prog,
3540 				    u32 *target_size);
3541 
3542 bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
3543 				  struct bpf_insn_access_aux *info);
3544 
3545 u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
3546 				    const struct bpf_insn *si,
3547 				    struct bpf_insn *insn_buf,
3548 				    struct bpf_prog *prog,
3549 				    u32 *target_size);
3550 #else
3551 static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
3552 						enum bpf_access_type type,
3553 						struct bpf_insn_access_aux *info)
3554 {
3555 	return false;
3556 }
3557 
3558 static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
3559 						  const struct bpf_insn *si,
3560 						  struct bpf_insn *insn_buf,
3561 						  struct bpf_prog *prog,
3562 						  u32 *target_size)
3563 {
3564 	return 0;
3565 }
3566 static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
3567 						enum bpf_access_type type,
3568 						struct bpf_insn_access_aux *info)
3569 {
3570 	return false;
3571 }
3572 
3573 static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
3574 						  const struct bpf_insn *si,
3575 						  struct bpf_insn *insn_buf,
3576 						  struct bpf_prog *prog,
3577 						  u32 *target_size)
3578 {
3579 	return 0;
3580 }
3581 #endif /* CONFIG_INET */
3582 
3583 enum bpf_text_poke_type {
3584 	BPF_MOD_CALL,
3585 	BPF_MOD_JUMP,
3586 };
3587 
3588 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
3589 		       void *addr1, void *addr2);
3590 
3591 void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
3592 			       struct bpf_prog *new, struct bpf_prog *old);
3593 
3594 void *bpf_arch_text_copy(void *dst, void *src, size_t len);
3595 int bpf_arch_text_invalidate(void *dst, size_t len);
3596 
3597 struct btf_id_set;
3598 bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
3599 
3600 #define MAX_BPRINTF_VARARGS		12
3601 #define MAX_BPRINTF_BUF			1024
3602 
3603 /* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
3604  * arguments representation.
3605  */
3606 #define MAX_BPRINTF_BIN_ARGS	512
3607 
3608 struct bpf_bprintf_buffers {
3609 	char bin_args[MAX_BPRINTF_BIN_ARGS];
3610 	char buf[MAX_BPRINTF_BUF];
3611 };
3612 
3613 struct bpf_bprintf_data {
3614 	u32 *bin_args;
3615 	char *buf;
3616 	bool get_bin_args;
3617 	bool get_buf;
3618 };
3619 
3620 int bpf_bprintf_prepare(const char *fmt, u32 fmt_size, const u64 *raw_args,
3621 			u32 num_args, struct bpf_bprintf_data *data);
3622 void bpf_bprintf_cleanup(struct bpf_bprintf_data *data);
3623 int bpf_try_get_buffers(struct bpf_bprintf_buffers **bufs);
3624 void bpf_put_buffers(void);
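
/* Illustrative sketch, not part of this header: the bprintf machinery above
 * is used as prepare -> format -> cleanup, with bpf_bprintf_data selecting
 * which per-CPU pieces (bin_args and/or buf) get set up. emit_fmt() is a
 * hypothetical caller.
 */
static inline int emit_fmt(const char *fmt, u32 fmt_size,
			   const u64 *raw_args, u32 num_args)
{
	struct bpf_bprintf_data data = { .get_bin_args = true };
	int err;

	err = bpf_bprintf_prepare(fmt, fmt_size, raw_args, num_args, &data);
	if (err)
		return err;
	/* ... hand data.bin_args to a bstr_printf()-style consumer ... */
	bpf_bprintf_cleanup(&data);
	return 0;
}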
3625 
3626 void bpf_prog_stream_init(struct bpf_prog *prog);
3627 void bpf_prog_stream_free(struct bpf_prog *prog);
3628 int bpf_prog_stream_read(struct bpf_prog *prog, enum bpf_stream_id stream_id, void __user *buf, int len);
3629 void bpf_stream_stage_init(struct bpf_stream_stage *ss);
3630 void bpf_stream_stage_free(struct bpf_stream_stage *ss);
3631 __printf(2, 3)
3632 int bpf_stream_stage_printk(struct bpf_stream_stage *ss, const char *fmt, ...);
3633 int bpf_stream_stage_commit(struct bpf_stream_stage *ss, struct bpf_prog *prog,
3634 			    enum bpf_stream_id stream_id);
3635 int bpf_stream_stage_dump_stack(struct bpf_stream_stage *ss);
3636 
3637 #define bpf_stream_printk(ss, ...) bpf_stream_stage_printk(&ss, __VA_ARGS__)
3638 #define bpf_stream_dump_stack(ss) bpf_stream_stage_dump_stack(&ss)
3639 
3640 #define bpf_stream_stage(ss, prog, stream_id, expr)            \
3641 	({                                                     \
3642 		bpf_stream_stage_init(&ss);                    \
3643 		(expr);                                        \
3644 		bpf_stream_stage_commit(&ss, prog, stream_id); \
3645 		bpf_stream_stage_free(&ss);                    \
3646 	})
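
/* Illustrative sketch, not part of this header: bpf_stream_stage() wraps an
 * expression between stage init and commit/free, so everything printed in
 * @expr lands on the chosen stream as one unit. report_failure() is
 * hypothetical, and BPF_STDERR is assumed to be a valid enum bpf_stream_id.
 */
static inline void report_failure(struct bpf_prog *prog, int err)
{
	struct bpf_stream_stage ss;

	bpf_stream_stage(ss, prog, BPF_STDERR, ({
		bpf_stream_printk(ss, "prog %s failed: %d\n",
				  prog->aux->name, err);
		bpf_stream_dump_stack(ss);
	}));
}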
3647 
3648 #ifdef CONFIG_BPF_LSM
3649 void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype);
3650 void bpf_cgroup_atype_put(int cgroup_atype);
3651 #else
3652 static inline void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype) {}
3653 static inline void bpf_cgroup_atype_put(int cgroup_atype) {}
3654 #endif /* CONFIG_BPF_LSM */
3655 
3656 struct key;
3657 
3658 #ifdef CONFIG_KEYS
3659 struct bpf_key {
3660 	struct key *key;
3661 	bool has_ref;
3662 };
3663 #endif /* CONFIG_KEYS */
3664 
3665 static inline bool type_is_alloc(u32 type)
3666 {
3667 	return type & MEM_ALLOC;
3668 }
3669 
3670 static inline gfp_t bpf_memcg_flags(gfp_t flags)
3671 {
3672 	if (memcg_bpf_enabled())
3673 		return flags | __GFP_ACCOUNT;
3674 	return flags;
3675 }
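
/* Illustrative note, not part of this header: allocation sites wrap their
 * GFP flags so memcg charging only kicks in when BPF memcg accounting is
 * enabled, e.g.:
 *
 *	ptr = kzalloc(size, bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN));
 */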
3676 
3677 static inline bool bpf_is_subprog(const struct bpf_prog *prog)
3678 {
3679 	return prog->aux->func_idx != 0;
3680 }
3681 
3682 int bpf_prog_get_file_line(struct bpf_prog *prog, unsigned long ip, const char **filep,
3683 			   const char **linep, int *nump);
3684 struct bpf_prog *bpf_prog_find_from_stack(void);
3685 
3686 #endif /* _LINUX_BPF_H */
3687