xref: /linux/kernel/bpf/helpers.c (revision d303caf5caf453da2abfd84d249d210aaffe9873)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  */
4 #include <linux/bpf.h>
5 #include <linux/btf.h>
6 #include <linux/bpf-cgroup.h>
7 #include <linux/cgroup.h>
8 #include <linux/rcupdate.h>
9 #include <linux/random.h>
10 #include <linux/smp.h>
11 #include <linux/topology.h>
12 #include <linux/ktime.h>
13 #include <linux/sched.h>
14 #include <linux/uidgid.h>
15 #include <linux/filter.h>
16 #include <linux/ctype.h>
17 #include <linux/jiffies.h>
18 #include <linux/pid_namespace.h>
19 #include <linux/poison.h>
20 #include <linux/proc_ns.h>
21 #include <linux/sched/task.h>
22 #include <linux/security.h>
23 #include <linux/btf_ids.h>
24 #include <linux/bpf_mem_alloc.h>
25 #include <linux/kasan.h>
26 #include <linux/bpf_verifier.h>
27 #include <linux/uaccess.h>
28 #include <linux/verification.h>
29 #include <linux/task_work.h>
30 #include <linux/irq_work.h>
31 
32 #include "../../lib/kstrtox.h"
33 
34 /* If a kernel subsystem allows eBPF programs to call this function, its
35  * verifier_ops->get_func_proto() callback should return
36  * bpf_map_lookup_elem_proto, so that the verifier can properly check the arguments.
37  *
38  * Different map implementations rely on RCU in their map methods
39  * lookup/update/delete, therefore eBPF programs must run under an RCU lock
40  * if the program is allowed to access maps, so check rcu_read_lock_held() or
41  * rcu_read_lock_trace_held() in all three functions.
42  */
43 BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
44 {
45 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
46 		     !rcu_read_lock_bh_held());
47 	return (unsigned long) map->ops->map_lookup_elem(map, key);
48 }
49 
50 const struct bpf_func_proto bpf_map_lookup_elem_proto = {
51 	.func		= bpf_map_lookup_elem,
52 	.gpl_only	= false,
53 	.pkt_access	= true,
54 	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
55 	.arg1_type	= ARG_CONST_MAP_PTR,
56 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
57 };
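
/* Illustrative only (not part of this file): a BPF program reaches this
 * helper through the bpf_map_lookup_elem() wrapper from libbpf's
 * bpf_helpers.h, roughly as below; map and section names are hypothetical.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 128);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} counts SEC(".maps");
 *
 *	SEC("tracepoint/syscalls/sys_enter_openat")
 *	int count_openat(void *ctx)
 *	{
 *		__u32 key = 0;
 *		__u64 *val = bpf_map_lookup_elem(&counts, &key);
 *
 *		if (val)	// RET_PTR_TO_MAP_VALUE_OR_NULL: NULL check is mandatory
 *			__sync_fetch_and_add(val, 1);
 *		return 0;
 *	}
 *
 * The RCU read-side critical section demanded by the comment above is
 * entered by the kernel around program invocation, not by the program.
 */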
58 
59 BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
60 	   void *, value, u64, flags)
61 {
62 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
63 		     !rcu_read_lock_bh_held());
64 	return map->ops->map_update_elem(map, key, value, flags);
65 }
66 
67 const struct bpf_func_proto bpf_map_update_elem_proto = {
68 	.func		= bpf_map_update_elem,
69 	.gpl_only	= false,
70 	.pkt_access	= true,
71 	.ret_type	= RET_INTEGER,
72 	.arg1_type	= ARG_CONST_MAP_PTR,
73 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
74 	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
75 	.arg4_type	= ARG_ANYTHING,
76 };
77 
78 BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
79 {
80 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
81 		     !rcu_read_lock_bh_held());
82 	return map->ops->map_delete_elem(map, key);
83 }
84 
85 const struct bpf_func_proto bpf_map_delete_elem_proto = {
86 	.func		= bpf_map_delete_elem,
87 	.gpl_only	= false,
88 	.pkt_access	= true,
89 	.ret_type	= RET_INTEGER,
90 	.arg1_type	= ARG_CONST_MAP_PTR,
91 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
92 };
93 
94 BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
95 {
96 	return map->ops->map_push_elem(map, value, flags);
97 }
98 
99 const struct bpf_func_proto bpf_map_push_elem_proto = {
100 	.func		= bpf_map_push_elem,
101 	.gpl_only	= false,
102 	.pkt_access	= true,
103 	.ret_type	= RET_INTEGER,
104 	.arg1_type	= ARG_CONST_MAP_PTR,
105 	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
106 	.arg3_type	= ARG_ANYTHING,
107 };
108 
109 BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
110 {
111 	return map->ops->map_pop_elem(map, value);
112 }
113 
114 const struct bpf_func_proto bpf_map_pop_elem_proto = {
115 	.func		= bpf_map_pop_elem,
116 	.gpl_only	= false,
117 	.ret_type	= RET_INTEGER,
118 	.arg1_type	= ARG_CONST_MAP_PTR,
119 	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
120 };
121 
122 BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
123 {
124 	return map->ops->map_peek_elem(map, value);
125 }
126 
127 const struct bpf_func_proto bpf_map_peek_elem_proto = {
128 	.func		= bpf_map_peek_elem,
129 	.gpl_only	= false,
130 	.ret_type	= RET_INTEGER,
131 	.arg1_type	= ARG_CONST_MAP_PTR,
132 	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
133 };
134 
135 BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
136 {
137 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
138 		     !rcu_read_lock_bh_held());
139 	return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
140 }
141 
142 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = {
143 	.func		= bpf_map_lookup_percpu_elem,
144 	.gpl_only	= false,
145 	.pkt_access	= true,
146 	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
147 	.arg1_type	= ARG_CONST_MAP_PTR,
148 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
149 	.arg3_type	= ARG_ANYTHING,
150 };
151 
152 const struct bpf_func_proto bpf_get_prandom_u32_proto = {
153 	.func		= bpf_user_rnd_u32,
154 	.gpl_only	= false,
155 	.ret_type	= RET_INTEGER,
156 };
157 
158 BPF_CALL_0(bpf_get_smp_processor_id)
159 {
160 	return smp_processor_id();
161 }
162 
163 const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
164 	.func		= bpf_get_smp_processor_id,
165 	.gpl_only	= false,
166 	.ret_type	= RET_INTEGER,
167 	.allow_fastcall	= true,
168 };
169 
170 BPF_CALL_0(bpf_get_numa_node_id)
171 {
172 	return numa_node_id();
173 }
174 
175 const struct bpf_func_proto bpf_get_numa_node_id_proto = {
176 	.func		= bpf_get_numa_node_id,
177 	.gpl_only	= false,
178 	.ret_type	= RET_INTEGER,
179 };
180 
181 BPF_CALL_0(bpf_ktime_get_ns)
182 {
183 	/* NMI safe access to clock monotonic */
184 	return ktime_get_mono_fast_ns();
185 }
186 
187 const struct bpf_func_proto bpf_ktime_get_ns_proto = {
188 	.func		= bpf_ktime_get_ns,
189 	.gpl_only	= false,
190 	.ret_type	= RET_INTEGER,
191 };
192 
193 BPF_CALL_0(bpf_ktime_get_boot_ns)
194 {
195 	/* NMI safe access to clock boottime */
196 	return ktime_get_boot_fast_ns();
197 }
198 
199 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
200 	.func		= bpf_ktime_get_boot_ns,
201 	.gpl_only	= false,
202 	.ret_type	= RET_INTEGER,
203 };
204 
205 BPF_CALL_0(bpf_ktime_get_coarse_ns)
206 {
207 	return ktime_get_coarse_ns();
208 }
209 
210 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
211 	.func		= bpf_ktime_get_coarse_ns,
212 	.gpl_only	= false,
213 	.ret_type	= RET_INTEGER,
214 };
215 
216 BPF_CALL_0(bpf_ktime_get_tai_ns)
217 {
218 	/* NMI safe access to clock tai */
219 	return ktime_get_tai_fast_ns();
220 }
221 
222 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto = {
223 	.func		= bpf_ktime_get_tai_ns,
224 	.gpl_only	= false,
225 	.ret_type	= RET_INTEGER,
226 };
227 
228 BPF_CALL_0(bpf_get_current_pid_tgid)
229 {
230 	struct task_struct *task = current;
231 
232 	if (unlikely(!task))
233 		return -EINVAL;
234 
235 	return (u64) task->tgid << 32 | task->pid;
236 }
237 
238 const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
239 	.func		= bpf_get_current_pid_tgid,
240 	.gpl_only	= false,
241 	.ret_type	= RET_INTEGER,
242 };
243 
244 BPF_CALL_0(bpf_get_current_uid_gid)
245 {
246 	struct task_struct *task = current;
247 	kuid_t uid;
248 	kgid_t gid;
249 
250 	if (unlikely(!task))
251 		return -EINVAL;
252 
253 	current_uid_gid(&uid, &gid);
254 	return (u64) from_kgid(&init_user_ns, gid) << 32 |
255 		     from_kuid(&init_user_ns, uid);
256 }
257 
258 const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
259 	.func		= bpf_get_current_uid_gid,
260 	.gpl_only	= false,
261 	.ret_type	= RET_INTEGER,
262 };
263 
264 BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
265 {
266 	struct task_struct *task = current;
267 
268 	if (unlikely(!task))
269 		goto err_clear;
270 
271 	/* Verifier guarantees that size > 0 */
272 	strscpy_pad(buf, task->comm, size);
273 	return 0;
274 err_clear:
275 	memset(buf, 0, size);
276 	return -EINVAL;
277 }
278 
279 const struct bpf_func_proto bpf_get_current_comm_proto = {
280 	.func		= bpf_get_current_comm,
281 	.gpl_only	= false,
282 	.ret_type	= RET_INTEGER,
283 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
284 	.arg2_type	= ARG_CONST_SIZE,
285 };
286 
287 #if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)
288 
289 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
290 {
291 	arch_spinlock_t *l = (void *)lock;
292 	union {
293 		__u32 val;
294 		arch_spinlock_t lock;
295 	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };
296 
297 	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
298 	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
299 	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
300 	preempt_disable();
301 	arch_spin_lock(l);
302 }
303 
304 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
305 {
306 	arch_spinlock_t *l = (void *)lock;
307 
308 	arch_spin_unlock(l);
309 	preempt_enable();
310 }
311 
312 #else
313 
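/* Fallback used when neither queued spinlocks nor an arch-specific BPF
 * spinlock implementation is available: a simple test-and-set lock built on
 * an atomic_t.  The lock loop below spins read-only until the word becomes 0
 * and only then tries to take it with atomic_xchg(); atomic_xchg() is fully
 * ordered and so provides the acquire side, while unlock publishes the
 * critical section with atomic_set_release().
 */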
314 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
315 {
316 	atomic_t *l = (void *)lock;
317 
318 	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
319 	do {
320 		atomic_cond_read_relaxed(l, !VAL);
321 	} while (atomic_xchg(l, 1));
322 }
323 
324 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
325 {
326 	atomic_t *l = (void *)lock;
327 
328 	atomic_set_release(l, 0);
329 }
330 
331 #endif
332 
333 static DEFINE_PER_CPU(unsigned long, irqsave_flags);
334 
335 static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
336 {
337 	unsigned long flags;
338 
339 	local_irq_save(flags);
340 	__bpf_spin_lock(lock);
341 	__this_cpu_write(irqsave_flags, flags);
342 }
343 
344 NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
345 {
346 	__bpf_spin_lock_irqsave(lock);
347 	return 0;
348 }
349 
350 const struct bpf_func_proto bpf_spin_lock_proto = {
351 	.func		= bpf_spin_lock,
352 	.gpl_only	= false,
353 	.ret_type	= RET_VOID,
354 	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
355 	.arg1_btf_id    = BPF_PTR_POISON,
356 };
357 
358 static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
359 {
360 	unsigned long flags;
361 
362 	flags = __this_cpu_read(irqsave_flags);
363 	__bpf_spin_unlock(lock);
364 	local_irq_restore(flags);
365 }
366 
367 NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
368 {
369 	__bpf_spin_unlock_irqrestore(lock);
370 	return 0;
371 }
372 
373 const struct bpf_func_proto bpf_spin_unlock_proto = {
374 	.func		= bpf_spin_unlock,
375 	.gpl_only	= false,
376 	.ret_type	= RET_VOID,
377 	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
378 	.arg1_btf_id    = BPF_PTR_POISON,
379 };
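
/* Illustrative only: from BPF program code these helpers protect fields that
 * share a map value with a struct bpf_spin_lock, e.g. (hypothetical names):
 *
 *	struct val {
 *		struct bpf_spin_lock lock;
 *		long counter;
 *	};
 *
 *	struct val *v = bpf_map_lookup_elem(&vals, &key);
 *	if (v) {
 *		bpf_spin_lock(&v->lock);
 *		v->counter++;
 *		bpf_spin_unlock(&v->lock);
 *	}
 *
 * The verifier enforces that every bpf_spin_lock() is paired with a
 * bpf_spin_unlock() and restricts what may run inside the critical section.
 */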
380 
381 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
382 			   bool lock_src)
383 {
384 	struct bpf_spin_lock *lock;
385 
386 	if (lock_src)
387 		lock = src + map->record->spin_lock_off;
388 	else
389 		lock = dst + map->record->spin_lock_off;
390 	preempt_disable();
391 	__bpf_spin_lock_irqsave(lock);
392 	copy_map_value(map, dst, src);
393 	__bpf_spin_unlock_irqrestore(lock);
394 	preempt_enable();
395 }
396 
397 BPF_CALL_0(bpf_jiffies64)
398 {
399 	return get_jiffies_64();
400 }
401 
402 const struct bpf_func_proto bpf_jiffies64_proto = {
403 	.func		= bpf_jiffies64,
404 	.gpl_only	= false,
405 	.ret_type	= RET_INTEGER,
406 };
407 
408 #ifdef CONFIG_CGROUPS
409 BPF_CALL_0(bpf_get_current_cgroup_id)
410 {
411 	struct cgroup *cgrp;
412 	u64 cgrp_id;
413 
414 	rcu_read_lock();
415 	cgrp = task_dfl_cgroup(current);
416 	cgrp_id = cgroup_id(cgrp);
417 	rcu_read_unlock();
418 
419 	return cgrp_id;
420 }
421 
422 const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
423 	.func		= bpf_get_current_cgroup_id,
424 	.gpl_only	= false,
425 	.ret_type	= RET_INTEGER,
426 };
427 
428 BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
429 {
430 	struct cgroup *cgrp;
431 	struct cgroup *ancestor;
432 	u64 cgrp_id;
433 
434 	rcu_read_lock();
435 	cgrp = task_dfl_cgroup(current);
436 	ancestor = cgroup_ancestor(cgrp, ancestor_level);
437 	cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
438 	rcu_read_unlock();
439 
440 	return cgrp_id;
441 }
442 
443 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
444 	.func		= bpf_get_current_ancestor_cgroup_id,
445 	.gpl_only	= false,
446 	.ret_type	= RET_INTEGER,
447 	.arg1_type	= ARG_ANYTHING,
448 };
449 #endif /* CONFIG_CGROUPS */
450 
451 #define BPF_STRTOX_BASE_MASK 0x1F
452 
453 static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
454 			  unsigned long long *res, bool *is_negative)
455 {
456 	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
457 	const char *cur_buf = buf;
458 	size_t cur_len = buf_len;
459 	unsigned int consumed;
460 	size_t val_len;
461 	char str[64];
462 
463 	if (!buf || !buf_len || !res || !is_negative)
464 		return -EINVAL;
465 
466 	if (base != 0 && base != 8 && base != 10 && base != 16)
467 		return -EINVAL;
468 
469 	if (flags & ~BPF_STRTOX_BASE_MASK)
470 		return -EINVAL;
471 
472 	while (cur_buf < buf + buf_len && isspace(*cur_buf))
473 		++cur_buf;
474 
475 	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
476 	if (*is_negative)
477 		++cur_buf;
478 
479 	consumed = cur_buf - buf;
480 	cur_len -= consumed;
481 	if (!cur_len)
482 		return -EINVAL;
483 
484 	cur_len = min(cur_len, sizeof(str) - 1);
485 	memcpy(str, cur_buf, cur_len);
486 	str[cur_len] = '\0';
487 	cur_buf = str;
488 
489 	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
490 	val_len = _parse_integer(cur_buf, base, res);
491 
492 	if (val_len & KSTRTOX_OVERFLOW)
493 		return -ERANGE;
494 
495 	if (val_len == 0)
496 		return -EINVAL;
497 
498 	cur_buf += val_len;
499 	consumed += cur_buf - str;
500 
501 	return consumed;
502 }
503 
504 static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
505 			 long long *res)
506 {
507 	unsigned long long _res;
508 	bool is_negative;
509 	int err;
510 
511 	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
512 	if (err < 0)
513 		return err;
514 	if (is_negative) {
515 		if ((long long)-_res > 0)
516 			return -ERANGE;
517 		*res = -_res;
518 	} else {
519 		if ((long long)_res < 0)
520 			return -ERANGE;
521 		*res = _res;
522 	}
523 	return err;
524 }
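
/* Worked example (illustrative): __bpf_strtoll("  -0x1f", 7, 0, &res) skips
 * the two leading spaces, consumes the '-', lets _parse_integer_fixup_radix()
 * detect base 16 from the "0x" prefix and _parse_integer() parse "1f" == 31,
 * then stores res = -31 and returns 7, the number of characters consumed.
 */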
525 
526 BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
527 	   s64 *, res)
528 {
529 	long long _res;
530 	int err;
531 
532 	*res = 0;
533 	err = __bpf_strtoll(buf, buf_len, flags, &_res);
534 	if (err < 0)
535 		return err;
536 	*res = _res;
537 	return err;
538 }
539 
540 const struct bpf_func_proto bpf_strtol_proto = {
541 	.func		= bpf_strtol,
542 	.gpl_only	= false,
543 	.ret_type	= RET_INTEGER,
544 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
545 	.arg2_type	= ARG_CONST_SIZE,
546 	.arg3_type	= ARG_ANYTHING,
547 	.arg4_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
548 	.arg4_size	= sizeof(s64),
549 };
550 
551 BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
552 	   u64 *, res)
553 {
554 	unsigned long long _res;
555 	bool is_negative;
556 	int err;
557 
558 	*res = 0;
559 	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
560 	if (err < 0)
561 		return err;
562 	if (is_negative)
563 		return -EINVAL;
564 	*res = _res;
565 	return err;
566 }
567 
568 const struct bpf_func_proto bpf_strtoul_proto = {
569 	.func		= bpf_strtoul,
570 	.gpl_only	= false,
571 	.ret_type	= RET_INTEGER,
572 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
573 	.arg2_type	= ARG_CONST_SIZE,
574 	.arg3_type	= ARG_ANYTHING,
575 	.arg4_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
576 	.arg4_size	= sizeof(u64),
577 };
578 
579 BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
580 {
581 	return strncmp(s1, s2, s1_sz);
582 }
583 
584 static const struct bpf_func_proto bpf_strncmp_proto = {
585 	.func		= bpf_strncmp,
586 	.gpl_only	= false,
587 	.ret_type	= RET_INTEGER,
588 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
589 	.arg2_type	= ARG_CONST_SIZE,
590 	.arg3_type	= ARG_PTR_TO_CONST_STR,
591 };
592 
593 BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
594 	   struct bpf_pidns_info *, nsdata, u32, size)
595 {
596 	struct task_struct *task = current;
597 	struct pid_namespace *pidns;
598 	int err = -EINVAL;
599 
600 	if (unlikely(size != sizeof(struct bpf_pidns_info)))
601 		goto clear;
602 
603 	if (unlikely((u64)(dev_t)dev != dev))
604 		goto clear;
605 
606 	if (unlikely(!task))
607 		goto clear;
608 
609 	pidns = task_active_pid_ns(task);
610 	if (unlikely(!pidns)) {
611 		err = -ENOENT;
612 		goto clear;
613 	}
614 
615 	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
616 		goto clear;
617 
618 	nsdata->pid = task_pid_nr_ns(task, pidns);
619 	nsdata->tgid = task_tgid_nr_ns(task, pidns);
620 	return 0;
621 clear:
622 	memset((void *)nsdata, 0, (size_t) size);
623 	return err;
624 }
625 
626 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
627 	.func		= bpf_get_ns_current_pid_tgid,
628 	.gpl_only	= false,
629 	.ret_type	= RET_INTEGER,
630 	.arg1_type	= ARG_ANYTHING,
631 	.arg2_type	= ARG_ANYTHING,
632 	.arg3_type      = ARG_PTR_TO_UNINIT_MEM,
633 	.arg4_type      = ARG_CONST_SIZE,
634 };
635 
636 static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
637 	.func		= bpf_get_raw_cpu_id,
638 	.gpl_only	= false,
639 	.ret_type	= RET_INTEGER,
640 };
641 
642 BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
643 	   u64, flags, void *, data, u64, size)
644 {
645 	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
646 		return -EINVAL;
647 
648 	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
649 }
650 
651 const struct bpf_func_proto bpf_event_output_data_proto =  {
652 	.func		= bpf_event_output_data,
653 	.gpl_only       = true,
654 	.ret_type       = RET_INTEGER,
655 	.arg1_type      = ARG_PTR_TO_CTX,
656 	.arg2_type      = ARG_CONST_MAP_PTR,
657 	.arg3_type      = ARG_ANYTHING,
658 	.arg4_type      = ARG_PTR_TO_MEM | MEM_RDONLY,
659 	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
660 };
661 
662 BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
663 	   const void __user *, user_ptr)
664 {
665 	int ret = copy_from_user(dst, user_ptr, size);
666 
667 	if (unlikely(ret)) {
668 		memset(dst, 0, size);
669 		ret = -EFAULT;
670 	}
671 
672 	return ret;
673 }
674 
675 const struct bpf_func_proto bpf_copy_from_user_proto = {
676 	.func		= bpf_copy_from_user,
677 	.gpl_only	= false,
678 	.might_sleep	= true,
679 	.ret_type	= RET_INTEGER,
680 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
681 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
682 	.arg3_type	= ARG_ANYTHING,
683 };
684 
685 BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size,
686 	   const void __user *, user_ptr, struct task_struct *, tsk, u64, flags)
687 {
688 	int ret;
689 
690 	/* flags is not used yet */
691 	if (unlikely(flags))
692 		return -EINVAL;
693 
694 	if (unlikely(!size))
695 		return 0;
696 
697 	ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0);
698 	if (ret == size)
699 		return 0;
700 
701 	memset(dst, 0, size);
702 	/* Return -EFAULT for partial read */
703 	return ret < 0 ? ret : -EFAULT;
704 }
705 
706 const struct bpf_func_proto bpf_copy_from_user_task_proto = {
707 	.func		= bpf_copy_from_user_task,
708 	.gpl_only	= true,
709 	.might_sleep	= true,
710 	.ret_type	= RET_INTEGER,
711 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
712 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
713 	.arg3_type	= ARG_ANYTHING,
714 	.arg4_type	= ARG_PTR_TO_BTF_ID,
715 	.arg4_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
716 	.arg5_type	= ARG_ANYTHING
717 };
718 
719 BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
720 {
721 	if (cpu >= nr_cpu_ids)
722 		return (unsigned long)NULL;
723 
724 	return (unsigned long)per_cpu_ptr((const void __percpu *)(const uintptr_t)ptr, cpu);
725 }
726 
727 const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
728 	.func		= bpf_per_cpu_ptr,
729 	.gpl_only	= false,
730 	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
731 	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
732 	.arg2_type	= ARG_ANYTHING,
733 };
734 
735 BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
736 {
737 	return (unsigned long)this_cpu_ptr((const void __percpu *)(const uintptr_t)percpu_ptr);
738 }
739 
740 const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
741 	.func		= bpf_this_cpu_ptr,
742 	.gpl_only	= false,
743 	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
744 	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
745 };
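
/* Illustrative only: from BPF program code these helpers take a pointer to a
 * per-CPU kernel symbol declared with __ksym, e.g. (assuming such a
 * declaration exists in the program):
 *
 *	extern const struct rq runqueues __ksym;
 *
 *	const struct rq *rq = bpf_per_cpu_ptr(&runqueues, cpu);
 *	if (rq)
 *		... read that CPU's data ...
 *
 * bpf_this_cpu_ptr() is the same except it resolves for the current CPU and
 * therefore never returns NULL.
 */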
746 
747 static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
748 		size_t bufsz)
749 {
750 	void __user *user_ptr = (__force void __user *)unsafe_ptr;
751 
752 	buf[0] = 0;
753 
754 	switch (fmt_ptype) {
755 	case 's':
756 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
757 		if ((unsigned long)unsafe_ptr < TASK_SIZE)
758 			return strncpy_from_user_nofault(buf, user_ptr, bufsz);
759 		fallthrough;
760 #endif
761 	case 'k':
762 		return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
763 	case 'u':
764 		return strncpy_from_user_nofault(buf, user_ptr, bufsz);
765 	}
766 
767 	return -EINVAL;
768 }
769 
770 /* Support executing three nested bprintf helper calls on a given CPU */
771 #define MAX_BPRINTF_NEST_LEVEL	3
772 
773 static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs);
774 static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);
775 
776 int bpf_try_get_buffers(struct bpf_bprintf_buffers **bufs)
777 {
778 	int nest_level;
779 
780 	nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
781 	if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
782 		this_cpu_dec(bpf_bprintf_nest_level);
783 		return -EBUSY;
784 	}
785 	*bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]);
786 
787 	return 0;
788 }
789 
790 void bpf_put_buffers(void)
791 {
792 	if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0))
793 		return;
794 	this_cpu_dec(bpf_bprintf_nest_level);
795 }
796 
797 void bpf_bprintf_cleanup(struct bpf_bprintf_data *data)
798 {
799 	if (!data->bin_args && !data->buf)
800 		return;
801 	bpf_put_buffers();
802 }
803 
804 /*
805  * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
806  *
807  * Returns a negative value if fmt is an invalid format string or 0 otherwise.
808  *
809  * This can be used in two ways:
810  * - Format string verification only: when data->get_bin_args is false
811  * - Arguments preparation: in addition to the above verification, it writes in
812  *   data->bin_args a binary representation of arguments usable by bstr_printf
813  *   where pointers from BPF have been sanitized.
814  *
815  * In argument preparation mode, if 0 is returned, safe temporary buffers are
816  * allocated and bpf_bprintf_cleanup should be called to free them after use.
817  */
818 int bpf_bprintf_prepare(const char *fmt, u32 fmt_size, const u64 *raw_args,
819 			u32 num_args, struct bpf_bprintf_data *data)
820 {
821 	bool get_buffers = (data->get_bin_args && num_args) || data->get_buf;
822 	char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
823 	struct bpf_bprintf_buffers *buffers = NULL;
824 	size_t sizeof_cur_arg, sizeof_cur_ip;
825 	int err, i, num_spec = 0;
826 	u64 cur_arg;
827 	char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";
828 
829 	fmt_end = strnchr(fmt, fmt_size, 0);
830 	if (!fmt_end)
831 		return -EINVAL;
832 	fmt_size = fmt_end - fmt;
833 
834 	if (get_buffers && bpf_try_get_buffers(&buffers))
835 		return -EBUSY;
836 
837 	if (data->get_bin_args) {
838 		if (num_args)
839 			tmp_buf = buffers->bin_args;
840 		tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS;
841 		data->bin_args = (u32 *)tmp_buf;
842 	}
843 
844 	if (data->get_buf)
845 		data->buf = buffers->buf;
846 
847 	for (i = 0; i < fmt_size; i++) {
848 		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
849 			err = -EINVAL;
850 			goto out;
851 		}
852 
853 		if (fmt[i] != '%')
854 			continue;
855 
856 		if (fmt[i + 1] == '%') {
857 			i++;
858 			continue;
859 		}
860 
861 		if (num_spec >= num_args) {
862 			err = -EINVAL;
863 			goto out;
864 		}
865 
866 		/* The string is zero-terminated so if fmt[i] != 0, we can
867 		 * always access fmt[i + 1]; in the worst case it will be a 0
868 		 */
869 		i++;
870 
871 		/* skip optional "[0 +-][num]" width formatting field */
872 		while (fmt[i] == '0' || fmt[i] == '+'  || fmt[i] == '-' ||
873 		       fmt[i] == ' ')
874 			i++;
875 		if (fmt[i] >= '1' && fmt[i] <= '9') {
876 			i++;
877 			while (fmt[i] >= '0' && fmt[i] <= '9')
878 				i++;
879 		}
880 
881 		if (fmt[i] == 'p') {
882 			sizeof_cur_arg = sizeof(long);
883 
884 			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
885 			    ispunct(fmt[i + 1])) {
886 				if (tmp_buf)
887 					cur_arg = raw_args[num_spec];
888 				goto nocopy_fmt;
889 			}
890 
891 			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
892 			    fmt[i + 2] == 's') {
893 				fmt_ptype = fmt[i + 1];
894 				i += 2;
895 				goto fmt_str;
896 			}
897 
898 			if (fmt[i + 1] == 'K' ||
899 			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
900 			    fmt[i + 1] == 'S') {
901 				if (tmp_buf)
902 					cur_arg = raw_args[num_spec];
903 				i++;
904 				goto nocopy_fmt;
905 			}
906 
907 			if (fmt[i + 1] == 'B') {
908 				if (tmp_buf)  {
909 					err = snprintf(tmp_buf,
910 						       (tmp_buf_end - tmp_buf),
911 						       "%pB",
912 						       (void *)(long)raw_args[num_spec]);
913 					tmp_buf += (err + 1);
914 				}
915 
916 				i++;
917 				num_spec++;
918 				continue;
919 			}
920 
921 			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
922 			if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
923 			    (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
924 				err = -EINVAL;
925 				goto out;
926 			}
927 
928 			i += 2;
929 			if (!tmp_buf)
930 				goto nocopy_fmt;
931 
932 			sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
933 			if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
934 				err = -ENOSPC;
935 				goto out;
936 			}
937 
938 			unsafe_ptr = (char *)(long)raw_args[num_spec];
939 			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
940 						       sizeof_cur_ip);
941 			if (err < 0)
942 				memset(cur_ip, 0, sizeof_cur_ip);
943 
944 			/* hack: bstr_printf expects IP addresses to be
945 			 * pre-formatted as strings; ironically, the easiest way
946 			 * to do that is to call snprintf.
947 			 */
948 			ip_spec[2] = fmt[i - 1];
949 			ip_spec[3] = fmt[i];
950 			err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
951 				       ip_spec, &cur_ip);
952 
953 			tmp_buf += err + 1;
954 			num_spec++;
955 
956 			continue;
957 		} else if (fmt[i] == 's') {
958 			fmt_ptype = fmt[i];
959 fmt_str:
960 			if (fmt[i + 1] != 0 &&
961 			    !isspace(fmt[i + 1]) &&
962 			    !ispunct(fmt[i + 1])) {
963 				err = -EINVAL;
964 				goto out;
965 			}
966 
967 			if (!tmp_buf)
968 				goto nocopy_fmt;
969 
970 			if (tmp_buf_end == tmp_buf) {
971 				err = -ENOSPC;
972 				goto out;
973 			}
974 
975 			unsafe_ptr = (char *)(long)raw_args[num_spec];
976 			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
977 						    fmt_ptype,
978 						    tmp_buf_end - tmp_buf);
979 			if (err < 0) {
980 				tmp_buf[0] = '\0';
981 				err = 1;
982 			}
983 
984 			tmp_buf += err;
985 			num_spec++;
986 
987 			continue;
988 		} else if (fmt[i] == 'c') {
989 			if (!tmp_buf)
990 				goto nocopy_fmt;
991 
992 			if (tmp_buf_end == tmp_buf) {
993 				err = -ENOSPC;
994 				goto out;
995 			}
996 
997 			*tmp_buf = raw_args[num_spec];
998 			tmp_buf++;
999 			num_spec++;
1000 
1001 			continue;
1002 		}
1003 
1004 		sizeof_cur_arg = sizeof(int);
1005 
1006 		if (fmt[i] == 'l') {
1007 			sizeof_cur_arg = sizeof(long);
1008 			i++;
1009 		}
1010 		if (fmt[i] == 'l') {
1011 			sizeof_cur_arg = sizeof(long long);
1012 			i++;
1013 		}
1014 
1015 		if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
1016 		    fmt[i] != 'x' && fmt[i] != 'X') {
1017 			err = -EINVAL;
1018 			goto out;
1019 		}
1020 
1021 		if (tmp_buf)
1022 			cur_arg = raw_args[num_spec];
1023 nocopy_fmt:
1024 		if (tmp_buf) {
1025 			tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
1026 			if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
1027 				err = -ENOSPC;
1028 				goto out;
1029 			}
1030 
1031 			if (sizeof_cur_arg == 8) {
1032 				*(u32 *)tmp_buf = *(u32 *)&cur_arg;
1033 				*(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
1034 			} else {
1035 				*(u32 *)tmp_buf = (u32)(long)cur_arg;
1036 			}
1037 			tmp_buf += sizeof_cur_arg;
1038 		}
1039 		num_spec++;
1040 	}
1041 
1042 	err = 0;
1043 out:
1044 	if (err)
1045 		bpf_bprintf_cleanup(data);
1046 	return err;
1047 }
1048 
1049 BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
1050 	   const void *, args, u32, data_len)
1051 {
1052 	struct bpf_bprintf_data data = {
1053 		.get_bin_args	= true,
1054 	};
1055 	int err, num_args;
1056 
1057 	if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
1058 	    (data_len && !args))
1059 		return -EINVAL;
1060 	num_args = data_len / 8;
1061 
1062 	/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
1063 	 * can safely give an unbounded size.
1064 	 */
1065 	err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data);
1066 	if (err < 0)
1067 		return err;
1068 
1069 	err = bstr_printf(str, str_size, fmt, data.bin_args);
1070 
1071 	bpf_bprintf_cleanup(&data);
1072 
1073 	return err + 1;
1074 }
1075 
1076 const struct bpf_func_proto bpf_snprintf_proto = {
1077 	.func		= bpf_snprintf,
1078 	.gpl_only	= true,
1079 	.ret_type	= RET_INTEGER,
1080 	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
1081 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1082 	.arg3_type	= ARG_PTR_TO_CONST_STR,
1083 	.arg4_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
1084 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1085 };
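
/* Illustrative only: from BPF program code the variadic arguments are passed
 * as an array of u64 whose size in bytes becomes data_len (hence the
 * "data_len % 8" check above), e.g. with hypothetical values:
 *
 *	char out[64];
 *	__u64 args[] = { pid, cpu };
 *
 *	bpf_snprintf(out, sizeof(out), "pid %d cpu %u", args, sizeof(args));
 *
 * The return value is the length the fully formatted string would need,
 * including the trailing NUL (err + 1 above).
 */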
1086 
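/* Recover the map key that belongs to a given value pointer: for arrays the
 * index is derived from the value's offset and returned through *arr_idx,
 * while for other map types the key is stored directly in front of the
 * value, rounded up to 8 bytes.
 */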
1087 static void *map_key_from_value(struct bpf_map *map, void *value, u32 *arr_idx)
1088 {
1089 	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
1090 		struct bpf_array *array = container_of(map, struct bpf_array, map);
1091 
1092 		*arr_idx = ((char *)value - array->value) / array->elem_size;
1093 		return arr_idx;
1094 	}
1095 	return (void *)value - round_up(map->key_size, 8);
1096 }
1097 
1098 struct bpf_async_cb {
1099 	struct bpf_map *map;
1100 	struct bpf_prog *prog;
1101 	void __rcu *callback_fn;
1102 	void *value;
1103 	union {
1104 		struct rcu_head rcu;
1105 		struct work_struct delete_work;
1106 	};
1107 	u64 flags;
1108 };
1109 
1110 /* BPF map elements can contain 'struct bpf_timer'.
1111  * Such map owns all of its BPF timers.
1112  * 'struct bpf_timer' is allocated as part of map element allocation
1113  * and it's zero initialized.
1114  * That space is used to keep 'struct bpf_async_kern'.
1115  * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and
1116  * remembers 'struct bpf_map *' pointer it's part of.
1117  * bpf_timer_set_callback() increments the prog refcnt and assigns the bpf callback_fn.
1118  * bpf_timer_start() arms the timer.
1119  * If user space reference to a map goes to zero at this point
1120  * ops->map_release_uref callback is responsible for cancelling the timers,
1121  * freeing their memory, and decrementing prog's refcnts.
1122  * bpf_timer_cancel() cancels the timer and decrements prog's refcnt.
1123  * Inner maps can contain bpf timers as well. ops->map_release_uref is
1124  * freeing the timers when inner map is replaced or deleted by user space.
1125  */
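
/* Illustrative only: the lifecycle above corresponds roughly to the following
 * BPF program pattern (hypothetical map, element and callback names):
 *
 *	struct elem { struct bpf_timer t; };
 *
 *	static int timer_cb(void *map, __u32 *key, struct elem *e)
 *	{
 *		return 0;	// the verifier requires callbacks to return 0
 *	}
 *
 *	struct elem *e = bpf_map_lookup_elem(&timers, &key);
 *	if (e) {
 *		bpf_timer_init(&e->t, &timers, CLOCK_MONOTONIC);
 *		bpf_timer_set_callback(&e->t, timer_cb);
 *		bpf_timer_start(&e->t, 1000000, 0);	// fire in 1 ms
 *	}
 */
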
1126 struct bpf_hrtimer {
1127 	struct bpf_async_cb cb;
1128 	struct hrtimer timer;
1129 	atomic_t cancelling;
1130 };
1131 
1132 struct bpf_work {
1133 	struct bpf_async_cb cb;
1134 	struct work_struct work;
1135 	struct work_struct delete_work;
1136 };
1137 
1138 /* the actual struct hidden inside uapi struct bpf_timer and bpf_wq */
1139 struct bpf_async_kern {
1140 	union {
1141 		struct bpf_async_cb *cb;
1142 		struct bpf_hrtimer *timer;
1143 		struct bpf_work *work;
1144 	};
1145 	/* bpf_spin_lock is used here instead of spinlock_t to make
1146 	 * sure that it always fits into space reserved by struct bpf_timer
1147 	 * regardless of LOCKDEP and spinlock debug flags.
1148 	 */
1149 	struct bpf_spin_lock lock;
1150 } __attribute__((aligned(8)));
1151 
1152 enum bpf_async_type {
1153 	BPF_ASYNC_TYPE_TIMER = 0,
1154 	BPF_ASYNC_TYPE_WQ,
1155 };
1156 
1157 static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);
1158 
1159 static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
1160 {
1161 	struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
1162 	struct bpf_map *map = t->cb.map;
1163 	void *value = t->cb.value;
1164 	bpf_callback_t callback_fn;
1165 	void *key;
1166 	u32 idx;
1167 
1168 	BTF_TYPE_EMIT(struct bpf_timer);
1169 	callback_fn = rcu_dereference_check(t->cb.callback_fn, rcu_read_lock_bh_held());
1170 	if (!callback_fn)
1171 		goto out;
1172 
1173 	/* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
1174 	 * cannot be preempted by another bpf_timer_cb() on the same cpu.
1175 	 * Remember the timer this callback is servicing to prevent
1176 	 * deadlock if callback_fn() calls bpf_timer_cancel() or
1177 	 * bpf_map_delete_elem() on the same timer.
1178 	 */
1179 	this_cpu_write(hrtimer_running, t);
1180 
1181 	key = map_key_from_value(map, value, &idx);
1182 
1183 	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
1184 	/* The verifier checked that return value is zero. */
1185 
1186 	this_cpu_write(hrtimer_running, NULL);
1187 out:
1188 	return HRTIMER_NORESTART;
1189 }
1190 
1191 static void bpf_wq_work(struct work_struct *work)
1192 {
1193 	struct bpf_work *w = container_of(work, struct bpf_work, work);
1194 	struct bpf_async_cb *cb = &w->cb;
1195 	struct bpf_map *map = cb->map;
1196 	bpf_callback_t callback_fn;
1197 	void *value = cb->value;
1198 	void *key;
1199 	u32 idx;
1200 
1201 	BTF_TYPE_EMIT(struct bpf_wq);
1202 
1203 	callback_fn = READ_ONCE(cb->callback_fn);
1204 	if (!callback_fn)
1205 		return;
1206 
1207 	key = map_key_from_value(map, value, &idx);
1208 
1209 	rcu_read_lock_trace();
1210 	migrate_disable();
1211 
1212 	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
1213 
1214 	migrate_enable();
1215 	rcu_read_unlock_trace();
1216 }
1217 
1218 static void bpf_async_cb_rcu_free(struct rcu_head *rcu)
1219 {
1220 	struct bpf_async_cb *cb = container_of(rcu, struct bpf_async_cb, rcu);
1221 
1222 	kfree_nolock(cb);
1223 }
1224 
1225 static void bpf_wq_delete_work(struct work_struct *work)
1226 {
1227 	struct bpf_work *w = container_of(work, struct bpf_work, delete_work);
1228 
1229 	cancel_work_sync(&w->work);
1230 
1231 	call_rcu(&w->cb.rcu, bpf_async_cb_rcu_free);
1232 }
1233 
1234 static void bpf_timer_delete_work(struct work_struct *work)
1235 {
1236 	struct bpf_hrtimer *t = container_of(work, struct bpf_hrtimer, cb.delete_work);
1237 
1238 	/* Cancel the timer and wait for callback to complete if it was running.
1239 	 * If hrtimer_cancel() can be safely called it's safe to call
1240 	 * call_rcu() right after for both preallocated and non-preallocated
1241 	 * maps.  The async->cb = NULL was already done and no code path can see
1242 	 * address 't' anymore. Any timer armed on the existing bpf_hrtimer before
1243 	 * bpf_timer_cancel_and_free() will already have been cancelled.
1244 	 */
1245 	hrtimer_cancel(&t->timer);
1246 	call_rcu(&t->cb.rcu, bpf_async_cb_rcu_free);
1247 }
1248 
1249 static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags,
1250 			    enum bpf_async_type type)
1251 {
1252 	struct bpf_async_cb *cb;
1253 	struct bpf_hrtimer *t;
1254 	struct bpf_work *w;
1255 	clockid_t clockid;
1256 	size_t size;
1257 	int ret = 0;
1258 
1259 	if (in_nmi())
1260 		return -EOPNOTSUPP;
1261 
1262 	switch (type) {
1263 	case BPF_ASYNC_TYPE_TIMER:
1264 		size = sizeof(struct bpf_hrtimer);
1265 		break;
1266 	case BPF_ASYNC_TYPE_WQ:
1267 		size = sizeof(struct bpf_work);
1268 		break;
1269 	default:
1270 		return -EINVAL;
1271 	}
1272 
1273 	__bpf_spin_lock_irqsave(&async->lock);
1274 	t = async->timer;
1275 	if (t) {
1276 		ret = -EBUSY;
1277 		goto out;
1278 	}
1279 
1280 	cb = bpf_map_kmalloc_nolock(map, size, 0, map->numa_node);
1281 	if (!cb) {
1282 		ret = -ENOMEM;
1283 		goto out;
1284 	}
1285 
1286 	switch (type) {
1287 	case BPF_ASYNC_TYPE_TIMER:
1288 		clockid = flags & (MAX_CLOCKS - 1);
1289 		t = (struct bpf_hrtimer *)cb;
1290 
1291 		atomic_set(&t->cancelling, 0);
1292 		INIT_WORK(&t->cb.delete_work, bpf_timer_delete_work);
1293 		hrtimer_setup(&t->timer, bpf_timer_cb, clockid, HRTIMER_MODE_REL_SOFT);
1294 		cb->value = (void *)async - map->record->timer_off;
1295 		break;
1296 	case BPF_ASYNC_TYPE_WQ:
1297 		w = (struct bpf_work *)cb;
1298 
1299 		INIT_WORK(&w->work, bpf_wq_work);
1300 		INIT_WORK(&w->delete_work, bpf_wq_delete_work);
1301 		cb->value = (void *)async - map->record->wq_off;
1302 		break;
1303 	}
1304 	cb->map = map;
1305 	cb->prog = NULL;
1306 	cb->flags = flags;
1307 	rcu_assign_pointer(cb->callback_fn, NULL);
1308 
1309 	WRITE_ONCE(async->cb, cb);
1310 	/* Guarantee the order between async->cb and map->usercnt. So
1311 	 * when there are concurrent uref release and bpf timer init, either
1312 	 * bpf_timer_cancel_and_free() called by uref release reads a non-NULL
1313 	 * timer or atomic64_read() below returns a zero usercnt.
1314 	 */
1315 	smp_mb();
1316 	if (!atomic64_read(&map->usercnt)) {
1317 		/* maps with timers must be either held by user space
1318 		 * or pinned in bpffs.
1319 		 */
1320 		WRITE_ONCE(async->cb, NULL);
1321 		kfree_nolock(cb);
1322 		ret = -EPERM;
1323 	}
1324 out:
1325 	__bpf_spin_unlock_irqrestore(&async->lock);
1326 	return ret;
1327 }
1328 
1329 BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map,
1330 	   u64, flags)
1331 {
1332 	clock_t clockid = flags & (MAX_CLOCKS - 1);
1333 
1334 	BUILD_BUG_ON(MAX_CLOCKS != 16);
1335 	BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_timer));
1336 	BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_timer));
1337 
1338 	if (flags >= MAX_CLOCKS ||
1339 	    /* similar to timerfd except _ALARM variants are not supported */
1340 	    (clockid != CLOCK_MONOTONIC &&
1341 	     clockid != CLOCK_REALTIME &&
1342 	     clockid != CLOCK_BOOTTIME))
1343 		return -EINVAL;
1344 
1345 	return __bpf_async_init(timer, map, flags, BPF_ASYNC_TYPE_TIMER);
1346 }
1347 
1348 static const struct bpf_func_proto bpf_timer_init_proto = {
1349 	.func		= bpf_timer_init,
1350 	.gpl_only	= true,
1351 	.ret_type	= RET_INTEGER,
1352 	.arg1_type	= ARG_PTR_TO_TIMER,
1353 	.arg2_type	= ARG_CONST_MAP_PTR,
1354 	.arg3_type	= ARG_ANYTHING,
1355 };
1356 
1357 static int __bpf_async_set_callback(struct bpf_async_kern *async, void *callback_fn,
1358 				    struct bpf_prog_aux *aux, unsigned int flags,
1359 				    enum bpf_async_type type)
1360 {
1361 	struct bpf_prog *prev, *prog = aux->prog;
1362 	struct bpf_async_cb *cb;
1363 	int ret = 0;
1364 
1365 	if (in_nmi())
1366 		return -EOPNOTSUPP;
1367 	__bpf_spin_lock_irqsave(&async->lock);
1368 	cb = async->cb;
1369 	if (!cb) {
1370 		ret = -EINVAL;
1371 		goto out;
1372 	}
1373 	if (!atomic64_read(&cb->map->usercnt)) {
1374 		/* maps with timers must be either held by user space
1375 		 * or pinned in bpffs. Otherwise timer might still be
1376 		 * running even when bpf prog is detached and user space
1377 		 * is gone, since map_release_uref won't ever be called.
1378 		 */
1379 		ret = -EPERM;
1380 		goto out;
1381 	}
1382 	prev = cb->prog;
1383 	if (prev != prog) {
1384 		/* Bump prog refcnt once. Every bpf_timer_set_callback()
1385 		 * can pick different callback_fn-s within the same prog.
1386 		 */
1387 		prog = bpf_prog_inc_not_zero(prog);
1388 		if (IS_ERR(prog)) {
1389 			ret = PTR_ERR(prog);
1390 			goto out;
1391 		}
1392 		if (prev)
1393 			/* Drop prev prog refcnt when swapping with new prog */
1394 			bpf_prog_put(prev);
1395 		cb->prog = prog;
1396 	}
1397 	rcu_assign_pointer(cb->callback_fn, callback_fn);
1398 out:
1399 	__bpf_spin_unlock_irqrestore(&async->lock);
1400 	return ret;
1401 }
1402 
1403 BPF_CALL_3(bpf_timer_set_callback, struct bpf_async_kern *, timer, void *, callback_fn,
1404 	   struct bpf_prog_aux *, aux)
1405 {
1406 	return __bpf_async_set_callback(timer, callback_fn, aux, 0, BPF_ASYNC_TYPE_TIMER);
1407 }
1408 
1409 static const struct bpf_func_proto bpf_timer_set_callback_proto = {
1410 	.func		= bpf_timer_set_callback,
1411 	.gpl_only	= true,
1412 	.ret_type	= RET_INTEGER,
1413 	.arg1_type	= ARG_PTR_TO_TIMER,
1414 	.arg2_type	= ARG_PTR_TO_FUNC,
1415 };
1416 
1417 BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, timer, u64, nsecs, u64, flags)
1418 {
1419 	struct bpf_hrtimer *t;
1420 	int ret = 0;
1421 	enum hrtimer_mode mode;
1422 
1423 	if (in_nmi())
1424 		return -EOPNOTSUPP;
1425 	if (flags & ~(BPF_F_TIMER_ABS | BPF_F_TIMER_CPU_PIN))
1426 		return -EINVAL;
1427 	__bpf_spin_lock_irqsave(&timer->lock);
1428 	t = timer->timer;
1429 	if (!t || !t->cb.prog) {
1430 		ret = -EINVAL;
1431 		goto out;
1432 	}
1433 
1434 	if (flags & BPF_F_TIMER_ABS)
1435 		mode = HRTIMER_MODE_ABS_SOFT;
1436 	else
1437 		mode = HRTIMER_MODE_REL_SOFT;
1438 
1439 	if (flags & BPF_F_TIMER_CPU_PIN)
1440 		mode |= HRTIMER_MODE_PINNED;
1441 
1442 	hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
1443 out:
1444 	__bpf_spin_unlock_irqrestore(&timer->lock);
1445 	return ret;
1446 }
1447 
1448 static const struct bpf_func_proto bpf_timer_start_proto = {
1449 	.func		= bpf_timer_start,
1450 	.gpl_only	= true,
1451 	.ret_type	= RET_INTEGER,
1452 	.arg1_type	= ARG_PTR_TO_TIMER,
1453 	.arg2_type	= ARG_ANYTHING,
1454 	.arg3_type	= ARG_ANYTHING,
1455 };
1456 
1457 static void drop_prog_refcnt(struct bpf_async_cb *async)
1458 {
1459 	struct bpf_prog *prog = async->prog;
1460 
1461 	if (prog) {
1462 		bpf_prog_put(prog);
1463 		async->prog = NULL;
1464 		rcu_assign_pointer(async->callback_fn, NULL);
1465 	}
1466 }
1467 
1468 BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer)
1469 {
1470 	struct bpf_hrtimer *t, *cur_t;
1471 	bool inc = false;
1472 	int ret = 0;
1473 
1474 	if (in_nmi())
1475 		return -EOPNOTSUPP;
1476 	rcu_read_lock();
1477 	__bpf_spin_lock_irqsave(&timer->lock);
1478 	t = timer->timer;
1479 	if (!t) {
1480 		ret = -EINVAL;
1481 		goto out;
1482 	}
1483 
1484 	cur_t = this_cpu_read(hrtimer_running);
1485 	if (cur_t == t) {
1486 		/* If bpf callback_fn is trying to bpf_timer_cancel()
1487 		 * its own timer the hrtimer_cancel() will deadlock
1488 		 * since it waits for callback_fn to finish.
1489 		 */
1490 		ret = -EDEADLK;
1491 		goto out;
1492 	}
1493 
1494 	/* Only account in-flight cancellations when invoked from a timer
1495 	 * callback, since we want to avoid waiting only if other _callbacks_
1496 	 * are waiting on us, to avoid introducing lockups. Non-callback paths
1497 	 * are ok, since nobody would synchronously wait for their completion.
1498 	 */
1499 	if (!cur_t)
1500 		goto drop;
1501 	atomic_inc(&t->cancelling);
1502 	/* Need full barrier after relaxed atomic_inc */
1503 	smp_mb__after_atomic();
1504 	inc = true;
1505 	if (atomic_read(&cur_t->cancelling)) {
1506 		/* We're cancelling timer t, while some other timer callback is
1507 		 * attempting to cancel us. In such a case, it might be possible
1508 		 * that timer t belongs to the other callback, or some other
1509 		 * callback waiting upon it (creating transitive dependencies
1510 		 * upon us), and we will enter a deadlock if we continue
1511 		 * cancelling and waiting for it synchronously, since it might
1512 		 * do the same. Bail!
1513 		 */
1514 		ret = -EDEADLK;
1515 		goto out;
1516 	}
1517 drop:
1518 	drop_prog_refcnt(&t->cb);
1519 out:
1520 	__bpf_spin_unlock_irqrestore(&timer->lock);
1521 	/* Cancel the timer and wait for associated callback to finish
1522 	 * if it was running.
1523 	 */
1524 	ret = ret ?: hrtimer_cancel(&t->timer);
1525 	if (inc)
1526 		atomic_dec(&t->cancelling);
1527 	rcu_read_unlock();
1528 	return ret;
1529 }
1530 
1531 static const struct bpf_func_proto bpf_timer_cancel_proto = {
1532 	.func		= bpf_timer_cancel,
1533 	.gpl_only	= true,
1534 	.ret_type	= RET_INTEGER,
1535 	.arg1_type	= ARG_PTR_TO_TIMER,
1536 };
1537 
1538 static struct bpf_async_cb *__bpf_async_cancel_and_free(struct bpf_async_kern *async)
1539 {
1540 	struct bpf_async_cb *cb;
1541 
1542 	/* Performance optimization: read async->cb without lock first. */
1543 	if (!READ_ONCE(async->cb))
1544 		return NULL;
1545 
1546 	__bpf_spin_lock_irqsave(&async->lock);
1547 	/* re-read it under lock */
1548 	cb = async->cb;
1549 	if (!cb)
1550 		goto out;
1551 	drop_prog_refcnt(cb);
1552 	/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
1553 	 * this timer, since it won't be initialized.
1554 	 */
1555 	WRITE_ONCE(async->cb, NULL);
1556 out:
1557 	__bpf_spin_unlock_irqrestore(&async->lock);
1558 	return cb;
1559 }
1560 
1561 /* This function is called by map_delete/update_elem for individual element and
1562  * by ops->map_release_uref when the user space reference to a map reaches zero.
1563  */
1564 void bpf_timer_cancel_and_free(void *val)
1565 {
1566 	struct bpf_hrtimer *t;
1567 
1568 	t = (struct bpf_hrtimer *)__bpf_async_cancel_and_free(val);
1569 
1570 	if (!t)
1571 		return;
1572 	/* We check that bpf_map_delete/update_elem() was called from timer
1573 	 * callback_fn. In such a case we don't call hrtimer_cancel() (since it
1574 	 * will deadlock) and don't call hrtimer_try_to_cancel() (since it will
1575 	 * just return -1). Though callback_fn is still running on this cpu it's
1576 	 * safe to do kfree(t) because bpf_timer_cb() read everything it needed
1577 	 * from 't'. The bpf subprog callback_fn won't be able to access 't',
1578 	 * since async->cb = NULL was already done. The timer will be
1579 	 * effectively cancelled because bpf_timer_cb() will return
1580 	 * HRTIMER_NORESTART.
1581 	 *
1582 	 * However, it is possible the timer callback_fn calling us armed the
1583 	 * timer _before_ calling us, such that failing to cancel it here will
1584 	 * cause it to possibly use struct hrtimer after freeing bpf_hrtimer.
1585 	 * Therefore, we _need_ to cancel any outstanding timers before we do
1586 	 * call_rcu, even though no more timers can be armed.
1587 	 *
1588 	 * Moreover, we need to schedule work even if timer does not belong to
1589 	 * the calling callback_fn, as on two different CPUs, we can end up in a
1590 	 * situation where both sides run in parallel, try to cancel one
1591 	 * another, and we end up waiting on both sides in hrtimer_cancel
1592 	 * without making forward progress, since timer1 depends on timer2
1593 	 * callback to finish, and vice versa.
1594 	 *
1595 	 *  CPU 1 (timer1_cb)			CPU 2 (timer2_cb)
1596 	 *  bpf_timer_cancel_and_free(timer2)	bpf_timer_cancel_and_free(timer1)
1597 	 *
1598 	 * To avoid these issues, punt to workqueue context when we are in a
1599 	 * timer callback.
1600 	 */
1601 	if (this_cpu_read(hrtimer_running)) {
1602 		queue_work(system_dfl_wq, &t->cb.delete_work);
1603 		return;
1604 	}
1605 
1606 	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
1607 		/* If the timer is running on other CPU, also use a kworker to
1608 		 * wait for the completion of the timer instead of trying to
1609 		 * acquire a sleepable lock in hrtimer_cancel() to wait for its
1610 		 * completion.
1611 		 */
1612 		if (hrtimer_try_to_cancel(&t->timer) >= 0)
1613 			call_rcu(&t->cb.rcu, bpf_async_cb_rcu_free);
1614 		else
1615 			queue_work(system_dfl_wq, &t->cb.delete_work);
1616 	} else {
1617 		bpf_timer_delete_work(&t->cb.delete_work);
1618 	}
1619 }
1620 
1621 /* This function is called by map_delete/update_elem for individual element and
1622  * by ops->map_release_uref when the user space reference to a map reaches zero.
1623  */
1624 void bpf_wq_cancel_and_free(void *val)
1625 {
1626 	struct bpf_work *work;
1627 
1628 	BTF_TYPE_EMIT(struct bpf_wq);
1629 
1630 	work = (struct bpf_work *)__bpf_async_cancel_and_free(val);
1631 	if (!work)
1632 		return;
1633 	/* Trigger cancel of the sleepable work, but *do not* wait for
1634 	 * it to finish if it was running as we might not be in a
1635 	 * sleepable context.
1636 	 * kfree will be called once the work has finished.
1637 	 */
1638 	schedule_work(&work->delete_work);
1639 }
1640 
1641 BPF_CALL_2(bpf_kptr_xchg, void *, dst, void *, ptr)
1642 {
1643 	unsigned long *kptr = dst;
1644 
1645 	/* This helper may be inlined by verifier. */
1646 	/* This helper may be inlined by the verifier. */
1647 }
1648 
1649 /* Unlike other PTR_TO_BTF_ID helpers the btf_id in bpf_kptr_xchg()
1650  * helper is determined dynamically by the verifier. Use BPF_PTR_POISON to
1651  * denote type that verifier will determine.
1652  */
1653 static const struct bpf_func_proto bpf_kptr_xchg_proto = {
1654 	.func         = bpf_kptr_xchg,
1655 	.gpl_only     = false,
1656 	.ret_type     = RET_PTR_TO_BTF_ID_OR_NULL,
1657 	.ret_btf_id   = BPF_PTR_POISON,
1658 	.arg1_type    = ARG_KPTR_XCHG_DEST,
1659 	.arg2_type    = ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE,
1660 	.arg2_btf_id  = BPF_PTR_POISON,
1661 };
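
/* Illustrative only: from BPF program code this helper atomically swaps a
 * referenced kptr in and out of a map value field annotated with __kptr,
 * e.g. (hypothetical types and names):
 *
 *	struct map_value {
 *		struct task_struct __kptr *task;
 *	};
 *
 *	old = bpf_kptr_xchg(&v->task, acquired);
 *	if (old)
 *		bpf_task_release(old);
 *
 * Ownership of "acquired" moves into the map; the previous pointer, if any,
 * is handed back to the program along with the obligation to release it.
 */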
1662 
1663 /* Since the upper 8 bits of dynptr->size is reserved, the
1664  * maximum supported size is 2^24 - 1.
1665  */
1666 #define DYNPTR_MAX_SIZE	((1UL << 24) - 1)
1667 #define DYNPTR_TYPE_SHIFT	28
1668 #define DYNPTR_SIZE_MASK	0xFFFFFF
1669 #define DYNPTR_RDONLY_BIT	BIT(31)
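
/* Resulting layout of the dynptr size field, given the masks above:
 *	bits  0-23: size of the data, at most DYNPTR_MAX_SIZE
 *	bits 28-30: enum bpf_dynptr_type
 *	bit     31: read-only flag (DYNPTR_RDONLY_BIT)
 */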
1670 
1671 bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
1672 {
1673 	return ptr->size & DYNPTR_RDONLY_BIT;
1674 }
1675 
1676 void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
1677 {
1678 	ptr->size |= DYNPTR_RDONLY_BIT;
1679 }
1680 
1681 static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type)
1682 {
1683 	ptr->size |= type << DYNPTR_TYPE_SHIFT;
1684 }
1685 
1686 static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
1687 {
1688 	return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT;
1689 }
1690 
1691 u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
1692 {
1693 	return ptr->size & DYNPTR_SIZE_MASK;
1694 }
1695 
1696 static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u32 new_size)
1697 {
1698 	u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK;
1699 
1700 	ptr->size = new_size | metadata;
1701 }
1702 
1703 int bpf_dynptr_check_size(u32 size)
1704 {
1705 	return size > DYNPTR_MAX_SIZE ? -E2BIG : 0;
1706 }
1707 
1708 void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
1709 		     enum bpf_dynptr_type type, u32 offset, u32 size)
1710 {
1711 	ptr->data = data;
1712 	ptr->offset = offset;
1713 	ptr->size = size;
1714 	bpf_dynptr_set_type(ptr, type);
1715 }
1716 
1717 void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
1718 {
1719 	memset(ptr, 0, sizeof(*ptr));
1720 }
1721 
1722 BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr)
1723 {
1724 	int err;
1725 
1726 	BTF_TYPE_EMIT(struct bpf_dynptr);
1727 
1728 	err = bpf_dynptr_check_size(size);
1729 	if (err)
1730 		goto error;
1731 
1732 	/* flags is currently unsupported */
1733 	if (flags) {
1734 		err = -EINVAL;
1735 		goto error;
1736 	}
1737 
1738 	bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size);
1739 
1740 	return 0;
1741 
1742 error:
1743 	bpf_dynptr_set_null(ptr);
1744 	return err;
1745 }
1746 
1747 static const struct bpf_func_proto bpf_dynptr_from_mem_proto = {
1748 	.func		= bpf_dynptr_from_mem,
1749 	.gpl_only	= false,
1750 	.ret_type	= RET_INTEGER,
1751 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
1752 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1753 	.arg3_type	= ARG_ANYTHING,
1754 	.arg4_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT | MEM_WRITE,
1755 };
1756 
1757 static int __bpf_dynptr_read(void *dst, u32 len, const struct bpf_dynptr_kern *src,
1758 			     u32 offset, u64 flags)
1759 {
1760 	enum bpf_dynptr_type type;
1761 	int err;
1762 
1763 	if (!src->data || flags)
1764 		return -EINVAL;
1765 
1766 	err = bpf_dynptr_check_off_len(src, offset, len);
1767 	if (err)
1768 		return err;
1769 
1770 	type = bpf_dynptr_get_type(src);
1771 
1772 	switch (type) {
1773 	case BPF_DYNPTR_TYPE_LOCAL:
1774 	case BPF_DYNPTR_TYPE_RINGBUF:
1775 		/* Source and destination may overlap, hence use memmove to copy
1776 		 * the data. E.g. bpf_dynptr_from_mem may create two dynptrs
1777 		 * pointing to overlapping PTR_TO_MAP_VALUE regions.
1778 		 */
1779 		memmove(dst, src->data + src->offset + offset, len);
1780 		return 0;
1781 	case BPF_DYNPTR_TYPE_SKB:
1782 		return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len);
1783 	case BPF_DYNPTR_TYPE_XDP:
1784 		return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len);
1785 	case BPF_DYNPTR_TYPE_SKB_META:
1786 		memmove(dst, bpf_skb_meta_pointer(src->data, src->offset + offset), len);
1787 		return 0;
1788 	default:
1789 		WARN_ONCE(true, "bpf_dynptr_read: unknown dynptr type %d\n", type);
1790 		return -EFAULT;
1791 	}
1792 }
1793 
1794 BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src,
1795 	   u32, offset, u64, flags)
1796 {
1797 	return __bpf_dynptr_read(dst, len, src, offset, flags);
1798 }
1799 
1800 static const struct bpf_func_proto bpf_dynptr_read_proto = {
1801 	.func		= bpf_dynptr_read,
1802 	.gpl_only	= false,
1803 	.ret_type	= RET_INTEGER,
1804 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
1805 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1806 	.arg3_type	= ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1807 	.arg4_type	= ARG_ANYTHING,
1808 	.arg5_type	= ARG_ANYTHING,
1809 };
1810 
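/* Example usage from a BPF program (an illustrative sketch): read four
 * bytes at offset 8 of an initialized dynptr into a stack variable.
 *
 *	__u32 val;
 *
 *	if (bpf_dynptr_read(&val, sizeof(val), &dptr, 8, 0))
 *		return 0; // NULL dynptr, non-zero flags, or out of bounds
 */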
1811 int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u32 offset, void *src,
1812 		       u32 len, u64 flags)
1813 {
1814 	enum bpf_dynptr_type type;
1815 	int err;
1816 
1817 	if (!dst->data || __bpf_dynptr_is_rdonly(dst))
1818 		return -EINVAL;
1819 
1820 	err = bpf_dynptr_check_off_len(dst, offset, len);
1821 	if (err)
1822 		return err;
1823 
1824 	type = bpf_dynptr_get_type(dst);
1825 
1826 	switch (type) {
1827 	case BPF_DYNPTR_TYPE_LOCAL:
1828 	case BPF_DYNPTR_TYPE_RINGBUF:
1829 		if (flags)
1830 			return -EINVAL;
1831 		/* Source and destination may overlap, hence use memmove to copy
1832 		 * the data. E.g. bpf_dynptr_from_mem may create two dynptrs
1833 		 * pointing to overlapping PTR_TO_MAP_VALUE regions.
1834 		 */
1835 		memmove(dst->data + dst->offset + offset, src, len);
1836 		return 0;
1837 	case BPF_DYNPTR_TYPE_SKB:
1838 		return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len,
1839 					     flags);
1840 	case BPF_DYNPTR_TYPE_XDP:
1841 		if (flags)
1842 			return -EINVAL;
1843 		return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len);
1844 	case BPF_DYNPTR_TYPE_SKB_META:
1845 		if (flags)
1846 			return -EINVAL;
1847 		memmove(bpf_skb_meta_pointer(dst->data, dst->offset + offset), src, len);
1848 		return 0;
1849 	default:
1850 		WARN_ONCE(true, "bpf_dynptr_write: unknown dynptr type %d\n", type);
1851 		return -EFAULT;
1852 	}
1853 }
1854 
1855 BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, void *, src,
1856 	   u32, len, u64, flags)
1857 {
1858 	return __bpf_dynptr_write(dst, offset, src, len, flags);
1859 }
1860 
1861 static const struct bpf_func_proto bpf_dynptr_write_proto = {
1862 	.func		= bpf_dynptr_write,
1863 	.gpl_only	= false,
1864 	.ret_type	= RET_INTEGER,
1865 	.arg1_type	= ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1866 	.arg2_type	= ARG_ANYTHING,
1867 	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1868 	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
1869 	.arg5_type	= ARG_ANYTHING,
1870 };
1871 
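/* Example usage from a BPF program (an illustrative sketch). The write
 * fails on read-only dynptrs; flags are only meaningful for skb dynptrs,
 * where they are forwarded to __bpf_skb_store_bytes().
 *
 *	__u32 val = 42;
 *
 *	if (bpf_dynptr_write(&dptr, 8, &val, sizeof(val), 0))
 *		return 0;
 */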
1872 BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u32, len)
1873 {
1874 	enum bpf_dynptr_type type;
1875 	int err;
1876 
1877 	if (!ptr->data)
1878 		return 0;
1879 
1880 	err = bpf_dynptr_check_off_len(ptr, offset, len);
1881 	if (err)
1882 		return 0;
1883 
1884 	if (__bpf_dynptr_is_rdonly(ptr))
1885 		return 0;
1886 
1887 	type = bpf_dynptr_get_type(ptr);
1888 
1889 	switch (type) {
1890 	case BPF_DYNPTR_TYPE_LOCAL:
1891 	case BPF_DYNPTR_TYPE_RINGBUF:
1892 		return (unsigned long)(ptr->data + ptr->offset + offset);
1893 	case BPF_DYNPTR_TYPE_SKB:
1894 	case BPF_DYNPTR_TYPE_XDP:
1895 	case BPF_DYNPTR_TYPE_SKB_META:
1896 		/* skb and xdp dynptrs should use bpf_dynptr_slice / bpf_dynptr_slice_rdwr */
1897 		return 0;
1898 	default:
1899 		WARN_ONCE(true, "bpf_dynptr_data: unknown dynptr type %d\n", type);
1900 		return 0;
1901 	}
1902 }
1903 
1904 static const struct bpf_func_proto bpf_dynptr_data_proto = {
1905 	.func		= bpf_dynptr_data,
1906 	.gpl_only	= false,
1907 	.ret_type	= RET_PTR_TO_DYNPTR_MEM_OR_NULL,
1908 	.arg1_type	= ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1909 	.arg2_type	= ARG_ANYTHING,
1910 	.arg3_type	= ARG_CONST_ALLOC_SIZE_OR_ZERO,
1911 };
1912 
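/* Example usage from a BPF program (an illustrative sketch): obtain a
 * direct pointer to bytes [0, 8) of a local or ringbuf dynptr. For skb,
 * xdp and skb_meta dynptrs this returns NULL; use bpf_dynptr_slice() /
 * bpf_dynptr_slice_rdwr() for those instead.
 *
 *	__u8 *p = bpf_dynptr_data(&dptr, 0, 8);
 *
 *	if (!p)
 *		return 0; // rdonly dynptr, out of range, or unsupported type
 *	p[0] = 0xff;      // direct access, verified up to 8 bytes
 */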
1913 const struct bpf_func_proto bpf_get_current_task_proto __weak;
1914 const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
1915 const struct bpf_func_proto bpf_probe_read_user_proto __weak;
1916 const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
1917 const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
1918 const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
1919 const struct bpf_func_proto bpf_task_pt_regs_proto __weak;
1920 const struct bpf_func_proto bpf_perf_event_read_proto __weak;
1921 const struct bpf_func_proto bpf_send_signal_proto __weak;
1922 const struct bpf_func_proto bpf_send_signal_thread_proto __weak;
1923 const struct bpf_func_proto bpf_get_task_stack_sleepable_proto __weak;
1924 const struct bpf_func_proto bpf_get_task_stack_proto __weak;
1925 const struct bpf_func_proto bpf_get_branch_snapshot_proto __weak;
1926 
1927 const struct bpf_func_proto *
1928 bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1929 {
1930 	switch (func_id) {
1931 	case BPF_FUNC_map_lookup_elem:
1932 		return &bpf_map_lookup_elem_proto;
1933 	case BPF_FUNC_map_update_elem:
1934 		return &bpf_map_update_elem_proto;
1935 	case BPF_FUNC_map_delete_elem:
1936 		return &bpf_map_delete_elem_proto;
1937 	case BPF_FUNC_map_push_elem:
1938 		return &bpf_map_push_elem_proto;
1939 	case BPF_FUNC_map_pop_elem:
1940 		return &bpf_map_pop_elem_proto;
1941 	case BPF_FUNC_map_peek_elem:
1942 		return &bpf_map_peek_elem_proto;
1943 	case BPF_FUNC_map_lookup_percpu_elem:
1944 		return &bpf_map_lookup_percpu_elem_proto;
1945 	case BPF_FUNC_get_prandom_u32:
1946 		return &bpf_get_prandom_u32_proto;
1947 	case BPF_FUNC_get_smp_processor_id:
1948 		return &bpf_get_raw_smp_processor_id_proto;
1949 	case BPF_FUNC_get_numa_node_id:
1950 		return &bpf_get_numa_node_id_proto;
1951 	case BPF_FUNC_tail_call:
1952 		return &bpf_tail_call_proto;
1953 	case BPF_FUNC_ktime_get_ns:
1954 		return &bpf_ktime_get_ns_proto;
1955 	case BPF_FUNC_ktime_get_boot_ns:
1956 		return &bpf_ktime_get_boot_ns_proto;
1957 	case BPF_FUNC_ktime_get_tai_ns:
1958 		return &bpf_ktime_get_tai_ns_proto;
1959 	case BPF_FUNC_ringbuf_output:
1960 		return &bpf_ringbuf_output_proto;
1961 	case BPF_FUNC_ringbuf_reserve:
1962 		return &bpf_ringbuf_reserve_proto;
1963 	case BPF_FUNC_ringbuf_submit:
1964 		return &bpf_ringbuf_submit_proto;
1965 	case BPF_FUNC_ringbuf_discard:
1966 		return &bpf_ringbuf_discard_proto;
1967 	case BPF_FUNC_ringbuf_query:
1968 		return &bpf_ringbuf_query_proto;
1969 	case BPF_FUNC_strncmp:
1970 		return &bpf_strncmp_proto;
1971 	case BPF_FUNC_strtol:
1972 		return &bpf_strtol_proto;
1973 	case BPF_FUNC_strtoul:
1974 		return &bpf_strtoul_proto;
1975 	case BPF_FUNC_get_current_pid_tgid:
1976 		return &bpf_get_current_pid_tgid_proto;
1977 	case BPF_FUNC_get_ns_current_pid_tgid:
1978 		return &bpf_get_ns_current_pid_tgid_proto;
1979 	case BPF_FUNC_get_current_uid_gid:
1980 		return &bpf_get_current_uid_gid_proto;
1981 	default:
1982 		break;
1983 	}
1984 
1985 	if (!bpf_token_capable(prog->aux->token, CAP_BPF))
1986 		return NULL;
1987 
1988 	switch (func_id) {
1989 	case BPF_FUNC_spin_lock:
1990 		return &bpf_spin_lock_proto;
1991 	case BPF_FUNC_spin_unlock:
1992 		return &bpf_spin_unlock_proto;
1993 	case BPF_FUNC_jiffies64:
1994 		return &bpf_jiffies64_proto;
1995 	case BPF_FUNC_per_cpu_ptr:
1996 		return &bpf_per_cpu_ptr_proto;
1997 	case BPF_FUNC_this_cpu_ptr:
1998 		return &bpf_this_cpu_ptr_proto;
1999 	case BPF_FUNC_timer_init:
2000 		return &bpf_timer_init_proto;
2001 	case BPF_FUNC_timer_set_callback:
2002 		return &bpf_timer_set_callback_proto;
2003 	case BPF_FUNC_timer_start:
2004 		return &bpf_timer_start_proto;
2005 	case BPF_FUNC_timer_cancel:
2006 		return &bpf_timer_cancel_proto;
2007 	case BPF_FUNC_kptr_xchg:
2008 		return &bpf_kptr_xchg_proto;
2009 	case BPF_FUNC_for_each_map_elem:
2010 		return &bpf_for_each_map_elem_proto;
2011 	case BPF_FUNC_loop:
2012 		return &bpf_loop_proto;
2013 	case BPF_FUNC_user_ringbuf_drain:
2014 		return &bpf_user_ringbuf_drain_proto;
2015 	case BPF_FUNC_ringbuf_reserve_dynptr:
2016 		return &bpf_ringbuf_reserve_dynptr_proto;
2017 	case BPF_FUNC_ringbuf_submit_dynptr:
2018 		return &bpf_ringbuf_submit_dynptr_proto;
2019 	case BPF_FUNC_ringbuf_discard_dynptr:
2020 		return &bpf_ringbuf_discard_dynptr_proto;
2021 	case BPF_FUNC_dynptr_from_mem:
2022 		return &bpf_dynptr_from_mem_proto;
2023 	case BPF_FUNC_dynptr_read:
2024 		return &bpf_dynptr_read_proto;
2025 	case BPF_FUNC_dynptr_write:
2026 		return &bpf_dynptr_write_proto;
2027 	case BPF_FUNC_dynptr_data:
2028 		return &bpf_dynptr_data_proto;
2029 #ifdef CONFIG_CGROUPS
2030 	case BPF_FUNC_cgrp_storage_get:
2031 		return &bpf_cgrp_storage_get_proto;
2032 	case BPF_FUNC_cgrp_storage_delete:
2033 		return &bpf_cgrp_storage_delete_proto;
2034 	case BPF_FUNC_get_current_cgroup_id:
2035 		return &bpf_get_current_cgroup_id_proto;
2036 	case BPF_FUNC_get_current_ancestor_cgroup_id:
2037 		return &bpf_get_current_ancestor_cgroup_id_proto;
2038 	case BPF_FUNC_current_task_under_cgroup:
2039 		return &bpf_current_task_under_cgroup_proto;
2040 #endif
2041 #ifdef CONFIG_CGROUP_NET_CLASSID
2042 	case BPF_FUNC_get_cgroup_classid:
2043 		return &bpf_get_cgroup_classid_curr_proto;
2044 #endif
2045 	case BPF_FUNC_task_storage_get:
2046 		if (bpf_prog_check_recur(prog))
2047 			return &bpf_task_storage_get_recur_proto;
2048 		return &bpf_task_storage_get_proto;
2049 	case BPF_FUNC_task_storage_delete:
2050 		if (bpf_prog_check_recur(prog))
2051 			return &bpf_task_storage_delete_recur_proto;
2052 		return &bpf_task_storage_delete_proto;
2053 	default:
2054 		break;
2055 	}
2056 
2057 	if (!bpf_token_capable(prog->aux->token, CAP_PERFMON))
2058 		return NULL;
2059 
2060 	switch (func_id) {
2061 	case BPF_FUNC_trace_printk:
2062 		return bpf_get_trace_printk_proto();
2063 	case BPF_FUNC_get_current_task:
2064 		return &bpf_get_current_task_proto;
2065 	case BPF_FUNC_get_current_task_btf:
2066 		return &bpf_get_current_task_btf_proto;
2067 	case BPF_FUNC_get_current_comm:
2068 		return &bpf_get_current_comm_proto;
2069 	case BPF_FUNC_probe_read_user:
2070 		return &bpf_probe_read_user_proto;
2071 	case BPF_FUNC_probe_read_kernel:
2072 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
2073 		       NULL : &bpf_probe_read_kernel_proto;
2074 	case BPF_FUNC_probe_read_user_str:
2075 		return &bpf_probe_read_user_str_proto;
2076 	case BPF_FUNC_probe_read_kernel_str:
2077 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
2078 		       NULL : &bpf_probe_read_kernel_str_proto;
2079 	case BPF_FUNC_copy_from_user:
2080 		return &bpf_copy_from_user_proto;
2081 	case BPF_FUNC_copy_from_user_task:
2082 		return &bpf_copy_from_user_task_proto;
2083 	case BPF_FUNC_snprintf_btf:
2084 		return &bpf_snprintf_btf_proto;
2085 	case BPF_FUNC_snprintf:
2086 		return &bpf_snprintf_proto;
2087 	case BPF_FUNC_task_pt_regs:
2088 		return &bpf_task_pt_regs_proto;
2089 	case BPF_FUNC_trace_vprintk:
2090 		return bpf_get_trace_vprintk_proto();
2091 	case BPF_FUNC_perf_event_read_value:
2092 		return bpf_get_perf_event_read_value_proto();
2093 	case BPF_FUNC_perf_event_read:
2094 		return &bpf_perf_event_read_proto;
2095 	case BPF_FUNC_send_signal:
2096 		return &bpf_send_signal_proto;
2097 	case BPF_FUNC_send_signal_thread:
2098 		return &bpf_send_signal_thread_proto;
2099 	case BPF_FUNC_get_task_stack:
2100 		return prog->sleepable ? &bpf_get_task_stack_sleepable_proto
2101 				       : &bpf_get_task_stack_proto;
2102 	case BPF_FUNC_get_branch_snapshot:
2103 		return &bpf_get_branch_snapshot_proto;
2104 	case BPF_FUNC_find_vma:
2105 		return &bpf_find_vma_proto;
2106 	default:
2107 		return NULL;
2108 	}
2109 }
2110 EXPORT_SYMBOL_GPL(bpf_base_func_proto);
2111 
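/* Subsystems typically fall back to bpf_base_func_proto() from their own
 * verifier_ops->get_func_proto() callback for every helper they do not
 * override. A sketch (my_func_proto and my_perf_event_output_proto are
 * hypothetical names):
 *
 *	static const struct bpf_func_proto *
 *	my_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_perf_event_output:
 *			return &my_perf_event_output_proto;
 *		default:
 *			return bpf_base_func_proto(func_id, prog);
 *		}
 *	}
 */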
2112 void bpf_list_head_free(const struct btf_field *field, void *list_head,
2113 			struct bpf_spin_lock *spin_lock)
2114 {
2115 	struct list_head *head = list_head, *orig_head = list_head;
2116 
2117 	BUILD_BUG_ON(sizeof(struct list_head) > sizeof(struct bpf_list_head));
2118 	BUILD_BUG_ON(__alignof__(struct list_head) > __alignof__(struct bpf_list_head));
2119 
2120 	/* Do the actual list draining outside the lock to not hold the lock for
2121 	 * too long, and also prevent deadlocks if tracing programs end up
2122 	 * executing on entry/exit of functions called inside the critical
2123 	 * section, and end up doing map ops that call bpf_list_head_free for
2124 	 * the same map value again.
2125 	 */
2126 	__bpf_spin_lock_irqsave(spin_lock);
2127 	if (!head->next || list_empty(head))
2128 		goto unlock;
2129 	head = head->next;
2130 unlock:
2131 	INIT_LIST_HEAD(orig_head);
2132 	__bpf_spin_unlock_irqrestore(spin_lock);
2133 
2134 	while (head != orig_head) {
2135 		void *obj = head;
2136 
2137 		obj -= field->graph_root.node_offset;
2138 		head = head->next;
2139 		/* The contained type can also have resources, including a
2140 		 * bpf_list_head which needs to be freed.
2141 		 */
2142 		__bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
2143 	}
2144 }
2145 
2146 /* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are
2147  * 'rb_node *', so field name of rb_node within containing struct is not
2148  * needed.
2149  *
2150  * Since bpf_rb_tree's node type has a corresponding struct btf_field with
2151  * graph_root.node_offset, it's not necessary to know field name
2152  * or type of node struct
2153  */
2154 #define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \
2155 	for (pos = rb_first_postorder(root); \
2156 	    pos && ({ n = rb_next_postorder(pos); 1; }); \
2157 	    pos = n)
2158 
2159 void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
2160 		      struct bpf_spin_lock *spin_lock)
2161 {
2162 	struct rb_root_cached orig_root, *root = rb_root;
2163 	struct rb_node *pos, *n;
2164 	void *obj;
2165 
2166 	BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root));
2167 	BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root));
2168 
2169 	__bpf_spin_lock_irqsave(spin_lock);
2170 	orig_root = *root;
2171 	*root = RB_ROOT_CACHED;
2172 	__bpf_spin_unlock_irqrestore(spin_lock);
2173 
2174 	bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) {
2175 		obj = pos;
2176 		obj -= field->graph_root.node_offset;
2177 
2178 
2179 		__bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
2180 	}
2181 }
2182 
2183 __bpf_kfunc_start_defs();
2184 
2185 __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
2186 {
2187 	struct btf_struct_meta *meta = meta__ign;
2188 	u64 size = local_type_id__k;
2189 	void *p;
2190 
2191 	p = bpf_mem_alloc(&bpf_global_ma, size);
2192 	if (!p)
2193 		return NULL;
2194 	if (meta)
2195 		bpf_obj_init(meta->record, p);
2196 	return p;
2197 }
2198 
2199 __bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign)
2200 {
2201 	u64 size = local_type_id__k;
2202 
2203 	/* The verifier has ensured that meta__ign must be NULL */
2204 	return bpf_mem_alloc(&bpf_global_percpu_ma, size);
2205 }
2206 
2207 /* Must be called under migrate_disable(), as required by bpf_mem_free */
2208 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu)
2209 {
2210 	struct bpf_mem_alloc *ma;
2211 
2212 	if (rec && rec->refcount_off >= 0 &&
2213 	    !refcount_dec_and_test((refcount_t *)(p + rec->refcount_off))) {
2214 		/* Object is refcounted and refcount_dec didn't result in 0
2215 		 * refcount. Return without freeing the object
2216 		 */
2217 		return;
2218 	}
2219 
2220 	if (rec)
2221 		bpf_obj_free_fields(rec, p);
2222 
2223 	if (percpu)
2224 		ma = &bpf_global_percpu_ma;
2225 	else
2226 		ma = &bpf_global_ma;
2227 	bpf_mem_free_rcu(ma, p);
2228 }
2229 
2230 __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
2231 {
2232 	struct btf_struct_meta *meta = meta__ign;
2233 	void *p = p__alloc;
2234 
2235 	__bpf_obj_drop_impl(p, meta ? meta->record : NULL, false);
2236 }
2237 
2238 __bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign)
2239 {
2240 	/* The verifier has ensured that meta__ign must be NULL */
2241 	bpf_mem_free_rcu(&bpf_global_percpu_ma, p__alloc);
2242 }
2243 
2244 __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign)
2245 {
2246 	struct btf_struct_meta *meta = meta__ign;
2247 	struct bpf_refcount *ref;
2248 
2249 	/* Could just cast directly to refcount_t *, but need some code using
2250 	 * bpf_refcount type so that it is emitted in vmlinux BTF
2251 	 */
2252 	ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off);
2253 	if (!refcount_inc_not_zero((refcount_t *)ref))
2254 		return NULL;
2255 
2256 	/* Verifier strips KF_RET_NULL if input is owned ref, see is_kfunc_ret_null
2257 	 * in verifier.c
2258 	 */
2259 	return (void *)p__refcounted_kptr;
2260 }
2261 
2262 static int __bpf_list_add(struct bpf_list_node_kern *node,
2263 			  struct bpf_list_head *head,
2264 			  bool tail, struct btf_record *rec, u64 off)
2265 {
2266 	struct list_head *n = &node->list_head, *h = (void *)head;
2267 
2268 	/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
2269 	 * called on its fields, so init here
2270 	 */
2271 	if (unlikely(!h->next))
2272 		INIT_LIST_HEAD(h);
2273 
2274 	/* node->owner != NULL implies !list_empty(n), no need to separately
2275 	 * check the latter
2276 	 */
2277 	if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
2278 		/* Only called from BPF prog, no need to migrate_disable */
2279 		__bpf_obj_drop_impl((void *)n - off, rec, false);
2280 		return -EINVAL;
2281 	}
2282 
2283 	tail ? list_add_tail(n, h) : list_add(n, h);
2284 	WRITE_ONCE(node->owner, head);
2285 
2286 	return 0;
2287 }
2288 
2289 __bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head,
2290 					 struct bpf_list_node *node,
2291 					 void *meta__ign, u64 off)
2292 {
2293 	struct bpf_list_node_kern *n = (void *)node;
2294 	struct btf_struct_meta *meta = meta__ign;
2295 
2296 	return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off);
2297 }
2298 
2299 __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
2300 					struct bpf_list_node *node,
2301 					void *meta__ign, u64 off)
2302 {
2303 	struct bpf_list_node_kern *n = (void *)node;
2304 	struct btf_struct_meta *meta = meta__ign;
2305 
2306 	return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off);
2307 }
2308 
2309 static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail)
2310 {
2311 	struct list_head *n, *h = (void *)head;
2312 	struct bpf_list_node_kern *node;
2313 
2314 	/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
2315 	 * called on its fields, so init here
2316 	 */
2317 	if (unlikely(!h->next))
2318 		INIT_LIST_HEAD(h);
2319 	if (list_empty(h))
2320 		return NULL;
2321 
2322 	n = tail ? h->prev : h->next;
2323 	node = container_of(n, struct bpf_list_node_kern, list_head);
2324 	if (WARN_ON_ONCE(READ_ONCE(node->owner) != head))
2325 		return NULL;
2326 
2327 	list_del_init(n);
2328 	WRITE_ONCE(node->owner, NULL);
2329 	return (struct bpf_list_node *)n;
2330 }
2331 
2332 __bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
2333 {
2334 	return __bpf_list_del(head, false);
2335 }
2336 
2337 __bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
2338 {
2339 	return __bpf_list_del(head, true);
2340 }
2341 
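/* Example usage from a BPF program (an illustrative sketch, using the
 * bpf_obj_new()/bpf_list_push_front() convenience wrappers from
 * tools/testing/selftests/bpf/bpf_experimental.h; struct node_data and the
 * 'lock'/'head' bpf_spin_lock/bpf_list_head map-value fields are
 * assumptions):
 *
 *	struct node_data {
 *		struct bpf_list_node node;
 *		int data;
 *	};
 *
 *	struct node_data *n = bpf_obj_new(typeof(*n));
 *	if (!n)
 *		return 0;
 *	n->data = 42;
 *	bpf_spin_lock(&lock);
 *	bpf_list_push_front(&head, &n->node);
 *	bpf_spin_unlock(&lock);
 */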
2342 __bpf_kfunc struct bpf_list_node *bpf_list_front(struct bpf_list_head *head)
2343 {
2344 	struct list_head *h = (struct list_head *)head;
2345 
2346 	if (list_empty(h) || unlikely(!h->next))
2347 		return NULL;
2348 
2349 	return (struct bpf_list_node *)h->next;
2350 }
2351 
2352 __bpf_kfunc struct bpf_list_node *bpf_list_back(struct bpf_list_head *head)
2353 {
2354 	struct list_head *h = (struct list_head *)head;
2355 
2356 	if (list_empty(h) || unlikely(!h->next))
2357 		return NULL;
2358 
2359 	return (struct bpf_list_node *)h->prev;
2360 }
2361 
2362 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
2363 						  struct bpf_rb_node *node)
2364 {
2365 	struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
2366 	struct rb_root_cached *r = (struct rb_root_cached *)root;
2367 	struct rb_node *n = &node_internal->rb_node;
2368 
2369 	/* node_internal->owner != root implies either RB_EMPTY_NODE(n) or
2370 	 * n is owned by some other tree. No need to check RB_EMPTY_NODE(n)
2371 	 */
2372 	if (READ_ONCE(node_internal->owner) != root)
2373 		return NULL;
2374 
2375 	rb_erase_cached(n, r);
2376 	RB_CLEAR_NODE(n);
2377 	WRITE_ONCE(node_internal->owner, NULL);
2378 	return (struct bpf_rb_node *)n;
2379 }
2380 
2381 /* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF
2382  * program
2383  */
2384 static int __bpf_rbtree_add(struct bpf_rb_root *root,
2385 			    struct bpf_rb_node_kern *node,
2386 			    void *less, struct btf_record *rec, u64 off)
2387 {
2388 	struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node;
2389 	struct rb_node *parent = NULL, *n = &node->rb_node;
2390 	bpf_callback_t cb = (bpf_callback_t)less;
2391 	bool leftmost = true;
2392 
2393 	/* node->owner != NULL implies !RB_EMPTY_NODE(n), no need to separately
2394 	 * check the latter
2395 	 */
2396 	if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
2397 		/* Only called from BPF prog, no need to migrate_disable */
2398 		__bpf_obj_drop_impl((void *)n - off, rec, false);
2399 		return -EINVAL;
2400 	}
2401 
2402 	while (*link) {
2403 		parent = *link;
2404 		if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) {
2405 			link = &parent->rb_left;
2406 		} else {
2407 			link = &parent->rb_right;
2408 			leftmost = false;
2409 		}
2410 	}
2411 
2412 	rb_link_node(n, parent, link);
2413 	rb_insert_color_cached(n, (struct rb_root_cached *)root, leftmost);
2414 	WRITE_ONCE(node->owner, root);
2415 	return 0;
2416 }
2417 
2418 __bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
2419 				    bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
2420 				    void *meta__ign, u64 off)
2421 {
2422 	struct btf_struct_meta *meta = meta__ign;
2423 	struct bpf_rb_node_kern *n = (void *)node;
2424 
2425 	return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off);
2426 }
2427 
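/* Example usage from a BPF program (an illustrative sketch; as with the
 * list kfuncs, bpf_rbtree_add() is the bpf_experimental.h wrapper, and
 * struct node_data, its 'key' field and the 'lock'/'groot' map-value
 * fields are assumptions):
 *
 *	static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *	{
 *		struct node_data *na = container_of(a, struct node_data, node);
 *		struct node_data *nb = container_of(b, struct node_data, node);
 *
 *		return na->key < nb->key;
 *	}
 *
 *	bpf_spin_lock(&lock);
 *	bpf_rbtree_add(&groot, &n->node, less);
 *	bpf_spin_unlock(&lock);
 */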
2428 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
2429 {
2430 	struct rb_root_cached *r = (struct rb_root_cached *)root;
2431 
2432 	return (struct bpf_rb_node *)rb_first_cached(r);
2433 }
2434 
2435 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_root(struct bpf_rb_root *root)
2436 {
2437 	struct rb_root_cached *r = (struct rb_root_cached *)root;
2438 
2439 	return (struct bpf_rb_node *)r->rb_root.rb_node;
2440 }
2441 
2442 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_left(struct bpf_rb_root *root, struct bpf_rb_node *node)
2443 {
2444 	struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
2445 
2446 	if (READ_ONCE(node_internal->owner) != root)
2447 		return NULL;
2448 
2449 	return (struct bpf_rb_node *)node_internal->rb_node.rb_left;
2450 }
2451 
2452 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_right(struct bpf_rb_root *root, struct bpf_rb_node *node)
2453 {
2454 	struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
2455 
2456 	if (READ_ONCE(node_internal->owner) != root)
2457 		return NULL;
2458 
2459 	return (struct bpf_rb_node *)node_internal->rb_node.rb_right;
2460 }
2461 
2462 /**
2463  * bpf_task_acquire - Acquire a reference to a task. A task acquired by this
2464  * kfunc which is not stored in a map as a kptr, must be released by calling
2465  * bpf_task_release().
2466  * @p: The task on which a reference is being acquired.
2467  */
2468 __bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p)
2469 {
2470 	if (refcount_inc_not_zero(&p->rcu_users))
2471 		return p;
2472 	return NULL;
2473 }
2474 
2475 /**
2476  * bpf_task_release - Release the reference acquired on a task.
2477  * @p: The task on which a reference is being released.
2478  */
2479 __bpf_kfunc void bpf_task_release(struct task_struct *p)
2480 {
2481 	put_task_struct_rcu_user(p);
2482 }
2483 
2484 __bpf_kfunc void bpf_task_release_dtor(void *p)
2485 {
2486 	put_task_struct_rcu_user(p);
2487 }
2488 CFI_NOSEAL(bpf_task_release_dtor);
2489 
2490 #ifdef CONFIG_CGROUPS
2491 /**
2492  * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by
2493  * this kfunc which is not stored in a map as a kptr, must be released by
2494  * calling bpf_cgroup_release().
2495  * @cgrp: The cgroup on which a reference is being acquired.
2496  */
2497 __bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
2498 {
2499 	return cgroup_tryget(cgrp) ? cgrp : NULL;
2500 }
2501 
2502 /**
2503  * bpf_cgroup_release - Release the reference acquired on a cgroup.
2504  * If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to
2505  * not be freed until the current grace period has ended, even if its refcount
2506  * drops to 0.
2507  * @cgrp: The cgroup on which a reference is being released.
2508  */
2509 __bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
2510 {
2511 	cgroup_put(cgrp);
2512 }
2513 
2514 __bpf_kfunc void bpf_cgroup_release_dtor(void *cgrp)
2515 {
2516 	cgroup_put(cgrp);
2517 }
2518 CFI_NOSEAL(bpf_cgroup_release_dtor);
2519 
2520 /**
2521  * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor
2522  * array. A cgroup returned by this kfunc which is not subsequently stored in a
2523  * map, must be released by calling bpf_cgroup_release().
2524  * @cgrp: The cgroup for which we're performing a lookup.
2525  * @level: The level of ancestor to look up.
2526  */
2527 __bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
2528 {
2529 	struct cgroup *ancestor;
2530 
2531 	if (level > cgrp->level || level < 0)
2532 		return NULL;
2533 
2534 	/* cgrp's refcnt could be 0 here, but ancestors can still be accessed */
2535 	ancestor = cgrp->ancestors[level];
2536 	if (!cgroup_tryget(ancestor))
2537 		return NULL;
2538 	return ancestor;
2539 }
2540 
2541 /**
2542  * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this
2543  * kfunc which is not subsequently stored in a map, must be released by calling
2544  * bpf_cgroup_release().
2545  * @cgid: cgroup id.
2546  */
2547 __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid)
2548 {
2549 	struct cgroup *cgrp;
2550 
2551 	cgrp = __cgroup_get_from_id(cgid);
2552 	if (IS_ERR(cgrp))
2553 		return NULL;
2554 	return cgrp;
2555 }
2556 
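/* Example usage from a BPF program (an illustrative sketch; 'cgid' is an
 * assumed cgroup ID obtained elsewhere): look up a cgroup by ID and drop
 * the reference when done.
 *
 *	struct cgroup *cgrp = bpf_cgroup_from_id(cgid);
 *
 *	if (cgrp) {
 *		// inspect cgrp, e.g. cgrp->level
 *		bpf_cgroup_release(cgrp);
 *	}
 */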
2557 /**
2558  * bpf_task_under_cgroup - wrap task_under_cgroup_hierarchy() as a kfunc, test
2559  * task's membership of cgroup ancestry.
2560  * @task: the task to be tested
2561  * @ancestor: possible ancestor of @task's cgroup
2562  *
2563  * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
2564  * It follows all the same rules as cgroup_is_descendant, and only applies
2565  * to the default hierarchy.
2566  */
2567 __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
2568 				       struct cgroup *ancestor)
2569 {
2570 	long ret;
2571 
2572 	rcu_read_lock();
2573 	ret = task_under_cgroup_hierarchy(task, ancestor);
2574 	rcu_read_unlock();
2575 	return ret;
2576 }
2577 
2578 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
2579 {
2580 	struct bpf_array *array = container_of(map, struct bpf_array, map);
2581 	struct cgroup *cgrp;
2582 
2583 	if (unlikely(idx >= array->map.max_entries))
2584 		return -E2BIG;
2585 
2586 	cgrp = READ_ONCE(array->ptrs[idx]);
2587 	if (unlikely(!cgrp))
2588 		return -EAGAIN;
2589 
2590 	return task_under_cgroup_hierarchy(current, cgrp);
2591 }
2592 
2593 const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
2594 	.func           = bpf_current_task_under_cgroup,
2595 	.gpl_only       = false,
2596 	.ret_type       = RET_INTEGER,
2597 	.arg1_type      = ARG_CONST_MAP_PTR,
2598 	.arg2_type      = ARG_ANYTHING,
2599 };
2600 
2601 /**
2602  * bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a
2603  * specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its
2604  * hierarchy ID.
2605  * @task: The target task
2606  * @hierarchy_id: The ID of a cgroup1 hierarchy
2607  *
2608  * On success, the cgroup is returned. On failure, NULL is returned.
2609  */
2610 __bpf_kfunc struct cgroup *
2611 bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id)
2612 {
2613 	struct cgroup *cgrp = task_get_cgroup1(task, hierarchy_id);
2614 
2615 	if (IS_ERR(cgrp))
2616 		return NULL;
2617 	return cgrp;
2618 }
2619 #endif /* CONFIG_CGROUPS */
2620 
2621 /**
2622  * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up
2623  * in the root pid namespace idr. If a task is returned, it must either be
2624  * stored in a map, or released with bpf_task_release().
2625  * @pid: The pid of the task being looked up.
2626  */
2627 __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
2628 {
2629 	struct task_struct *p;
2630 
2631 	rcu_read_lock();
2632 	p = find_task_by_pid_ns(pid, &init_pid_ns);
2633 	if (p)
2634 		p = bpf_task_acquire(p);
2635 	rcu_read_unlock();
2636 
2637 	return p;
2638 }
2639 
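/* Example usage from a BPF program (an illustrative sketch): look up a
 * task by pid, use it, then drop the acquired reference.
 *
 *	struct task_struct *p = bpf_task_from_pid(1);
 *
 *	if (p) {
 *		bpf_printk("pid 1 comm: %s", p->comm);
 *		bpf_task_release(p);
 *	}
 */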
2640 /**
2641  * bpf_task_from_vpid - Find a struct task_struct from its vpid by looking it up
2642  * in the pid namespace of the current task. If a task is returned, it must
2643  * either be stored in a map, or released with bpf_task_release().
2644  * @vpid: The vpid of the task being looked up.
2645  */
2646 __bpf_kfunc struct task_struct *bpf_task_from_vpid(s32 vpid)
2647 {
2648 	struct task_struct *p;
2649 
2650 	rcu_read_lock();
2651 	p = find_task_by_vpid(vpid);
2652 	if (p)
2653 		p = bpf_task_acquire(p);
2654 	rcu_read_unlock();
2655 
2656 	return p;
2657 }
2658 
2659 /**
2660  * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data.
2661  * @p: The dynptr whose data slice to retrieve
2662  * @offset: Offset into the dynptr
2663  * @buffer__opt: User-provided buffer to copy contents into.  May be NULL
2664  * @buffer__szk: Size (in bytes) of the buffer if present. This is the
2665  *               length of the requested slice. This must be a constant.
2666  *
2667  * For non-skb and non-xdp type dynptrs, there is no difference between
2668  * bpf_dynptr_slice and bpf_dynptr_data.
2669  *
2670  * If buffer__opt is NULL, the call will fail if a buffer is needed.
2671  *
2672  * If the intention is to write to the data slice, please use
2673  * bpf_dynptr_slice_rdwr.
2674  *
2675  * The user must check that the returned pointer is not null before using it.
2676  *
2677  * Please note that in the case of skb and xdp dynptrs, bpf_dynptr_slice
2678  * does not change the underlying packet data pointers, so a call to
2679  * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in
2680  * the bpf program.
2681  *
2682  * Return: NULL if the call failed (eg invalid dynptr), pointer to a read-only
2683  * data slice (can be either direct pointer to the data or a pointer to the user
2684  * provided buffer, with its contents containing the data, if unable to obtain
2685  * direct pointer)
2686  */
2687 __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr *p, u32 offset,
2688 				   void *buffer__opt, u32 buffer__szk)
2689 {
2690 	const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2691 	enum bpf_dynptr_type type;
2692 	u32 len = buffer__szk;
2693 	int err;
2694 
2695 	if (!ptr->data)
2696 		return NULL;
2697 
2698 	err = bpf_dynptr_check_off_len(ptr, offset, len);
2699 	if (err)
2700 		return NULL;
2701 
2702 	type = bpf_dynptr_get_type(ptr);
2703 
2704 	switch (type) {
2705 	case BPF_DYNPTR_TYPE_LOCAL:
2706 	case BPF_DYNPTR_TYPE_RINGBUF:
2707 		return ptr->data + ptr->offset + offset;
2708 	case BPF_DYNPTR_TYPE_SKB:
2709 		if (buffer__opt)
2710 			return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer__opt);
2711 		else
2712 			return skb_pointer_if_linear(ptr->data, ptr->offset + offset, len);
2713 	case BPF_DYNPTR_TYPE_XDP:
2714 	{
2715 		void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len);
2716 		if (!IS_ERR_OR_NULL(xdp_ptr))
2717 			return xdp_ptr;
2718 
2719 		if (!buffer__opt)
2720 			return NULL;
2721 		bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer__opt, len, false);
2722 		return buffer__opt;
2723 	}
2724 	case BPF_DYNPTR_TYPE_SKB_META:
2725 		return bpf_skb_meta_pointer(ptr->data, ptr->offset + offset);
2726 	default:
2727 		WARN_ONCE(true, "unknown dynptr type %d\n", type);
2728 		return NULL;
2729 	}
2730 }
2731 
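/* Example usage from a tc BPF program (an illustrative sketch): read the
 * Ethernet header of an skb dynptr, falling back to a stack buffer when
 * the header is not in the linear area.
 *
 *	struct ethhdr buf, *eth;
 *
 *	eth = bpf_dynptr_slice(&dynptr, 0, &buf, sizeof(buf));
 *	if (!eth)
 *		return TC_ACT_SHOT;
 *
 *	// eth points either into the packet or into buf; treat as read-only
 */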
2732 /**
2733  * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data.
2734  * @p: The dynptr whose data slice to retrieve
2735  * @offset: Offset into the dynptr
2736  * @buffer__opt: User-provided buffer to copy contents into. May be NULL
2737  * @buffer__szk: Size (in bytes) of the buffer if present. This is the
2738  *               length of the requested slice. This must be a constant.
2739  *
2740  * For non-skb and non-xdp type dynptrs, there is no difference between
2741  * bpf_dynptr_slice and bpf_dynptr_data.
2742  *
2743  * If buffer__opt is NULL, the call will fail if a buffer is needed.
2744  *
2745  * The returned pointer is writable and may point to either directly the dynptr
2746  * data at the requested offset or to the buffer if unable to obtain a direct
2747  * data pointer to (example: the requested slice is to the paged area of an skb
2748  * packet). In the case where the returned pointer is to the buffer, the user
2749  * is responsible for persisting writes through calling bpf_dynptr_write(). This
2750  * usually looks something like this pattern:
2751  *
2752  * struct eth_hdr *eth = bpf_dynptr_slice_rdwr(&dynptr, 0, buffer, sizeof(buffer));
2753  * if (!eth)
2754  *	return TC_ACT_SHOT;
2755  *
2756  * // mutate eth header //
2757  *
2758  * if (eth == buffer)
2759  *	bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0);
2760  *
2761  * Please note that, as in the example above, the user must check that the
2762  * returned pointer is not null before using it.
2763  *
2764  * Please also note that in the case of skb and xdp dynptrs, bpf_dynptr_slice_rdwr
2765  * does not change the underlying packet data pointers, so a call to
2766  * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in
2767  * the bpf program.
2768  *
2769  * Return: NULL if the call failed (eg invalid dynptr), pointer to a
2770  * data slice (can be either direct pointer to the data or a pointer to the user
2771  * provided buffer, with its contents containing the data, if unable to obtain
2772  * direct pointer)
2773  */
2774 __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset,
2775 					void *buffer__opt, u32 buffer__szk)
2776 {
2777 	const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2778 
2779 	if (!ptr->data || __bpf_dynptr_is_rdonly(ptr))
2780 		return NULL;
2781 
2782 	/* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice.
2783 	 *
2784 	 * For skb-type dynptrs, it is safe to write into the returned pointer
2785 	 * if the bpf program allows skb data writes. There are two possibilities
2786 	 * that may occur when calling bpf_dynptr_slice_rdwr:
2787 	 *
2788 	 * 1) The requested slice is in the head of the skb. In this case, the
2789 	 * returned pointer is directly to skb data, and if the skb is cloned, the
2790 	 * verifier will have uncloned it (see bpf_unclone_prologue()) already.
2791 	 * The pointer can be directly written into.
2792 	 *
2793 	 * 2) Some portion of the requested slice is in the paged buffer area.
2794 	 * In this case, the requested data will be copied out into the buffer
2795 	 * and the returned pointer will be a pointer to the buffer. The skb
2796 	 * will not be pulled. To persist the write, the user will need to call
2797 	 * bpf_dynptr_write(), which will pull the skb and commit the write.
2798 	 *
2799 	 * Similarly for xdp programs, if the requested slice is not across xdp
2800 	 * fragments, then a direct pointer will be returned, otherwise the data
2801 	 * will be copied out into the buffer and the user will need to call
2802 	 * bpf_dynptr_write() to commit changes.
2803 	 */
2804 	return bpf_dynptr_slice(p, offset, buffer__opt, buffer__szk);
2805 }
2806 
2807 __bpf_kfunc int bpf_dynptr_adjust(const struct bpf_dynptr *p, u32 start, u32 end)
2808 {
2809 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2810 	u32 size;
2811 
2812 	if (!ptr->data || start > end)
2813 		return -EINVAL;
2814 
2815 	size = __bpf_dynptr_size(ptr);
2816 
2817 	if (start > size || end > size)
2818 		return -ERANGE;
2819 
2820 	ptr->offset += start;
2821 	bpf_dynptr_set_size(ptr, end - start);
2822 
2823 	return 0;
2824 }
2825 
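/* Example usage from a BPF program (an illustrative sketch; 'hdr_len' is
 * an assumed, already-validated header length): shrink the dynptr view to
 * skip a parsed header. Note that start/end are relative to the current
 * view, so successive adjustments accumulate.
 *
 *	if (bpf_dynptr_adjust(&dynptr, hdr_len, bpf_dynptr_size(&dynptr)))
 *		return 0; // hdr_len out of range
 */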
2826 __bpf_kfunc bool bpf_dynptr_is_null(const struct bpf_dynptr *p)
2827 {
2828 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2829 
2830 	return !ptr->data;
2831 }
2832 
2833 __bpf_kfunc bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *p)
2834 {
2835 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2836 
2837 	if (!ptr->data)
2838 		return false;
2839 
2840 	return __bpf_dynptr_is_rdonly(ptr);
2841 }
2842 
2843 __bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr *p)
2844 {
2845 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2846 
2847 	if (!ptr->data)
2848 		return -EINVAL;
2849 
2850 	return __bpf_dynptr_size(ptr);
2851 }
2852 
2853 __bpf_kfunc int bpf_dynptr_clone(const struct bpf_dynptr *p,
2854 				 struct bpf_dynptr *clone__uninit)
2855 {
2856 	struct bpf_dynptr_kern *clone = (struct bpf_dynptr_kern *)clone__uninit;
2857 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2858 
2859 	if (!ptr->data) {
2860 		bpf_dynptr_set_null(clone);
2861 		return -EINVAL;
2862 	}
2863 
2864 	*clone = *ptr;
2865 
2866 	return 0;
2867 }
2868 
2869 /**
2870  * bpf_dynptr_copy() - Copy data from one dynptr to another.
2871  * @dst_ptr: Destination dynptr - where data should be copied to
2872  * @dst_off: Offset into the destination dynptr
2873  * @src_ptr: Source dynptr - where data should be copied from
2874  * @src_off: Offset into the source dynptr
2875  * @size: Length of the data to copy from source to destination
2876  *
2877  * Copies data from source dynptr to destination dynptr.
2878  * Returns 0 on success; negative error, otherwise.
2879  */
2880 __bpf_kfunc int bpf_dynptr_copy(struct bpf_dynptr *dst_ptr, u32 dst_off,
2881 				struct bpf_dynptr *src_ptr, u32 src_off, u32 size)
2882 {
2883 	struct bpf_dynptr_kern *dst = (struct bpf_dynptr_kern *)dst_ptr;
2884 	struct bpf_dynptr_kern *src = (struct bpf_dynptr_kern *)src_ptr;
2885 	void *src_slice, *dst_slice;
2886 	char buf[256];
2887 	u32 off;
2888 
2889 	src_slice = bpf_dynptr_slice(src_ptr, src_off, NULL, size);
2890 	dst_slice = bpf_dynptr_slice_rdwr(dst_ptr, dst_off, NULL, size);
2891 
2892 	if (src_slice && dst_slice) {
2893 		memmove(dst_slice, src_slice, size);
2894 		return 0;
2895 	}
2896 
2897 	if (src_slice)
2898 		return __bpf_dynptr_write(dst, dst_off, src_slice, size, 0);
2899 
2900 	if (dst_slice)
2901 		return __bpf_dynptr_read(dst_slice, size, src, src_off, 0);
2902 
2903 	if (bpf_dynptr_check_off_len(dst, dst_off, size) ||
2904 	    bpf_dynptr_check_off_len(src, src_off, size))
2905 		return -E2BIG;
2906 
2907 	off = 0;
2908 	while (off < size) {
2909 		u32 chunk_sz = min_t(u32, sizeof(buf), size - off);
2910 		int err;
2911 
2912 		err = __bpf_dynptr_read(buf, chunk_sz, src, src_off + off, 0);
2913 		if (err)
2914 			return err;
2915 		err = __bpf_dynptr_write(dst, dst_off + off, buf, chunk_sz, 0);
2916 		if (err)
2917 			return err;
2918 
2919 		off += chunk_sz;
2920 	}
2921 	return 0;
2922 }
2923 
2924 /**
2925  * bpf_dynptr_memset() - Fill dynptr memory with a constant byte.
2926  * @p: Destination dynptr - where data will be filled
2927  * @offset: Offset into the dynptr to start filling from
2928  * @size: Number of bytes to fill
2929  * @val: Constant byte to fill the memory with
2930  *
2931  * Fills the @size bytes of the memory area pointed to by @p
2932  * at @offset with the constant byte @val.
2933  * Returns 0 on success; negative error, otherwise.
2934  */
2935 __bpf_kfunc int bpf_dynptr_memset(struct bpf_dynptr *p, u32 offset, u32 size, u8 val)
2936 {
2937 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2938 	u32 chunk_sz, write_off;
2939 	char buf[256];
2940 	void *slice;
2941 	int err;
2942 
2943 	slice = bpf_dynptr_slice_rdwr(p, offset, NULL, size);
2944 	if (likely(slice)) {
2945 		memset(slice, val, size);
2946 		return 0;
2947 	}
2948 
2949 	if (__bpf_dynptr_is_rdonly(ptr))
2950 		return -EINVAL;
2951 
2952 	err = bpf_dynptr_check_off_len(ptr, offset, size);
2953 	if (err)
2954 		return err;
2955 
2956 	/* Non-linear data under the dynptr, write from a local buffer */
2957 	chunk_sz = min_t(u32, sizeof(buf), size);
2958 	memset(buf, val, chunk_sz);
2959 
2960 	for (write_off = 0; write_off < size; write_off += chunk_sz) {
2961 		chunk_sz = min_t(u32, sizeof(buf), size - write_off);
2962 		err = __bpf_dynptr_write(ptr, offset + write_off, buf, chunk_sz, 0);
2963 		if (err)
2964 			return err;
2965 	}
2966 
2967 	return 0;
2968 }
2969 
2970 __bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
2971 {
2972 	return obj;
2973 }
2974 
2975 __bpf_kfunc void *bpf_rdonly_cast(const void *obj__ign, u32 btf_id__k)
2976 {
2977 	return (void *)obj__ign;
2978 }
2979 
2980 __bpf_kfunc void bpf_rcu_read_lock(void)
2981 {
2982 	rcu_read_lock();
2983 }
2984 
2985 __bpf_kfunc void bpf_rcu_read_unlock(void)
2986 {
2987 	rcu_read_unlock();
2988 }
2989 
2990 struct bpf_throw_ctx {
2991 	struct bpf_prog_aux *aux;
2992 	u64 sp;
2993 	u64 bp;
2994 	int cnt;
2995 };
2996 
2997 static bool bpf_stack_walker(void *cookie, u64 ip, u64 sp, u64 bp)
2998 {
2999 	struct bpf_throw_ctx *ctx = cookie;
3000 	struct bpf_prog *prog;
3001 
3002 	/*
3003 	 * The RCU read lock is held to safely traverse the latch tree, but we
3004 	 * don't need its protection when accessing the prog, since it has an
3005 	 * active stack frame on the current stack trace, and won't disappear.
3006 	 */
3007 	rcu_read_lock();
3008 	prog = bpf_prog_ksym_find(ip);
3009 	rcu_read_unlock();
3010 	if (!prog)
3011 		return !ctx->cnt;
3012 	ctx->cnt++;
3013 	if (bpf_is_subprog(prog))
3014 		return true;
3015 	ctx->aux = prog->aux;
3016 	ctx->sp = sp;
3017 	ctx->bp = bp;
3018 	return false;
3019 }
3020 
3021 __bpf_kfunc void bpf_throw(u64 cookie)
3022 {
3023 	struct bpf_throw_ctx ctx = {};
3024 
3025 	arch_bpf_stack_walk(bpf_stack_walker, &ctx);
3026 	WARN_ON_ONCE(!ctx.aux);
3027 	if (ctx.aux)
3028 		WARN_ON_ONCE(!ctx.aux->exception_boundary);
3029 	WARN_ON_ONCE(!ctx.bp);
3030 	WARN_ON_ONCE(!ctx.cnt);
3031 	/* Prevent KASAN false positives for CONFIG_KASAN_STACK by unpoisoning
3032 	 * deeper stack depths than ctx.sp as we do not return from bpf_throw,
3033 	 * which skips compiler generated instrumentation to do the same.
3034 	 */
3035 	kasan_unpoison_task_stack_below((void *)(long)ctx.sp);
3036 	ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp, 0, 0);
3037 	WARN(1, "A call to BPF exception callback should never return\n");
3038 }
3039 
3040 __bpf_kfunc int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags)
3041 {
3042 	struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
3043 	struct bpf_map *map = p__map;
3044 
3045 	BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_wq));
3046 	BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_wq));
3047 
3048 	if (flags)
3049 		return -EINVAL;
3050 
3051 	return __bpf_async_init(async, map, flags, BPF_ASYNC_TYPE_WQ);
3052 }
3053 
3054 __bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags)
3055 {
3056 	struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
3057 	struct bpf_work *w;
3058 
3059 	if (in_nmi())
3060 		return -EOPNOTSUPP;
3061 	if (flags)
3062 		return -EINVAL;
3063 	w = READ_ONCE(async->work);
3064 	if (!w || !READ_ONCE(w->cb.prog))
3065 		return -EINVAL;
3066 
3067 	schedule_work(&w->work);
3068 	return 0;
3069 }
3070 
3071 __bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq,
3072 					 int (callback_fn)(void *map, int *key, void *value),
3073 					 unsigned int flags,
3074 					 void *aux__prog)
3075 {
3076 	struct bpf_prog_aux *aux = (struct bpf_prog_aux *)aux__prog;
3077 	struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
3078 
3079 	if (flags)
3080 		return -EINVAL;
3081 
3082 	return __bpf_async_set_callback(async, callback_fn, aux, flags, BPF_ASYNC_TYPE_WQ);
3083 }
3084 
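/* Example usage from a BPF program (an illustrative sketch; 'struct elem'
 * with an embedded struct bpf_wq and the 'array' map are assumptions, and
 * bpf_wq_set_callback() is the bpf_experimental.h wrapper around
 * bpf_wq_set_callback_impl()):
 *
 *	static int wq_cb(void *map, int *key, void *value)
 *	{
 *		// runs later from workqueue (sleepable) context
 *		return 0;
 *	}
 *
 *	struct elem *e = bpf_map_lookup_elem(&array, &key);
 *	if (!e)
 *		return 0;
 *	if (bpf_wq_init(&e->wq, &array, 0))
 *		return 0;
 *	if (bpf_wq_set_callback(&e->wq, wq_cb, 0))
 *		return 0;
 *	bpf_wq_start(&e->wq, 0);
 */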
3085 __bpf_kfunc void bpf_preempt_disable(void)
3086 {
3087 	preempt_disable();
3088 }
3089 
3090 __bpf_kfunc void bpf_preempt_enable(void)
3091 {
3092 	preempt_enable();
3093 }
3094 
3095 struct bpf_iter_bits {
3096 	__u64 __opaque[2];
3097 } __aligned(8);
3098 
3099 #define BITS_ITER_NR_WORDS_MAX 511
3100 
3101 struct bpf_iter_bits_kern {
3102 	union {
3103 		__u64 *bits;
3104 		__u64 bits_copy;
3105 	};
3106 	int nr_bits;
3107 	int bit;
3108 } __aligned(8);
3109 
3110 /* On 64-bit hosts, unsigned long and u64 have the same size, so passing
3111  * a u64 pointer and an unsigned long pointer to find_next_bit() will
3112  * return the same result, as both point to the same 8-byte area.
3113  *
3114  * For 32-bit little-endian hosts, using a u64 pointer or unsigned long
3115  * pointer also makes no difference. This is because the first iterated
3116  * unsigned long is composed of bits 0-31 of the u64 and the second unsigned
3117  * long is composed of bits 32-63 of the u64.
3118  *
3119  * However, for 32-bit big-endian hosts, this is not the case. The first
3120  * iterated unsigned long will be bits 32-63 of the u64, so swap these two
3121  * ulong values within the u64.
3122  */
3123 static void swap_ulong_in_u64(u64 *bits, unsigned int nr)
3124 {
3125 #if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
3126 	unsigned int i;
3127 
3128 	for (i = 0; i < nr; i++)
3129 		bits[i] = (bits[i] >> 32) | ((u64)(u32)bits[i] << 32);
3130 #endif
3131 }
3132 
3133 /**
3134  * bpf_iter_bits_new() - Initialize a new bits iterator for a given memory area
3135  * @it: The new bpf_iter_bits to be created
3136  * @unsafe_ptr__ign: A pointer pointing to a memory area to be iterated over
3137  * @nr_words: The size of the specified memory area, measured in 8-byte units.
3138  * The maximum value of @nr_words is @BITS_ITER_NR_WORDS_MAX. This limit may be
3139  * further reduced by the BPF memory allocator implementation.
3140  *
3141  * This function initializes a new bpf_iter_bits structure for iterating over
3142  * a memory area which is specified by the @unsafe_ptr__ign and @nr_words. It
3143  * copies the data of the memory area to the newly created bpf_iter_bits @it for
3144  * subsequent iteration operations.
3145  *
3146  * On success, 0 is returned. On failure, ERR is returned.
3147  */
3148 __bpf_kfunc int
3149 bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words)
3150 {
3151 	struct bpf_iter_bits_kern *kit = (void *)it;
3152 	u32 nr_bytes = nr_words * sizeof(u64);
3153 	u32 nr_bits = BYTES_TO_BITS(nr_bytes);
3154 	int err;
3155 
3156 	BUILD_BUG_ON(sizeof(struct bpf_iter_bits_kern) != sizeof(struct bpf_iter_bits));
3157 	BUILD_BUG_ON(__alignof__(struct bpf_iter_bits_kern) !=
3158 		     __alignof__(struct bpf_iter_bits));
3159 
3160 	kit->nr_bits = 0;
3161 	kit->bits_copy = 0;
3162 	kit->bit = -1;
3163 
3164 	if (!unsafe_ptr__ign || !nr_words)
3165 		return -EINVAL;
3166 	if (nr_words > BITS_ITER_NR_WORDS_MAX)
3167 		return -E2BIG;
3168 
3169 	/* Optimization for u64 mask */
3170 	if (nr_bits == 64) {
3171 		err = bpf_probe_read_kernel_common(&kit->bits_copy, nr_bytes, unsafe_ptr__ign);
3172 		if (err)
3173 			return -EFAULT;
3174 
3175 		swap_ulong_in_u64(&kit->bits_copy, nr_words);
3176 
3177 		kit->nr_bits = nr_bits;
3178 		return 0;
3179 	}
3180 
3181 	if (bpf_mem_alloc_check_size(false, nr_bytes))
3182 		return -E2BIG;
3183 
3184 	/* Fallback to memalloc */
3185 	kit->bits = bpf_mem_alloc(&bpf_global_ma, nr_bytes);
3186 	if (!kit->bits)
3187 		return -ENOMEM;
3188 
3189 	err = bpf_probe_read_kernel_common(kit->bits, nr_bytes, unsafe_ptr__ign);
3190 	if (err) {
3191 		bpf_mem_free(&bpf_global_ma, kit->bits);
3192 		return err;
3193 	}
3194 
3195 	swap_ulong_in_u64(kit->bits, nr_words);
3196 
3197 	kit->nr_bits = nr_bits;
3198 	return 0;
3199 }
3200 
3201 /**
3202  * bpf_iter_bits_next() - Get the next bit in a bpf_iter_bits
3203  * @it: The bpf_iter_bits to be checked
3204  *
3205  * This function returns a pointer to a number representing the index of the
3206  * next set bit in the bit mask.
3207  *
3208  * If there are no further bits available, it returns NULL.
3209  */
3210 __bpf_kfunc int *bpf_iter_bits_next(struct bpf_iter_bits *it)
3211 {
3212 	struct bpf_iter_bits_kern *kit = (void *)it;
3213 	int bit = kit->bit, nr_bits = kit->nr_bits;
3214 	const void *bits;
3215 
3216 	if (!nr_bits || bit >= nr_bits)
3217 		return NULL;
3218 
3219 	bits = nr_bits == 64 ? &kit->bits_copy : kit->bits;
3220 	bit = find_next_bit(bits, nr_bits, bit + 1);
3221 	if (bit >= nr_bits) {
3222 		kit->bit = bit;
3223 		return NULL;
3224 	}
3225 
3226 	kit->bit = bit;
3227 	return &kit->bit;
3228 }
3229 
3230 /**
3231  * bpf_iter_bits_destroy() - Destroy a bpf_iter_bits
3232  * @it: The bpf_iter_bits to be destroyed
3233  *
3234  * Destroy the resource associated with the bpf_iter_bits.
3235  */
3236 __bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it)
3237 {
3238 	struct bpf_iter_bits_kern *kit = (void *)it;
3239 
3240 	if (kit->nr_bits <= 64)
3241 		return;
3242 	bpf_mem_free(&bpf_global_ma, kit->bits);
3243 }
3244 
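/* Example usage from a BPF program (an illustrative sketch; 'mask' is an
 * assumed u64 bitmask readable as kernel memory): count the set bits in a
 * single word. The iterator must be destroyed after a successful _new().
 *
 *	struct bpf_iter_bits it;
 *	int *bit, cnt = 0;
 *
 *	if (!bpf_iter_bits_new(&it, &mask, 1))
 *		while ((bit = bpf_iter_bits_next(&it)))
 *			cnt++;
 *	bpf_iter_bits_destroy(&it);
 */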
3245 /**
3246  * bpf_copy_from_user_str() - Copy a string from an unsafe user address
3247  * @dst:             Destination address, in kernel space.  This buffer must be
3248  *                   at least @dst__sz bytes long.
3249  * @dst__sz:         Maximum number of bytes to copy, includes the trailing NUL.
3250  * @unsafe_ptr__ign: Source address, in user space.
3251  * @flags:           The only supported flag is BPF_F_PAD_ZEROS
3252  *
3253  * Copies a NUL-terminated string from userspace to BPF space. If the user
3254  * string is too long, this will still ensure zero termination in the @dst
3255  * buffer unless the buffer size is 0.
3256  *
3257  * If BPF_F_PAD_ZEROS flag is set, memset the tail of @dst to 0 on success and
3258  * memset all of @dst on failure.
 *
 * Return: The number of copied bytes on success including the NUL terminator.
 * A negative error code on failure.
3259  */
3260 __bpf_kfunc int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void __user *unsafe_ptr__ign, u64 flags)
3261 {
3262 	int ret;
3263 
3264 	if (unlikely(flags & ~BPF_F_PAD_ZEROS))
3265 		return -EINVAL;
3266 
3267 	if (unlikely(!dst__sz))
3268 		return 0;
3269 
3270 	ret = strncpy_from_user(dst, unsafe_ptr__ign, dst__sz - 1);
3271 	if (ret < 0) {
3272 		if (flags & BPF_F_PAD_ZEROS)
3273 			memset((char *)dst, 0, dst__sz);
3274 
3275 		return ret;
3276 	}
3277 
3278 	if (flags & BPF_F_PAD_ZEROS)
3279 		memset((char *)dst + ret, 0, dst__sz - ret);
3280 	else
3281 		((char *)dst)[ret] = '\0';
3282 
3283 	return ret + 1;
3284 }
3285 
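/* Example usage from a BPF program (an illustrative sketch; 'user_ptr' is
 * an assumed user-space address): copy a string from user memory.
 * BPF_F_PAD_ZEROS makes the result suitable as a fixed-size map key
 * because the tail bytes are deterministic.
 *
 *	char name[64];
 *	int len;
 *
 *	len = bpf_copy_from_user_str(name, sizeof(name), user_ptr, BPF_F_PAD_ZEROS);
 *	if (len < 0)
 *		return 0;
 */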
3286 /**
3287  * bpf_copy_from_user_task_str() - Copy a string from a task's address space
3288  * @dst:             Destination address, in kernel space.  This buffer must be
3289  *                   at least @dst__sz bytes long.
3290  * @dst__sz:         Maximum number of bytes to copy, includes the trailing NUL.
3291  * @unsafe_ptr__ign: Source address in the task's address space.
3292  * @tsk:             The task whose address space will be used
3293  * @flags:           The only supported flag is BPF_F_PAD_ZEROS
3294  *
3295  * Copies a NUL terminated string from a task's address space to the @dst
3296  * buffer. If the user string is too long, this will still ensure zero
3297  * termination in the @dst buffer unless the buffer size is 0.
3298  *
3299  * If BPF_F_PAD_ZEROS flag is set, memset the tail of @dst to 0 on success
3300  * and memset all of @dst on failure.
3301  *
3302  * Return: The number of copied bytes on success including the NUL terminator.
3303  * A negative error code on failure.
3304  */
3305 __bpf_kfunc int bpf_copy_from_user_task_str(void *dst, u32 dst__sz,
3306 					    const void __user *unsafe_ptr__ign,
3307 					    struct task_struct *tsk, u64 flags)
3308 {
3309 	int ret;
3310 
3311 	if (unlikely(flags & ~BPF_F_PAD_ZEROS))
3312 		return -EINVAL;
3313 
3314 	if (unlikely(dst__sz == 0))
3315 		return 0;
3316 
3317 	ret = copy_remote_vm_str(tsk, (unsigned long)unsafe_ptr__ign, dst, dst__sz, 0);
3318 	if (ret < 0) {
3319 		if (flags & BPF_F_PAD_ZEROS)
3320 			memset(dst, 0, dst__sz);
3321 		return ret;
3322 	}
3323 
3324 	if (flags & BPF_F_PAD_ZEROS)
3325 		memset(dst + ret, 0, dst__sz - ret);
3326 
3327 	return ret + 1;
3328 }
3329 
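/*
 * A sketch of the remote variant: read a string out of another task's address
 * space, e.g. from a task iterator program. "tsk" and "uptr" are hypothetical.
 *
 *	char arg[64];
 *	int n;
 *
 *	n = bpf_copy_from_user_task_str(arg, sizeof(arg), uptr, tsk, 0);
 *	if (n < 0)
 *		return 0;
 */
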
3330 /* Keep unsigned long in the prototype so that the kfunc is usable when
3331  * emitted to vmlinux.h in BPF programs directly. Note that while in a BPF
3332  * prog the unsigned long always points to an 8-byte region on the stack,
3333  * the kernel may only read and write 4 of those bytes on 32-bit.
3334  */
3335 __bpf_kfunc void bpf_local_irq_save(unsigned long *flags__irq_flag)
3336 {
3337 	local_irq_save(*flags__irq_flag);
3338 }
3339 
3340 __bpf_kfunc void bpf_local_irq_restore(unsigned long *flags__irq_flag)
3341 {
3342 	local_irq_restore(*flags__irq_flag);
3343 }
3344 
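/*
 * Illustrative BPF-side usage (a sketch): the verifier enforces that a
 * bpf_local_irq_save() is paired with a bpf_local_irq_restore() of the same
 * flags before the program exits.
 *
 *	unsigned long flags;
 *
 *	bpf_local_irq_save(&flags);
 *	// IRQs are off on this CPU: keep the critical section short
 *	bpf_local_irq_restore(&flags);
 */
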
3345 __bpf_kfunc void __bpf_trap(void)
3346 {
3347 }
3348 
3349 /*
3350  * Kfuncs for string operations.
3351  *
3352  * Since strings are not necessarily %NUL-terminated, we cannot directly call
3353  * in-kernel implementations. Instead, we open-code the implementations using
3354  * __get_kernel_nofault instead of plain dereference to make them safe.
3355  */
3356 
3357 static int __bpf_strcasecmp(const char *s1, const char *s2, bool ignore_case)
3358 {
3359 	char c1, c2;
3360 	int i;
3361 
3362 	if (!copy_from_kernel_nofault_allowed(s1, 1) ||
3363 	    !copy_from_kernel_nofault_allowed(s2, 1)) {
3364 		return -ERANGE;
3365 	}
3366 
3367 	guard(pagefault)();
3368 	for (i = 0; i < XATTR_SIZE_MAX; i++) {
3369 		__get_kernel_nofault(&c1, s1, char, err_out);
3370 		__get_kernel_nofault(&c2, s2, char, err_out);
3371 		if (ignore_case) {
3372 			c1 = tolower(c1);
3373 			c2 = tolower(c2);
3374 		}
3375 		if (c1 != c2)
3376 			return c1 < c2 ? -1 : 1;
3377 		if (c1 == '\0')
3378 			return 0;
3379 		s1++;
3380 		s2++;
3381 	}
3382 	return -E2BIG;
3383 err_out:
3384 	return -EFAULT;
3385 }
3386 
3387 /**
3388  * bpf_strcmp - Compare two strings
3389  * @s1__ign: One string
3390  * @s2__ign: Another string
3391  *
3392  * Return:
3393  * * %0       - Strings are equal
3394  * * %-1      - @s1__ign is smaller
3395  * * %1       - @s2__ign is smaller
3396  * * %-EFAULT - Cannot read one of the strings
3397  * * %-E2BIG  - One of the strings is too large
3398  * * %-ERANGE - One of the strings is outside of kernel address space
3399  */
3400 __bpf_kfunc int bpf_strcmp(const char *s1__ign, const char *s2__ign)
3401 {
3402 	return __bpf_strcasecmp(s1__ign, s2__ign, false);
3403 }
3404 
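/*
 * Illustrative BPF-side usage (a sketch): compare a copied-in, NUL-terminated
 * buffer against a literal. "name" is hypothetical. Note that the error codes
 * (-EFAULT, -E2BIG, -ERANGE) are all smaller than the -1 "s1 is smaller"
 * result, so they can be told apart.
 *
 *	int ret = bpf_strcmp(name, "passwd");
 *
 *	if (ret == 0)
 *		bpf_printk("name is passwd");
 *	else if (ret < -1)
 *		bpf_printk("compare failed: %d", ret);
 */
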
3405 /**
3406  * bpf_strcasecmp - Compare two strings, ignoring the case of the characters
3407  * @s1__ign: One string
3408  * @s2__ign: Another string
3409  *
3410  * Return:
3411  * * %0       - Strings are equal
3412  * * %-1      - @s1__ign is smaller
3413  * * %1       - @s2__ign is smaller
3414  * * %-EFAULT - Cannot read one of the strings
3415  * * %-E2BIG  - One of the strings is too large
3416  * * %-ERANGE - One of the strings is outside of kernel address space
3417  */
3418 __bpf_kfunc int bpf_strcasecmp(const char *s1__ign, const char *s2__ign)
3419 {
3420 	return __bpf_strcasecmp(s1__ign, s2__ign, true);
3421 }
3422 
3423 /**
3424  * bpf_strnchr - Find a character in a length limited string
3425  * @s__ign: The string to be searched
3426  * @count: The number of characters to be searched
3427  * @c: The character to search for
3428  *
3429  * Note that the %NUL-terminator is considered part of the string, and can
3430  * be searched for.
3431  *
3432  * Return:
3433  * * >=0      - Index of the first occurrence of @c within @s__ign
3434  * * %-ENOENT - @c not found in the first @count characters of @s__ign
3435  * * %-EFAULT - Cannot read @s__ign
3436  * * %-E2BIG  - @s__ign is too large
3437  * * %-ERANGE - @s__ign is outside of kernel address space
3438  */
3439 __bpf_kfunc int bpf_strnchr(const char *s__ign, size_t count, char c)
3440 {
3441 	char sc;
3442 	int i;
3443 
3444 	if (!copy_from_kernel_nofault_allowed(s__ign, 1))
3445 		return -ERANGE;
3446 
3447 	guard(pagefault)();
3448 	for (i = 0; i < count && i < XATTR_SIZE_MAX; i++) {
3449 		__get_kernel_nofault(&sc, s__ign, char, err_out);
3450 		if (sc == c)
3451 			return i;
3452 		if (sc == '\0')
3453 			return -ENOENT;
3454 		s__ign++;
3455 	}
3456 	return i == XATTR_SIZE_MAX ? -E2BIG : -ENOENT;
3457 err_out:
3458 	return -EFAULT;
3459 }
3460 
3461 /**
3462  * bpf_strchr - Find the first occurrence of a character in a string
3463  * @s__ign: The string to be searched
3464  * @c: The character to search for
3465  *
3466  * Note that the %NUL-terminator is considered part of the string, and can
3467  * be searched for.
3468  *
3469  * Return:
3470  * * >=0      - The index of the first occurrence of @c within @s__ign
3471  * * %-ENOENT - @c not found in @s__ign
3472  * * %-EFAULT - Cannot read @s__ign
3473  * * %-E2BIG  - @s__ign is too large
3474  * * %-ERANGE - @s__ign is outside of kernel address space
3475  */
3476 __bpf_kfunc int bpf_strchr(const char *s__ign, char c)
3477 {
3478 	return bpf_strnchr(s__ign, XATTR_SIZE_MAX, c);
3479 }
3480 
3481 /**
3482  * bpf_strchrnul - Find and return a character in a string, or end of string
3483  * @s__ign: The string to be searched
3484  * @c: The character to search for
3485  *
3486  * Return:
3487  * * >=0      - Index of the first occurrence of @c within @s__ign or index of
3488  *              the null byte at the end of @s__ign when @c is not found
3489  * * %-EFAULT - Cannot read @s__ign
3490  * * %-E2BIG  - @s__ign is too large
3491  * * %-ERANGE - @s__ign is outside of kernel address space
3492  */
3493 __bpf_kfunc int bpf_strchrnul(const char *s__ign, char c)
3494 {
3495 	char sc;
3496 	int i;
3497 
3498 	if (!copy_from_kernel_nofault_allowed(s__ign, 1))
3499 		return -ERANGE;
3500 
3501 	guard(pagefault)();
3502 	for (i = 0; i < XATTR_SIZE_MAX; i++) {
3503 		__get_kernel_nofault(&sc, s__ign, char, err_out);
3504 		if (sc == '\0' || sc == c)
3505 			return i;
3506 		s__ign++;
3507 	}
3508 	return -E2BIG;
3509 err_out:
3510 	return -EFAULT;
3511 }
3512 
3513 /**
3514  * bpf_strrchr - Find the last occurrence of a character in a string
3515  * @s__ign: The string to be searched
3516  * @c: The character to search for
3517  *
3518  * Return:
3519  * * >=0      - Index of the last occurrence of @c within @s__ign
3520  * * %-ENOENT - @c not found in @s__ign
3521  * * %-EFAULT - Cannot read @s__ign
3522  * * %-E2BIG  - @s__ign is too large
3523  * * %-ERANGE - @s__ign is outside of kernel address space
3524  */
3525 __bpf_kfunc int bpf_strrchr(const char *s__ign, int c)
3526 {
3527 	char sc;
3528 	int i, last = -ENOENT;
3529 
3530 	if (!copy_from_kernel_nofault_allowed(s__ign, 1))
3531 		return -ERANGE;
3532 
3533 	guard(pagefault)();
3534 	for (i = 0; i < XATTR_SIZE_MAX; i++) {
3535 		__get_kernel_nofault(&sc, s__ign, char, err_out);
3536 		if (sc == c)
3537 			last = i;
3538 		if (sc == '\0')
3539 			return last;
3540 		s__ign++;
3541 	}
3542 	return -E2BIG;
3543 err_out:
3544 	return -EFAULT;
3545 }
3546 
3547 /**
3548  * bpf_strnlen - Calculate the length of a length-limited string
3549  * @s__ign: The string
3550  * @count: The maximum number of characters to count
3551  *
3552  * Return:
3553  * * >=0      - The length of @s__ign
3554  * * %-EFAULT - Cannot read @s__ign
3555  * * %-E2BIG  - @s__ign is too large
3556  * * %-ERANGE - @s__ign is outside of kernel address space
3557  */
3558 __bpf_kfunc int bpf_strnlen(const char *s__ign, size_t count)
3559 {
3560 	char c;
3561 	int i;
3562 
3563 	if (!copy_from_kernel_nofault_allowed(s__ign, 1))
3564 		return -ERANGE;
3565 
3566 	guard(pagefault)();
3567 	for (i = 0; i < count && i < XATTR_SIZE_MAX; i++) {
3568 		__get_kernel_nofault(&c, s__ign, char, err_out);
3569 		if (c == '\0')
3570 			return i;
3571 		s__ign++;
3572 	}
3573 	return i == XATTR_SIZE_MAX ? -E2BIG : i;
3574 err_out:
3575 	return -EFAULT;
3576 }
3577 
3578 /**
3579  * bpf_strlen - Calculate the length of a string
3580  * @s__ign: The string
3581  *
3582  * Return:
3583  * * >=0      - The length of @s__ign
3584  * * %-EFAULT - Cannot read @s__ign
3585  * * %-E2BIG  - @s__ign is too large
3586  * * %-ERANGE - @s__ign is outside of kernel address space
3587  */
3588 __bpf_kfunc int bpf_strlen(const char *s__ign)
3589 {
3590 	return bpf_strnlen(s__ign, XATTR_SIZE_MAX);
3591 }
3592 
3593 /**
3594  * bpf_strspn - Calculate the length of the initial substring of @s__ign which
3595  *              only contains letters in @accept__ign
3596  * @s__ign: The string to be searched
3597  * @accept__ign: The string of characters to accept
3598  *
3599  * Return:
3600  * * >=0      - The length of the initial substring of @s__ign which only
3601  *              contains letters from @accept__ign
3602  * * %-EFAULT - Cannot read one of the strings
3603  * * %-E2BIG  - One of the strings is too large
3604  * * %-ERANGE - One of the strings is outside of kernel address space
3605  */
3606 __bpf_kfunc int bpf_strspn(const char *s__ign, const char *accept__ign)
3607 {
3608 	char cs, ca;
3609 	int i, j;
3610 
3611 	if (!copy_from_kernel_nofault_allowed(s__ign, 1) ||
3612 	    !copy_from_kernel_nofault_allowed(accept__ign, 1)) {
3613 		return -ERANGE;
3614 	}
3615 
3616 	guard(pagefault)();
3617 	for (i = 0; i < XATTR_SIZE_MAX; i++) {
3618 		__get_kernel_nofault(&cs, s__ign, char, err_out);
3619 		if (cs == '\0')
3620 			return i;
3621 		for (j = 0; j < XATTR_SIZE_MAX; j++) {
3622 			__get_kernel_nofault(&ca, accept__ign + j, char, err_out);
3623 			if (cs == ca || ca == '\0')
3624 				break;
3625 		}
3626 		if (j == XATTR_SIZE_MAX)
3627 			return -E2BIG;
3628 		if (ca == '\0')
3629 			return i;
3630 		s__ign++;
3631 	}
3632 	return -E2BIG;
3633 err_out:
3634 	return -EFAULT;
3635 }
3636 
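/*
 * A sketch of using bpf_strspn() to validate a string's character set: if the
 * span covers the whole length, every character was in the accept set. "ver"
 * is a hypothetical NUL-terminated buffer.
 *
 *	int len = bpf_strlen(ver);
 *	int span = bpf_strspn(ver, "0123456789.");
 *
 *	if (len >= 0 && span == len)
 *		bpf_printk("ver is a dotted number");
 */
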
3637 /**
3638  * bpf_strcspn - Calculate the length of the initial substring of @s__ign which
3639  *               does not contain letters in @reject__ign
3640  * @s__ign: The string to be searched
3641  * @reject__ign: The string of characters to reject
3642  *
3643  * Return:
3644  * * >=0      - The length of the initial substring of @s__ign which does not
3645  *              contain letters from @reject__ign
3646  * * %-EFAULT - Cannot read one of the strings
3647  * * %-E2BIG  - One of the strings is too large
3648  * * %-ERANGE - One of the strings is outside of kernel address space
3649  */
3650 __bpf_kfunc int bpf_strcspn(const char *s__ign, const char *reject__ign)
3651 {
3652 	char cs, cr;
3653 	int i, j;
3654 
3655 	if (!copy_from_kernel_nofault_allowed(s__ign, 1) ||
3656 	    !copy_from_kernel_nofault_allowed(reject__ign, 1)) {
3657 		return -ERANGE;
3658 	}
3659 
3660 	guard(pagefault)();
3661 	for (i = 0; i < XATTR_SIZE_MAX; i++) {
3662 		__get_kernel_nofault(&cs, s__ign, char, err_out);
3663 		if (cs == '\0')
3664 			return i;
3665 		for (j = 0; j < XATTR_SIZE_MAX; j++) {
3666 			__get_kernel_nofault(&cr, reject__ign + j, char, err_out);
3667 			if (cs == cr || cr == '\0')
3668 				break;
3669 		}
3670 		if (j == XATTR_SIZE_MAX)
3671 			return -E2BIG;
3672 		if (cr != '\0')
3673 			return i;
3674 		s__ign++;
3675 	}
3676 	return -E2BIG;
3677 err_out:
3678 	return -EFAULT;
3679 }
3680 
3681 /**
3682  * bpf_strnstr - Find the first substring in a length-limited string
3683  * @s1__ign: The string to be searched
3684  * @s2__ign: The string to search for
3685  * @len: the maximum number of characters to search
3686  *
3687  * Return:
3688  * * >=0      - Index of the first character of the first occurrence of @s2__ign
3689  *              within the first @len characters of @s1__ign
3690  * * %-ENOENT - @s2__ign not found in the first @len characters of @s1__ign
3691  * * %-EFAULT - Cannot read one of the strings
3692  * * %-E2BIG  - One of the strings is too large
3693  * * %-ERANGE - One of the strings is outside of kernel address space
3694  */
3695 __bpf_kfunc int bpf_strnstr(const char *s1__ign, const char *s2__ign, size_t len)
3696 {
3697 	char c1, c2;
3698 	int i, j;
3699 
3700 	if (!copy_from_kernel_nofault_allowed(s1__ign, 1) ||
3701 	    !copy_from_kernel_nofault_allowed(s2__ign, 1)) {
3702 		return -ERANGE;
3703 	}
3704 
3705 	guard(pagefault)();
3706 	for (i = 0; i < XATTR_SIZE_MAX; i++) {
3707 		for (j = 0; i + j <= len && j < XATTR_SIZE_MAX; j++) {
3708 			__get_kernel_nofault(&c2, s2__ign + j, char, err_out);
3709 			if (c2 == '\0')
3710 				return i;
3711 			/*
3712 			 * We allow reading an extra byte from s2 (note the
3713 			 * `i + j <= len` above) to cover the case when s2 is
3714 			 * a suffix of the first len chars of s1.
3715 			 */
3716 			if (i + j == len)
3717 				break;
3718 			__get_kernel_nofault(&c1, s1__ign + j, char, err_out);
3719 			if (c1 == '\0')
3720 				return -ENOENT;
3721 			if (c1 != c2)
3722 				break;
3723 		}
3724 		if (j == XATTR_SIZE_MAX)
3725 			return -E2BIG;
3726 		if (i + j == len)
3727 			return -ENOENT;
3728 		s1__ign++;
3729 	}
3730 	return -E2BIG;
3731 err_out:
3732 	return -EFAULT;
3733 }
3734 
3735 /**
3736  * bpf_strstr - Find the first substring in a string
3737  * @s1__ign: The string to be searched
3738  * @s2__ign: The string to search for
3739  *
3740  * Return:
3741  * * >=0      - Index of the first character of the first occurrence of @s2__ign
3742  *              within @s1__ign
3743  * * %-ENOENT - @s2__ign is not a substring of @s1__ign
3744  * * %-EFAULT - Cannot read one of the strings
3745  * * %-E2BIG  - One of the strings is too large
3746  * * %-ERANGE - One of the strings is outside of kernel address space
3747  */
3748 __bpf_kfunc int bpf_strstr(const char *s1__ign, const char *s2__ign)
3749 {
3750 	return bpf_strnstr(s1__ign, s2__ign, XATTR_SIZE_MAX);
3751 }
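/*
 * Illustrative BPF-side usage (a sketch): the substring kfuncs return an
 * index rather than a pointer, so callers do their own offset arithmetic.
 * "cmdline" is hypothetical.
 *
 *	int idx = bpf_strstr(cmdline, "--debug");
 *
 *	if (idx >= 0)
 *		bpf_printk("--debug at offset %d", idx);
 *	else if (idx != -ENOENT)
 *		bpf_printk("lookup failed: %d", idx);
 */
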
3752 #ifdef CONFIG_KEYS
3753 /**
3754  * bpf_lookup_user_key - lookup a key by its serial
3755  * @serial: key handle serial number
3756  * @flags: lookup-specific flags
3757  *
3758  * Search a key with a given *serial* and the provided *flags*.
3759  * If found, increment the reference count of the key by one, and
3760  * return it in the bpf_key structure.
3761  *
3762  * The bpf_key structure must be passed to bpf_key_put() when done
3763  * with it, so that the key reference count is decremented and the
3764  * bpf_key structure is freed.
3765  *
3766  * Permission checks are deferred to the time the key is used by
3767  * one of the available key-specific kfuncs.
3768  *
3769  * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
3770  * special keyring (e.g. session keyring), if it doesn't yet exist.
3771  * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
3772  * for the key construction, and to retrieve uninstantiated keys (keys
3773  * without data attached to them).
3774  *
3775  * Return: a bpf_key pointer with a valid key pointer if the key is found, a
3776  *         NULL pointer otherwise.
3777  */
3778 __bpf_kfunc struct bpf_key *bpf_lookup_user_key(s32 serial, u64 flags)
3779 {
3780 	key_ref_t key_ref;
3781 	struct bpf_key *bkey;
3782 
3783 	if (flags & ~KEY_LOOKUP_ALL)
3784 		return NULL;
3785 
3786 	/*
3787 	 * Permission check is deferred until the key is used, as the
3788 	 * intent of the caller is unknown here.
3789 	 */
3790 	key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
3791 	if (IS_ERR(key_ref))
3792 		return NULL;
3793 
3794 	bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
3795 	if (!bkey) {
3796 		key_put(key_ref_to_ptr(key_ref));
3797 		return NULL;
3798 	}
3799 
3800 	bkey->key = key_ref_to_ptr(key_ref);
3801 	bkey->has_ref = true;
3802 
3803 	return bkey;
3804 }
3805 
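/*
 * Illustrative BPF-side usage (a sketch, sleepable programs only): look up a
 * key by serial and release it when done. "serial" is hypothetical.
 *
 *	struct bpf_key *bkey;
 *
 *	bkey = bpf_lookup_user_key(serial, 0);
 *	if (!bkey)
 *		return 0;
 *	// ... use the key with a key-specific kfunc ...
 *	bpf_key_put(bkey);
 */
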
3806 /**
3807  * bpf_lookup_system_key - lookup a key by a system-defined ID
3808  * @id: key ID
3809  *
3810  * Obtain a bpf_key structure with a key pointer set to the passed key ID.
3811  * The key pointer is marked as invalid, to prevent bpf_key_put() from
3812  * attempting to decrement the key reference count on that pointer. The key
3813  * pointer set in such a way is currently understood only by
3814  * verify_pkcs7_signature().
3815  *
3816  * Set *id* to one of the values defined in include/linux/verification.h:
3817  * 0 for the primary keyring (immutable keyring of system keys);
3818  * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
3819  * (where keys can be added only if they are vouched for by existing keys
3820  * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
3821  * keyring (primarily used by the integrity subsystem to verify a kexec'ed
3822  * kernel image and, possibly, the initramfs signature).
3823  *
3824  * Return: a bpf_key pointer with an invalid key pointer set from the
3825  *         pre-determined ID on success, a NULL pointer otherwise
3826  */
3827 __bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
3828 {
3829 	struct bpf_key *bkey;
3830 
3831 	if (system_keyring_id_check(id) < 0)
3832 		return NULL;
3833 
3834 	bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
3835 	if (!bkey)
3836 		return NULL;
3837 
3838 	bkey->key = (struct key *)(unsigned long)id;
3839 	bkey->has_ref = false;
3840 
3841 	return bkey;
3842 }
3843 
3844 /**
3845  * bpf_key_put - decrement key reference count if key is valid and free bpf_key
3846  * @bkey: bpf_key structure
3847  *
3848  * Decrement the reference count of the key inside *bkey*, if the pointer
3849  * is valid, and free *bkey*.
3850  */
3851 __bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
3852 {
3853 	if (bkey->has_ref)
3854 		key_put(bkey->key);
3855 
3856 	kfree(bkey);
3857 }
3858 
3859 /**
3860  * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
3861  * @data_p: data to verify
3862  * @sig_p: signature of the data
3863  * @trusted_keyring: keyring with keys trusted for signature verification
3864  *
3865  * Verify the PKCS#7 signature *sig_p* against the supplied *data_p*
3866  * with keys in a keyring referenced by *trusted_keyring*.
3867  *
3868  * Return: 0 on success, a negative value on error.
3869  */
3870 __bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
3871 			       struct bpf_dynptr *sig_p,
3872 			       struct bpf_key *trusted_keyring)
3873 {
3874 #ifdef CONFIG_SYSTEM_DATA_VERIFICATION
3875 	struct bpf_dynptr_kern *data_ptr = (struct bpf_dynptr_kern *)data_p;
3876 	struct bpf_dynptr_kern *sig_ptr = (struct bpf_dynptr_kern *)sig_p;
3877 	const void *data, *sig;
3878 	u32 data_len, sig_len;
3879 	int ret;
3880 
3881 	if (trusted_keyring->has_ref) {
3882 		/*
3883 		 * Do the permission check deferred in bpf_lookup_user_key().
3884 		 * See bpf_lookup_user_key() for more details.
3885 		 *
3886 		 * A call to key_task_permission() here would be redundant, as
3887 		 * it is already done by keyring_search() called by
3888 		 * find_asymmetric_key().
3889 		 */
3890 		ret = key_validate(trusted_keyring->key);
3891 		if (ret < 0)
3892 			return ret;
3893 	}
3894 
3895 	data_len = __bpf_dynptr_size(data_ptr);
3896 	data = __bpf_dynptr_data(data_ptr, data_len);
3897 	sig_len = __bpf_dynptr_size(sig_ptr);
3898 	sig = __bpf_dynptr_data(sig_ptr, sig_len);
3899 
3900 	return verify_pkcs7_signature(data, data_len, sig, sig_len,
3901 				      trusted_keyring->key,
3902 				      VERIFYING_BPF_SIGNATURE, NULL,
3903 				      NULL);
3904 #else
3905 	return -EOPNOTSUPP;
3906 #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
3907 }
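/*
 * A sketch of the full verification flow from a sleepable BPF program: data
 * and signature arrive as dynptrs, the keyring comes from
 * bpf_lookup_system_key(). The "data" and "sig" dynptrs are hypothetical and
 * assumed to be already initialized (e.g. via bpf_dynptr_from_mem()).
 *
 *	struct bpf_key *tk;
 *	int ret;
 *
 *	tk = bpf_lookup_system_key(VERIFY_USE_SECONDARY_KEYRING);
 *	if (!tk)
 *		return -ENOENT;
 *	ret = bpf_verify_pkcs7_signature(&data, &sig, tk);
 *	bpf_key_put(tk);
 */
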
3908 #endif /* CONFIG_KEYS */
3909 
3910 typedef int (*bpf_task_work_callback_t)(struct bpf_map *map, void *key, void *value);
3911 
3912 enum bpf_task_work_state {
3913 	/* bpf_task_work is ready to be used */
3914 	BPF_TW_STANDBY = 0,
3915 	/* irq work scheduling in progress */
3916 	BPF_TW_PENDING,
3917 	/* task work scheduling in progress */
3918 	BPF_TW_SCHEDULING,
3919 	/* task work is scheduled successfully */
3920 	BPF_TW_SCHEDULED,
3921 	/* callback is running */
3922 	BPF_TW_RUNNING,
3923 	/* associated BPF map value is deleted */
3924 	BPF_TW_FREED,
3925 };
3926 
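/*
 * Typical context lifetime (a sketch; the helpers below implement the exact
 * cmpxchg-based transitions):
 *
 *	STANDBY -> PENDING        schedule kfunc claimed the context
 *	PENDING -> SCHEDULING     irq work runs and calls task_work_add()
 *	SCHEDULING -> SCHEDULED   task work queued successfully
 *	SCHEDULING/SCHEDULED -> RUNNING   callback started executing
 *	RUNNING -> STANDBY        callback finished, context is reusable
 *	any state -> FREED        map value deleted, everything unwinds
 */
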
3927 struct bpf_task_work_ctx {
3928 	enum bpf_task_work_state state;
3929 	refcount_t refcnt;
3930 	struct callback_head work;
3931 	struct irq_work irq_work;
3932 	/* bpf_prog that schedules task work */
3933 	struct bpf_prog *prog;
3934 	/* task for which callback is scheduled */
3935 	struct task_struct *task;
3936 	/* the map and map value associated with this context */
3937 	struct bpf_map *map;
3938 	void *map_val;
3939 	enum task_work_notify_mode mode;
3940 	bpf_task_work_callback_t callback_fn;
3941 	struct rcu_head rcu;
3942 } __aligned(8);
3943 
3944 /* Actual type for struct bpf_task_work */
3945 struct bpf_task_work_kern {
3946 	struct bpf_task_work_ctx *ctx;
3947 };
3948 
3949 static void bpf_task_work_ctx_reset(struct bpf_task_work_ctx *ctx)
3950 {
3951 	if (ctx->prog) {
3952 		bpf_prog_put(ctx->prog);
3953 		ctx->prog = NULL;
3954 	}
3955 	if (ctx->task) {
3956 		bpf_task_release(ctx->task);
3957 		ctx->task = NULL;
3958 	}
3959 }
3960 
3961 static bool bpf_task_work_ctx_tryget(struct bpf_task_work_ctx *ctx)
3962 {
3963 	return refcount_inc_not_zero(&ctx->refcnt);
3964 }
3965 
3966 static void bpf_task_work_ctx_put(struct bpf_task_work_ctx *ctx)
3967 {
3968 	if (!refcount_dec_and_test(&ctx->refcnt))
3969 		return;
3970 
3971 	bpf_task_work_ctx_reset(ctx);
3972 
3973 	/* bpf_mem_free expects migration to be disabled */
3974 	migrate_disable();
3975 	bpf_mem_free(&bpf_global_ma, ctx);
3976 	migrate_enable();
3977 }
3978 
3979 static void bpf_task_work_cancel(struct bpf_task_work_ctx *ctx)
3980 {
3981 	/*
3982 	 * The scheduled task_work callback holds a ctx ref, so if we
3983 	 * successfully cancelled it, we put that ref on the callback's behalf.
3984 	 * If we couldn't cancel, the callback will inevitably run, or has
3985 	 * already completed running, and it takes care of its ctx ref itself.
3986 	 */
3987 	if (task_work_cancel(ctx->task, &ctx->work))
3988 		bpf_task_work_ctx_put(ctx);
3989 }
3990 
3991 static void bpf_task_work_callback(struct callback_head *cb)
3992 {
3993 	struct bpf_task_work_ctx *ctx = container_of(cb, struct bpf_task_work_ctx, work);
3994 	enum bpf_task_work_state state;
3995 	u32 idx;
3996 	void *key;
3997 
3998 	/* Read lock is needed to protect ctx and map key/value access */
3999 	guard(rcu_tasks_trace)();
4000 	/*
4001 	 * This callback may start running before bpf_task_work_irq() has switched
4002 	 * to the SCHEDULED state, so handle both SCHEDULING|SCHEDULED -> RUNNING transitions.
4003 	 */
4004 	state = cmpxchg(&ctx->state, BPF_TW_SCHEDULING, BPF_TW_RUNNING);
4005 	if (state == BPF_TW_SCHEDULED)
4006 		state = cmpxchg(&ctx->state, BPF_TW_SCHEDULED, BPF_TW_RUNNING);
4007 	if (state == BPF_TW_FREED) {
4008 		bpf_task_work_ctx_put(ctx);
4009 		return;
4010 	}
4011 
4012 	key = (void *)map_key_from_value(ctx->map, ctx->map_val, &idx);
4013 
4014 	migrate_disable();
4015 	ctx->callback_fn(ctx->map, key, ctx->map_val);
4016 	migrate_enable();
4017 
4018 	bpf_task_work_ctx_reset(ctx);
4019 	(void)cmpxchg(&ctx->state, BPF_TW_RUNNING, BPF_TW_STANDBY);
4020 
4021 	bpf_task_work_ctx_put(ctx);
4022 }
4023 
4024 static void bpf_task_work_irq(struct irq_work *irq_work)
4025 {
4026 	struct bpf_task_work_ctx *ctx = container_of(irq_work, struct bpf_task_work_ctx, irq_work);
4027 	enum bpf_task_work_state state;
4028 	int err;
4029 
4030 	guard(rcu_tasks_trace)();
4031 
4032 	if (cmpxchg(&ctx->state, BPF_TW_PENDING, BPF_TW_SCHEDULING) != BPF_TW_PENDING) {
4033 		bpf_task_work_ctx_put(ctx);
4034 		return;
4035 	}
4036 
4037 	err = task_work_add(ctx->task, &ctx->work, ctx->mode);
4038 	if (err) {
4039 		bpf_task_work_ctx_reset(ctx);
4040 		/*
4041 		 * try to switch back to STANDBY for another task_work reuse, but we might have
4042 		 * gone to FREED already, which is fine as we already cleaned up after ourselves
4043 		 */
4044 		(void)cmpxchg(&ctx->state, BPF_TW_SCHEDULING, BPF_TW_STANDBY);
4045 		bpf_task_work_ctx_put(ctx);
4046 		return;
4047 	}
4048 
4049 	/*
4050 	 * It's technically possible for the just-scheduled task_work callback to
4051 	 * have completed running by now, going SCHEDULING -> RUNNING and then
4052 	 * dropping its ctx refcount. Instead of capturing an extra ref just to
4053 	 * protect the ctx->state access below, we rely on RCU protection to
4054 	 * perform the SCHEDULING -> SCHEDULED attempt below.
4055 	 */
4056 	state = cmpxchg(&ctx->state, BPF_TW_SCHEDULING, BPF_TW_SCHEDULED);
4057 	if (state == BPF_TW_FREED)
4058 		bpf_task_work_cancel(ctx); /* clean up if we switched into FREED state */
4059 }
4060 
4061 static struct bpf_task_work_ctx *bpf_task_work_fetch_ctx(struct bpf_task_work *tw,
4062 							 struct bpf_map *map)
4063 {
4064 	struct bpf_task_work_kern *twk = (void *)tw;
4065 	struct bpf_task_work_ctx *ctx, *old_ctx;
4066 
4067 	ctx = READ_ONCE(twk->ctx);
4068 	if (ctx)
4069 		return ctx;
4070 
4071 	ctx = bpf_mem_alloc(&bpf_global_ma, sizeof(struct bpf_task_work_ctx));
4072 	if (!ctx)
4073 		return ERR_PTR(-ENOMEM);
4074 
4075 	memset(ctx, 0, sizeof(*ctx));
4076 	refcount_set(&ctx->refcnt, 1); /* map's own ref */
4077 	ctx->state = BPF_TW_STANDBY;
4078 
4079 	old_ctx = cmpxchg(&twk->ctx, NULL, ctx);
4080 	if (old_ctx) {
4081 		/*
4082 		 * tw->ctx was set by a concurrent BPF program; release the allocated
4083 		 * memory and reuse the already-set context.
4084 		 */
4085 		bpf_mem_free(&bpf_global_ma, ctx);
4086 		return old_ctx;
4087 	}
4088 
4089 	return ctx; /* Success */
4090 }
4091 
4092 static struct bpf_task_work_ctx *bpf_task_work_acquire_ctx(struct bpf_task_work *tw,
4093 							   struct bpf_map *map)
4094 {
4095 	struct bpf_task_work_ctx *ctx;
4096 
4097 	ctx = bpf_task_work_fetch_ctx(tw, map);
4098 	if (IS_ERR(ctx))
4099 		return ctx;
4100 
4101 	/* try to get ref for task_work callback to hold */
4102 	if (!bpf_task_work_ctx_tryget(ctx))
4103 		return ERR_PTR(-EBUSY);
4104 
4105 	if (cmpxchg(&ctx->state, BPF_TW_STANDBY, BPF_TW_PENDING) != BPF_TW_STANDBY) {
4106 		/* lost the acquiring race or map_release_uref() stole it from us; put ref and bail */
4107 		bpf_task_work_ctx_put(ctx);
4108 		return ERR_PTR(-EBUSY);
4109 	}
4110 
4111 	/*
4112 	 * If no process or bpffs is holding a reference to the map, no new callbacks should be
4113 	 * scheduled. This does not address any race or correctness issue, but rather is a policy
4114 	 * choice: dropping user references should stop everything.
4115 	 */
4116 	if (!atomic64_read(&map->usercnt)) {
4117 		/* drop ref we just got for task_work callback itself */
4118 		bpf_task_work_ctx_put(ctx);
4119 		/* transfer map's ref into cancel_and_free() */
4120 		bpf_task_work_cancel_and_free(tw);
4121 		return ERR_PTR(-EBUSY);
4122 	}
4123 
4124 	return ctx;
4125 }
4126 
4127 static int bpf_task_work_schedule(struct task_struct *task, struct bpf_task_work *tw,
4128 				  struct bpf_map *map, bpf_task_work_callback_t callback_fn,
4129 				  struct bpf_prog_aux *aux, enum task_work_notify_mode mode)
4130 {
4131 	struct bpf_prog *prog;
4132 	struct bpf_task_work_ctx *ctx;
4133 	int err;
4134 
4135 	BTF_TYPE_EMIT(struct bpf_task_work);
4136 
4137 	prog = bpf_prog_inc_not_zero(aux->prog);
4138 	if (IS_ERR(prog))
4139 		return -EBADF;
4140 	task = bpf_task_acquire(task);
4141 	if (!task) {
4142 		err = -EBADF;
4143 		goto release_prog;
4144 	}
4145 
4146 	ctx = bpf_task_work_acquire_ctx(tw, map);
4147 	if (IS_ERR(ctx)) {
4148 		err = PTR_ERR(ctx);
4149 		goto release_all;
4150 	}
4151 
4152 	ctx->task = task;
4153 	ctx->callback_fn = callback_fn;
4154 	ctx->prog = prog;
4155 	ctx->mode = mode;
4156 	ctx->map = map;
4157 	ctx->map_val = (void *)tw - map->record->task_work_off;
4158 	init_task_work(&ctx->work, bpf_task_work_callback);
4159 	init_irq_work(&ctx->irq_work, bpf_task_work_irq);
4160 
4161 	irq_work_queue(&ctx->irq_work);
4162 	return 0;
4163 
4164 release_all:
4165 	bpf_task_release(task);
4166 release_prog:
4167 	bpf_prog_put(prog);
4168 	return err;
4169 }
4170 
4171 /**
4172  * bpf_task_work_schedule_signal - Schedule BPF callback using task_work_add with TWA_SIGNAL mode
4173  * @task: Task struct for which callback should be scheduled
4174  * @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping
4175  * @map__map: bpf_map that embeds struct bpf_task_work in the values
4176  * @callback: pointer to BPF subprogram to call
4177  * @aux__prog: user should pass NULL
4178  *
4179  * Return: 0 if task work has been scheduled successfully, negative error code otherwise
4180  */
4181 __bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task, struct bpf_task_work *tw,
4182 					      void *map__map, bpf_task_work_callback_t callback,
4183 					      void *aux__prog)
4184 {
4185 	return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_SIGNAL);
4186 }
4187 
4188 /**
4189  * bpf_task_work_schedule_resume - Schedule BPF callback using task_work_add with TWA_RESUME mode
4190  * @task: Task struct for which callback should be scheduled
4191  * @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping
4192  * @map__map: bpf_map that embeds struct bpf_task_work in the values
4193  * @callback: pointer to BPF subprogram to call
4194  * @aux__prog: user should pass NULL
4195  *
4196  * Return: 0 if task work has been scheduled successfully, negative error code otherwise
4197  */
4198 __bpf_kfunc int bpf_task_work_schedule_resume(struct task_struct *task, struct bpf_task_work *tw,
4199 					      void *map__map, bpf_task_work_callback_t callback,
4200 					      void *aux__prog)
4201 {
4202 	return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_RESUME);
4203 }
4204 
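/*
 * Illustrative BPF-side usage (a sketch): the map value embeds a struct
 * bpf_task_work, and the program schedules a callback against the current
 * task. "my_map", "val" and tw_cb() are hypothetical.
 *
 *	struct elem { struct bpf_task_work tw; };
 *
 *	static int tw_cb(struct bpf_map *map, void *key, void *value)
 *	{
 *		// runs later, in the target task's context
 *		return 0;
 *	}
 *
 *	// with "val" pointing at a value of "my_map":
 *	bpf_task_work_schedule_signal(bpf_get_current_task_btf(), &val->tw,
 *				      &my_map, tw_cb, NULL);
 */
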
4205 __bpf_kfunc_end_defs();
4206 
4207 static void bpf_task_work_cancel_scheduled(struct irq_work *irq_work)
4208 {
4209 	struct bpf_task_work_ctx *ctx = container_of(irq_work, struct bpf_task_work_ctx, irq_work);
4210 
4211 	bpf_task_work_cancel(ctx); /* this might put task_work callback's ref */
4212 	bpf_task_work_ctx_put(ctx); /* and here we put map's own ref that was transferred to us */
4213 }
4214 
4215 void bpf_task_work_cancel_and_free(void *val)
4216 {
4217 	struct bpf_task_work_kern *twk = val;
4218 	struct bpf_task_work_ctx *ctx;
4219 	enum bpf_task_work_state state;
4220 
4221 	ctx = xchg(&twk->ctx, NULL);
4222 	if (!ctx)
4223 		return;
4224 
4225 	state = xchg(&ctx->state, BPF_TW_FREED);
4226 	if (state == BPF_TW_SCHEDULED) {
4227 		/* run in irq_work to avoid locks in NMI */
4228 		init_irq_work(&ctx->irq_work, bpf_task_work_cancel_scheduled);
4229 		irq_work_queue(&ctx->irq_work);
4230 		return;
4231 	}
4232 
4233 	bpf_task_work_ctx_put(ctx); /* put bpf map's ref */
4234 }
4235 
4236 BTF_KFUNCS_START(generic_btf_ids)
4237 #ifdef CONFIG_CRASH_DUMP
4238 BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
4239 #endif
4240 BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
4241 BTF_ID_FLAGS(func, bpf_percpu_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
4242 BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE)
4243 BTF_ID_FLAGS(func, bpf_percpu_obj_drop_impl, KF_RELEASE)
4244 BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL | KF_RCU)
4245 BTF_ID_FLAGS(func, bpf_list_push_front_impl)
4246 BTF_ID_FLAGS(func, bpf_list_push_back_impl)
4247 BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL)
4248 BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
4249 BTF_ID_FLAGS(func, bpf_list_front, KF_RET_NULL)
4250 BTF_ID_FLAGS(func, bpf_list_back, KF_RET_NULL)
4251 BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
4252 BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
4253 BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL)
4254 BTF_ID_FLAGS(func, bpf_rbtree_add_impl)
4255 BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL)
4256 BTF_ID_FLAGS(func, bpf_rbtree_root, KF_RET_NULL)
4257 BTF_ID_FLAGS(func, bpf_rbtree_left, KF_RET_NULL)
4258 BTF_ID_FLAGS(func, bpf_rbtree_right, KF_RET_NULL)
4259 
4260 #ifdef CONFIG_CGROUPS
4261 BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
4262 BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE)
4263 BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
4264 BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL)
4265 BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU)
4266 BTF_ID_FLAGS(func, bpf_task_get_cgroup1, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
4267 #endif
4268 BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
4269 BTF_ID_FLAGS(func, bpf_task_from_vpid, KF_ACQUIRE | KF_RET_NULL)
4270 BTF_ID_FLAGS(func, bpf_throw)
4271 #ifdef CONFIG_BPF_EVENTS
4272 BTF_ID_FLAGS(func, bpf_send_signal_task, KF_TRUSTED_ARGS)
4273 #endif
4274 #ifdef CONFIG_KEYS
4275 BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
4276 BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
4277 BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
4278 #ifdef CONFIG_SYSTEM_DATA_VERIFICATION
4279 BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
4280 #endif
4281 #endif
4282 BTF_KFUNCS_END(generic_btf_ids)
4283 
4284 static const struct btf_kfunc_id_set generic_kfunc_set = {
4285 	.owner = THIS_MODULE,
4286 	.set   = &generic_btf_ids,
4287 };
4288 
4289 
4290 BTF_ID_LIST(generic_dtor_ids)
4291 BTF_ID(struct, task_struct)
4292 BTF_ID(func, bpf_task_release_dtor)
4293 #ifdef CONFIG_CGROUPS
4294 BTF_ID(struct, cgroup)
4295 BTF_ID(func, bpf_cgroup_release_dtor)
4296 #endif
4297 
4298 BTF_KFUNCS_START(common_btf_ids)
4299 BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx, KF_FASTCALL)
4300 BTF_ID_FLAGS(func, bpf_rdonly_cast, KF_FASTCALL)
4301 BTF_ID_FLAGS(func, bpf_rcu_read_lock)
4302 BTF_ID_FLAGS(func, bpf_rcu_read_unlock)
4303 BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL)
4304 BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL)
4305 BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW)
4306 BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL)
4307 BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY)
4308 BTF_ID_FLAGS(func, bpf_iter_task_vma_new, KF_ITER_NEW | KF_RCU)
4309 BTF_ID_FLAGS(func, bpf_iter_task_vma_next, KF_ITER_NEXT | KF_RET_NULL)
4310 BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY)
4311 #ifdef CONFIG_CGROUPS
4312 BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS)
4313 BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL)
4314 BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY)
4315 BTF_ID_FLAGS(func, bpf_iter_css_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
4316 BTF_ID_FLAGS(func, bpf_iter_css_next, KF_ITER_NEXT | KF_RET_NULL)
4317 BTF_ID_FLAGS(func, bpf_iter_css_destroy, KF_ITER_DESTROY)
4318 #endif
4319 BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
4320 BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL)
4321 BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY)
4322 BTF_ID_FLAGS(func, bpf_dynptr_adjust)
4323 BTF_ID_FLAGS(func, bpf_dynptr_is_null)
4324 BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
4325 BTF_ID_FLAGS(func, bpf_dynptr_size)
4326 BTF_ID_FLAGS(func, bpf_dynptr_clone)
4327 BTF_ID_FLAGS(func, bpf_dynptr_copy)
4328 BTF_ID_FLAGS(func, bpf_dynptr_memset)
4329 #ifdef CONFIG_NET
4330 BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
4331 #endif
4332 BTF_ID_FLAGS(func, bpf_wq_init)
4333 BTF_ID_FLAGS(func, bpf_wq_set_callback_impl)
4334 BTF_ID_FLAGS(func, bpf_wq_start)
4335 BTF_ID_FLAGS(func, bpf_preempt_disable)
4336 BTF_ID_FLAGS(func, bpf_preempt_enable)
4337 BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW)
4338 BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL)
4339 BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY)
4340 BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE)
4341 BTF_ID_FLAGS(func, bpf_copy_from_user_task_str, KF_SLEEPABLE)
4342 BTF_ID_FLAGS(func, bpf_get_kmem_cache)
4343 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_new, KF_ITER_NEW | KF_SLEEPABLE)
4344 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
4345 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)
4346 BTF_ID_FLAGS(func, bpf_local_irq_save)
4347 BTF_ID_FLAGS(func, bpf_local_irq_restore)
4348 BTF_ID_FLAGS(func, bpf_probe_read_user_dynptr)
4349 BTF_ID_FLAGS(func, bpf_probe_read_kernel_dynptr)
4350 BTF_ID_FLAGS(func, bpf_probe_read_user_str_dynptr)
4351 BTF_ID_FLAGS(func, bpf_probe_read_kernel_str_dynptr)
4352 BTF_ID_FLAGS(func, bpf_copy_from_user_dynptr, KF_SLEEPABLE)
4353 BTF_ID_FLAGS(func, bpf_copy_from_user_str_dynptr, KF_SLEEPABLE)
4354 BTF_ID_FLAGS(func, bpf_copy_from_user_task_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
4355 BTF_ID_FLAGS(func, bpf_copy_from_user_task_str_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
4356 #ifdef CONFIG_DMA_SHARED_BUFFER
4357 BTF_ID_FLAGS(func, bpf_iter_dmabuf_new, KF_ITER_NEW | KF_SLEEPABLE)
4358 BTF_ID_FLAGS(func, bpf_iter_dmabuf_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
4359 BTF_ID_FLAGS(func, bpf_iter_dmabuf_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)
4360 #endif
4361 BTF_ID_FLAGS(func, __bpf_trap)
4362 BTF_ID_FLAGS(func, bpf_strcmp);
4363 BTF_ID_FLAGS(func, bpf_strcasecmp);
4364 BTF_ID_FLAGS(func, bpf_strchr);
4365 BTF_ID_FLAGS(func, bpf_strchrnul);
4366 BTF_ID_FLAGS(func, bpf_strnchr);
4367 BTF_ID_FLAGS(func, bpf_strrchr);
4368 BTF_ID_FLAGS(func, bpf_strlen);
4369 BTF_ID_FLAGS(func, bpf_strnlen);
4370 BTF_ID_FLAGS(func, bpf_strspn);
4371 BTF_ID_FLAGS(func, bpf_strcspn);
4372 BTF_ID_FLAGS(func, bpf_strstr);
4373 BTF_ID_FLAGS(func, bpf_strnstr);
4374 #if defined(CONFIG_BPF_LSM) && defined(CONFIG_CGROUPS)
4375 BTF_ID_FLAGS(func, bpf_cgroup_read_xattr, KF_RCU)
4376 #endif
4377 BTF_ID_FLAGS(func, bpf_stream_vprintk, KF_TRUSTED_ARGS)
4378 BTF_ID_FLAGS(func, bpf_task_work_schedule_signal, KF_TRUSTED_ARGS)
4379 BTF_ID_FLAGS(func, bpf_task_work_schedule_resume, KF_TRUSTED_ARGS)
4380 BTF_KFUNCS_END(common_btf_ids)
4381 
4382 static const struct btf_kfunc_id_set common_kfunc_set = {
4383 	.owner = THIS_MODULE,
4384 	.set   = &common_btf_ids,
4385 };
4386 
4387 static int __init kfunc_init(void)
4388 {
4389 	int ret;
4390 	const struct btf_id_dtor_kfunc generic_dtors[] = {
4391 		{
4392 			.btf_id       = generic_dtor_ids[0],
4393 			.kfunc_btf_id = generic_dtor_ids[1]
4394 		},
4395 #ifdef CONFIG_CGROUPS
4396 		{
4397 			.btf_id       = generic_dtor_ids[2],
4398 			.kfunc_btf_id = generic_dtor_ids[3]
4399 		},
4400 #endif
4401 	};
4402 
4403 	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set);
4404 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set);
4405 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &generic_kfunc_set);
4406 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set);
4407 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &generic_kfunc_set);
4408 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SKB, &generic_kfunc_set);
4409 	ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors,
4410 						  ARRAY_SIZE(generic_dtors),
4411 						  THIS_MODULE);
4412 	return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set);
4413 }
4414 
4415 late_initcall(kfunc_init);
4416 
4417 /* Get a pointer to dynptr data up to len bytes for read only access. If
4418  * the dynptr doesn't have contiguous data up to len bytes, return NULL.
4419  */
4420 const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len)
4421 {
4422 	const struct bpf_dynptr *p = (struct bpf_dynptr *)ptr;
4423 
4424 	return bpf_dynptr_slice(p, 0, NULL, len);
4425 }
4426 
4427 /* Get a pointer to dynptr data up to len bytes for read write access. If
4428  * the dynptr doesn't have contiguous data up to len bytes, or the dynptr
4429  * is read only, return NULL.
4430  */
4431 void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len)
4432 {
4433 	if (__bpf_dynptr_is_rdonly(ptr))
4434 		return NULL;
4435 	return (void *)__bpf_dynptr_data(ptr, len);
4436 }
4437