xref: /linux/kernel/bpf/helpers.c (revision 8b4e023d79b760d217dd1c462848c4a27fcc7677)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  */
4 #include <linux/bpf.h>
5 #include <linux/btf.h>
6 #include <linux/bpf-cgroup.h>
7 #include <linux/cgroup.h>
8 #include <linux/rcupdate.h>
9 #include <linux/random.h>
10 #include <linux/smp.h>
11 #include <linux/topology.h>
12 #include <linux/ktime.h>
13 #include <linux/sched.h>
14 #include <linux/uidgid.h>
15 #include <linux/filter.h>
16 #include <linux/ctype.h>
17 #include <linux/jiffies.h>
18 #include <linux/pid_namespace.h>
19 #include <linux/poison.h>
20 #include <linux/proc_ns.h>
21 #include <linux/sched/task.h>
22 #include <linux/security.h>
23 #include <linux/btf_ids.h>
24 #include <linux/bpf_mem_alloc.h>
25 #include <linux/kasan.h>
26 #include <linux/bpf_verifier.h>
27 #include <linux/uaccess.h>
28 #include <linux/verification.h>
29 #include <linux/task_work.h>
30 #include <linux/irq_work.h>
31 
32 #include "../../lib/kstrtox.h"
33 
34 /* If a kernel subsystem allows eBPF programs to call this function, it should
35  * return bpf_map_lookup_elem_proto from its own verifier_ops->get_func_proto()
36  * callback, so that the verifier can properly check the arguments.
37  *
38  * Different map implementations rely on rcu in their map methods
39  * lookup/update/delete, therefore eBPF programs must run under an rcu lock
40  * if the program is allowed to access maps, so check rcu_read_lock_held() or
41  * rcu_read_lock_trace_held() in all three functions.
42  */
43 BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
44 {
45 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
46 		     !rcu_read_lock_bh_held());
47 	return (unsigned long) map->ops->map_lookup_elem(map, key);
48 }
49 
50 const struct bpf_func_proto bpf_map_lookup_elem_proto = {
51 	.func		= bpf_map_lookup_elem,
52 	.gpl_only	= false,
53 	.pkt_access	= true,
54 	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
55 	.arg1_type	= ARG_CONST_MAP_PTR,
56 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
57 };
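
/* Illustrative sketch (not part of this file): on the BPF program side this
 * helper is reached through the libbpf wrapper of the same name. Assuming a
 * hypothetical hash map "my_map" declared in the program:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 128);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} my_map SEC(".maps");
 *
 *	__u32 key = 0;
 *	__u64 *val = bpf_map_lookup_elem(&my_map, &key);
 *	if (val)	// may be NULL; the verifier enforces this check
 *		__sync_fetch_and_add(val, 1);
 */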
58 
59 BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
60 	   void *, value, u64, flags)
61 {
62 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
63 		     !rcu_read_lock_bh_held());
64 	return map->ops->map_update_elem(map, key, value, flags);
65 }
66 
67 const struct bpf_func_proto bpf_map_update_elem_proto = {
68 	.func		= bpf_map_update_elem,
69 	.gpl_only	= false,
70 	.pkt_access	= true,
71 	.ret_type	= RET_INTEGER,
72 	.arg1_type	= ARG_CONST_MAP_PTR,
73 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
74 	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
75 	.arg4_type	= ARG_ANYTHING,
76 };
77 
78 BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
79 {
80 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
81 		     !rcu_read_lock_bh_held());
82 	return map->ops->map_delete_elem(map, key);
83 }
84 
85 const struct bpf_func_proto bpf_map_delete_elem_proto = {
86 	.func		= bpf_map_delete_elem,
87 	.gpl_only	= false,
88 	.pkt_access	= true,
89 	.ret_type	= RET_INTEGER,
90 	.arg1_type	= ARG_CONST_MAP_PTR,
91 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
92 };
93 
94 BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
95 {
96 	return map->ops->map_push_elem(map, value, flags);
97 }
98 
99 const struct bpf_func_proto bpf_map_push_elem_proto = {
100 	.func		= bpf_map_push_elem,
101 	.gpl_only	= false,
102 	.pkt_access	= true,
103 	.ret_type	= RET_INTEGER,
104 	.arg1_type	= ARG_CONST_MAP_PTR,
105 	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
106 	.arg3_type	= ARG_ANYTHING,
107 };
108 
109 BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
110 {
111 	return map->ops->map_pop_elem(map, value);
112 }
113 
114 const struct bpf_func_proto bpf_map_pop_elem_proto = {
115 	.func		= bpf_map_pop_elem,
116 	.gpl_only	= false,
117 	.ret_type	= RET_INTEGER,
118 	.arg1_type	= ARG_CONST_MAP_PTR,
119 	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
120 };
121 
122 BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
123 {
124 	return map->ops->map_peek_elem(map, value);
125 }
126 
127 const struct bpf_func_proto bpf_map_peek_elem_proto = {
128 	.func		= bpf_map_peek_elem,
129 	.gpl_only	= false,
130 	.ret_type	= RET_INTEGER,
131 	.arg1_type	= ARG_CONST_MAP_PTR,
132 	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
133 };
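
/* Illustrative sketch (not part of this file): push/pop/peek serve the
 * keyless BPF_MAP_TYPE_QUEUE and BPF_MAP_TYPE_STACK. Assuming a hypothetical
 * queue map "my_queue" with __u64 values:
 *
 *	__u64 in = 42, out;
 *
 *	bpf_map_push_elem(&my_queue, &in, 0);	// enqueue; BPF_EXIST overwrites when full
 *	bpf_map_peek_elem(&my_queue, &out);	// copy head without removing it
 *	bpf_map_pop_elem(&my_queue, &out);	// copy head and remove it
 */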
134 
135 BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
136 {
137 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
138 		     !rcu_read_lock_bh_held());
139 	return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
140 }
141 
142 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = {
143 	.func		= bpf_map_lookup_percpu_elem,
144 	.gpl_only	= false,
145 	.pkt_access	= true,
146 	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
147 	.arg1_type	= ARG_CONST_MAP_PTR,
148 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
149 	.arg3_type	= ARG_ANYTHING,
150 };
151 
152 const struct bpf_func_proto bpf_get_prandom_u32_proto = {
153 	.func		= bpf_user_rnd_u32,
154 	.gpl_only	= false,
155 	.ret_type	= RET_INTEGER,
156 };
157 
158 BPF_CALL_0(bpf_get_smp_processor_id)
159 {
160 	return smp_processor_id();
161 }
162 
163 const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
164 	.func		= bpf_get_smp_processor_id,
165 	.gpl_only	= false,
166 	.ret_type	= RET_INTEGER,
167 	.allow_fastcall	= true,
168 };
169 
170 BPF_CALL_0(bpf_get_numa_node_id)
171 {
172 	return numa_node_id();
173 }
174 
175 const struct bpf_func_proto bpf_get_numa_node_id_proto = {
176 	.func		= bpf_get_numa_node_id,
177 	.gpl_only	= false,
178 	.ret_type	= RET_INTEGER,
179 };
180 
181 BPF_CALL_0(bpf_ktime_get_ns)
182 {
183 	/* NMI safe access to clock monotonic */
184 	return ktime_get_mono_fast_ns();
185 }
186 
187 const struct bpf_func_proto bpf_ktime_get_ns_proto = {
188 	.func		= bpf_ktime_get_ns,
189 	.gpl_only	= false,
190 	.ret_type	= RET_INTEGER,
191 };
192 
193 BPF_CALL_0(bpf_ktime_get_boot_ns)
194 {
195 	/* NMI safe access to clock boottime */
196 	return ktime_get_boot_fast_ns();
197 }
198 
199 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
200 	.func		= bpf_ktime_get_boot_ns,
201 	.gpl_only	= false,
202 	.ret_type	= RET_INTEGER,
203 };
204 
205 BPF_CALL_0(bpf_ktime_get_coarse_ns)
206 {
207 	return ktime_get_coarse_ns();
208 }
209 
210 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
211 	.func		= bpf_ktime_get_coarse_ns,
212 	.gpl_only	= false,
213 	.ret_type	= RET_INTEGER,
214 };
215 
216 BPF_CALL_0(bpf_ktime_get_tai_ns)
217 {
218 	/* NMI safe access to clock tai */
219 	return ktime_get_tai_fast_ns();
220 }
221 
222 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto = {
223 	.func		= bpf_ktime_get_tai_ns,
224 	.gpl_only	= false,
225 	.ret_type	= RET_INTEGER,
226 };
227 
228 BPF_CALL_0(bpf_get_current_pid_tgid)
229 {
230 	struct task_struct *task = current;
231 
232 	if (unlikely(!task))
233 		return -EINVAL;
234 
235 	return (u64) task->tgid << 32 | task->pid;
236 }
237 
238 const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
239 	.func		= bpf_get_current_pid_tgid,
240 	.gpl_only	= false,
241 	.ret_type	= RET_INTEGER,
242 };
243 
244 BPF_CALL_0(bpf_get_current_uid_gid)
245 {
246 	struct task_struct *task = current;
247 	kuid_t uid;
248 	kgid_t gid;
249 
250 	if (unlikely(!task))
251 		return -EINVAL;
252 
253 	current_uid_gid(&uid, &gid);
254 	return (u64) from_kgid(&init_user_ns, gid) << 32 |
255 		     from_kuid(&init_user_ns, uid);
256 }
257 
258 const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
259 	.func		= bpf_get_current_uid_gid,
260 	.gpl_only	= false,
261 	.ret_type	= RET_INTEGER,
262 };
263 
264 BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
265 {
266 	struct task_struct *task = current;
267 
268 	if (unlikely(!task))
269 		goto err_clear;
270 
271 	/* Verifier guarantees that size > 0 */
272 	strscpy_pad(buf, task->comm, size);
273 	return 0;
274 err_clear:
275 	memset(buf, 0, size);
276 	return -EINVAL;
277 }
278 
279 const struct bpf_func_proto bpf_get_current_comm_proto = {
280 	.func		= bpf_get_current_comm,
281 	.gpl_only	= false,
282 	.ret_type	= RET_INTEGER,
283 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
284 	.arg2_type	= ARG_CONST_SIZE,
285 };
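
/* Illustrative sketch (not part of this file): the packed u64 return values
 * above are unpacked by shifting and truncating on the BPF side:
 *
 *	__u64 pid_tgid = bpf_get_current_pid_tgid();
 *	__u32 tgid = pid_tgid >> 32;	// what user space calls the PID
 *	__u32 pid = (__u32)pid_tgid;	// what user space calls the TID
 *
 *	__u64 uid_gid = bpf_get_current_uid_gid();
 *	__u32 gid = uid_gid >> 32;
 *	__u32 uid = (__u32)uid_gid;
 *
 *	char comm[16];			// TASK_COMM_LEN
 *	bpf_get_current_comm(comm, sizeof(comm));
 */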
286 
287 #if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)
288 
289 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
290 {
291 	arch_spinlock_t *l = (void *)lock;
292 	union {
293 		__u32 val;
294 		arch_spinlock_t lock;
295 	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };
296 
297 	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
298 	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
299 	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
300 	preempt_disable();
301 	arch_spin_lock(l);
302 }
303 
304 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
305 {
306 	arch_spinlock_t *l = (void *)lock;
307 
308 	arch_spin_unlock(l);
309 	preempt_enable();
310 }
311 
312 #else
313 
314 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
315 {
316 	atomic_t *l = (void *)lock;
317 
318 	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
319 	do {
320 		atomic_cond_read_relaxed(l, !VAL);
321 	} while (atomic_xchg(l, 1));
322 }
323 
324 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
325 {
326 	atomic_t *l = (void *)lock;
327 
328 	atomic_set_release(l, 0);
329 }
330 
331 #endif
332 
333 static DEFINE_PER_CPU(unsigned long, irqsave_flags);
334 
335 static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
336 {
337 	unsigned long flags;
338 
339 	local_irq_save(flags);
340 	__bpf_spin_lock(lock);
341 	__this_cpu_write(irqsave_flags, flags);
342 }
343 
344 NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
345 {
346 	__bpf_spin_lock_irqsave(lock);
347 	return 0;
348 }
349 
350 const struct bpf_func_proto bpf_spin_lock_proto = {
351 	.func		= bpf_spin_lock,
352 	.gpl_only	= false,
353 	.ret_type	= RET_VOID,
354 	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
355 	.arg1_btf_id    = BPF_PTR_POISON,
356 };
357 
358 static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
359 {
360 	unsigned long flags;
361 
362 	flags = __this_cpu_read(irqsave_flags);
363 	__bpf_spin_unlock(lock);
364 	local_irq_restore(flags);
365 }
366 
367 NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
368 {
369 	__bpf_spin_unlock_irqrestore(lock);
370 	return 0;
371 }
372 
373 const struct bpf_func_proto bpf_spin_unlock_proto = {
374 	.func		= bpf_spin_unlock,
375 	.gpl_only	= false,
376 	.ret_type	= RET_VOID,
377 	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
378 	.arg1_btf_id    = BPF_PTR_POISON,
379 };
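
/* Illustrative sketch (not part of this file): a bpf_spin_lock is embedded in
 * a map value and protects the fields next to it. Assuming a hypothetical
 * value type and map:
 *
 *	struct val {
 *		struct bpf_spin_lock lock;
 *		__u64 counter;
 *	};
 *
 *	struct val *v = bpf_map_lookup_elem(&my_map, &key);
 *	if (v) {
 *		bpf_spin_lock(&v->lock);
 *		v->counter++;		// short critical section, no helper calls
 *		bpf_spin_unlock(&v->lock);
 *	}
 */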
380 
381 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
382 			   bool lock_src)
383 {
384 	struct bpf_spin_lock *lock;
385 
386 	if (lock_src)
387 		lock = src + map->record->spin_lock_off;
388 	else
389 		lock = dst + map->record->spin_lock_off;
390 	preempt_disable();
391 	__bpf_spin_lock_irqsave(lock);
392 	copy_map_value(map, dst, src);
393 	__bpf_spin_unlock_irqrestore(lock);
394 	preempt_enable();
395 }
396 
397 BPF_CALL_0(bpf_jiffies64)
398 {
399 	return get_jiffies_64();
400 }
401 
402 const struct bpf_func_proto bpf_jiffies64_proto = {
403 	.func		= bpf_jiffies64,
404 	.gpl_only	= false,
405 	.ret_type	= RET_INTEGER,
406 };
407 
408 #ifdef CONFIG_CGROUPS
409 BPF_CALL_0(bpf_get_current_cgroup_id)
410 {
411 	struct cgroup *cgrp;
412 	u64 cgrp_id;
413 
414 	rcu_read_lock();
415 	cgrp = task_dfl_cgroup(current);
416 	cgrp_id = cgroup_id(cgrp);
417 	rcu_read_unlock();
418 
419 	return cgrp_id;
420 }
421 
422 const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
423 	.func		= bpf_get_current_cgroup_id,
424 	.gpl_only	= false,
425 	.ret_type	= RET_INTEGER,
426 };
427 
428 BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
429 {
430 	struct cgroup *cgrp;
431 	struct cgroup *ancestor;
432 	u64 cgrp_id;
433 
434 	rcu_read_lock();
435 	cgrp = task_dfl_cgroup(current);
436 	ancestor = cgroup_ancestor(cgrp, ancestor_level);
437 	cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
438 	rcu_read_unlock();
439 
440 	return cgrp_id;
441 }
442 
443 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
444 	.func		= bpf_get_current_ancestor_cgroup_id,
445 	.gpl_only	= false,
446 	.ret_type	= RET_INTEGER,
447 	.arg1_type	= ARG_ANYTHING,
448 };
449 #endif /* CONFIG_CGROUPS */
450 
451 #define BPF_STRTOX_BASE_MASK 0x1F
452 
453 static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
454 			  unsigned long long *res, bool *is_negative)
455 {
456 	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
457 	const char *cur_buf = buf;
458 	size_t cur_len = buf_len;
459 	unsigned int consumed;
460 	size_t val_len;
461 	char str[64];
462 
463 	if (!buf || !buf_len || !res || !is_negative)
464 		return -EINVAL;
465 
466 	if (base != 0 && base != 8 && base != 10 && base != 16)
467 		return -EINVAL;
468 
469 	if (flags & ~BPF_STRTOX_BASE_MASK)
470 		return -EINVAL;
471 
472 	while (cur_buf < buf + buf_len && isspace(*cur_buf))
473 		++cur_buf;
474 
475 	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
476 	if (*is_negative)
477 		++cur_buf;
478 
479 	consumed = cur_buf - buf;
480 	cur_len -= consumed;
481 	if (!cur_len)
482 		return -EINVAL;
483 
484 	cur_len = min(cur_len, sizeof(str) - 1);
485 	memcpy(str, cur_buf, cur_len);
486 	str[cur_len] = '\0';
487 	cur_buf = str;
488 
489 	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
490 	val_len = _parse_integer(cur_buf, base, res);
491 
492 	if (val_len & KSTRTOX_OVERFLOW)
493 		return -ERANGE;
494 
495 	if (val_len == 0)
496 		return -EINVAL;
497 
498 	cur_buf += val_len;
499 	consumed += cur_buf - str;
500 
501 	return consumed;
502 }
503 
504 static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
505 			 long long *res)
506 {
507 	unsigned long long _res;
508 	bool is_negative;
509 	int err;
510 
511 	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
512 	if (err < 0)
513 		return err;
514 	if (is_negative) {
515 		if ((long long)-_res > 0)
516 			return -ERANGE;
517 		*res = -_res;
518 	} else {
519 		if ((long long)_res < 0)
520 			return -ERANGE;
521 		*res = _res;
522 	}
523 	return err;
524 }
525 
526 BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
527 	   s64 *, res)
528 {
529 	long long _res;
530 	int err;
531 
532 	*res = 0;
533 	err = __bpf_strtoll(buf, buf_len, flags, &_res);
534 	if (err < 0)
535 		return err;
536 	*res = _res;
537 	return err;
538 }
539 
540 const struct bpf_func_proto bpf_strtol_proto = {
541 	.func		= bpf_strtol,
542 	.gpl_only	= false,
543 	.ret_type	= RET_INTEGER,
544 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
545 	.arg2_type	= ARG_CONST_SIZE,
546 	.arg3_type	= ARG_ANYTHING,
547 	.arg4_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
548 	.arg4_size	= sizeof(s64),
549 };
550 
551 BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
552 	   u64 *, res)
553 {
554 	unsigned long long _res;
555 	bool is_negative;
556 	int err;
557 
558 	*res = 0;
559 	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
560 	if (err < 0)
561 		return err;
562 	if (is_negative)
563 		return -EINVAL;
564 	*res = _res;
565 	return err;
566 }
567 
568 const struct bpf_func_proto bpf_strtoul_proto = {
569 	.func		= bpf_strtoul,
570 	.gpl_only	= false,
571 	.ret_type	= RET_INTEGER,
572 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
573 	.arg2_type	= ARG_CONST_SIZE,
574 	.arg3_type	= ARG_ANYTHING,
575 	.arg4_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
576 	.arg4_size	= sizeof(u64),
577 };
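
/* Illustrative sketch (not part of this file): the return value of these
 * helpers is the number of characters consumed, with the parsed value stored
 * through the last argument. Base 0 in flags selects auto-detection
 * (0x... hex, 0... octal, decimal otherwise):
 *
 *	const char buf[] = "  -42";
 *	long res;
 *	int n = bpf_strtol(buf, sizeof(buf) - 1, 0, &res);
 *	// n == 5 (whitespace, sign and digits consumed), res == -42
 */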
578 
579 BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
580 {
581 	return strncmp(s1, s2, s1_sz);
582 }
583 
584 static const struct bpf_func_proto bpf_strncmp_proto = {
585 	.func		= bpf_strncmp,
586 	.gpl_only	= false,
587 	.ret_type	= RET_INTEGER,
588 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
589 	.arg2_type	= ARG_CONST_SIZE,
590 	.arg3_type	= ARG_PTR_TO_CONST_STR,
591 };
592 
593 BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
594 	   struct bpf_pidns_info *, nsdata, u32, size)
595 {
596 	struct task_struct *task = current;
597 	struct pid_namespace *pidns;
598 	int err = -EINVAL;
599 
600 	if (unlikely(size != sizeof(struct bpf_pidns_info)))
601 		goto clear;
602 
603 	if (unlikely((u64)(dev_t)dev != dev))
604 		goto clear;
605 
606 	if (unlikely(!task))
607 		goto clear;
608 
609 	pidns = task_active_pid_ns(task);
610 	if (unlikely(!pidns)) {
611 		err = -ENOENT;
612 		goto clear;
613 	}
614 
615 	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
616 		goto clear;
617 
618 	nsdata->pid = task_pid_nr_ns(task, pidns);
619 	nsdata->tgid = task_tgid_nr_ns(task, pidns);
620 	return 0;
621 clear:
622 	memset((void *)nsdata, 0, (size_t) size);
623 	return err;
624 }
625 
626 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
627 	.func		= bpf_get_ns_current_pid_tgid,
628 	.gpl_only	= false,
629 	.ret_type	= RET_INTEGER,
630 	.arg1_type	= ARG_ANYTHING,
631 	.arg2_type	= ARG_ANYTHING,
632 	.arg3_type      = ARG_PTR_TO_UNINIT_MEM,
633 	.arg4_type      = ARG_CONST_SIZE,
634 };
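
/* Illustrative sketch (not part of this file): dev and ino identify the pid
 * namespace the caller expects; user space typically obtains them by
 * stat()-ing /proc/self/ns/pid and hands them to the program:
 *
 *	struct bpf_pidns_info ns;
 *	if (!bpf_get_ns_current_pid_tgid(dev, ino, &ns, sizeof(ns)))
 *		;	// ns.pid and ns.tgid are relative to that namespace
 */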
635 
636 static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
637 	.func		= bpf_get_raw_cpu_id,
638 	.gpl_only	= false,
639 	.ret_type	= RET_INTEGER,
640 };
641 
642 BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
643 	   u64, flags, void *, data, u64, size)
644 {
645 	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
646 		return -EINVAL;
647 
648 	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
649 }
650 
651 const struct bpf_func_proto bpf_event_output_data_proto =  {
652 	.func		= bpf_event_output_data,
653 	.gpl_only       = true,
654 	.ret_type       = RET_INTEGER,
655 	.arg1_type      = ARG_PTR_TO_CTX,
656 	.arg2_type      = ARG_CONST_MAP_PTR,
657 	.arg3_type      = ARG_ANYTHING,
658 	.arg4_type      = ARG_PTR_TO_MEM | MEM_RDONLY,
659 	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
660 };
661 
662 BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
663 	   const void __user *, user_ptr)
664 {
665 	int ret = copy_from_user(dst, user_ptr, size);
666 
667 	if (unlikely(ret)) {
668 		memset(dst, 0, size);
669 		ret = -EFAULT;
670 	}
671 
672 	return ret;
673 }
674 
675 const struct bpf_func_proto bpf_copy_from_user_proto = {
676 	.func		= bpf_copy_from_user,
677 	.gpl_only	= false,
678 	.might_sleep	= true,
679 	.ret_type	= RET_INTEGER,
680 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
681 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
682 	.arg3_type	= ARG_ANYTHING,
683 };
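
/* Illustrative sketch (not part of this file): because copy_from_user() can
 * fault and sleep, only sleepable programs may use this helper:
 *
 *	char buf[64];
 *	if (bpf_copy_from_user(buf, sizeof(buf), user_ptr) == 0)
 *		;	// buf holds user data; on -EFAULT it is zeroed instead
 */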
684 
685 BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size,
686 	   const void __user *, user_ptr, struct task_struct *, tsk, u64, flags)
687 {
688 	int ret;
689 
690 	/* flags is not used yet */
691 	if (unlikely(flags))
692 		return -EINVAL;
693 
694 	if (unlikely(!size))
695 		return 0;
696 
697 	ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0);
698 	if (ret == size)
699 		return 0;
700 
701 	memset(dst, 0, size);
702 	/* Return -EFAULT for partial read */
703 	return ret < 0 ? ret : -EFAULT;
704 }
705 
706 const struct bpf_func_proto bpf_copy_from_user_task_proto = {
707 	.func		= bpf_copy_from_user_task,
708 	.gpl_only	= true,
709 	.might_sleep	= true,
710 	.ret_type	= RET_INTEGER,
711 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
712 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
713 	.arg3_type	= ARG_ANYTHING,
714 	.arg4_type	= ARG_PTR_TO_BTF_ID,
715 	.arg4_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
716 	.arg5_type	= ARG_ANYTHING
717 };
718 
719 BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
720 {
721 	if (cpu >= nr_cpu_ids)
722 		return (unsigned long)NULL;
723 
724 	return (unsigned long)per_cpu_ptr((const void __percpu *)(const uintptr_t)ptr, cpu);
725 }
726 
727 const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
728 	.func		= bpf_per_cpu_ptr,
729 	.gpl_only	= false,
730 	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
731 	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
732 	.arg2_type	= ARG_ANYTHING,
733 };
734 
735 BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
736 {
737 	return (unsigned long)this_cpu_ptr((const void __percpu *)(const uintptr_t)percpu_ptr);
738 }
739 
740 const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
741 	.func		= bpf_this_cpu_ptr,
742 	.gpl_only	= false,
743 	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
744 	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
745 };
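
/* Illustrative sketch (not part of this file): these helpers turn a percpu
 * kernel symbol, declared in the program via BTF, into a plain pointer.
 * Taking the kernel's percpu "runqueues" variable as the example symbol:
 *
 *	extern const struct rq runqueues __ksym;
 *
 *	const struct rq *rq = bpf_per_cpu_ptr(&runqueues, 0);	// cpu 0, may be NULL
 *	const struct rq *cur = bpf_this_cpu_ptr(&runqueues);	// this cpu, never NULL
 */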
746 
747 static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
748 		size_t bufsz)
749 {
750 	void __user *user_ptr = (__force void __user *)unsafe_ptr;
751 
752 	buf[0] = 0;
753 
754 	switch (fmt_ptype) {
755 	case 's':
756 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
757 		if ((unsigned long)unsafe_ptr < TASK_SIZE)
758 			return strncpy_from_user_nofault(buf, user_ptr, bufsz);
759 		fallthrough;
760 #endif
761 	case 'k':
762 		return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
763 	case 'u':
764 		return strncpy_from_user_nofault(buf, user_ptr, bufsz);
765 	}
766 
767 	return -EINVAL;
768 }
769 
770 /* Support executing three nested bprintf helper calls on a given CPU */
771 #define MAX_BPRINTF_NEST_LEVEL	3
772 
773 static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs);
774 static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);
775 
776 int bpf_try_get_buffers(struct bpf_bprintf_buffers **bufs)
777 {
778 	int nest_level;
779 
780 	nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
781 	if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
782 		this_cpu_dec(bpf_bprintf_nest_level);
783 		return -EBUSY;
784 	}
785 	*bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]);
786 
787 	return 0;
788 }
789 
790 void bpf_put_buffers(void)
791 {
792 	if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0))
793 		return;
794 	this_cpu_dec(bpf_bprintf_nest_level);
795 }
796 
797 void bpf_bprintf_cleanup(struct bpf_bprintf_data *data)
798 {
799 	if (!data->bin_args && !data->buf)
800 		return;
801 	bpf_put_buffers();
802 }
803 
804 /*
805  * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
806  *
807  * Returns a negative value if fmt is an invalid format string or 0 otherwise.
808  *
809  * This can be used in two ways:
810  * - Format string verification only: when data->get_bin_args is false
811  * - Arguments preparation: in addition to the above verification, it writes in
812  *   data->bin_args a binary representation of arguments usable by bstr_printf
813  *   where pointers from BPF have been sanitized.
814  *
815  * In argument preparation mode, if 0 is returned, safe temporary buffers are
816  * allocated and bpf_bprintf_cleanup should be called to free them after use.
817  */
818 int bpf_bprintf_prepare(const char *fmt, u32 fmt_size, const u64 *raw_args,
819 			u32 num_args, struct bpf_bprintf_data *data)
820 {
821 	bool get_buffers = (data->get_bin_args && num_args) || data->get_buf;
822 	char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
823 	struct bpf_bprintf_buffers *buffers = NULL;
824 	size_t sizeof_cur_arg, sizeof_cur_ip;
825 	int err, i, num_spec = 0;
826 	u64 cur_arg;
827 	char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";
828 
829 	fmt_end = strnchr(fmt, fmt_size, 0);
830 	if (!fmt_end)
831 		return -EINVAL;
832 	fmt_size = fmt_end - fmt;
833 
834 	if (get_buffers && bpf_try_get_buffers(&buffers))
835 		return -EBUSY;
836 
837 	if (data->get_bin_args) {
838 		if (num_args)
839 			tmp_buf = buffers->bin_args;
840 		tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS;
841 		data->bin_args = (u32 *)tmp_buf;
842 	}
843 
844 	if (data->get_buf)
845 		data->buf = buffers->buf;
846 
847 	for (i = 0; i < fmt_size; i++) {
848 		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
849 			err = -EINVAL;
850 			goto out;
851 		}
852 
853 		if (fmt[i] != '%')
854 			continue;
855 
856 		if (fmt[i + 1] == '%') {
857 			i++;
858 			continue;
859 		}
860 
861 		if (num_spec >= num_args) {
862 			err = -EINVAL;
863 			goto out;
864 		}
865 
866 		/* The string is zero-terminated so if fmt[i] != 0, we can
867 		 * always access fmt[i + 1], in the worst case it will be a 0
868 		 */
869 		i++;
870 
871 		/* skip optional "[0 +-][num]" width formatting field */
872 		while (fmt[i] == '0' || fmt[i] == '+'  || fmt[i] == '-' ||
873 		       fmt[i] == ' ')
874 			i++;
875 		if (fmt[i] >= '1' && fmt[i] <= '9') {
876 			i++;
877 			while (fmt[i] >= '0' && fmt[i] <= '9')
878 				i++;
879 		}
880 
881 		if (fmt[i] == 'p') {
882 			sizeof_cur_arg = sizeof(long);
883 
884 			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
885 			    ispunct(fmt[i + 1])) {
886 				if (tmp_buf)
887 					cur_arg = raw_args[num_spec];
888 				goto nocopy_fmt;
889 			}
890 
891 			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
892 			    fmt[i + 2] == 's') {
893 				fmt_ptype = fmt[i + 1];
894 				i += 2;
895 				goto fmt_str;
896 			}
897 
898 			if (fmt[i + 1] == 'K' ||
899 			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
900 			    fmt[i + 1] == 'S') {
901 				if (tmp_buf)
902 					cur_arg = raw_args[num_spec];
903 				i++;
904 				goto nocopy_fmt;
905 			}
906 
907 			if (fmt[i + 1] == 'B') {
908 				if (tmp_buf)  {
909 					err = snprintf(tmp_buf,
910 						       (tmp_buf_end - tmp_buf),
911 						       "%pB",
912 						       (void *)(long)raw_args[num_spec]);
913 					tmp_buf += (err + 1);
914 				}
915 
916 				i++;
917 				num_spec++;
918 				continue;
919 			}
920 
921 			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
922 			if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
923 			    (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
924 				err = -EINVAL;
925 				goto out;
926 			}
927 
928 			i += 2;
929 			if (!tmp_buf)
930 				goto nocopy_fmt;
931 
932 			sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
933 			if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
934 				err = -ENOSPC;
935 				goto out;
936 			}
937 
938 			unsafe_ptr = (char *)(long)raw_args[num_spec];
939 			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
940 						       sizeof_cur_ip);
941 			if (err < 0)
942 				memset(cur_ip, 0, sizeof_cur_ip);
943 
944 			/* hack: bstr_printf expects IP addresses to be
945 			 * pre-formatted as strings; ironically, the easiest way
946 			 * to do that is to call snprintf().
947 			 */
948 			ip_spec[2] = fmt[i - 1];
949 			ip_spec[3] = fmt[i];
950 			err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
951 				       ip_spec, &cur_ip);
952 
953 			tmp_buf += err + 1;
954 			num_spec++;
955 
956 			continue;
957 		} else if (fmt[i] == 's') {
958 			fmt_ptype = fmt[i];
959 fmt_str:
960 			if (fmt[i + 1] != 0 &&
961 			    !isspace(fmt[i + 1]) &&
962 			    !ispunct(fmt[i + 1])) {
963 				err = -EINVAL;
964 				goto out;
965 			}
966 
967 			if (!tmp_buf)
968 				goto nocopy_fmt;
969 
970 			if (tmp_buf_end == tmp_buf) {
971 				err = -ENOSPC;
972 				goto out;
973 			}
974 
975 			unsafe_ptr = (char *)(long)raw_args[num_spec];
976 			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
977 						    fmt_ptype,
978 						    tmp_buf_end - tmp_buf);
979 			if (err < 0) {
980 				tmp_buf[0] = '\0';
981 				err = 1;
982 			}
983 
984 			tmp_buf += err;
985 			num_spec++;
986 
987 			continue;
988 		} else if (fmt[i] == 'c') {
989 			if (!tmp_buf)
990 				goto nocopy_fmt;
991 
992 			if (tmp_buf_end == tmp_buf) {
993 				err = -ENOSPC;
994 				goto out;
995 			}
996 
997 			*tmp_buf = raw_args[num_spec];
998 			tmp_buf++;
999 			num_spec++;
1000 
1001 			continue;
1002 		}
1003 
1004 		sizeof_cur_arg = sizeof(int);
1005 
1006 		if (fmt[i] == 'l') {
1007 			sizeof_cur_arg = sizeof(long);
1008 			i++;
1009 		}
1010 		if (fmt[i] == 'l') {
1011 			sizeof_cur_arg = sizeof(long long);
1012 			i++;
1013 		}
1014 
1015 		if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
1016 		    fmt[i] != 'x' && fmt[i] != 'X') {
1017 			err = -EINVAL;
1018 			goto out;
1019 		}
1020 
1021 		if (tmp_buf)
1022 			cur_arg = raw_args[num_spec];
1023 nocopy_fmt:
1024 		if (tmp_buf) {
1025 			tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
1026 			if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
1027 				err = -ENOSPC;
1028 				goto out;
1029 			}
1030 
1031 			if (sizeof_cur_arg == 8) {
1032 				*(u32 *)tmp_buf = *(u32 *)&cur_arg;
1033 				*(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
1034 			} else {
1035 				*(u32 *)tmp_buf = (u32)(long)cur_arg;
1036 			}
1037 			tmp_buf += sizeof_cur_arg;
1038 		}
1039 		num_spec++;
1040 	}
1041 
1042 	err = 0;
1043 out:
1044 	if (err)
1045 		bpf_bprintf_cleanup(data);
1046 	return err;
1047 }
1048 
1049 BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
1050 	   const void *, args, u32, data_len)
1051 {
1052 	struct bpf_bprintf_data data = {
1053 		.get_bin_args	= true,
1054 	};
1055 	int err, num_args;
1056 
1057 	if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
1058 	    (data_len && !args))
1059 		return -EINVAL;
1060 	num_args = data_len / 8;
1061 
1062 	/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
1063 	 * can safely give an unbounded size.
1064 	 */
1065 	err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data);
1066 	if (err < 0)
1067 		return err;
1068 
1069 	err = bstr_printf(str, str_size, fmt, data.bin_args);
1070 
1071 	bpf_bprintf_cleanup(&data);
1072 
1073 	return err + 1;
1074 }
1075 
1076 const struct bpf_func_proto bpf_snprintf_proto = {
1077 	.func		= bpf_snprintf,
1078 	.gpl_only	= true,
1079 	.ret_type	= RET_INTEGER,
1080 	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
1081 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1082 	.arg3_type	= ARG_PTR_TO_CONST_STR,
1083 	.arg4_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
1084 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1085 };
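
/* Illustrative sketch (not part of this file): on the BPF side the variadic
 * arguments arrive as an array of u64, 8 bytes each, which is what the
 * data_len % 8 check above enforces. libbpf's BPF_SNPRINTF() macro hides
 * this packing:
 *
 *	static const char fmt[] = "pid=%d cpu=%u";
 *	char out[32];
 *	__u64 args[] = { pid, cpu };
 *	long n = bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
 *	// n is the length the full result would need, including the '\0'
 */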
1086 
1087 static void *map_key_from_value(struct bpf_map *map, void *value, u32 *arr_idx)
1088 {
1089 	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
1090 		struct bpf_array *array = container_of(map, struct bpf_array, map);
1091 
1092 		*arr_idx = ((char *)value - array->value) / array->elem_size;
1093 		return arr_idx;
1094 	}
1095 	return (void *)value - round_up(map->key_size, 8);
1096 }
1097 
1098 struct bpf_async_cb {
1099 	struct bpf_map *map;
1100 	struct bpf_prog *prog;
1101 	void __rcu *callback_fn;
1102 	void *value;
1103 	union {
1104 		struct rcu_head rcu;
1105 		struct work_struct delete_work;
1106 	};
1107 	u64 flags;
1108 };
1109 
1110 /* BPF map elements can contain 'struct bpf_timer'.
1111  * Such a map owns all of its BPF timers.
1112  * 'struct bpf_timer' is allocated as part of the map element allocation
1113  * and it's zero initialized.
1114  * That space is used to keep 'struct bpf_async_kern'.
1115  * bpf_timer_init() allocates 'struct bpf_hrtimer', inits the hrtimer, and
1116  * remembers the 'struct bpf_map *' pointer it's part of.
1117  * bpf_timer_set_callback() increments the prog refcnt and assigns a bpf callback_fn.
1118  * bpf_timer_start() arms the timer.
1119  * If the user space reference to a map goes to zero at this point,
1120  * the ops->map_release_uref callback is responsible for cancelling the timers,
1121  * freeing their memory, and decrementing the progs' refcnts.
1122  * bpf_timer_cancel() cancels the timer and decrements the prog's refcnt.
1123  * Inner maps can contain bpf timers as well. ops->map_release_uref
1124  * frees the timers when an inner map is replaced or deleted by user space.
1125  */
1126 struct bpf_hrtimer {
1127 	struct bpf_async_cb cb;
1128 	struct hrtimer timer;
1129 	atomic_t cancelling;
1130 };
1131 
1132 struct bpf_work {
1133 	struct bpf_async_cb cb;
1134 	struct work_struct work;
1135 	struct work_struct delete_work;
1136 };
1137 
1138 /* the actual struct hidden inside uapi struct bpf_timer and bpf_wq */
1139 struct bpf_async_kern {
1140 	union {
1141 		struct bpf_async_cb *cb;
1142 		struct bpf_hrtimer *timer;
1143 		struct bpf_work *work;
1144 	};
1145 	/* bpf_spin_lock is used here instead of spinlock_t to make
1146 	 * sure that it always fits into space reserved by struct bpf_timer
1147 	 * regardless of LOCKDEP and spinlock debug flags.
1148 	 */
1149 	struct bpf_spin_lock lock;
1150 } __attribute__((aligned(8)));
1151 
1152 enum bpf_async_type {
1153 	BPF_ASYNC_TYPE_TIMER = 0,
1154 	BPF_ASYNC_TYPE_WQ,
1155 };
1156 
1157 static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);
1158 
1159 static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
1160 {
1161 	struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
1162 	struct bpf_map *map = t->cb.map;
1163 	void *value = t->cb.value;
1164 	bpf_callback_t callback_fn;
1165 	void *key;
1166 	u32 idx;
1167 
1168 	BTF_TYPE_EMIT(struct bpf_timer);
1169 	callback_fn = rcu_dereference_check(t->cb.callback_fn, rcu_read_lock_bh_held());
1170 	if (!callback_fn)
1171 		goto out;
1172 
1173 	/* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
1174 	 * cannot be preempted by another bpf_timer_cb() on the same cpu.
1175 	 * Remember the timer this callback is servicing to prevent
1176 	 * deadlock if callback_fn() calls bpf_timer_cancel() or
1177 	 * bpf_map_delete_elem() on the same timer.
1178 	 */
1179 	this_cpu_write(hrtimer_running, t);
1180 
1181 	key = map_key_from_value(map, value, &idx);
1182 
1183 	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
1184 	/* The verifier checked that return value is zero. */
1185 
1186 	this_cpu_write(hrtimer_running, NULL);
1187 out:
1188 	return HRTIMER_NORESTART;
1189 }
1190 
1191 static void bpf_wq_work(struct work_struct *work)
1192 {
1193 	struct bpf_work *w = container_of(work, struct bpf_work, work);
1194 	struct bpf_async_cb *cb = &w->cb;
1195 	struct bpf_map *map = cb->map;
1196 	bpf_callback_t callback_fn;
1197 	void *value = cb->value;
1198 	void *key;
1199 	u32 idx;
1200 
1201 	BTF_TYPE_EMIT(struct bpf_wq);
1202 
1203 	callback_fn = READ_ONCE(cb->callback_fn);
1204 	if (!callback_fn)
1205 		return;
1206 
1207 	key = map_key_from_value(map, value, &idx);
1208 
1209 	rcu_read_lock_trace();
1210 	migrate_disable();
1211 
1212 	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
1213 
1214 	migrate_enable();
1215 	rcu_read_unlock_trace();
1216 }
1217 
1218 static void bpf_async_cb_rcu_free(struct rcu_head *rcu)
1219 {
1220 	struct bpf_async_cb *cb = container_of(rcu, struct bpf_async_cb, rcu);
1221 
1222 	kfree_nolock(cb);
1223 }
1224 
1225 static void bpf_wq_delete_work(struct work_struct *work)
1226 {
1227 	struct bpf_work *w = container_of(work, struct bpf_work, delete_work);
1228 
1229 	cancel_work_sync(&w->work);
1230 
1231 	call_rcu(&w->cb.rcu, bpf_async_cb_rcu_free);
1232 }
1233 
1234 static void bpf_timer_delete_work(struct work_struct *work)
1235 {
1236 	struct bpf_hrtimer *t = container_of(work, struct bpf_hrtimer, cb.delete_work);
1237 
1238 	/* Cancel the timer and wait for callback to complete if it was running.
1239 	 * If hrtimer_cancel() can be safely called it's safe to call
1240 	 * call_rcu() right after for both preallocated and non-preallocated
1241 	 * maps.  The async->cb = NULL was already done and no code path can see
1242 	 * address 't' anymore. Any timer armed on the existing bpf_hrtimer before
1243 	 * bpf_timer_cancel_and_free() will have been cancelled by then.
1244 	 */
1245 	hrtimer_cancel(&t->timer);
1246 	call_rcu(&t->cb.rcu, bpf_async_cb_rcu_free);
1247 }
1248 
1249 static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags,
1250 			    enum bpf_async_type type)
1251 {
1252 	struct bpf_async_cb *cb;
1253 	struct bpf_hrtimer *t;
1254 	struct bpf_work *w;
1255 	clockid_t clockid;
1256 	size_t size;
1257 	int ret = 0;
1258 
1259 	if (in_nmi())
1260 		return -EOPNOTSUPP;
1261 
1262 	switch (type) {
1263 	case BPF_ASYNC_TYPE_TIMER:
1264 		size = sizeof(struct bpf_hrtimer);
1265 		break;
1266 	case BPF_ASYNC_TYPE_WQ:
1267 		size = sizeof(struct bpf_work);
1268 		break;
1269 	default:
1270 		return -EINVAL;
1271 	}
1272 
1273 	__bpf_spin_lock_irqsave(&async->lock);
1274 	t = async->timer;
1275 	if (t) {
1276 		ret = -EBUSY;
1277 		goto out;
1278 	}
1279 
1280 	cb = bpf_map_kmalloc_nolock(map, size, 0, map->numa_node);
1281 	if (!cb) {
1282 		ret = -ENOMEM;
1283 		goto out;
1284 	}
1285 
1286 	switch (type) {
1287 	case BPF_ASYNC_TYPE_TIMER:
1288 		clockid = flags & (MAX_CLOCKS - 1);
1289 		t = (struct bpf_hrtimer *)cb;
1290 
1291 		atomic_set(&t->cancelling, 0);
1292 		INIT_WORK(&t->cb.delete_work, bpf_timer_delete_work);
1293 		hrtimer_setup(&t->timer, bpf_timer_cb, clockid, HRTIMER_MODE_REL_SOFT);
1294 		cb->value = (void *)async - map->record->timer_off;
1295 		break;
1296 	case BPF_ASYNC_TYPE_WQ:
1297 		w = (struct bpf_work *)cb;
1298 
1299 		INIT_WORK(&w->work, bpf_wq_work);
1300 		INIT_WORK(&w->delete_work, bpf_wq_delete_work);
1301 		cb->value = (void *)async - map->record->wq_off;
1302 		break;
1303 	}
1304 	cb->map = map;
1305 	cb->prog = NULL;
1306 	cb->flags = flags;
1307 	rcu_assign_pointer(cb->callback_fn, NULL);
1308 
1309 	WRITE_ONCE(async->cb, cb);
1310 	/* Guarantee the ordering between async->cb and map->usercnt, so that
1311 	 * when a uref release races with a bpf timer init, either
1312 	 * bpf_timer_cancel_and_free() called by the uref release reads a non-NULL
1313 	 * timer or atomic64_read() below returns a zero usercnt.
1314 	 */
1315 	smp_mb();
1316 	if (!atomic64_read(&map->usercnt)) {
1317 		/* maps with timers must be either held by user space
1318 		 * or pinned in bpffs.
1319 		 */
1320 		WRITE_ONCE(async->cb, NULL);
1321 		kfree_nolock(cb);
1322 		ret = -EPERM;
1323 	}
1324 out:
1325 	__bpf_spin_unlock_irqrestore(&async->lock);
1326 	return ret;
1327 }
1328 
1329 BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map,
1330 	   u64, flags)
1331 {
1332 	clock_t clockid = flags & (MAX_CLOCKS - 1);
1333 
1334 	BUILD_BUG_ON(MAX_CLOCKS != 16);
1335 	BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_timer));
1336 	BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_timer));
1337 
1338 	if (flags >= MAX_CLOCKS ||
1339 	    /* similar to timerfd except _ALARM variants are not supported */
1340 	    (clockid != CLOCK_MONOTONIC &&
1341 	     clockid != CLOCK_REALTIME &&
1342 	     clockid != CLOCK_BOOTTIME))
1343 		return -EINVAL;
1344 
1345 	return __bpf_async_init(timer, map, flags, BPF_ASYNC_TYPE_TIMER);
1346 }
1347 
1348 static const struct bpf_func_proto bpf_timer_init_proto = {
1349 	.func		= bpf_timer_init,
1350 	.gpl_only	= true,
1351 	.ret_type	= RET_INTEGER,
1352 	.arg1_type	= ARG_PTR_TO_TIMER,
1353 	.arg2_type	= ARG_CONST_MAP_PTR,
1354 	.arg3_type	= ARG_ANYTHING,
1355 };
1356 
1357 static int __bpf_async_set_callback(struct bpf_async_kern *async, void *callback_fn,
1358 				    struct bpf_prog_aux *aux, unsigned int flags,
1359 				    enum bpf_async_type type)
1360 {
1361 	struct bpf_prog *prev, *prog = aux->prog;
1362 	struct bpf_async_cb *cb;
1363 	int ret = 0;
1364 
1365 	if (in_nmi())
1366 		return -EOPNOTSUPP;
1367 	__bpf_spin_lock_irqsave(&async->lock);
1368 	cb = async->cb;
1369 	if (!cb) {
1370 		ret = -EINVAL;
1371 		goto out;
1372 	}
1373 	if (!atomic64_read(&cb->map->usercnt)) {
1374 		/* maps with timers must be either held by user space
1375 		 * or pinned in bpffs. Otherwise timer might still be
1376 		 * running even when bpf prog is detached and user space
1377 		 * is gone, since map_release_uref won't ever be called.
1378 		 */
1379 		ret = -EPERM;
1380 		goto out;
1381 	}
1382 	prev = cb->prog;
1383 	if (prev != prog) {
1384 		/* Bump prog refcnt once. Every bpf_timer_set_callback()
1385 		 * can pick different callback_fn-s within the same prog.
1386 		 */
1387 		prog = bpf_prog_inc_not_zero(prog);
1388 		if (IS_ERR(prog)) {
1389 			ret = PTR_ERR(prog);
1390 			goto out;
1391 		}
1392 		if (prev)
1393 			/* Drop prev prog refcnt when swapping with new prog */
1394 			bpf_prog_put(prev);
1395 		cb->prog = prog;
1396 	}
1397 	rcu_assign_pointer(cb->callback_fn, callback_fn);
1398 out:
1399 	__bpf_spin_unlock_irqrestore(&async->lock);
1400 	return ret;
1401 }
1402 
1403 BPF_CALL_3(bpf_timer_set_callback, struct bpf_async_kern *, timer, void *, callback_fn,
1404 	   struct bpf_prog_aux *, aux)
1405 {
1406 	return __bpf_async_set_callback(timer, callback_fn, aux, 0, BPF_ASYNC_TYPE_TIMER);
1407 }
1408 
1409 static const struct bpf_func_proto bpf_timer_set_callback_proto = {
1410 	.func		= bpf_timer_set_callback,
1411 	.gpl_only	= true,
1412 	.ret_type	= RET_INTEGER,
1413 	.arg1_type	= ARG_PTR_TO_TIMER,
1414 	.arg2_type	= ARG_PTR_TO_FUNC,
1415 };
1416 
1417 BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, timer, u64, nsecs, u64, flags)
1418 {
1419 	struct bpf_hrtimer *t;
1420 	int ret = 0;
1421 	enum hrtimer_mode mode;
1422 
1423 	if (in_nmi())
1424 		return -EOPNOTSUPP;
1425 	if (flags & ~(BPF_F_TIMER_ABS | BPF_F_TIMER_CPU_PIN))
1426 		return -EINVAL;
1427 	__bpf_spin_lock_irqsave(&timer->lock);
1428 	t = timer->timer;
1429 	if (!t || !t->cb.prog) {
1430 		ret = -EINVAL;
1431 		goto out;
1432 	}
1433 
1434 	if (flags & BPF_F_TIMER_ABS)
1435 		mode = HRTIMER_MODE_ABS_SOFT;
1436 	else
1437 		mode = HRTIMER_MODE_REL_SOFT;
1438 
1439 	if (flags & BPF_F_TIMER_CPU_PIN)
1440 		mode |= HRTIMER_MODE_PINNED;
1441 
1442 	hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
1443 out:
1444 	__bpf_spin_unlock_irqrestore(&timer->lock);
1445 	return ret;
1446 }
1447 
1448 static const struct bpf_func_proto bpf_timer_start_proto = {
1449 	.func		= bpf_timer_start,
1450 	.gpl_only	= true,
1451 	.ret_type	= RET_INTEGER,
1452 	.arg1_type	= ARG_PTR_TO_TIMER,
1453 	.arg2_type	= ARG_ANYTHING,
1454 	.arg3_type	= ARG_ANYTHING,
1455 };
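
/* Illustrative sketch (not part of this file): the three timer helpers are
 * used together on a struct bpf_timer embedded in a map value. Assuming a
 * hypothetical value type "struct map_val" with a timer field "t":
 *
 *	static int timer_cb(void *map, __u32 *key, struct map_val *val)
 *	{
 *		return 0;	// the verifier requires a zero return
 *	}
 *
 *	struct map_val *v = bpf_map_lookup_elem(&my_map, &key);
 *	if (v) {
 *		bpf_timer_init(&v->t, &my_map, CLOCK_MONOTONIC);
 *		bpf_timer_set_callback(&v->t, timer_cb);
 *		bpf_timer_start(&v->t, 1000000, 0);	// fire in 1 ms
 *	}
 */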
1456 
1457 static void drop_prog_refcnt(struct bpf_async_cb *async)
1458 {
1459 	struct bpf_prog *prog = async->prog;
1460 
1461 	if (prog) {
1462 		bpf_prog_put(prog);
1463 		async->prog = NULL;
1464 		rcu_assign_pointer(async->callback_fn, NULL);
1465 	}
1466 }
1467 
1468 BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer)
1469 {
1470 	struct bpf_hrtimer *t, *cur_t;
1471 	bool inc = false;
1472 	int ret = 0;
1473 
1474 	if (in_nmi())
1475 		return -EOPNOTSUPP;
1476 	rcu_read_lock();
1477 	__bpf_spin_lock_irqsave(&timer->lock);
1478 	t = timer->timer;
1479 	if (!t) {
1480 		ret = -EINVAL;
1481 		goto out;
1482 	}
1483 
1484 	cur_t = this_cpu_read(hrtimer_running);
1485 	if (cur_t == t) {
1486 		/* If bpf callback_fn is trying to bpf_timer_cancel()
1487 		 * its own timer the hrtimer_cancel() will deadlock
1488 		 * since it waits for callback_fn to finish.
1489 		 */
1490 		ret = -EDEADLK;
1491 		goto out;
1492 	}
1493 
1494 	/* Only account in-flight cancellations when invoked from a timer
1495 	 * callback, since we want to avoid waiting only if other _callbacks_
1496 	 * are waiting on us, to avoid introducing lockups. Non-callback paths
1497 	 * are ok, since nobody would synchronously wait for their completion.
1498 	 */
1499 	if (!cur_t)
1500 		goto drop;
1501 	atomic_inc(&t->cancelling);
1502 	/* Need full barrier after relaxed atomic_inc */
1503 	smp_mb__after_atomic();
1504 	inc = true;
1505 	if (atomic_read(&cur_t->cancelling)) {
1506 		/* We're cancelling timer t, while some other timer callback is
1507 		 * attempting to cancel us. In such a case, it might be possible
1508 		 * that timer t belongs to the other callback, or some other
1509 		 * callback waiting upon it (creating transitive dependencies
1510 		 * upon us), and we will enter a deadlock if we continue
1511 		 * cancelling and waiting for it synchronously, since it might
1512 		 * do the same. Bail!
1513 		 */
1514 		ret = -EDEADLK;
1515 		goto out;
1516 	}
1517 drop:
1518 	drop_prog_refcnt(&t->cb);
1519 out:
1520 	__bpf_spin_unlock_irqrestore(&timer->lock);
1521 	/* Cancel the timer and wait for associated callback to finish
1522 	 * if it was running.
1523 	 */
1524 	ret = ret ?: hrtimer_cancel(&t->timer);
1525 	if (inc)
1526 		atomic_dec(&t->cancelling);
1527 	rcu_read_unlock();
1528 	return ret;
1529 }
1530 
1531 static const struct bpf_func_proto bpf_timer_cancel_proto = {
1532 	.func		= bpf_timer_cancel,
1533 	.gpl_only	= true,
1534 	.ret_type	= RET_INTEGER,
1535 	.arg1_type	= ARG_PTR_TO_TIMER,
1536 };
1537 
1538 static struct bpf_async_cb *__bpf_async_cancel_and_free(struct bpf_async_kern *async)
1539 {
1540 	struct bpf_async_cb *cb;
1541 
1542 	/* Performance optimization: read async->cb without lock first. */
1543 	if (!READ_ONCE(async->cb))
1544 		return NULL;
1545 
1546 	__bpf_spin_lock_irqsave(&async->lock);
1547 	/* re-read it under lock */
1548 	cb = async->cb;
1549 	if (!cb)
1550 		goto out;
1551 	drop_prog_refcnt(cb);
1552 	/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
1553 	 * this timer, since it won't be initialized.
1554 	 */
1555 	WRITE_ONCE(async->cb, NULL);
1556 out:
1557 	__bpf_spin_unlock_irqrestore(&async->lock);
1558 	return cb;
1559 }
1560 
1561 /* This function is called by map_delete/update_elem for an individual element and
1562  * by ops->map_release_uref when the user space reference to a map reaches zero.
1563  */
1564 void bpf_timer_cancel_and_free(void *val)
1565 {
1566 	struct bpf_hrtimer *t;
1567 
1568 	t = (struct bpf_hrtimer *)__bpf_async_cancel_and_free(val);
1569 
1570 	if (!t)
1571 		return;
1572 	/* We check that bpf_map_delete/update_elem() was called from timer
1573 	 * callback_fn. In such case we don't call hrtimer_cancel() (since it
1574 	 * will deadlock) and don't call hrtimer_try_to_cancel() (since it will
1575 	 * just return -1). Though callback_fn is still running on this cpu it's
1576 	 * safe to do kfree(t) because bpf_timer_cb() read everything it needed
1577 	 * from 't'. The bpf subprog callback_fn won't be able to access 't',
1578 	 * since async->cb = NULL was already done. The timer will be
1579 	 * effectively cancelled because bpf_timer_cb() will return
1580 	 * HRTIMER_NORESTART.
1581 	 *
1582 	 * However, it is possible the timer callback_fn calling us armed the
1583 	 * timer _before_ calling us, such that failing to cancel it here will
1584 	 * cause it to possibly use struct hrtimer after freeing bpf_hrtimer.
1585 	 * Therefore, we _need_ to cancel any outstanding timers before we do
1586 	 * call_rcu, even though no more timers can be armed.
1587 	 *
1588 	 * Moreover, we need to schedule work even if timer does not belong to
1589 	 * the calling callback_fn, as on two different CPUs, we can end up in a
1590 	 * situation where both sides run in parallel, try to cancel one
1591 	 * another, and we end up waiting on both sides in hrtimer_cancel
1592 	 * without making forward progress, since timer1 depends on timer2
1593 	 * callback to finish, and vice versa.
1594 	 *
1595 	 *  CPU 1 (timer1_cb)			CPU 2 (timer2_cb)
1596 	 *  bpf_timer_cancel_and_free(timer2)	bpf_timer_cancel_and_free(timer1)
1597 	 *
1598 	 * To avoid these issues, punt to workqueue context when we are in a
1599 	 * timer callback.
1600 	 */
1601 	if (this_cpu_read(hrtimer_running)) {
1602 		queue_work(system_dfl_wq, &t->cb.delete_work);
1603 		return;
1604 	}
1605 
1606 	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
1607 		/* If the timer is running on other CPU, also use a kworker to
1608 		 * wait for the completion of the timer instead of trying to
1609 		 * acquire a sleepable lock in hrtimer_cancel() to wait for its
1610 		 * completion.
1611 		 */
1612 		if (hrtimer_try_to_cancel(&t->timer) >= 0)
1613 			call_rcu(&t->cb.rcu, bpf_async_cb_rcu_free);
1614 		else
1615 			queue_work(system_dfl_wq, &t->cb.delete_work);
1616 	} else {
1617 		bpf_timer_delete_work(&t->cb.delete_work);
1618 	}
1619 }
1620 
1621 /* This function is called by map_delete/update_elem for an individual element and
1622  * by ops->map_release_uref when the user space reference to a map reaches zero.
1623  */
1624 void bpf_wq_cancel_and_free(void *val)
1625 {
1626 	struct bpf_work *work;
1627 
1628 	BTF_TYPE_EMIT(struct bpf_wq);
1629 
1630 	work = (struct bpf_work *)__bpf_async_cancel_and_free(val);
1631 	if (!work)
1632 		return;
1633 	/* Trigger cancel of the sleepable work, but *do not* wait for
1634 	 * it to finish if it was running, as we might not be in a
1635 	 * sleepable context.
1636 	 * kfree will be called once the work has finished.
1637 	 */
1638 	schedule_work(&work->delete_work);
1639 }
1640 
1641 BPF_CALL_2(bpf_kptr_xchg, void *, dst, void *, ptr)
1642 {
1643 	unsigned long *kptr = dst;
1644 
1645 	/* This helper may be inlined by the verifier. */
1646 	return xchg(kptr, (unsigned long)ptr);
1647 }
1648 
1649 /* Unlike other PTR_TO_BTF_ID helpers, the btf_id of the bpf_kptr_xchg()
1650  * helper is determined dynamically by the verifier. Use BPF_PTR_POISON to
1651  * denote the type that the verifier will determine.
1652  */
1653 static const struct bpf_func_proto bpf_kptr_xchg_proto = {
1654 	.func         = bpf_kptr_xchg,
1655 	.gpl_only     = false,
1656 	.ret_type     = RET_PTR_TO_BTF_ID_OR_NULL,
1657 	.ret_btf_id   = BPF_PTR_POISON,
1658 	.arg1_type    = ARG_KPTR_XCHG_DEST,
1659 	.arg2_type    = ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE,
1660 	.arg2_btf_id  = BPF_PTR_POISON,
1661 };
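
/* Illustrative sketch (not part of this file): bpf_kptr_xchg() swaps a
 * referenced kernel pointer into a __kptr field of a map value and hands the
 * old pointer back, which the program must release. Assuming a hypothetical
 * task_struct kptr field:
 *
 *	struct map_val {
 *		struct task_struct __kptr *task;
 *	};
 *
 *	struct task_struct *old = bpf_kptr_xchg(&v->task, acquired_task);
 *	if (old)
 *		bpf_task_release(old);	// drop the reference we received back
 */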
1662 
1663 /* Since the upper 8 bits of dynptr->size are reserved, the
1664  * maximum supported size is 2^24 - 1.
1665  */
1666 #define DYNPTR_MAX_SIZE	((1UL << 24) - 1)
1667 #define DYNPTR_TYPE_SHIFT	28
1668 #define DYNPTR_SIZE_MASK	0xFFFFFF
1669 #define DYNPTR_RDONLY_BIT	BIT(31)
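
/* Resulting layout of dynptr->size, per the definitions above (a sketch):
 *
 *	bit  31    : rdonly flag
 *	bits 30-28 : dynptr type
 *	bits 27-24 : unused
 *	bits 23-0  : actual size, at most DYNPTR_MAX_SIZE
 */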
1670 
1671 bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
1672 {
1673 	return ptr->size & DYNPTR_RDONLY_BIT;
1674 }
1675 
1676 void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
1677 {
1678 	ptr->size |= DYNPTR_RDONLY_BIT;
1679 }
1680 
1681 static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type)
1682 {
1683 	ptr->size |= type << DYNPTR_TYPE_SHIFT;
1684 }
1685 
1686 static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
1687 {
1688 	return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT;
1689 }
1690 
1691 u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
1692 {
1693 	return ptr->size & DYNPTR_SIZE_MASK;
1694 }
1695 
1696 static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u32 new_size)
1697 {
1698 	u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK;
1699 
1700 	ptr->size = new_size | metadata;
1701 }
1702 
1703 int bpf_dynptr_check_size(u32 size)
1704 {
1705 	return size > DYNPTR_MAX_SIZE ? -E2BIG : 0;
1706 }
1707 
1708 void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
1709 		     enum bpf_dynptr_type type, u32 offset, u32 size)
1710 {
1711 	ptr->data = data;
1712 	ptr->offset = offset;
1713 	ptr->size = size;
1714 	bpf_dynptr_set_type(ptr, type);
1715 }
1716 
1717 void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
1718 {
1719 	memset(ptr, 0, sizeof(*ptr));
1720 }
1721 
1722 BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr)
1723 {
1724 	int err;
1725 
1726 	BTF_TYPE_EMIT(struct bpf_dynptr);
1727 
1728 	err = bpf_dynptr_check_size(size);
1729 	if (err)
1730 		goto error;
1731 
1732 	/* flags is currently unsupported */
1733 	if (flags) {
1734 		err = -EINVAL;
1735 		goto error;
1736 	}
1737 
1738 	bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size);
1739 
1740 	return 0;
1741 
1742 error:
1743 	bpf_dynptr_set_null(ptr);
1744 	return err;
1745 }
1746 
1747 static const struct bpf_func_proto bpf_dynptr_from_mem_proto = {
1748 	.func		= bpf_dynptr_from_mem,
1749 	.gpl_only	= false,
1750 	.ret_type	= RET_INTEGER,
1751 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
1752 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1753 	.arg3_type	= ARG_ANYTHING,
1754 	.arg4_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT | MEM_WRITE,
1755 };
1756 
1757 static int __bpf_dynptr_read(void *dst, u32 len, const struct bpf_dynptr_kern *src,
1758 			     u32 offset, u64 flags)
1759 {
1760 	enum bpf_dynptr_type type;
1761 	int err;
1762 
1763 	if (!src->data || flags)
1764 		return -EINVAL;
1765 
1766 	err = bpf_dynptr_check_off_len(src, offset, len);
1767 	if (err)
1768 		return err;
1769 
1770 	type = bpf_dynptr_get_type(src);
1771 
1772 	switch (type) {
1773 	case BPF_DYNPTR_TYPE_LOCAL:
1774 	case BPF_DYNPTR_TYPE_RINGBUF:
1775 		/* Source and destination may possibly overlap, hence use memmove to
1776 		 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr
1777 		 * pointing to overlapping PTR_TO_MAP_VALUE regions.
1778 		 */
1779 		memmove(dst, src->data + src->offset + offset, len);
1780 		return 0;
1781 	case BPF_DYNPTR_TYPE_SKB:
1782 		return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len);
1783 	case BPF_DYNPTR_TYPE_XDP:
1784 		return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len);
1785 	case BPF_DYNPTR_TYPE_SKB_META:
1786 		memmove(dst, bpf_skb_meta_pointer(src->data, src->offset + offset), len);
1787 		return 0;
1788 	default:
1789 		WARN_ONCE(true, "bpf_dynptr_read: unknown dynptr type %d\n", type);
1790 		return -EFAULT;
1791 	}
1792 }
1793 
1794 BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src,
1795 	   u32, offset, u64, flags)
1796 {
1797 	return __bpf_dynptr_read(dst, len, src, offset, flags);
1798 }
1799 
1800 static const struct bpf_func_proto bpf_dynptr_read_proto = {
1801 	.func		= bpf_dynptr_read,
1802 	.gpl_only	= false,
1803 	.ret_type	= RET_INTEGER,
1804 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
1805 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1806 	.arg3_type	= ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1807 	.arg4_type	= ARG_ANYTHING,
1808 	.arg5_type	= ARG_ANYTHING,
1809 };
1810 
1811 int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u32 offset, void *src,
1812 		       u32 len, u64 flags)
1813 {
1814 	enum bpf_dynptr_type type;
1815 	int err;
1816 
1817 	if (!dst->data || __bpf_dynptr_is_rdonly(dst))
1818 		return -EINVAL;
1819 
1820 	err = bpf_dynptr_check_off_len(dst, offset, len);
1821 	if (err)
1822 		return err;
1823 
1824 	type = bpf_dynptr_get_type(dst);
1825 
1826 	switch (type) {
1827 	case BPF_DYNPTR_TYPE_LOCAL:
1828 	case BPF_DYNPTR_TYPE_RINGBUF:
1829 		if (flags)
1830 			return -EINVAL;
1831 		/* Source and destination may possibly overlap, hence use memmove to
1832 		 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr
1833 		 * pointing to overlapping PTR_TO_MAP_VALUE regions.
1834 		 */
1835 		memmove(dst->data + dst->offset + offset, src, len);
1836 		return 0;
1837 	case BPF_DYNPTR_TYPE_SKB:
1838 		return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len,
1839 					     flags);
1840 	case BPF_DYNPTR_TYPE_XDP:
1841 		if (flags)
1842 			return -EINVAL;
1843 		return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len);
1844 	case BPF_DYNPTR_TYPE_SKB_META:
1845 		return __bpf_skb_meta_store_bytes(dst->data, dst->offset + offset, src,
1846 						  len, flags);
1847 	default:
1848 		WARN_ONCE(true, "bpf_dynptr_write: unknown dynptr type %d\n", type);
1849 		return -EFAULT;
1850 	}
1851 }
1852 
1853 BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, void *, src,
1854 	   u32, len, u64, flags)
1855 {
1856 	return __bpf_dynptr_write(dst, offset, src, len, flags);
1857 }
1858 
1859 static const struct bpf_func_proto bpf_dynptr_write_proto = {
1860 	.func		= bpf_dynptr_write,
1861 	.gpl_only	= false,
1862 	.ret_type	= RET_INTEGER,
1863 	.arg1_type	= ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1864 	.arg2_type	= ARG_ANYTHING,
1865 	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1866 	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
1867 	.arg5_type	= ARG_ANYTHING,
1868 };
1869 
1870 BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u32, len)
1871 {
1872 	enum bpf_dynptr_type type;
1873 	int err;
1874 
1875 	if (!ptr->data)
1876 		return 0;
1877 
1878 	err = bpf_dynptr_check_off_len(ptr, offset, len);
1879 	if (err)
1880 		return 0;
1881 
1882 	if (__bpf_dynptr_is_rdonly(ptr))
1883 		return 0;
1884 
1885 	type = bpf_dynptr_get_type(ptr);
1886 
1887 	switch (type) {
1888 	case BPF_DYNPTR_TYPE_LOCAL:
1889 	case BPF_DYNPTR_TYPE_RINGBUF:
1890 		return (unsigned long)(ptr->data + ptr->offset + offset);
1891 	case BPF_DYNPTR_TYPE_SKB:
1892 	case BPF_DYNPTR_TYPE_XDP:
1893 	case BPF_DYNPTR_TYPE_SKB_META:
1894 		/* skb, xdp and skb metadata dynptrs should use bpf_dynptr_slice / bpf_dynptr_slice_rdwr */
1895 		return 0;
1896 	default:
1897 		WARN_ONCE(true, "bpf_dynptr_data: unknown dynptr type %d\n", type);
1898 		return 0;
1899 	}
1900 }
1901 
1902 static const struct bpf_func_proto bpf_dynptr_data_proto = {
1903 	.func		= bpf_dynptr_data,
1904 	.gpl_only	= false,
1905 	.ret_type	= RET_PTR_TO_DYNPTR_MEM_OR_NULL,
1906 	.arg1_type	= ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1907 	.arg2_type	= ARG_ANYTHING,
1908 	.arg3_type	= ARG_CONST_ALLOC_SIZE_OR_ZERO,
1909 };
1910 
1911 const struct bpf_func_proto bpf_get_current_task_proto __weak;
1912 const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
1913 const struct bpf_func_proto bpf_probe_read_user_proto __weak;
1914 const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
1915 const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
1916 const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
1917 const struct bpf_func_proto bpf_task_pt_regs_proto __weak;
1918 const struct bpf_func_proto bpf_perf_event_read_proto __weak;
1919 const struct bpf_func_proto bpf_send_signal_proto __weak;
1920 const struct bpf_func_proto bpf_send_signal_thread_proto __weak;
1921 const struct bpf_func_proto bpf_get_task_stack_sleepable_proto __weak;
1922 const struct bpf_func_proto bpf_get_task_stack_proto __weak;
1923 const struct bpf_func_proto bpf_get_branch_snapshot_proto __weak;
1924 
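/* Helpers returned before the first capability check are available to all
 * program types. The second group additionally requires CAP_BPF (or a BPF
 * token granting it); the third group requires CAP_PERFMON as well.
 */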
1925 const struct bpf_func_proto *
1926 bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1927 {
1928 	switch (func_id) {
1929 	case BPF_FUNC_map_lookup_elem:
1930 		return &bpf_map_lookup_elem_proto;
1931 	case BPF_FUNC_map_update_elem:
1932 		return &bpf_map_update_elem_proto;
1933 	case BPF_FUNC_map_delete_elem:
1934 		return &bpf_map_delete_elem_proto;
1935 	case BPF_FUNC_map_push_elem:
1936 		return &bpf_map_push_elem_proto;
1937 	case BPF_FUNC_map_pop_elem:
1938 		return &bpf_map_pop_elem_proto;
1939 	case BPF_FUNC_map_peek_elem:
1940 		return &bpf_map_peek_elem_proto;
1941 	case BPF_FUNC_map_lookup_percpu_elem:
1942 		return &bpf_map_lookup_percpu_elem_proto;
1943 	case BPF_FUNC_get_prandom_u32:
1944 		return &bpf_get_prandom_u32_proto;
1945 	case BPF_FUNC_get_smp_processor_id:
1946 		return &bpf_get_raw_smp_processor_id_proto;
1947 	case BPF_FUNC_get_numa_node_id:
1948 		return &bpf_get_numa_node_id_proto;
1949 	case BPF_FUNC_tail_call:
1950 		return &bpf_tail_call_proto;
1951 	case BPF_FUNC_ktime_get_ns:
1952 		return &bpf_ktime_get_ns_proto;
1953 	case BPF_FUNC_ktime_get_boot_ns:
1954 		return &bpf_ktime_get_boot_ns_proto;
1955 	case BPF_FUNC_ktime_get_tai_ns:
1956 		return &bpf_ktime_get_tai_ns_proto;
1957 	case BPF_FUNC_ringbuf_output:
1958 		return &bpf_ringbuf_output_proto;
1959 	case BPF_FUNC_ringbuf_reserve:
1960 		return &bpf_ringbuf_reserve_proto;
1961 	case BPF_FUNC_ringbuf_submit:
1962 		return &bpf_ringbuf_submit_proto;
1963 	case BPF_FUNC_ringbuf_discard:
1964 		return &bpf_ringbuf_discard_proto;
1965 	case BPF_FUNC_ringbuf_query:
1966 		return &bpf_ringbuf_query_proto;
1967 	case BPF_FUNC_strncmp:
1968 		return &bpf_strncmp_proto;
1969 	case BPF_FUNC_strtol:
1970 		return &bpf_strtol_proto;
1971 	case BPF_FUNC_strtoul:
1972 		return &bpf_strtoul_proto;
1973 	case BPF_FUNC_get_current_pid_tgid:
1974 		return &bpf_get_current_pid_tgid_proto;
1975 	case BPF_FUNC_get_ns_current_pid_tgid:
1976 		return &bpf_get_ns_current_pid_tgid_proto;
1977 	case BPF_FUNC_get_current_uid_gid:
1978 		return &bpf_get_current_uid_gid_proto;
1979 	default:
1980 		break;
1981 	}
1982 
1983 	if (!bpf_token_capable(prog->aux->token, CAP_BPF))
1984 		return NULL;
1985 
1986 	switch (func_id) {
1987 	case BPF_FUNC_spin_lock:
1988 		return &bpf_spin_lock_proto;
1989 	case BPF_FUNC_spin_unlock:
1990 		return &bpf_spin_unlock_proto;
1991 	case BPF_FUNC_jiffies64:
1992 		return &bpf_jiffies64_proto;
1993 	case BPF_FUNC_per_cpu_ptr:
1994 		return &bpf_per_cpu_ptr_proto;
1995 	case BPF_FUNC_this_cpu_ptr:
1996 		return &bpf_this_cpu_ptr_proto;
1997 	case BPF_FUNC_timer_init:
1998 		return &bpf_timer_init_proto;
1999 	case BPF_FUNC_timer_set_callback:
2000 		return &bpf_timer_set_callback_proto;
2001 	case BPF_FUNC_timer_start:
2002 		return &bpf_timer_start_proto;
2003 	case BPF_FUNC_timer_cancel:
2004 		return &bpf_timer_cancel_proto;
2005 	case BPF_FUNC_kptr_xchg:
2006 		return &bpf_kptr_xchg_proto;
2007 	case BPF_FUNC_for_each_map_elem:
2008 		return &bpf_for_each_map_elem_proto;
2009 	case BPF_FUNC_loop:
2010 		return &bpf_loop_proto;
2011 	case BPF_FUNC_user_ringbuf_drain:
2012 		return &bpf_user_ringbuf_drain_proto;
2013 	case BPF_FUNC_ringbuf_reserve_dynptr:
2014 		return &bpf_ringbuf_reserve_dynptr_proto;
2015 	case BPF_FUNC_ringbuf_submit_dynptr:
2016 		return &bpf_ringbuf_submit_dynptr_proto;
2017 	case BPF_FUNC_ringbuf_discard_dynptr:
2018 		return &bpf_ringbuf_discard_dynptr_proto;
2019 	case BPF_FUNC_dynptr_from_mem:
2020 		return &bpf_dynptr_from_mem_proto;
2021 	case BPF_FUNC_dynptr_read:
2022 		return &bpf_dynptr_read_proto;
2023 	case BPF_FUNC_dynptr_write:
2024 		return &bpf_dynptr_write_proto;
2025 	case BPF_FUNC_dynptr_data:
2026 		return &bpf_dynptr_data_proto;
2027 #ifdef CONFIG_CGROUPS
2028 	case BPF_FUNC_cgrp_storage_get:
2029 		return &bpf_cgrp_storage_get_proto;
2030 	case BPF_FUNC_cgrp_storage_delete:
2031 		return &bpf_cgrp_storage_delete_proto;
2032 	case BPF_FUNC_get_current_cgroup_id:
2033 		return &bpf_get_current_cgroup_id_proto;
2034 	case BPF_FUNC_get_current_ancestor_cgroup_id:
2035 		return &bpf_get_current_ancestor_cgroup_id_proto;
2036 	case BPF_FUNC_current_task_under_cgroup:
2037 		return &bpf_current_task_under_cgroup_proto;
2038 #endif
2039 #ifdef CONFIG_CGROUP_NET_CLASSID
2040 	case BPF_FUNC_get_cgroup_classid:
2041 		return &bpf_get_cgroup_classid_curr_proto;
2042 #endif
2043 	case BPF_FUNC_task_storage_get:
2044 		if (bpf_prog_check_recur(prog))
2045 			return &bpf_task_storage_get_recur_proto;
2046 		return &bpf_task_storage_get_proto;
2047 	case BPF_FUNC_task_storage_delete:
2048 		if (bpf_prog_check_recur(prog))
2049 			return &bpf_task_storage_delete_recur_proto;
2050 		return &bpf_task_storage_delete_proto;
2051 	default:
2052 		break;
2053 	}
2054 
2055 	if (!bpf_token_capable(prog->aux->token, CAP_PERFMON))
2056 		return NULL;
2057 
2058 	switch (func_id) {
2059 	case BPF_FUNC_trace_printk:
2060 		return bpf_get_trace_printk_proto();
2061 	case BPF_FUNC_get_current_task:
2062 		return &bpf_get_current_task_proto;
2063 	case BPF_FUNC_get_current_task_btf:
2064 		return &bpf_get_current_task_btf_proto;
2065 	case BPF_FUNC_get_current_comm:
2066 		return &bpf_get_current_comm_proto;
2067 	case BPF_FUNC_probe_read_user:
2068 		return &bpf_probe_read_user_proto;
2069 	case BPF_FUNC_probe_read_kernel:
2070 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
2071 		       NULL : &bpf_probe_read_kernel_proto;
2072 	case BPF_FUNC_probe_read_user_str:
2073 		return &bpf_probe_read_user_str_proto;
2074 	case BPF_FUNC_probe_read_kernel_str:
2075 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
2076 		       NULL : &bpf_probe_read_kernel_str_proto;
2077 	case BPF_FUNC_copy_from_user:
2078 		return &bpf_copy_from_user_proto;
2079 	case BPF_FUNC_copy_from_user_task:
2080 		return &bpf_copy_from_user_task_proto;
2081 	case BPF_FUNC_snprintf_btf:
2082 		return &bpf_snprintf_btf_proto;
2083 	case BPF_FUNC_snprintf:
2084 		return &bpf_snprintf_proto;
2085 	case BPF_FUNC_task_pt_regs:
2086 		return &bpf_task_pt_regs_proto;
2087 	case BPF_FUNC_trace_vprintk:
2088 		return bpf_get_trace_vprintk_proto();
2089 	case BPF_FUNC_perf_event_read_value:
2090 		return bpf_get_perf_event_read_value_proto();
2091 	case BPF_FUNC_perf_event_read:
2092 		return &bpf_perf_event_read_proto;
2093 	case BPF_FUNC_send_signal:
2094 		return &bpf_send_signal_proto;
2095 	case BPF_FUNC_send_signal_thread:
2096 		return &bpf_send_signal_thread_proto;
2097 	case BPF_FUNC_get_task_stack:
2098 		return prog->sleepable ? &bpf_get_task_stack_sleepable_proto
2099 				       : &bpf_get_task_stack_proto;
2100 	case BPF_FUNC_get_branch_snapshot:
2101 		return &bpf_get_branch_snapshot_proto;
2102 	case BPF_FUNC_find_vma:
2103 		return &bpf_find_vma_proto;
2104 	default:
2105 		return NULL;
2106 	}
2107 }
2108 EXPORT_SYMBOL_GPL(bpf_base_func_proto);
2109 
2110 void bpf_list_head_free(const struct btf_field *field, void *list_head,
2111 			struct bpf_spin_lock *spin_lock)
2112 {
2113 	struct list_head *head = list_head, *orig_head = list_head;
2114 
2115 	BUILD_BUG_ON(sizeof(struct list_head) > sizeof(struct bpf_list_head));
2116 	BUILD_BUG_ON(__alignof__(struct list_head) > __alignof__(struct bpf_list_head));
2117 
2118 	/* Do the actual list draining outside the lock to avoid holding the
2119 	 * lock for too long, and also to prevent deadlocks: tracing programs
2120 	 * may execute on entry/exit of functions called inside the critical
2121 	 * section and end up doing map ops that call bpf_list_head_free for
2122 	 * the same map value again.
2123 	 */
2124 	__bpf_spin_lock_irqsave(spin_lock);
2125 	if (!head->next || list_empty(head))
2126 		goto unlock;
2127 	head = head->next;
2128 unlock:
2129 	INIT_LIST_HEAD(orig_head);
2130 	__bpf_spin_unlock_irqrestore(spin_lock);
2131 
2132 	while (head != orig_head) {
2133 		void *obj = head;
2134 
2135 		obj -= field->graph_root.node_offset;
2136 		head = head->next;
2137 		/* The contained type can also have resources, including a
2138 		 * bpf_list_head which needs to be freed.
2139 		 */
2140 		__bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
2141 	}
2142 }
2143 
2144 /* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are
2145  * 'rb_node *', so the field name of the rb_node within the containing
2146  * struct is not needed.
2147  *
2148  * Since bpf_rb_tree's node type has a corresponding struct btf_field with
2149  * graph_root.node_offset, it's not necessary to know the field name or
2150  * the type of the node struct.
2151  */
2152 #define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \
2153 	for (pos = rb_first_postorder(root); \
2154 	    pos && ({ n = rb_next_postorder(pos); 1; }); \
2155 	    pos = n)
2156 
2157 void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
2158 		      struct bpf_spin_lock *spin_lock)
2159 {
2160 	struct rb_root_cached orig_root, *root = rb_root;
2161 	struct rb_node *pos, *n;
2162 	void *obj;
2163 
2164 	BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root));
2165 	BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root));
2166 
2167 	__bpf_spin_lock_irqsave(spin_lock);
2168 	orig_root = *root;
2169 	*root = RB_ROOT_CACHED;
2170 	__bpf_spin_unlock_irqrestore(spin_lock);
2171 
2172 	bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) {
2173 		obj = pos;
2174 		obj -= field->graph_root.node_offset;
2175 
2177 		__bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
2178 	}
2179 }
2180 
2181 __bpf_kfunc_start_defs();
2182 
2183 __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
2184 {
2185 	struct btf_struct_meta *meta = meta__ign;
2186 	u64 size = local_type_id__k;
2187 	void *p;
2188 
2189 	p = bpf_mem_alloc(&bpf_global_ma, size);
2190 	if (!p)
2191 		return NULL;
2192 	if (meta)
2193 		bpf_obj_init(meta->record, p);
2194 	return p;
2195 }
2196 
2197 __bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign)
2198 {
2199 	u64 size = local_type_id__k;
2200 
2201 	/* The verifier has ensured that meta__ign must be NULL */
2202 	return bpf_mem_alloc(&bpf_global_percpu_ma, size);
2203 }
2204 
2205 /* Must be called under migrate_disable(), as required by bpf_mem_free */
2206 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu)
2207 {
2208 	struct bpf_mem_alloc *ma;
2209 
2210 	if (rec && rec->refcount_off >= 0 &&
2211 	    !refcount_dec_and_test((refcount_t *)(p + rec->refcount_off))) {
2212 		/* Object is refcounted and refcount_dec didn't result in 0
2213 		 * refcount. Return without freeing the object
2214 		 */
2215 		return;
2216 	}
2217 
2218 	if (rec)
2219 		bpf_obj_free_fields(rec, p);
2220 
2221 	if (percpu)
2222 		ma = &bpf_global_percpu_ma;
2223 	else
2224 		ma = &bpf_global_ma;
2225 	bpf_mem_free_rcu(ma, p);
2226 }
2227 
2228 __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
2229 {
2230 	struct btf_struct_meta *meta = meta__ign;
2231 	void *p = p__alloc;
2232 
2233 	__bpf_obj_drop_impl(p, meta ? meta->record : NULL, false);
2234 }
2235 
2236 __bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign)
2237 {
2238 	/* The verifier has ensured that meta__ign must be NULL */
2239 	bpf_mem_free_rcu(&bpf_global_percpu_ma, p__alloc);
2240 }
2241 
2242 __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign)
2243 {
2244 	struct btf_struct_meta *meta = meta__ign;
2245 	struct bpf_refcount *ref;
2246 
2247 	/* Could just cast directly to refcount_t *, but need some code using
2248 	 * bpf_refcount type so that it is emitted in vmlinux BTF
2249 	 */
2250 	ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off);
2251 	if (!refcount_inc_not_zero((refcount_t *)ref))
2252 		return NULL;
2253 
2254 	/* Verifier strips KF_RET_NULL if input is owned ref, see is_kfunc_ret_null
2255 	 * in verifier.c
2256 	 */
2257 	return (void *)p__refcounted_kptr;
2258 }
2259 
2260 static int __bpf_list_add(struct bpf_list_node_kern *node,
2261 			  struct bpf_list_head *head,
2262 			  bool tail, struct btf_record *rec, u64 off)
2263 {
2264 	struct list_head *n = &node->list_head, *h = (void *)head;
2265 
2266 	/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
2267 	 * called on its fields, so init here
2268 	 */
2269 	if (unlikely(!h->next))
2270 		INIT_LIST_HEAD(h);
2271 
2272 	/* node->owner != NULL implies !list_empty(n), no need to separately
2273 	 * check the latter
2274 	 */
2275 	if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
2276 		/* Only called from BPF prog, no need to migrate_disable */
2277 		__bpf_obj_drop_impl((void *)n - off, rec, false);
2278 		return -EINVAL;
2279 	}
2280 
2281 	tail ? list_add_tail(n, h) : list_add(n, h);
2282 	WRITE_ONCE(node->owner, head);
2283 
2284 	return 0;
2285 }
2286 
2287 __bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head,
2288 					 struct bpf_list_node *node,
2289 					 void *meta__ign, u64 off)
2290 {
2291 	struct bpf_list_node_kern *n = (void *)node;
2292 	struct btf_struct_meta *meta = meta__ign;
2293 
2294 	return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off);
2295 }
2296 
2297 __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
2298 					struct bpf_list_node *node,
2299 					void *meta__ign, u64 off)
2300 {
2301 	struct bpf_list_node_kern *n = (void *)node;
2302 	struct btf_struct_meta *meta = meta__ign;
2303 
2304 	return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off);
2305 }
2306 
2307 static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail)
2308 {
2309 	struct list_head *n, *h = (void *)head;
2310 	struct bpf_list_node_kern *node;
2311 
2312 	/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
2313 	 * called on its fields, so init here
2314 	 */
2315 	if (unlikely(!h->next))
2316 		INIT_LIST_HEAD(h);
2317 	if (list_empty(h))
2318 		return NULL;
2319 
2320 	n = tail ? h->prev : h->next;
2321 	node = container_of(n, struct bpf_list_node_kern, list_head);
2322 	if (WARN_ON_ONCE(READ_ONCE(node->owner) != head))
2323 		return NULL;
2324 
2325 	list_del_init(n);
2326 	WRITE_ONCE(node->owner, NULL);
2327 	return (struct bpf_list_node *)n;
2328 }
2329 
2330 __bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
2331 {
2332 	return __bpf_list_del(head, false);
2333 }
2334 
2335 __bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
2336 {
2337 	return __bpf_list_del(head, true);
2338 }
2339 
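/* A minimal BPF-side usage sketch for the list kfuncs (illustrative only;
 * the bpf_obj_new()/bpf_list_push_back() wrappers are assumed to come from
 * the selftests' bpf_experimental.h, and struct elem, lock and ghead are
 * local to this example):
 *
 *	struct elem { struct bpf_list_node node; u64 val; };
 *
 *	struct elem *e = bpf_obj_new(typeof(*e));
 *	if (!e)
 *		return 0;
 *	e->val = 42;
 *	bpf_spin_lock(&lock);
 *	bpf_list_push_back(&ghead, &e->node);
 *	bpf_spin_unlock(&lock);
 */
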
2340 __bpf_kfunc struct bpf_list_node *bpf_list_front(struct bpf_list_head *head)
2341 {
2342 	struct list_head *h = (struct list_head *)head;
2343 
2344 	if (list_empty(h) || unlikely(!h->next))
2345 		return NULL;
2346 
2347 	return (struct bpf_list_node *)h->next;
2348 }
2349 
2350 __bpf_kfunc struct bpf_list_node *bpf_list_back(struct bpf_list_head *head)
2351 {
2352 	struct list_head *h = (struct list_head *)head;
2353 
2354 	if (list_empty(h) || unlikely(!h->next))
2355 		return NULL;
2356 
2357 	return (struct bpf_list_node *)h->prev;
2358 }
2359 
2360 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
2361 						  struct bpf_rb_node *node)
2362 {
2363 	struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
2364 	struct rb_root_cached *r = (struct rb_root_cached *)root;
2365 	struct rb_node *n = &node_internal->rb_node;
2366 
2367 	/* node_internal->owner != root implies either RB_EMPTY_NODE(n) or
2368 	 * n is owned by some other tree. No need to check RB_EMPTY_NODE(n)
2369 	 */
2370 	if (READ_ONCE(node_internal->owner) != root)
2371 		return NULL;
2372 
2373 	rb_erase_cached(n, r);
2374 	RB_CLEAR_NODE(n);
2375 	WRITE_ONCE(node_internal->owner, NULL);
2376 	return (struct bpf_rb_node *)n;
2377 }
2378 
2379 /* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF
2380  * program
2381  */
2382 static int __bpf_rbtree_add(struct bpf_rb_root *root,
2383 			    struct bpf_rb_node_kern *node,
2384 			    void *less, struct btf_record *rec, u64 off)
2385 {
2386 	struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node;
2387 	struct rb_node *parent = NULL, *n = &node->rb_node;
2388 	bpf_callback_t cb = (bpf_callback_t)less;
2389 	bool leftmost = true;
2390 
2391 	/* node->owner != NULL implies !RB_EMPTY_NODE(n), no need to separately
2392 	 * check the latter
2393 	 */
2394 	if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
2395 		/* Only called from BPF prog, no need to migrate_disable */
2396 		__bpf_obj_drop_impl((void *)n - off, rec, false);
2397 		return -EINVAL;
2398 	}
2399 
2400 	while (*link) {
2401 		parent = *link;
2402 		if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) {
2403 			link = &parent->rb_left;
2404 		} else {
2405 			link = &parent->rb_right;
2406 			leftmost = false;
2407 		}
2408 	}
2409 
2410 	rb_link_node(n, parent, link);
2411 	rb_insert_color_cached(n, (struct rb_root_cached *)root, leftmost);
2412 	WRITE_ONCE(node->owner, root);
2413 	return 0;
2414 }
2415 
2416 __bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
2417 				    bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
2418 				    void *meta__ign, u64 off)
2419 {
2420 	struct btf_struct_meta *meta = meta__ign;
2421 	struct bpf_rb_node_kern *n = (void *)node;
2422 
2423 	return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off);
2424 }
2425 
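/* A sketch of a BPF-program-supplied less() callback as consumed by
 * __bpf_rbtree_add() (illustrative only; struct node_data and its layout
 * are assumptions local to this example):
 *
 *	struct node_data { struct bpf_rb_node node; u32 key; };
 *
 *	static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *	{
 *		struct node_data *na = container_of(a, struct node_data, node);
 *		const struct node_data *nb = container_of(b, struct node_data, node);
 *
 *		return na->key < nb->key;
 *	}
 */
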
2426 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
2427 {
2428 	struct rb_root_cached *r = (struct rb_root_cached *)root;
2429 
2430 	return (struct bpf_rb_node *)rb_first_cached(r);
2431 }
2432 
2433 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_root(struct bpf_rb_root *root)
2434 {
2435 	struct rb_root_cached *r = (struct rb_root_cached *)root;
2436 
2437 	return (struct bpf_rb_node *)r->rb_root.rb_node;
2438 }
2439 
2440 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_left(struct bpf_rb_root *root, struct bpf_rb_node *node)
2441 {
2442 	struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
2443 
2444 	if (READ_ONCE(node_internal->owner) != root)
2445 		return NULL;
2446 
2447 	return (struct bpf_rb_node *)node_internal->rb_node.rb_left;
2448 }
2449 
2450 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_right(struct bpf_rb_root *root, struct bpf_rb_node *node)
2451 {
2452 	struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
2453 
2454 	if (READ_ONCE(node_internal->owner) != root)
2455 		return NULL;
2456 
2457 	return (struct bpf_rb_node *)node_internal->rb_node.rb_right;
2458 }
2459 
2460 /**
2461  * bpf_task_acquire - Acquire a reference to a task. A task acquired by this
2462  * kfunc that is not stored in a map as a kptr must be released by calling
2463  * bpf_task_release().
2464  * @p: The task on which a reference is being acquired.
2465  */
2466 __bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p)
2467 {
2468 	if (refcount_inc_not_zero(&p->rcu_users))
2469 		return p;
2470 	return NULL;
2471 }
2472 
2473 /**
2474  * bpf_task_release - Release the reference acquired on a task.
2475  * @p: The task on which a reference is being released.
2476  */
2477 __bpf_kfunc void bpf_task_release(struct task_struct *p)
2478 {
2479 	put_task_struct_rcu_user(p);
2480 }
2481 
2482 __bpf_kfunc void bpf_task_release_dtor(void *p)
2483 {
2484 	put_task_struct_rcu_user(p);
2485 }
2486 CFI_NOSEAL(bpf_task_release_dtor);
2487 
2488 #ifdef CONFIG_CGROUPS
2489 /**
2490  * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by
2491  * this kfunc that is not stored in a map as a kptr must be released by
2492  * calling bpf_cgroup_release().
2493  * @cgrp: The cgroup on which a reference is being acquired.
2494  */
2495 __bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
2496 {
2497 	return cgroup_tryget(cgrp) ? cgrp : NULL;
2498 }
2499 
2500 /**
2501  * bpf_cgroup_release - Release the reference acquired on a cgroup.
2502  * If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to
2503  * not be freed until the current grace period has ended, even if its refcount
2504  * drops to 0.
2505  * @cgrp: The cgroup on which a reference is being released.
2506  */
2507 __bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
2508 {
2509 	cgroup_put(cgrp);
2510 }
2511 
2512 __bpf_kfunc void bpf_cgroup_release_dtor(void *cgrp)
2513 {
2514 	cgroup_put(cgrp);
2515 }
2516 CFI_NOSEAL(bpf_cgroup_release_dtor);
2517 
2518 /**
2519  * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor
2520  * array. A cgroup returned by this kfunc that is not subsequently stored in
2521  * a map must be released by calling bpf_cgroup_release().
2522  * @cgrp: The cgroup for which we're performing a lookup.
2523  * @level: The level of ancestor to look up.
2524  */
2525 __bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
2526 {
2527 	struct cgroup *ancestor;
2528 
2529 	if (level > cgrp->level || level < 0)
2530 		return NULL;
2531 
2532 	/* cgrp's refcnt could be 0 here, but ancestors can still be accessed */
2533 	ancestor = cgrp->ancestors[level];
2534 	if (!cgroup_tryget(ancestor))
2535 		return NULL;
2536 	return ancestor;
2537 }
2538 
2539 /**
2540  * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this
2541  * kfunc that is not subsequently stored in a map must be released by calling
2542  * bpf_cgroup_release().
2543  * @cgid: cgroup id.
2544  */
2545 __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid)
2546 {
2547 	struct cgroup *cgrp;
2548 
2549 	cgrp = __cgroup_get_from_id(cgid);
2550 	if (IS_ERR(cgrp))
2551 		return NULL;
2552 	return cgrp;
2553 }
2554 
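/* Usage sketch (illustrative): look up a cgroup by ID and drop the acquired
 * reference once done:
 *
 *	struct cgroup *cgrp = bpf_cgroup_from_id(cgid);
 *
 *	if (cgrp) {
 *		// inspect cgrp //
 *		bpf_cgroup_release(cgrp);
 *	}
 */
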
2555 /**
2556  * bpf_task_under_cgroup - Wrap task_under_cgroup_hierarchy() as a kfunc to
2557  * test a task's membership in a cgroup's ancestry.
2558  * @task: the task to be tested
2559  * @ancestor: possible ancestor of @task's cgroup
2560  *
2561  * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
2562  * It follows all the same rules as cgroup_is_descendant, and only applies
2563  * to the default hierarchy.
2564  */
2565 __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
2566 				       struct cgroup *ancestor)
2567 {
2568 	long ret;
2569 
2570 	rcu_read_lock();
2571 	ret = task_under_cgroup_hierarchy(task, ancestor);
2572 	rcu_read_unlock();
2573 	return ret;
2574 }
2575 
2576 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
2577 {
2578 	struct bpf_array *array = container_of(map, struct bpf_array, map);
2579 	struct cgroup *cgrp;
2580 
2581 	if (unlikely(idx >= array->map.max_entries))
2582 		return -E2BIG;
2583 
2584 	cgrp = READ_ONCE(array->ptrs[idx]);
2585 	if (unlikely(!cgrp))
2586 		return -EAGAIN;
2587 
2588 	return task_under_cgroup_hierarchy(current, cgrp);
2589 }
2590 
2591 const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
2592 	.func           = bpf_current_task_under_cgroup,
2593 	.gpl_only       = false,
2594 	.ret_type       = RET_INTEGER,
2595 	.arg1_type      = ARG_CONST_MAP_PTR,
2596 	.arg2_type      = ARG_ANYTHING,
2597 };
2598 
2599 /**
2600  * bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a
2601  * specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its
2602  * hierarchy ID.
2603  * @task: The target task
2604  * @hierarchy_id: The ID of a cgroup1 hierarchy
2605  *
2606  * On success, the cgroup is returned. On failure, NULL is returned.
2607  */
2608 __bpf_kfunc struct cgroup *
2609 bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id)
2610 {
2611 	struct cgroup *cgrp = task_get_cgroup1(task, hierarchy_id);
2612 
2613 	if (IS_ERR(cgrp))
2614 		return NULL;
2615 	return cgrp;
2616 }
2617 #endif /* CONFIG_CGROUPS */
2618 
2619 /**
2620  * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up
2621  * in the root pid namespace idr. If a task is returned, it must either be
2622  * stored in a map, or released with bpf_task_release().
2623  * @pid: The pid of the task being looked up.
2624  */
2625 __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
2626 {
2627 	struct task_struct *p;
2628 
2629 	rcu_read_lock();
2630 	p = find_task_by_pid_ns(pid, &init_pid_ns);
2631 	if (p)
2632 		p = bpf_task_acquire(p);
2633 	rcu_read_unlock();
2634 
2635 	return p;
2636 }
2637 
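/* Usage sketch (illustrative): look up a task by pid and release the
 * acquired reference once done:
 *
 *	struct task_struct *p = bpf_task_from_pid(1);
 *
 *	if (p) {
 *		bpf_printk("comm %s", p->comm);
 *		bpf_task_release(p);
 *	}
 */
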
2638 /**
2639  * bpf_task_from_vpid - Find a struct task_struct from its vpid by looking it up
2640  * in the pid namespace of the current task. If a task is returned, it must
2641  * either be stored in a map, or released with bpf_task_release().
2642  * @vpid: The vpid of the task being looked up.
2643  */
2644 __bpf_kfunc struct task_struct *bpf_task_from_vpid(s32 vpid)
2645 {
2646 	struct task_struct *p;
2647 
2648 	rcu_read_lock();
2649 	p = find_task_by_vpid(vpid);
2650 	if (p)
2651 		p = bpf_task_acquire(p);
2652 	rcu_read_unlock();
2653 
2654 	return p;
2655 }
2656 
2657 /**
2658  * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data.
2659  * @p: The dynptr whose data slice to retrieve
2660  * @offset: Offset into the dynptr
2661  * @buffer__opt: User-provided buffer to copy contents into.  May be NULL
2662  * @buffer__szk: Size (in bytes) of the buffer if present. This is the
2663  *               length of the requested slice. This must be a constant.
2664  *
2665  * For non-skb and non-xdp type dynptrs, there is no difference between
2666  * bpf_dynptr_slice and bpf_dynptr_data.
2667  *
2668  * If buffer__opt is NULL, the call will fail if buffer__opt was needed.
2669  *
2670  * If the intention is to write to the data slice, please use
2671  * bpf_dynptr_slice_rdwr.
2672  *
2673  * The user must check that the returned pointer is not null before using it.
2674  *
2675  * Please note that in the case of skb and xdp dynptrs, bpf_dynptr_slice
2676  * does not change the underlying packet data pointers, so a call to
2677  * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in
2678  * the bpf program.
2679  *
2680  * Return: NULL if the call failed (e.g. invalid dynptr), otherwise a pointer
2681  * to a read-only data slice: either a direct pointer to the data, or a pointer
2682  * to the user-provided buffer holding a copy of the data when a direct pointer
2683  * could not be obtained.
2684  */
2685 __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr *p, u32 offset,
2686 				   void *buffer__opt, u32 buffer__szk)
2687 {
2688 	const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2689 	enum bpf_dynptr_type type;
2690 	u32 len = buffer__szk;
2691 	int err;
2692 
2693 	if (!ptr->data)
2694 		return NULL;
2695 
2696 	err = bpf_dynptr_check_off_len(ptr, offset, len);
2697 	if (err)
2698 		return NULL;
2699 
2700 	type = bpf_dynptr_get_type(ptr);
2701 
2702 	switch (type) {
2703 	case BPF_DYNPTR_TYPE_LOCAL:
2704 	case BPF_DYNPTR_TYPE_RINGBUF:
2705 		return ptr->data + ptr->offset + offset;
2706 	case BPF_DYNPTR_TYPE_SKB:
2707 		if (buffer__opt)
2708 			return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer__opt);
2709 		else
2710 			return skb_pointer_if_linear(ptr->data, ptr->offset + offset, len);
2711 	case BPF_DYNPTR_TYPE_XDP:
2712 	{
2713 		void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len);
2714 		if (!IS_ERR_OR_NULL(xdp_ptr))
2715 			return xdp_ptr;
2716 
2717 		if (!buffer__opt)
2718 			return NULL;
2719 		bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer__opt, len, false);
2720 		return buffer__opt;
2721 	}
2722 	case BPF_DYNPTR_TYPE_SKB_META:
2723 		return bpf_skb_meta_pointer(ptr->data, ptr->offset + offset);
2724 	default:
2725 		WARN_ONCE(true, "unknown dynptr type %d\n", type);
2726 		return NULL;
2727 	}
2728 }
2729 
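/* A typical read-only usage sketch for an xdp dynptr (illustrative only;
 * iph and buf are local to this example):
 *
 *	struct iphdr buf, *iph;
 *
 *	iph = bpf_dynptr_slice(&ptr, 0, &buf, sizeof(buf));
 *	if (!iph)
 *		return XDP_DROP;
 *	// iph points either into the packet or at buf //
 */
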
2730 /**
2731  * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data.
2732  * @p: The dynptr whose data slice to retrieve
2733  * @offset: Offset into the dynptr
2734  * @buffer__opt: User-provided buffer to copy contents into. May be NULL
2735  * @buffer__szk: Size (in bytes) of the buffer if present. This is the
2736  *               length of the requested slice. This must be a constant.
2737  *
2738  * For non-skb and non-xdp type dynptrs, there is no difference between
2739  * bpf_dynptr_slice and bpf_dynptr_data.
2740  *
2741  * If buffer__opt is NULL, the call will fail if buffer__opt was needed.
2742  *
2743  * The returned pointer is writable and may point either directly to the dynptr
2744  * data at the requested offset or to the buffer when a direct data pointer
2745  * cannot be obtained (example: the requested slice is in the paged area of an
2746  * skb packet). In the case where the returned pointer is to the buffer, the
2747  * user is responsible for persisting writes by calling bpf_dynptr_write().
2748  * This usually looks something like this pattern:
2749  *
2750  * struct eth_hdr *eth = bpf_dynptr_slice_rdwr(&dynptr, 0, buffer, sizeof(buffer));
2751  * if (!eth)
2752  *	return TC_ACT_SHOT;
2753  *
2754  * // mutate eth header //
2755  *
2756  * if (eth == buffer)
2757  *	bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0);
2758  *
2759  * Please note that, as in the example above, the user must check that the
2760  * returned pointer is not null before using it.
2761  *
2762  * Please also note that in the case of skb and xdp dynptrs, bpf_dynptr_slice_rdwr
2763  * does not change the underlying packet data pointers, so a call to
2764  * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in
2765  * the bpf program.
2766  *
2767  * Return: NULL if the call failed (e.g. invalid dynptr), otherwise a pointer
2768  * to a data slice: either a direct pointer to the data, or a pointer to the
2769  * user-provided buffer holding a copy of the data when a direct pointer could
2770  * not be obtained.
2771  */
2772 __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset,
2773 					void *buffer__opt, u32 buffer__szk)
2774 {
2775 	const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2776 
2777 	if (!ptr->data || __bpf_dynptr_is_rdonly(ptr))
2778 		return NULL;
2779 
2780 	/* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice.
2781 	 *
2782 	 * For skb-type dynptrs, it is safe to write into the returned pointer
2783 	 * if the bpf program allows skb data writes. There are two possibilities
2784 	 * that may occur when calling bpf_dynptr_slice_rdwr:
2785 	 *
2786 	 * 1) The requested slice is in the head of the skb. In this case, the
2787 	 * returned pointer is directly to skb data, and if the skb is cloned, the
2788 	 * verifier will have uncloned it (see bpf_unclone_prologue()) already.
2789 	 * The pointer can be directly written into.
2790 	 *
2791 	 * 2) Some portion of the requested slice is in the paged buffer area.
2792 	 * In this case, the requested data will be copied out into the buffer
2793 	 * and the returned pointer will be a pointer to the buffer. The skb
2794 	 * will not be pulled. To persist the write, the user will need to call
2795 	 * bpf_dynptr_write(), which will pull the skb and commit the write.
2796 	 *
2797 	 * Similarly for xdp programs, if the requested slice is not across xdp
2798 	 * fragments, then a direct pointer will be returned, otherwise the data
2799 	 * will be copied out into the buffer and the user will need to call
2800 	 * bpf_dynptr_write() to commit changes.
2801 	 */
2802 	return bpf_dynptr_slice(p, offset, buffer__opt, buffer__szk);
2803 }
2804 
2805 __bpf_kfunc int bpf_dynptr_adjust(const struct bpf_dynptr *p, u32 start, u32 end)
2806 {
2807 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2808 	u32 size;
2809 
2810 	if (!ptr->data || start > end)
2811 		return -EINVAL;
2812 
2813 	size = __bpf_dynptr_size(ptr);
2814 
2815 	if (start > size || end > size)
2816 		return -ERANGE;
2817 
2818 	ptr->offset += start;
2819 	bpf_dynptr_set_size(ptr, end - start);
2820 
2821 	return 0;
2822 }
2823 
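/* E.g. for a dynptr currently covering bytes [0, 100), an (illustrative)
 * call like
 *
 *	bpf_dynptr_adjust(&ptr, 4, 8);
 *
 * narrows the view to bytes [4, 8) of the original range: the offset grows
 * by start and the size becomes end - start.
 */
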
2824 __bpf_kfunc bool bpf_dynptr_is_null(const struct bpf_dynptr *p)
2825 {
2826 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2827 
2828 	return !ptr->data;
2829 }
2830 
2831 __bpf_kfunc bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *p)
2832 {
2833 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2834 
2835 	if (!ptr->data)
2836 		return false;
2837 
2838 	return __bpf_dynptr_is_rdonly(ptr);
2839 }
2840 
2841 __bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr *p)
2842 {
2843 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2844 
2845 	if (!ptr->data)
2846 		return -EINVAL;
2847 
2848 	return __bpf_dynptr_size(ptr);
2849 }
2850 
2851 __bpf_kfunc int bpf_dynptr_clone(const struct bpf_dynptr *p,
2852 				 struct bpf_dynptr *clone__uninit)
2853 {
2854 	struct bpf_dynptr_kern *clone = (struct bpf_dynptr_kern *)clone__uninit;
2855 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2856 
2857 	if (!ptr->data) {
2858 		bpf_dynptr_set_null(clone);
2859 		return -EINVAL;
2860 	}
2861 
2862 	*clone = *ptr;
2863 
2864 	return 0;
2865 }
2866 
2867 /**
2868  * bpf_dynptr_copy() - Copy data from one dynptr to another.
2869  * @dst_ptr: Destination dynptr - where data should be copied to
2870  * @dst_off: Offset into the destination dynptr
2871  * @src_ptr: Source dynptr - where data should be copied from
2872  * @src_off: Offset into the source dynptr
2873  * @size: Length of the data to copy from source to destination
2874  *
2875  * Copies data from source dynptr to destination dynptr.
2876  * Returns 0 on success; negative error, otherwise.
2877  */
2878 __bpf_kfunc int bpf_dynptr_copy(struct bpf_dynptr *dst_ptr, u32 dst_off,
2879 				struct bpf_dynptr *src_ptr, u32 src_off, u32 size)
2880 {
2881 	struct bpf_dynptr_kern *dst = (struct bpf_dynptr_kern *)dst_ptr;
2882 	struct bpf_dynptr_kern *src = (struct bpf_dynptr_kern *)src_ptr;
2883 	void *src_slice, *dst_slice;
2884 	char buf[256];
2885 	u32 off;
2886 
2887 	src_slice = bpf_dynptr_slice(src_ptr, src_off, NULL, size);
2888 	dst_slice = bpf_dynptr_slice_rdwr(dst_ptr, dst_off, NULL, size);
2889 
2890 	if (src_slice && dst_slice) {
2891 		memmove(dst_slice, src_slice, size);
2892 		return 0;
2893 	}
2894 
2895 	if (src_slice)
2896 		return __bpf_dynptr_write(dst, dst_off, src_slice, size, 0);
2897 
2898 	if (dst_slice)
2899 		return __bpf_dynptr_read(dst_slice, size, src, src_off, 0);
2900 
2901 	if (bpf_dynptr_check_off_len(dst, dst_off, size) ||
2902 	    bpf_dynptr_check_off_len(src, src_off, size))
2903 		return -E2BIG;
2904 
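	/* Neither side could be sliced directly: bounce the copy through a
	 * small on-stack buffer, one chunk at a time.
	 */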
2905 	off = 0;
2906 	while (off < size) {
2907 		u32 chunk_sz = min_t(u32, sizeof(buf), size - off);
2908 		int err;
2909 
2910 		err = __bpf_dynptr_read(buf, chunk_sz, src, src_off + off, 0);
2911 		if (err)
2912 			return err;
2913 		err = __bpf_dynptr_write(dst, dst_off + off, buf, chunk_sz, 0);
2914 		if (err)
2915 			return err;
2916 
2917 		off += chunk_sz;
2918 	}
2919 	return 0;
2920 }
2921 
2922 /**
2923  * bpf_dynptr_memset() - Fill dynptr memory with a constant byte.
2924  * @p: Destination dynptr - where data will be filled
2925  * @offset: Offset into the dynptr to start filling from
2926  * @size: Number of bytes to fill
2927  * @val: Constant byte to fill the memory with
2928  *
2929  * Fills the @size bytes of the memory area pointed to by @p
2930  * at @offset with the constant byte @val.
2931  * Returns 0 on success; negative error, otherwise.
2932  */
2933 __bpf_kfunc int bpf_dynptr_memset(struct bpf_dynptr *p, u32 offset, u32 size, u8 val)
2934 {
2935 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2936 	u32 chunk_sz, write_off;
2937 	char buf[256];
2938 	void *slice;
2939 	int err;
2940 
2941 	slice = bpf_dynptr_slice_rdwr(p, offset, NULL, size);
2942 	if (likely(slice)) {
2943 		memset(slice, val, size);
2944 		return 0;
2945 	}
2946 
2947 	if (__bpf_dynptr_is_rdonly(ptr))
2948 		return -EINVAL;
2949 
2950 	err = bpf_dynptr_check_off_len(ptr, offset, size);
2951 	if (err)
2952 		return err;
2953 
2954 	/* Non-linear data under the dynptr, write from a local buffer */
2955 	chunk_sz = min_t(u32, sizeof(buf), size);
2956 	memset(buf, val, chunk_sz);
2957 
2958 	for (write_off = 0; write_off < size; write_off += chunk_sz) {
2959 		chunk_sz = min_t(u32, sizeof(buf), size - write_off);
2960 		err = __bpf_dynptr_write(ptr, offset + write_off, buf, chunk_sz, 0);
2961 		if (err)
2962 			return err;
2963 	}
2964 
2965 	return 0;
2966 }
2967 
2968 __bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
2969 {
2970 	return obj;
2971 }
2972 
2973 __bpf_kfunc void *bpf_rdonly_cast(const void *obj__ign, u32 btf_id__k)
2974 {
2975 	return (void *)obj__ign;
2976 }
2977 
2978 __bpf_kfunc void bpf_rcu_read_lock(void)
2979 {
2980 	rcu_read_lock();
2981 }
2982 
2983 __bpf_kfunc void bpf_rcu_read_unlock(void)
2984 {
2985 	rcu_read_unlock();
2986 }
2987 
2988 struct bpf_throw_ctx {
2989 	struct bpf_prog_aux *aux;
2990 	u64 sp;
2991 	u64 bp;
2992 	int cnt;
2993 };
2994 
2995 static bool bpf_stack_walker(void *cookie, u64 ip, u64 sp, u64 bp)
2996 {
2997 	struct bpf_throw_ctx *ctx = cookie;
2998 	struct bpf_prog *prog;
2999 
3000 	/*
3001 	 * The RCU read lock is held to safely traverse the latch tree, but we
3002 	 * don't need its protection when accessing the prog, since it has an
3003 	 * active stack frame on the current stack trace, and won't disappear.
3004 	 */
3005 	rcu_read_lock();
3006 	prog = bpf_prog_ksym_find(ip);
3007 	rcu_read_unlock();
3008 	if (!prog)
3009 		return !ctx->cnt;
3010 	ctx->cnt++;
3011 	if (bpf_is_subprog(prog))
3012 		return true;
3013 	ctx->aux = prog->aux;
3014 	ctx->sp = sp;
3015 	ctx->bp = bp;
3016 	return false;
3017 }
3018 
3019 __bpf_kfunc void bpf_throw(u64 cookie)
3020 {
3021 	struct bpf_throw_ctx ctx = {};
3022 
3023 	arch_bpf_stack_walk(bpf_stack_walker, &ctx);
3024 	WARN_ON_ONCE(!ctx.aux);
3025 	if (ctx.aux)
3026 		WARN_ON_ONCE(!ctx.aux->exception_boundary);
3027 	WARN_ON_ONCE(!ctx.bp);
3028 	WARN_ON_ONCE(!ctx.cnt);
3029 	/* Prevent KASAN false positives for CONFIG_KASAN_STACK by unpoisoning
3030 	 * stack depths deeper than ctx.sp: since we do not return from bpf_throw,
3031 	 * the compiler-generated instrumentation that would do the same is skipped.
3032 	 */
3033 	kasan_unpoison_task_stack_below((void *)(long)ctx.sp);
3034 	ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp, 0, 0);
3035 	WARN(1, "A call to BPF exception callback should never return\n");
3036 }
3037 
3038 __bpf_kfunc int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags)
3039 {
3040 	struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
3041 	struct bpf_map *map = p__map;
3042 
3043 	BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_wq));
3044 	BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_wq));
3045 
3046 	if (flags)
3047 		return -EINVAL;
3048 
3049 	return __bpf_async_init(async, map, flags, BPF_ASYNC_TYPE_WQ);
3050 }
3051 
3052 __bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags)
3053 {
3054 	struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
3055 	struct bpf_work *w;
3056 
3057 	if (in_nmi())
3058 		return -EOPNOTSUPP;
3059 	if (flags)
3060 		return -EINVAL;
3061 	w = READ_ONCE(async->work);
3062 	if (!w || !READ_ONCE(w->cb.prog))
3063 		return -EINVAL;
3064 
3065 	schedule_work(&w->work);
3066 	return 0;
3067 }
3068 
3069 __bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq,
3070 					 int (callback_fn)(void *map, int *key, void *value),
3071 					 unsigned int flags,
3072 					 void *aux__prog)
3073 {
3074 	struct bpf_prog_aux *aux = (struct bpf_prog_aux *)aux__prog;
3075 	struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
3076 
3077 	if (flags)
3078 		return -EINVAL;
3079 
3080 	return __bpf_async_set_callback(async, callback_fn, aux, flags, BPF_ASYNC_TYPE_WQ);
3081 }
3082 
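/* Usage sketch (illustrative): initialize a bpf_wq embedded in a map value,
 * attach a callback and kick it off (the bpf_wq_set_callback() wrapper is
 * assumed to come from the selftests' bpf_experimental.h; wq_map, elem and
 * wq_cb are local to this example):
 *
 *	static int wq_cb(void *map, int *key, void *value)
 *	{
 *		return 0;
 *	}
 *	...
 *	if (bpf_wq_init(&elem->w, &wq_map, 0))
 *		return 0;
 *	if (bpf_wq_set_callback(&elem->w, wq_cb, 0))
 *		return 0;
 *	bpf_wq_start(&elem->w, 0);
 */
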
3083 __bpf_kfunc void bpf_preempt_disable(void)
3084 {
3085 	preempt_disable();
3086 }
3087 
3088 __bpf_kfunc void bpf_preempt_enable(void)
3089 {
3090 	preempt_enable();
3091 }
3092 
3093 struct bpf_iter_bits {
3094 	__u64 __opaque[2];
3095 } __aligned(8);
3096 
3097 #define BITS_ITER_NR_WORDS_MAX 511
3098 
3099 struct bpf_iter_bits_kern {
3100 	union {
3101 		__u64 *bits;
3102 		__u64 bits_copy;
3103 	};
3104 	int nr_bits;
3105 	int bit;
3106 } __aligned(8);
3107 
3108 /* On 64-bit hosts, unsigned long and u64 have the same size, so passing
3109  * a u64 pointer and an unsigned long pointer to find_next_bit() will
3110  * return the same result, as both point to the same 8-byte area.
3111  *
3112  * For 32-bit little-endian hosts, using a u64 pointer or unsigned long
3113  * pointer also makes no difference. This is because the first iterated
3114  * unsigned long is composed of bits 0-31 of the u64 and the second unsigned
3115  * long is composed of bits 32-63 of the u64.
3116  *
3117  * However, for 32-bit big-endian hosts, this is not the case. The first
3118  * iterated unsigned long will be bits 32-63 of the u64, so swap these two
3119  * ulong values within the u64.
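 *
 * E.g. if only bit 0 of the u64 is set, a 32-bit big-endian walk without the
 * swap would report it as bit 32, since the word holding bits 0-31 sits at
 * the higher address.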
3120  */
3121 static void swap_ulong_in_u64(u64 *bits, unsigned int nr)
3122 {
3123 #if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
3124 	unsigned int i;
3125 
3126 	for (i = 0; i < nr; i++)
3127 		bits[i] = (bits[i] >> 32) | ((u64)(u32)bits[i] << 32);
3128 #endif
3129 }
3130 
3131 /**
3132  * bpf_iter_bits_new() - Initialize a new bits iterator for a given memory area
3133  * @it: The new bpf_iter_bits to be created
3134  * @unsafe_ptr__ign: A pointer pointing to a memory area to be iterated over
3135  * @nr_words: The size of the specified memory area, measured in 8-byte units.
3136  * The maximum value of @nr_words is @BITS_ITER_NR_WORDS_MAX. This limit may be
3137  * further reduced by the BPF memory allocator implementation.
3138  *
3139  * This function initializes a new bpf_iter_bits structure for iterating over
3140  * a memory area which is specified by the @unsafe_ptr__ign and @nr_words. It
3141  * copies the data of the memory area to the newly created bpf_iter_bits @it for
3142  * subsequent iteration operations.
3143  *
3144  * On success, 0 is returned. On failure, ERR is returned.
3145  */
3146 __bpf_kfunc int
3147 bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words)
3148 {
3149 	struct bpf_iter_bits_kern *kit = (void *)it;
3150 	u32 nr_bytes = nr_words * sizeof(u64);
3151 	u32 nr_bits = BYTES_TO_BITS(nr_bytes);
3152 	int err;
3153 
3154 	BUILD_BUG_ON(sizeof(struct bpf_iter_bits_kern) != sizeof(struct bpf_iter_bits));
3155 	BUILD_BUG_ON(__alignof__(struct bpf_iter_bits_kern) !=
3156 		     __alignof__(struct bpf_iter_bits));
3157 
3158 	kit->nr_bits = 0;
3159 	kit->bits_copy = 0;
3160 	kit->bit = -1;
3161 
3162 	if (!unsafe_ptr__ign || !nr_words)
3163 		return -EINVAL;
3164 	if (nr_words > BITS_ITER_NR_WORDS_MAX)
3165 		return -E2BIG;
3166 
3167 	/* Optimization for u64 mask */
3168 	if (nr_bits == 64) {
3169 		err = bpf_probe_read_kernel_common(&kit->bits_copy, nr_bytes, unsafe_ptr__ign);
3170 		if (err)
3171 			return -EFAULT;
3172 
3173 		swap_ulong_in_u64(&kit->bits_copy, nr_words);
3174 
3175 		kit->nr_bits = nr_bits;
3176 		return 0;
3177 	}
3178 
3179 	if (bpf_mem_alloc_check_size(false, nr_bytes))
3180 		return -E2BIG;
3181 
3182 	/* Fallback to memalloc */
3183 	kit->bits = bpf_mem_alloc(&bpf_global_ma, nr_bytes);
3184 	if (!kit->bits)
3185 		return -ENOMEM;
3186 
3187 	err = bpf_probe_read_kernel_common(kit->bits, nr_bytes, unsafe_ptr__ign);
3188 	if (err) {
3189 		bpf_mem_free(&bpf_global_ma, kit->bits);
3190 		return err;
3191 	}
3192 
3193 	swap_ulong_in_u64(kit->bits, nr_words);
3194 
3195 	kit->nr_bits = nr_bits;
3196 	return 0;
3197 }
3198 
3199 /**
3200  * bpf_iter_bits_next() - Get the next bit in a bpf_iter_bits
3201  * @it: The bpf_iter_bits to be checked
3202  *
3203  * This function returns a pointer to a number holding the index of the next
3204  * set bit in the bit area.
3205  *
3206  * If there are no further set bits available, it returns NULL.
3207  */
3208 __bpf_kfunc int *bpf_iter_bits_next(struct bpf_iter_bits *it)
3209 {
3210 	struct bpf_iter_bits_kern *kit = (void *)it;
3211 	int bit = kit->bit, nr_bits = kit->nr_bits;
3212 	const void *bits;
3213 
3214 	if (!nr_bits || bit >= nr_bits)
3215 		return NULL;
3216 
3217 	bits = nr_bits == 64 ? &kit->bits_copy : kit->bits;
3218 	bit = find_next_bit(bits, nr_bits, bit + 1);
3219 	if (bit >= nr_bits) {
3220 		kit->bit = bit;
3221 		return NULL;
3222 	}
3223 
3224 	kit->bit = bit;
3225 	return &kit->bit;
3226 }
3227 
3228 /**
3229  * bpf_iter_bits_destroy() - Destroy a bpf_iter_bits
3230  * @it: The bpf_iter_bits to be destroyed
3231  *
3232  * Destroy the resource associated with the bpf_iter_bits.
3233  */
3234 __bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it)
3235 {
3236 	struct bpf_iter_bits_kern *kit = (void *)it;
3237 
3238 	if (kit->nr_bits <= 64)
3239 		return;
3240 	bpf_mem_free(&bpf_global_ma, kit->bits);
3241 }
3242 
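/* Usage sketch (illustrative): walk the set bits of a u64 array with the
 * open-coded iterator (bpf_for_each() as provided by libbpf's
 * bpf_helpers.h; words and nr are local to this example):
 *
 *	int *bit;
 *
 *	bpf_for_each(bits, bit, words, nr) {
 *		// *bit is the index of the next set bit //
 *	}
 */
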
3243 /**
3244  * bpf_copy_from_user_str() - Copy a string from an unsafe user address
3245  * @dst:             Destination address, in kernel space.  This buffer must be
3246  *                   at least @dst__sz bytes long.
3247  * @dst__sz:         Maximum number of bytes to copy, includes the trailing NUL.
3248  * @unsafe_ptr__ign: Source address, in user space.
3249  * @flags:           The only supported flag is BPF_F_PAD_ZEROS
3250  *
3251  * Copies a NUL-terminated string from userspace to BPF space. If the user
3252  * string is too long, this will still ensure zero termination in the @dst
3253  * buffer unless the buffer size is 0.
3254  *
3255  * If BPF_F_PAD_ZEROS flag is set, memset the tail of @dst to 0 on success and
3256  * memset all of @dst on failure.
3257  */
3258 __bpf_kfunc int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void __user *unsafe_ptr__ign, u64 flags)
3259 {
3260 	int ret;
3261 
3262 	if (unlikely(flags & ~BPF_F_PAD_ZEROS))
3263 		return -EINVAL;
3264 
3265 	if (unlikely(!dst__sz))
3266 		return 0;
3267 
3268 	ret = strncpy_from_user(dst, unsafe_ptr__ign, dst__sz - 1);
3269 	if (ret < 0) {
3270 		if (flags & BPF_F_PAD_ZEROS)
3271 			memset((char *)dst, 0, dst__sz);
3272 
3273 		return ret;
3274 	}
3275 
3276 	if (flags & BPF_F_PAD_ZEROS)
3277 		memset((char *)dst + ret, 0, dst__sz - ret);
3278 	else
3279 		((char *)dst)[ret] = '\0';
3280 
3281 	return ret + 1;
3282 }
3283 
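/* Usage sketch (illustrative): read a user-supplied string into a fixed
 * buffer, zero-padding the tail:
 *
 *	char name[64];
 *	int n;
 *
 *	n = bpf_copy_from_user_str(name, sizeof(name), user_ptr, BPF_F_PAD_ZEROS);
 *	if (n < 0)
 *		return 0;
 */
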
3284 /**
3285  * bpf_copy_from_user_task_str() - Copy a string from a task's address space
3286  * @dst:             Destination address, in kernel space.  This buffer must be
3287  *                   at least @dst__sz bytes long.
3288  * @dst__sz:         Maximum number of bytes to copy, includes the trailing NUL.
3289  * @unsafe_ptr__ign: Source address in the task's address space.
3290  * @tsk:             The task whose address space will be used
3291  * @flags:           The only supported flag is BPF_F_PAD_ZEROS
3292  *
3293  * Copies a NUL-terminated string from a task's address space to the @dst
3294  * buffer. If the string is too long, this will still ensure zero termination
3295  * in the @dst buffer unless the buffer size is 0.
3296  *
3297  * If BPF_F_PAD_ZEROS flag is set, memset the tail of @dst to 0 on success
3298  * and memset all of @dst on failure.
3299  *
3300  * Return: The number of copied bytes on success including the NUL terminator.
3301  * A negative error code on failure.
3302  */
3303 __bpf_kfunc int bpf_copy_from_user_task_str(void *dst, u32 dst__sz,
3304 					    const void __user *unsafe_ptr__ign,
3305 					    struct task_struct *tsk, u64 flags)
3306 {
3307 	int ret;
3308 
3309 	if (unlikely(flags & ~BPF_F_PAD_ZEROS))
3310 		return -EINVAL;
3311 
3312 	if (unlikely(dst__sz == 0))
3313 		return 0;
3314 
3315 	ret = copy_remote_vm_str(tsk, (unsigned long)unsafe_ptr__ign, dst, dst__sz, 0);
3316 	if (ret < 0) {
3317 		if (flags & BPF_F_PAD_ZEROS)
3318 			memset(dst, 0, dst__sz);
3319 		return ret;
3320 	}
3321 
3322 	if (flags & BPF_F_PAD_ZEROS)
3323 		memset(dst + ret, 0, dst__sz - ret);
3324 
3325 	return ret + 1;
3326 }
3327 
3328 /* Keep unsigned long in the prototype so that the kfunc is usable when
3329  * emitted to vmlinux.h in BPF programs directly, but note that while in a BPF
3330  * prog the unsigned long always points to an 8-byte region on the stack, the
3331  * kernel may only read and write the 4 bytes on 32-bit.
3332  */
3333 __bpf_kfunc void bpf_local_irq_save(unsigned long *flags__irq_flag)
3334 {
3335 	local_irq_save(*flags__irq_flag);
3336 }
3337 
3338 __bpf_kfunc void bpf_local_irq_restore(unsigned long *flags__irq_flag)
3339 {
3340 	local_irq_restore(*flags__irq_flag);
3341 }
3342 
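/* Pairing sketch (illustrative): flags must live on the BPF program's stack
 * and be passed to both calls:
 *
 *	unsigned long flags;
 *
 *	bpf_local_irq_save(&flags);
 *	// IRQ-disabled section //
 *	bpf_local_irq_restore(&flags);
 */
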
3343 __bpf_kfunc void __bpf_trap(void)
3344 {
3345 }
3346 
3347 /*
3348  * Kfuncs for string operations.
3349  *
3350  * Since strings are not necessarily %NUL-terminated, we cannot directly call
3351  * in-kernel implementations. Instead, we open-code the implementations using
3352  * __get_kernel_nofault instead of plain dereference to make them safe.
3353  */
3354 
3355 static int __bpf_strcasecmp(const char *s1, const char *s2, bool ignore_case)
3356 {
3357 	char c1, c2;
3358 	int i;
3359 
3360 	if (!copy_from_kernel_nofault_allowed(s1, 1) ||
3361 	    !copy_from_kernel_nofault_allowed(s2, 1)) {
3362 		return -ERANGE;
3363 	}
3364 
3365 	guard(pagefault)();
3366 	for (i = 0; i < XATTR_SIZE_MAX; i++) {
3367 		__get_kernel_nofault(&c1, s1, char, err_out);
3368 		__get_kernel_nofault(&c2, s2, char, err_out);
3369 		if (ignore_case) {
3370 			c1 = tolower(c1);
3371 			c2 = tolower(c2);
3372 		}
3373 		if (c1 != c2)
3374 			return c1 < c2 ? -1 : 1;
3375 		if (c1 == '\0')
3376 			return 0;
3377 		s1++;
3378 		s2++;
3379 	}
3380 	return -E2BIG;
3381 err_out:
3382 	return -EFAULT;
3383 }
3384 
3385 /**
3386  * bpf_strcmp - Compare two strings
3387  * @s1__ign: One string
3388  * @s2__ign: Another string
3389  *
3390  * Return:
3391  * * %0       - Strings are equal
3392  * * %-1      - @s1__ign is smaller
3393  * * %1       - @s2__ign is smaller
3394  * * %-EFAULT - Cannot read one of the strings
3395  * * %-E2BIG  - One of the strings is too large
3396  * * %-ERANGE - One of the strings is outside of kernel address space
3397  */
3398 __bpf_kfunc int bpf_strcmp(const char *s1__ign, const char *s2__ign)
3399 {
3400 	return __bpf_strcasecmp(s1__ign, s2__ign, false);
3401 }
3402 
3403 /**
3404  * bpf_strcasecmp - Compare two strings, ignoring the case of the characters
3405  * @s1__ign: One string
3406  * @s2__ign: Another string
3407  *
3408  * Return:
3409  * * %0       - Strings are equal
3410  * * %-1      - @s1__ign is smaller
3411  * * %1       - @s2__ign is smaller
3412  * * %-EFAULT - Cannot read one of the strings
3413  * * %-E2BIG  - One of the strings is too large
3414  * * %-ERANGE - One of the strings is outside of kernel address space
3415  */
3416 __bpf_kfunc int bpf_strcasecmp(const char *s1__ign, const char *s2__ign)
3417 {
3418 	return __bpf_strcasecmp(s1__ign, s2__ign, true);
3419 }
3420 
3421 /**
3422  * bpf_strnchr - Find a character in a length limited string
3423  * @s__ign: The string to be searched
3424  * @count: The number of characters to be searched
3425  * @c: The character to search for
3426  *
3427  * Note that the %NUL-terminator is considered part of the string, and can
3428  * be searched for.
3429  *
3430  * Return:
3431  * * >=0      - Index of the first occurrence of @c within @s__ign
3432  * * %-ENOENT - @c not found in the first @count characters of @s__ign
3433  * * %-EFAULT - Cannot read @s__ign
3434  * * %-E2BIG  - @s__ign is too large
3435  * * %-ERANGE - @s__ign is outside of kernel address space
3436  */
3437 __bpf_kfunc int bpf_strnchr(const char *s__ign, size_t count, char c)
3438 {
3439 	char sc;
3440 	int i;
3441 
3442 	if (!copy_from_kernel_nofault_allowed(s__ign, 1))
3443 		return -ERANGE;
3444 
3445 	guard(pagefault)();
3446 	for (i = 0; i < count && i < XATTR_SIZE_MAX; i++) {
3447 		__get_kernel_nofault(&sc, s__ign, char, err_out);
3448 		if (sc == c)
3449 			return i;
3450 		if (sc == '\0')
3451 			return -ENOENT;
3452 		s__ign++;
3453 	}
3454 	return i == XATTR_SIZE_MAX ? -E2BIG : -ENOENT;
3455 err_out:
3456 	return -EFAULT;
3457 }
3458 
3459 /**
3460  * bpf_strchr - Find the first occurrence of a character in a string
3461  * @s__ign: The string to be searched
3462  * @c: The character to search for
3463  *
3464  * Note that the %NUL-terminator is considered part of the string, and can
3465  * be searched for.
3466  *
3467  * Return:
3468  * * >=0      - The index of the first occurrence of @c within @s__ign
3469  * * %-ENOENT - @c not found in @s__ign
3470  * * %-EFAULT - Cannot read @s__ign
3471  * * %-E2BIG  - @s__ign is too large
3472  * * %-ERANGE - @s__ign is outside of kernel address space
3473  */
3474 __bpf_kfunc int bpf_strchr(const char *s__ign, char c)
3475 {
3476 	return bpf_strnchr(s__ign, XATTR_SIZE_MAX, c);
3477 }
3478 
3479 /**
3480  * bpf_strchrnul - Find and return a character in a string, or end of string
3481  * @s__ign: The string to be searched
3482  * @c: The character to search for
3483  *
3484  * Return:
3485  * * >=0      - Index of the first occurrence of @c within @s__ign or index of
3486  *              the null byte at the end of @s__ign when @c is not found
3487  * * %-EFAULT - Cannot read @s__ign
3488  * * %-E2BIG  - @s__ign is too large
3489  * * %-ERANGE - @s__ign is outside of kernel address space
3490  */
3491 __bpf_kfunc int bpf_strchrnul(const char *s__ign, char c)
3492 {
3493 	char sc;
3494 	int i;
3495 
3496 	if (!copy_from_kernel_nofault_allowed(s__ign, 1))
3497 		return -ERANGE;
3498 
3499 	guard(pagefault)();
3500 	for (i = 0; i < XATTR_SIZE_MAX; i++) {
3501 		__get_kernel_nofault(&sc, s__ign, char, err_out);
3502 		if (sc == '\0' || sc == c)
3503 			return i;
3504 		s__ign++;
3505 	}
3506 	return -E2BIG;
3507 err_out:
3508 	return -EFAULT;
3509 }
3510 
3511 /**
3512  * bpf_strrchr - Find the last occurrence of a character in a string
3513  * @s__ign: The string to be searched
3514  * @c: The character to search for
3515  *
3516  * Return:
3517  * * >=0      - Index of the last occurrence of @c within @s__ign
3518  * * %-ENOENT - @c not found in @s__ign
3519  * * %-EFAULT - Cannot read @s__ign
3520  * * %-E2BIG  - @s__ign is too large
3521  * * %-ERANGE - @s__ign is outside of kernel address space
3522  */
3523 __bpf_kfunc int bpf_strrchr(const char *s__ign, int c)
3524 {
3525 	char sc;
3526 	int i, last = -ENOENT;
3527 
3528 	if (!copy_from_kernel_nofault_allowed(s__ign, 1))
3529 		return -ERANGE;
3530 
3531 	guard(pagefault)();
3532 	for (i = 0; i < XATTR_SIZE_MAX; i++) {
3533 		__get_kernel_nofault(&sc, s__ign, char, err_out);
3534 		if (sc == c)
3535 			last = i;
3536 		if (sc == '\0')
3537 			return last;
3538 		s__ign++;
3539 	}
3540 	return -E2BIG;
3541 err_out:
3542 	return -EFAULT;
3543 }
3544 
3545 /**
3546  * bpf_strnlen - Calculate the length of a length-limited string
3547  * @s__ign: The string
3548  * @count: The maximum number of characters to count
3549  *
3550  * Return:
3551  * * >=0      - The length of @s__ign
3552  * * %-EFAULT - Cannot read @s__ign
3553  * * %-E2BIG  - @s__ign is too large
3554  * * %-ERANGE - @s__ign is outside of kernel address space
3555  */
3556 __bpf_kfunc int bpf_strnlen(const char *s__ign, size_t count)
3557 {
3558 	char c;
3559 	int i;
3560 
3561 	if (!copy_from_kernel_nofault_allowed(s__ign, 1))
3562 		return -ERANGE;
3563 
3564 	guard(pagefault)();
3565 	for (i = 0; i < count && i < XATTR_SIZE_MAX; i++) {
3566 		__get_kernel_nofault(&c, s__ign, char, err_out);
3567 		if (c == '\0')
3568 			return i;
3569 		s__ign++;
3570 	}
3571 	return i == XATTR_SIZE_MAX ? -E2BIG : i;
3572 err_out:
3573 	return -EFAULT;
3574 }
3575 
3576 /**
3577  * bpf_strlen - Calculate the length of a string
3578  * @s__ign: The string
3579  *
3580  * Return:
3581  * * >=0      - The length of @s__ign
3582  * * %-EFAULT - Cannot read @s__ign
3583  * * %-E2BIG  - @s__ign is too large
3584  * * %-ERANGE - @s__ign is outside of kernel address space
3585  */
3586 __bpf_kfunc int bpf_strlen(const char *s__ign)
3587 {
3588 	return bpf_strnlen(s__ign, XATTR_SIZE_MAX);
3589 }
3590 
3591 /**
3592  * bpf_strspn - Calculate the length of the initial substring of @s__ign which
3593  *              only contains characters in @accept__ign
3594  * @s__ign: The string to be searched
3595  * @accept__ign: The string holding the set of characters to match
3596  *
3597  * Return:
3598  * * >=0      - The length of the initial substring of @s__ign which only
3599  *              contains characters from @accept__ign
3600  * * %-EFAULT - Cannot read one of the strings
3601  * * %-E2BIG  - One of the strings is too large
3602  * * %-ERANGE - One of the strings is outside of kernel address space
3603  */
3604 __bpf_kfunc int bpf_strspn(const char *s__ign, const char *accept__ign)
3605 {
3606 	char cs, ca;
3607 	int i, j;
3608 
3609 	if (!copy_from_kernel_nofault_allowed(s__ign, 1) ||
3610 	    !copy_from_kernel_nofault_allowed(accept__ign, 1)) {
3611 		return -ERANGE;
3612 	}
3613 
3614 	guard(pagefault)();
3615 	for (i = 0; i < XATTR_SIZE_MAX; i++) {
3616 		__get_kernel_nofault(&cs, s__ign, char, err_out);
3617 		if (cs == '\0')
3618 			return i;
3619 		for (j = 0; j < XATTR_SIZE_MAX; j++) {
3620 			__get_kernel_nofault(&ca, accept__ign + j, char, err_out);
3621 			if (cs == ca || ca == '\0')
3622 				break;
3623 		}
3624 		if (j == XATTR_SIZE_MAX)
3625 			return -E2BIG;
3626 		if (ca == '\0')
3627 			return i;
3628 		s__ign++;
3629 	}
3630 	return -E2BIG;
3631 err_out:
3632 	return -EFAULT;
3633 }
3634 
3635 /**
3636  * bpf_strcspn - Calculate the length of the initial substring of @s__ign which
3637  *               does not contain characters in @reject__ign
3638  * @s__ign: The string to be searched
3639  * @reject__ign: The string holding the set of characters to reject
3640  *
3641  * Return:
3642  * * >=0      - The length of the initial substring of @s__ign which does not
3643  *              contain characters from @reject__ign
3644  * * %-EFAULT - Cannot read one of the strings
3645  * * %-E2BIG  - One of the strings is too large
3646  * * %-ERANGE - One of the strings is outside of kernel address space
3647  */
3648 __bpf_kfunc int bpf_strcspn(const char *s__ign, const char *reject__ign)
3649 {
3650 	char cs, cr;
3651 	int i, j;
3652 
3653 	if (!copy_from_kernel_nofault_allowed(s__ign, 1) ||
3654 	    !copy_from_kernel_nofault_allowed(reject__ign, 1)) {
3655 		return -ERANGE;
3656 	}
3657 
3658 	guard(pagefault)();
3659 	for (i = 0; i < XATTR_SIZE_MAX; i++) {
3660 		__get_kernel_nofault(&cs, s__ign, char, err_out);
3661 		if (cs == '\0')
3662 			return i;
3663 		for (j = 0; j < XATTR_SIZE_MAX; j++) {
3664 			__get_kernel_nofault(&cr, reject__ign + j, char, err_out);
3665 			if (cs == cr || cr == '\0')
3666 				break;
3667 		}
3668 		if (j == XATTR_SIZE_MAX)
3669 			return -E2BIG;
3670 		if (cr != '\0')
3671 			return i;
3672 		s__ign++;
3673 	}
3674 	return -E2BIG;
3675 err_out:
3676 	return -EFAULT;
3677 }
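
/*
 * Illustrative sketch, not part of the kernel sources: bpf_strspn() counts
 * the leading run of characters drawn from the accept set, while
 * bpf_strcspn() counts the leading run of characters outside the reject
 * set. The literals are hypothetical; results follow the code above.
 *
 *	bpf_strspn("42abc", "0123456789");	// 2: "42" is the digit prefix
 *	bpf_strspn("abc", "0123456789");	// 0: 'a' is not a digit
 *	bpf_strcspn("42abc", "abc");		// 2: stops at 'a'
 *	bpf_strcspn("42", "abc");		// 2: whole string, no reject char
 */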
3678 
3679 /**
3680  * bpf_strnstr - Find the first substring in a length-limited string
3681  * @s1__ign: The string to be searched
3682  * @s2__ign: The string to search for
3683  * @len: The maximum number of characters to search
3684  *
3685  * Return:
3686  * * >=0      - Index of the first character of the first occurrence of @s2__ign
3687  *              within the first @len characters of @s1__ign
3688  * * %-ENOENT - @s2__ign not found in the first @len characters of @s1__ign
3689  * * %-EFAULT - Cannot read one of the strings
3690  * * %-E2BIG  - One of the strings is too large
3691  * * %-ERANGE - One of the strings is outside of kernel address space
3692  */
3693 __bpf_kfunc int bpf_strnstr(const char *s1__ign, const char *s2__ign, size_t len)
3694 {
3695 	char c1, c2;
3696 	int i, j;
3697 
3698 	if (!copy_from_kernel_nofault_allowed(s1__ign, 1) ||
3699 	    !copy_from_kernel_nofault_allowed(s2__ign, 1)) {
3700 		return -ERANGE;
3701 	}
3702 
3703 	guard(pagefault)();
3704 	for (i = 0; i < XATTR_SIZE_MAX; i++) {
3705 		for (j = 0; i + j <= len && j < XATTR_SIZE_MAX; j++) {
3706 			__get_kernel_nofault(&c2, s2__ign + j, char, err_out);
3707 			if (c2 == '\0')
3708 				return i;
3709 			/*
3710 			 * We allow reading an extra byte from s2 (note the
3711 			 * `i + j <= len` above) to cover the case when s2 is
3712 			 * a suffix of the first len chars of s1.
3713 			 */
3714 			if (i + j == len)
3715 				break;
3716 			__get_kernel_nofault(&c1, s1__ign + j, char, err_out);
3717 			if (c1 == '\0')
3718 				return -ENOENT;
3719 			if (c1 != c2)
3720 				break;
3721 		}
3722 		if (j == XATTR_SIZE_MAX)
3723 			return -E2BIG;
3724 		if (i + j == len)
3725 			return -ENOENT;
3726 		s1__ign++;
3727 	}
3728 	return -E2BIG;
3729 err_out:
3730 	return -EFAULT;
3731 }
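
/*
 * Illustrative sketch, not part of the kernel sources: the extra-byte read
 * noted above means a needle that ends exactly at the @len boundary still
 * matches. The literals are hypothetical; results follow the code above.
 *
 *	bpf_strnstr("openat", "at", 6);	// 4: "at" is a suffix of "openat"
 *	bpf_strnstr("openat", "at", 5);	// -ENOENT: "at" not within "opena"
 *	bpf_strnstr("openat", "", 0);	// 0: the empty needle matches at 0
 */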
3732 
3733 /**
3734  * bpf_strstr - Find the first substring in a string
3735  * @s1__ign: The string to be searched
3736  * @s2__ign: The string to search for
3737  *
3738  * Return:
3739  * * >=0      - Index of the first character of the first occurrence of @s2__ign
3740  *              within @s1__ign
3741  * * %-ENOENT - @s2__ign is not a substring of @s1__ign
3742  * * %-EFAULT - Cannot read one of the strings
3743  * * %-E2BIG  - One of the strings is too large
3744  * * %-ERANGE - One of the strings is outside of kernel address space
3745  */
3746 __bpf_kfunc int bpf_strstr(const char *s1__ign, const char *s2__ign)
3747 {
3748 	return bpf_strnstr(s1__ign, s2__ign, XATTR_SIZE_MAX);
3749 }

3750 #ifdef CONFIG_KEYS
3751 /**
3752  * bpf_lookup_user_key - lookup a key by its serial
3753  * @serial: key handle serial number
3754  * @flags: lookup-specific flags
3755  *
3756  * Search for a key with the given *serial*, using the provided *flags*.
3757  * If found, increment the reference count of the key by one, and
3758  * return it in the bpf_key structure.
3759  *
3760  * The bpf_key structure must be passed to bpf_key_put() when done
3761  * with it, so that the key reference count is decremented and the
3762  * bpf_key structure is freed.
3763  *
3764  * Permission checks are deferred to the time the key is used by
3765  * one of the available key-specific kfuncs.
3766  *
3767  * Set KEY_LOOKUP_CREATE in *flags* to attempt creating a requested
3768  * special keyring (e.g. session keyring), if it doesn't yet exist.
3769  * Set KEY_LOOKUP_PARTIAL in *flags* to look up a key without waiting
3770  * for key construction to complete, and to retrieve uninstantiated
3771  * keys (keys without data attached to them).
3772  *
3773  * Return: a bpf_key pointer with a valid key pointer if the key is found, a
3774  *         NULL pointer otherwise.
3775  */
3776 __bpf_kfunc struct bpf_key *bpf_lookup_user_key(s32 serial, u64 flags)
3777 {
3778 	key_ref_t key_ref;
3779 	struct bpf_key *bkey;
3780 
3781 	if (flags & ~KEY_LOOKUP_ALL)
3782 		return NULL;
3783 
3784 	/*
3785 	 * Permission check is deferred until the key is used, as the
3786 	 * intent of the caller is unknown here.
3787 	 */
3788 	key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
3789 	if (IS_ERR(key_ref))
3790 		return NULL;
3791 
3792 	bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
3793 	if (!bkey) {
3794 		key_put(key_ref_to_ptr(key_ref));
3795 		return NULL;
3796 	}
3797 
3798 	bkey->key = key_ref_to_ptr(key_ref);
3799 	bkey->has_ref = true;
3800 
3801 	return bkey;
3802 }
3803 
3804 /**
3805  * bpf_lookup_system_key - lookup a key by a system-defined ID
3806  * @id: key ID
3807  *
3808  * Obtain a bpf_key structure with a key pointer set to the passed key ID.
3809  * The key pointer is marked as invalid, to prevent bpf_key_put() from
3810  * attempting to decrement the key reference count on that pointer. The key
3811  * pointer set in such a way is currently understood only by
3812  * verify_pkcs7_signature().
3813  *
3814  * Set *id* to one of the values defined in include/linux/verification.h:
3815  * 0 for the primary keyring (immutable keyring of system keys);
3816  * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
3817  * (where keys can be added only if they are vouched for by existing keys
3818  * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
3819  * keyring (primarily used by the integrity subsystem to verify a kexec'ed
3820  * kernel image and, possibly, the initramfs signature).
3821  *
3822  * Return: a bpf_key pointer with an invalid key pointer set from the
3823  *         pre-determined ID on success, a NULL pointer otherwise
3824  */
3825 __bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
3826 {
3827 	struct bpf_key *bkey;
3828 
3829 	if (system_keyring_id_check(id) < 0)
3830 		return NULL;
3831 
3832 	bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
3833 	if (!bkey)
3834 		return NULL;
3835 
3836 	bkey->key = (struct key *)(unsigned long)id;
3837 	bkey->has_ref = false;
3838 
3839 	return bkey;
3840 }
3841 
3842 /**
3843  * bpf_key_put - decrement key reference count if key is valid and free bpf_key
3844  * @bkey: bpf_key structure
3845  *
3846  * Decrement the reference count of the key inside *bkey*, if the pointer
3847  * is valid, and free *bkey*.
3848  */
3849 __bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
3850 {
3851 	if (bkey->has_ref)
3852 		key_put(bkey->key);
3853 
3854 	kfree(bkey);
3855 }
3856 
3857 /**
3858  * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
3859  * @data_p: data to verify
3860  * @sig_p: signature of the data
3861  * @trusted_keyring: keyring with keys trusted for signature verification
3862  *
3863  * Verify the PKCS#7 signature *sig_p* against the supplied *data_p*
3864  * with keys in a keyring referenced by *trusted_keyring*.
3865  *
3866  * Return: 0 on success, a negative value on error.
3867  */
3868 __bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
3869 			       struct bpf_dynptr *sig_p,
3870 			       struct bpf_key *trusted_keyring)
3871 {
3872 #ifdef CONFIG_SYSTEM_DATA_VERIFICATION
3873 	struct bpf_dynptr_kern *data_ptr = (struct bpf_dynptr_kern *)data_p;
3874 	struct bpf_dynptr_kern *sig_ptr = (struct bpf_dynptr_kern *)sig_p;
3875 	const void *data, *sig;
3876 	u32 data_len, sig_len;
3877 	int ret;
3878 
3879 	if (trusted_keyring->has_ref) {
3880 		/*
3881 		 * Do the permission check deferred in bpf_lookup_user_key().
3882 		 * See bpf_lookup_user_key() for more details.
3883 		 *
3884 		 * A call to key_task_permission() here would be redundant, as
3885 		 * it is already done by keyring_search() called by
3886 		 * find_asymmetric_key().
3887 		 */
3888 		ret = key_validate(trusted_keyring->key);
3889 		if (ret < 0)
3890 			return ret;
3891 	}
3892 
3893 	data_len = __bpf_dynptr_size(data_ptr);
3894 	data = __bpf_dynptr_data(data_ptr, data_len);
3895 	sig_len = __bpf_dynptr_size(sig_ptr);
3896 	sig = __bpf_dynptr_data(sig_ptr, sig_len);
3897 
3898 	return verify_pkcs7_signature(data, data_len, sig, sig_len,
3899 				      trusted_keyring->key,
3900 				      VERIFYING_BPF_SIGNATURE, NULL,
3901 				      NULL);
3902 #else
3903 	return -EOPNOTSUPP;
3904 #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
3905 }
3906 #endif /* CONFIG_KEYS */
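
/*
 * Illustrative sketch, not part of the kernel sources: the intended
 * acquire/use/release pattern for the key kfuncs above, as called from a
 * sleepable BPF program. The serial and the two dynptrs are hypothetical;
 * bpf_key_put() must run on every path that acquired a key.
 *
 *	struct bpf_key *tk;
 *	int err;
 *
 *	tk = bpf_lookup_user_key(serial, KEY_LOOKUP_CREATE);
 *	if (!tk)
 *		return -ENOENT;
 *	err = bpf_verify_pkcs7_signature(&data_dynptr, &sig_dynptr, tk);
 *	bpf_key_put(tk);
 *	return err;
 */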
3907 
3908 typedef int (*bpf_task_work_callback_t)(struct bpf_map *map, void *key, void *value);
3909 
3910 enum bpf_task_work_state {
3911 	/* bpf_task_work is ready to be used */
3912 	BPF_TW_STANDBY = 0,
3913 	/* irq work scheduling in progress */
3914 	BPF_TW_PENDING,
3915 	/* task work scheduling in progress */
3916 	BPF_TW_SCHEDULING,
3917 	/* task work is scheduled successfully */
3918 	BPF_TW_SCHEDULED,
3919 	/* callback is running */
3920 	BPF_TW_RUNNING,
3921 	/* associated BPF map value is deleted */
3922 	BPF_TW_FREED,
3923 };
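
/*
 * State machine implemented by the code below; every transition is done
 * with cmpxchg()/xchg() on ctx->state:
 *
 *	STANDBY -> PENDING		bpf_task_work_acquire_ctx()
 *	PENDING -> SCHEDULING		bpf_task_work_irq()
 *	SCHEDULING -> SCHEDULED		bpf_task_work_irq(), task_work_add() ok
 *	SCHEDULING -> STANDBY		bpf_task_work_irq(), task_work_add() failed
 *	SCHEDULING|SCHEDULED -> RUNNING	bpf_task_work_callback()
 *	RUNNING -> STANDBY		bpf_task_work_callback() done
 *	any state -> FREED		bpf_task_work_cancel_and_free()
 */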
3924 
3925 struct bpf_task_work_ctx {
3926 	enum bpf_task_work_state state;
3927 	refcount_t refcnt;
3928 	struct callback_head work;
3929 	struct irq_work irq_work;
3930 	/* bpf_prog that schedules task work */
3931 	struct bpf_prog *prog;
3932 	/* task for which callback is scheduled */
3933 	struct task_struct *task;
3934 	/* the map and map value associated with this context */
3935 	struct bpf_map *map;
3936 	void *map_val;
3937 	enum task_work_notify_mode mode;
3938 	bpf_task_work_callback_t callback_fn;
3939 	struct rcu_head rcu;
3940 } __aligned(8);
3941 
3942 /* Actual type for struct bpf_task_work */
3943 struct bpf_task_work_kern {
3944 	struct bpf_task_work_ctx *ctx;
3945 };
3946 
3947 static void bpf_task_work_ctx_reset(struct bpf_task_work_ctx *ctx)
3948 {
3949 	if (ctx->prog) {
3950 		bpf_prog_put(ctx->prog);
3951 		ctx->prog = NULL;
3952 	}
3953 	if (ctx->task) {
3954 		bpf_task_release(ctx->task);
3955 		ctx->task = NULL;
3956 	}
3957 }
3958 
3959 static bool bpf_task_work_ctx_tryget(struct bpf_task_work_ctx *ctx)
3960 {
3961 	return refcount_inc_not_zero(&ctx->refcnt);
3962 }
3963 
3964 static void bpf_task_work_ctx_put(struct bpf_task_work_ctx *ctx)
3965 {
3966 	if (!refcount_dec_and_test(&ctx->refcnt))
3967 		return;
3968 
3969 	bpf_task_work_ctx_reset(ctx);
3970 
3971 	/* bpf_mem_free expects migration to be disabled */
3972 	migrate_disable();
3973 	bpf_mem_free(&bpf_global_ma, ctx);
3974 	migrate_enable();
3975 }
3976 
3977 static void bpf_task_work_cancel(struct bpf_task_work_ctx *ctx)
3978 {
3979 	/*
3980 	 * Scheduled task_work callback holds ctx ref, so if we successfully
3981 	 * cancelled, we put that ref on callback's behalf. If we couldn't
3982 	 * cancel, callback will inevitably run or has already completed
3983 	 * running, and it would have taken care of its ctx ref itself.
3984 	 */
3985 	if (task_work_cancel(ctx->task, &ctx->work))
3986 		bpf_task_work_ctx_put(ctx);
3987 }
3988 
3989 static void bpf_task_work_callback(struct callback_head *cb)
3990 {
3991 	struct bpf_task_work_ctx *ctx = container_of(cb, struct bpf_task_work_ctx, work);
3992 	enum bpf_task_work_state state;
3993 	u32 idx;
3994 	void *key;
3995 
3996 	/* RCU (tasks trace) read lock is needed to protect ctx and map key/value access */
3997 	guard(rcu_tasks_trace)();
3998 	/*
3999 	 * This callback may start running before bpf_task_work_irq() has switched
4000 	 * to the SCHEDULED state, so handle both transitions: SCHEDULING|SCHEDULED -> RUNNING.
4001 	 */
4002 	state = cmpxchg(&ctx->state, BPF_TW_SCHEDULING, BPF_TW_RUNNING);
4003 	if (state == BPF_TW_SCHEDULED)
4004 		state = cmpxchg(&ctx->state, BPF_TW_SCHEDULED, BPF_TW_RUNNING);
4005 	if (state == BPF_TW_FREED) {
4006 		bpf_task_work_ctx_put(ctx);
4007 		return;
4008 	}
4009 
4010 	key = (void *)map_key_from_value(ctx->map, ctx->map_val, &idx);
4011 
4012 	migrate_disable();
4013 	ctx->callback_fn(ctx->map, key, ctx->map_val);
4014 	migrate_enable();
4015 
4016 	bpf_task_work_ctx_reset(ctx);
4017 	(void)cmpxchg(&ctx->state, BPF_TW_RUNNING, BPF_TW_STANDBY);
4018 
4019 	bpf_task_work_ctx_put(ctx);
4020 }
4021 
4022 static void bpf_task_work_irq(struct irq_work *irq_work)
4023 {
4024 	struct bpf_task_work_ctx *ctx = container_of(irq_work, struct bpf_task_work_ctx, irq_work);
4025 	enum bpf_task_work_state state;
4026 	int err;
4027 
4028 	guard(rcu_tasks_trace)();
4029 
4030 	if (cmpxchg(&ctx->state, BPF_TW_PENDING, BPF_TW_SCHEDULING) != BPF_TW_PENDING) {
4031 		bpf_task_work_ctx_put(ctx);
4032 		return;
4033 	}
4034 
4035 	err = task_work_add(ctx->task, &ctx->work, ctx->mode);
4036 	if (err) {
4037 		bpf_task_work_ctx_reset(ctx);
4038 		/*
4039 		 * Try to switch back to STANDBY so the task_work can be reused; we might have
4040 		 * gone to FREED already, which is fine as we already cleaned up after ourselves.
4041 		 */
4042 		(void)cmpxchg(&ctx->state, BPF_TW_SCHEDULING, BPF_TW_STANDBY);
4043 		bpf_task_work_ctx_put(ctx);
4044 		return;
4045 	}
4046 
4047 	/*
4048 	 * It's technically possible for the just-scheduled task_work callback
4049 	 * to have completed by now, going SCHEDULING -> RUNNING and then
4050 	 * dropping its ctx refcount. Instead of taking an extra ref just to
4051 	 * protect the ctx->state access below, we rely on RCU protection to
4052 	 * perform the SCHEDULING -> SCHEDULED attempt below.
4053 	 */
4054 	state = cmpxchg(&ctx->state, BPF_TW_SCHEDULING, BPF_TW_SCHEDULED);
4055 	if (state == BPF_TW_FREED)
4056 		bpf_task_work_cancel(ctx); /* clean up if we switched into FREED state */
4057 }
4058 
4059 static struct bpf_task_work_ctx *bpf_task_work_fetch_ctx(struct bpf_task_work *tw,
4060 							 struct bpf_map *map)
4061 {
4062 	struct bpf_task_work_kern *twk = (void *)tw;
4063 	struct bpf_task_work_ctx *ctx, *old_ctx;
4064 
4065 	ctx = READ_ONCE(twk->ctx);
4066 	if (ctx)
4067 		return ctx;
4068 
4069 	ctx = bpf_mem_alloc(&bpf_global_ma, sizeof(struct bpf_task_work_ctx));
4070 	if (!ctx)
4071 		return ERR_PTR(-ENOMEM);
4072 
4073 	memset(ctx, 0, sizeof(*ctx));
4074 	refcount_set(&ctx->refcnt, 1); /* map's own ref */
4075 	ctx->state = BPF_TW_STANDBY;
4076 
4077 	old_ctx = cmpxchg(&twk->ctx, NULL, ctx);
4078 	if (old_ctx) {
4079 		/*
4080 		 * tw->ctx is set by concurrent BPF program, release allocated
4081 		 * memory and try to reuse already set context.
4082 		 */
4083 		bpf_mem_free(&bpf_global_ma, ctx);
4084 		return old_ctx;
4085 	}
4086 
4087 	return ctx; /* Success */
4088 }
4089 
4090 static struct bpf_task_work_ctx *bpf_task_work_acquire_ctx(struct bpf_task_work *tw,
4091 							   struct bpf_map *map)
4092 {
4093 	struct bpf_task_work_ctx *ctx;
4094 
4095 	ctx = bpf_task_work_fetch_ctx(tw, map);
4096 	if (IS_ERR(ctx))
4097 		return ctx;
4098 
4099 	/* try to get ref for task_work callback to hold */
4100 	if (!bpf_task_work_ctx_tryget(ctx))
4101 		return ERR_PTR(-EBUSY);
4102 
4103 	if (cmpxchg(&ctx->state, BPF_TW_STANDBY, BPF_TW_PENDING) != BPF_TW_STANDBY) {
4104 		/* lost acquiring race or map_release_uref() stole it from us, put ref and bail */
4105 		bpf_task_work_ctx_put(ctx);
4106 		return ERR_PTR(-EBUSY);
4107 	}
4108 
4109 	/*
4110 	 * If no process or bpffs is holding a reference to the map, no new callbacks should be
4111 	 * scheduled. This does not address any race or correctness issue, but rather is a policy
4112 	 * choice: dropping user references should stop everything.
4113 	 */
4114 	if (!atomic64_read(&map->usercnt)) {
4115 		/* drop ref we just got for task_work callback itself */
4116 		bpf_task_work_ctx_put(ctx);
4117 		/* transfer map's ref into cancel_and_free() */
4118 		bpf_task_work_cancel_and_free(tw);
4119 		return ERR_PTR(-EBUSY);
4120 	}
4121 
4122 	return ctx;
4123 }
4124 
4125 static int bpf_task_work_schedule(struct task_struct *task, struct bpf_task_work *tw,
4126 				  struct bpf_map *map, bpf_task_work_callback_t callback_fn,
4127 				  struct bpf_prog_aux *aux, enum task_work_notify_mode mode)
4128 {
4129 	struct bpf_prog *prog;
4130 	struct bpf_task_work_ctx *ctx;
4131 	int err;
4132 
4133 	BTF_TYPE_EMIT(struct bpf_task_work);
4134 
4135 	prog = bpf_prog_inc_not_zero(aux->prog);
4136 	if (IS_ERR(prog))
4137 		return -EBADF;
4138 	task = bpf_task_acquire(task);
4139 	if (!task) {
4140 		err = -EBADF;
4141 		goto release_prog;
4142 	}
4143 
4144 	ctx = bpf_task_work_acquire_ctx(tw, map);
4145 	if (IS_ERR(ctx)) {
4146 		err = PTR_ERR(ctx);
4147 		goto release_all;
4148 	}
4149 
4150 	ctx->task = task;
4151 	ctx->callback_fn = callback_fn;
4152 	ctx->prog = prog;
4153 	ctx->mode = mode;
4154 	ctx->map = map;
4155 	ctx->map_val = (void *)tw - map->record->task_work_off;
4156 	init_task_work(&ctx->work, bpf_task_work_callback);
4157 	init_irq_work(&ctx->irq_work, bpf_task_work_irq);
4158 
4159 	irq_work_queue(&ctx->irq_work);
4160 	return 0;
4161 
4162 release_all:
4163 	bpf_task_release(task);
4164 release_prog:
4165 	bpf_prog_put(prog);
4166 	return err;
4167 }
4168 
4169 /**
4170  * bpf_task_work_schedule_signal_impl - Schedule BPF callback using task_work_add with TWA_SIGNAL
4171  * mode
4172  * @task: Task struct for which callback should be scheduled
4173  * @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping
4174  * @map__map: bpf_map that embeds struct bpf_task_work in the values
4175  * @callback: Pointer to BPF subprogram to call
4176  * @aux__prog: User should pass NULL
4177  *
4178  * Return: 0 if task work has been scheduled successfully, negative error code otherwise
4179  */
4180 __bpf_kfunc int bpf_task_work_schedule_signal_impl(struct task_struct *task,
4181 						   struct bpf_task_work *tw, void *map__map,
4182 						   bpf_task_work_callback_t callback,
4183 						   void *aux__prog)
4184 {
4185 	return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_SIGNAL);
4186 }
4187 
4188 /**
4189  * bpf_task_work_schedule_resume_impl - Schedule BPF callback using task_work_add with TWA_RESUME
4190  * mode
4191  * @task: Task struct for which callback should be scheduled
4192  * @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping
4193  * @map__map: bpf_map that embeds struct bpf_task_work in the values
4194  * @callback: Pointer to BPF subprogram to call
4195  * @aux__prog: User should pass NULL
4196  *
4197  * Return: 0 if task work has been scheduled successfully, negative error code otherwise
4198  */
4199 __bpf_kfunc int bpf_task_work_schedule_resume_impl(struct task_struct *task,
4200 						   struct bpf_task_work *tw, void *map__map,
4201 						   bpf_task_work_callback_t callback,
4202 						   void *aux__prog)
4203 {
4204 	return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_RESUME);
4205 }
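
/*
 * Illustrative sketch, not part of the kernel sources: scheduling a task
 * work callback from a BPF program. The map value layout, map name and
 * callback body are hypothetical; per the docs above, NULL is passed for
 * aux__prog and the verifier fills in the program.
 *
 *	struct elem {
 *		struct bpf_task_work tw;
 *	};
 *
 *	static int tw_cb(struct bpf_map *map, void *key, void *value)
 *	{
 *		return 0;	// runs in target task context
 *	}
 *
 *	// with `val` looked up from a map whose values embed struct bpf_task_work:
 *	bpf_task_work_schedule_resume_impl(task, &val->tw, &tw_map, tw_cb, NULL);
 */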
4206 
4207 __bpf_kfunc_end_defs();
4208 
4209 static void bpf_task_work_cancel_scheduled(struct irq_work *irq_work)
4210 {
4211 	struct bpf_task_work_ctx *ctx = container_of(irq_work, struct bpf_task_work_ctx, irq_work);
4212 
4213 	bpf_task_work_cancel(ctx); /* this might put task_work callback's ref */
4214 	bpf_task_work_ctx_put(ctx); /* and here we put map's own ref that was transferred to us */
4215 }
4216 
4217 void bpf_task_work_cancel_and_free(void *val)
4218 {
4219 	struct bpf_task_work_kern *twk = val;
4220 	struct bpf_task_work_ctx *ctx;
4221 	enum bpf_task_work_state state;
4222 
4223 	ctx = xchg(&twk->ctx, NULL);
4224 	if (!ctx)
4225 		return;
4226 
4227 	state = xchg(&ctx->state, BPF_TW_FREED);
4228 	if (state == BPF_TW_SCHEDULED) {
4229 		/* run in irq_work to avoid locks in NMI */
4230 		init_irq_work(&ctx->irq_work, bpf_task_work_cancel_scheduled);
4231 		irq_work_queue(&ctx->irq_work);
4232 		return;
4233 	}
4234 
4235 	bpf_task_work_ctx_put(ctx); /* put bpf map's ref */
4236 }
4237 
4238 BTF_KFUNCS_START(generic_btf_ids)
4239 #ifdef CONFIG_CRASH_DUMP
4240 BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
4241 #endif
4242 BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
4243 BTF_ID_FLAGS(func, bpf_percpu_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
4244 BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE)
4245 BTF_ID_FLAGS(func, bpf_percpu_obj_drop_impl, KF_RELEASE)
4246 BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL | KF_RCU)
4247 BTF_ID_FLAGS(func, bpf_list_push_front_impl)
4248 BTF_ID_FLAGS(func, bpf_list_push_back_impl)
4249 BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL)
4250 BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
4251 BTF_ID_FLAGS(func, bpf_list_front, KF_RET_NULL)
4252 BTF_ID_FLAGS(func, bpf_list_back, KF_RET_NULL)
4253 BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
4254 BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
4255 BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL)
4256 BTF_ID_FLAGS(func, bpf_rbtree_add_impl)
4257 BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL)
4258 BTF_ID_FLAGS(func, bpf_rbtree_root, KF_RET_NULL)
4259 BTF_ID_FLAGS(func, bpf_rbtree_left, KF_RET_NULL)
4260 BTF_ID_FLAGS(func, bpf_rbtree_right, KF_RET_NULL)
4261 
4262 #ifdef CONFIG_CGROUPS
4263 BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
4264 BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE)
4265 BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
4266 BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL)
4267 BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU)
4268 BTF_ID_FLAGS(func, bpf_task_get_cgroup1, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
4269 #endif
4270 BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
4271 BTF_ID_FLAGS(func, bpf_task_from_vpid, KF_ACQUIRE | KF_RET_NULL)
4272 BTF_ID_FLAGS(func, bpf_throw)
4273 #ifdef CONFIG_BPF_EVENTS
4274 BTF_ID_FLAGS(func, bpf_send_signal_task, KF_TRUSTED_ARGS)
4275 #endif
4276 #ifdef CONFIG_KEYS
4277 BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
4278 BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
4279 BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
4280 #ifdef CONFIG_SYSTEM_DATA_VERIFICATION
4281 BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
4282 #endif
4283 #endif
4284 BTF_KFUNCS_END(generic_btf_ids)
4285 
4286 static const struct btf_kfunc_id_set generic_kfunc_set = {
4287 	.owner = THIS_MODULE,
4288 	.set   = &generic_btf_ids,
4289 };
4290 
4292 BTF_ID_LIST(generic_dtor_ids)
4293 BTF_ID(struct, task_struct)
4294 BTF_ID(func, bpf_task_release_dtor)
4295 #ifdef CONFIG_CGROUPS
4296 BTF_ID(struct, cgroup)
4297 BTF_ID(func, bpf_cgroup_release_dtor)
4298 #endif
4299 
4300 BTF_KFUNCS_START(common_btf_ids)
4301 BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx, KF_FASTCALL)
4302 BTF_ID_FLAGS(func, bpf_rdonly_cast, KF_FASTCALL)
4303 BTF_ID_FLAGS(func, bpf_rcu_read_lock)
4304 BTF_ID_FLAGS(func, bpf_rcu_read_unlock)
4305 BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL)
4306 BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL)
4307 BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW)
4308 BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL)
4309 BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY)
4310 BTF_ID_FLAGS(func, bpf_iter_task_vma_new, KF_ITER_NEW | KF_RCU)
4311 BTF_ID_FLAGS(func, bpf_iter_task_vma_next, KF_ITER_NEXT | KF_RET_NULL)
4312 BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY)
4313 #ifdef CONFIG_CGROUPS
4314 BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS)
4315 BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL)
4316 BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY)
4317 BTF_ID_FLAGS(func, bpf_iter_css_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
4318 BTF_ID_FLAGS(func, bpf_iter_css_next, KF_ITER_NEXT | KF_RET_NULL)
4319 BTF_ID_FLAGS(func, bpf_iter_css_destroy, KF_ITER_DESTROY)
4320 #endif
4321 BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
4322 BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL)
4323 BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY)
4324 BTF_ID_FLAGS(func, bpf_dynptr_adjust)
4325 BTF_ID_FLAGS(func, bpf_dynptr_is_null)
4326 BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
4327 BTF_ID_FLAGS(func, bpf_dynptr_size)
4328 BTF_ID_FLAGS(func, bpf_dynptr_clone)
4329 BTF_ID_FLAGS(func, bpf_dynptr_copy)
4330 BTF_ID_FLAGS(func, bpf_dynptr_memset)
4331 #ifdef CONFIG_NET
4332 BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
4333 #endif
4334 BTF_ID_FLAGS(func, bpf_wq_init)
4335 BTF_ID_FLAGS(func, bpf_wq_set_callback_impl)
4336 BTF_ID_FLAGS(func, bpf_wq_start)
4337 BTF_ID_FLAGS(func, bpf_preempt_disable)
4338 BTF_ID_FLAGS(func, bpf_preempt_enable)
4339 BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW)
4340 BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL)
4341 BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY)
4342 BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE)
4343 BTF_ID_FLAGS(func, bpf_copy_from_user_task_str, KF_SLEEPABLE)
4344 BTF_ID_FLAGS(func, bpf_get_kmem_cache)
4345 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_new, KF_ITER_NEW | KF_SLEEPABLE)
4346 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
4347 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)
4348 BTF_ID_FLAGS(func, bpf_local_irq_save)
4349 BTF_ID_FLAGS(func, bpf_local_irq_restore)
4350 #ifdef CONFIG_BPF_EVENTS
4351 BTF_ID_FLAGS(func, bpf_probe_read_user_dynptr)
4352 BTF_ID_FLAGS(func, bpf_probe_read_kernel_dynptr)
4353 BTF_ID_FLAGS(func, bpf_probe_read_user_str_dynptr)
4354 BTF_ID_FLAGS(func, bpf_probe_read_kernel_str_dynptr)
4355 BTF_ID_FLAGS(func, bpf_copy_from_user_dynptr, KF_SLEEPABLE)
4356 BTF_ID_FLAGS(func, bpf_copy_from_user_str_dynptr, KF_SLEEPABLE)
4357 BTF_ID_FLAGS(func, bpf_copy_from_user_task_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
4358 BTF_ID_FLAGS(func, bpf_copy_from_user_task_str_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
4359 #endif
4360 #ifdef CONFIG_DMA_SHARED_BUFFER
4361 BTF_ID_FLAGS(func, bpf_iter_dmabuf_new, KF_ITER_NEW | KF_SLEEPABLE)
4362 BTF_ID_FLAGS(func, bpf_iter_dmabuf_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
4363 BTF_ID_FLAGS(func, bpf_iter_dmabuf_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)
4364 #endif
4365 BTF_ID_FLAGS(func, __bpf_trap)
4366 BTF_ID_FLAGS(func, bpf_strcmp)
4367 BTF_ID_FLAGS(func, bpf_strcasecmp)
4368 BTF_ID_FLAGS(func, bpf_strchr)
4369 BTF_ID_FLAGS(func, bpf_strchrnul)
4370 BTF_ID_FLAGS(func, bpf_strnchr)
4371 BTF_ID_FLAGS(func, bpf_strrchr)
4372 BTF_ID_FLAGS(func, bpf_strlen)
4373 BTF_ID_FLAGS(func, bpf_strnlen)
4374 BTF_ID_FLAGS(func, bpf_strspn)
4375 BTF_ID_FLAGS(func, bpf_strcspn)
4376 BTF_ID_FLAGS(func, bpf_strstr)
4377 BTF_ID_FLAGS(func, bpf_strnstr)
4378 #if defined(CONFIG_BPF_LSM) && defined(CONFIG_CGROUPS)
4379 BTF_ID_FLAGS(func, bpf_cgroup_read_xattr, KF_RCU)
4380 #endif
4381 BTF_ID_FLAGS(func, bpf_stream_vprintk_impl, KF_TRUSTED_ARGS)
4382 BTF_ID_FLAGS(func, bpf_task_work_schedule_signal_impl, KF_TRUSTED_ARGS)
4383 BTF_ID_FLAGS(func, bpf_task_work_schedule_resume_impl, KF_TRUSTED_ARGS)
4384 BTF_KFUNCS_END(common_btf_ids)
4385 
4386 static const struct btf_kfunc_id_set common_kfunc_set = {
4387 	.owner = THIS_MODULE,
4388 	.set   = &common_btf_ids,
4389 };
4390 
4391 static int __init kfunc_init(void)
4392 {
4393 	int ret;
4394 	const struct btf_id_dtor_kfunc generic_dtors[] = {
4395 		{
4396 			.btf_id       = generic_dtor_ids[0],
4397 			.kfunc_btf_id = generic_dtor_ids[1]
4398 		},
4399 #ifdef CONFIG_CGROUPS
4400 		{
4401 			.btf_id       = generic_dtor_ids[2],
4402 			.kfunc_btf_id = generic_dtor_ids[3]
4403 		},
4404 #endif
4405 	};
4406 
4407 	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set);
4408 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set);
4409 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &generic_kfunc_set);
4410 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set);
4411 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &generic_kfunc_set);
4412 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SKB, &generic_kfunc_set);
4413 	ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors,
4414 						  ARRAY_SIZE(generic_dtors),
4415 						  THIS_MODULE);
4416 	return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set);
4417 }
4418 
4419 late_initcall(kfunc_init);
4420 
4421 /* Get a pointer to dynptr data up to len bytes for read-only access. If
4422  * the dynptr doesn't have contiguous data up to len bytes, return NULL.
4423  */
4424 const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len)
4425 {
4426 	const struct bpf_dynptr *p = (struct bpf_dynptr *)ptr;
4427 
4428 	return bpf_dynptr_slice(p, 0, NULL, len);
4429 }
4430 
4431 /* Get a pointer to dynptr data up to len bytes for read-write access. If
4432  * the dynptr doesn't have contiguous data up to len bytes, or the dynptr
4433  * is read-only, return NULL.
4434  */
4435 void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len)
4436 {
4437 	if (__bpf_dynptr_is_rdonly(ptr))
4438 		return NULL;
4439 	return (void *)__bpf_dynptr_data(ptr, len);
4440 }
4441