xref: /linux/kernel/bpf/helpers.c (revision d59fec29b131f30b27343d54bdf1071ee98eda8e)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/poison.h>
#include <linux/proc_ns.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <linux/btf_ids.h>
#include <linux/bpf_mem_alloc.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function, it
 * should return bpf_map_lookup_elem_proto from its own
 * verifier_ops->get_func_proto() callback, so that the verifier can
 * properly check the arguments.
 *
 * Different map implementations rely on RCU in their lookup/update/delete
 * map methods, so eBPF programs must run under the RCU lock whenever they
 * are allowed to access maps; hence rcu_read_lock_held() is checked in
 * all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
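
/* Illustrative usage (a sketch, not part of this file): from BPF program
 * context these helpers pair with a map the program declares itself; the
 * map "counts" below is hypothetical:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 128);
 *		__type(key, u32);
 *		__type(value, u64);
 *	} counts SEC(".maps");
 *
 *	u32 key = 0;
 *	u64 init = 1, *val;
 *
 *	val = bpf_map_lookup_elem(&counts, &key);
 *	if (val)
 *		__sync_fetch_and_add(val, 1);
 *	else
 *		bpf_map_update_elem(&counts, &key, &init, BPF_NOEXIST);
 *
 * The NULL check is mandatory: RET_PTR_TO_MAP_VALUE_OR_NULL makes the
 * verifier reject any dereference before it.
 */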

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func		= bpf_map_push_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func		= bpf_map_pop_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func		= bpf_map_peek_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT,
};

BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
}

const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = {
	.func		= bpf_map_lookup_percpu_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_ANYTHING,
};

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_boot_ns)
{
	/* NMI safe access to clock boottime */
	return ktime_get_boot_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
	.func		= bpf_ktime_get_boot_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_coarse_ns)
{
	return ktime_get_coarse_ns();
}

const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
	.func		= bpf_ktime_get_coarse_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_tai_ns)
{
	/* NMI safe access to clock tai */
	return ktime_get_tai_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_tai_ns_proto = {
	.func		= bpf_ktime_get_tai_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
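
/* Illustrative note (a sketch, not part of this file): the packed return
 * value is typically split by the BPF program:
 *
 *	u64 id = bpf_get_current_pid_tgid();
 *	u32 tgid = id >> 32;	// "process id" in user space terms
 *	u32 pid = (u32)id;	// "thread id" in user space terms
 */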

BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	/* Verifier guarantees that size > 0 */
	strscpy_pad(buf, task->comm, size);
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
}

notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_lock_irqsave(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func		= bpf_spin_lock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
	.arg1_btf_id    = BPF_PTR_POISON,
};

static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
}

notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_unlock_irqrestore(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func		= bpf_spin_unlock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
	.arg1_btf_id    = BPF_PTR_POISON,
};
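
/* Illustrative usage (a sketch, not part of this file): a BPF program
 * embeds struct bpf_spin_lock in a map value and brackets its updates
 * with the two helpers above; "struct val_t" and the map "vals" are
 * hypothetical:
 *
 *	struct val_t {
 *		struct bpf_spin_lock lock;
 *		u64 counter;
 *	};
 *
 *	struct val_t *v = bpf_map_lookup_elem(&vals, &key);
 *	if (v) {
 *		bpf_spin_lock(&v->lock);
 *		v->counter++;
 *		bpf_spin_unlock(&v->lock);
 *	}
 *
 * The verifier only accepts short critical sections with no helper calls
 * or program exit inside, which is why the irqsave/irqrestore wrappers
 * above cannot deadlock on nesting.
 */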

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->record->spin_lock_off;
	else
		lock = dst + map->record->spin_lock_off;
	preempt_disable();
	__bpf_spin_lock_irqsave(lock);
	copy_map_value(map, dst, src);
	__bpf_spin_unlock_irqrestore(lock);
	preempt_enable();
}

BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func		= bpf_jiffies64,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	cgrp_id = cgroup_id(cgrp);
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func		= bpf_get_current_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
	struct cgroup *cgrp;
	struct cgroup *ancestor;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ancestor = cgroup_ancestor(cgrp, ancestor_level);
	cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
	.func		= bpf_get_current_ancestor_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};
#endif /* CONFIG_CGROUPS */

#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   long *, res)
{
	long long _res;
	int err;

	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	if (_res != (long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func		= bpf_strtol,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};
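
/* Illustrative usage (a sketch, not part of this file), e.g. from a
 * sysctl program parsing a value it has read into a buffer:
 *
 *	char buf[16] = "  -42";
 *	long res;
 *	int n = bpf_strtol(buf, sizeof(buf), 0, &res);
 *	// n == 5 (whitespace + sign + digits consumed), res == -42
 *	// n < 0 on failure: -EINVAL or -ERANGE
 *
 * A base of 0 in flags auto-detects an octal or hex prefix, mirroring
 * kstrtol() semantics.
 */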

BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   unsigned long *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	if (_res != (unsigned long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func		= bpf_strtoul,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
{
	return strncmp(s1, s2, s1_sz);
}

static const struct bpf_func_proto bpf_strncmp_proto = {
	.func		= bpf_strncmp,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_CONST_STR,
};

BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
	   struct bpf_pidns_info *, nsdata, u32, size)
{
	struct task_struct *task = current;
	struct pid_namespace *pidns;
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_pidns_info)))
		goto clear;

	if (unlikely((u64)(dev_t)dev != dev))
		goto clear;

	if (unlikely(!task))
		goto clear;

	pidns = task_active_pid_ns(task);
	if (unlikely(!pidns)) {
		err = -ENOENT;
		goto clear;
	}

	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
		goto clear;

	nsdata->pid = task_pid_nr_ns(task, pidns);
	nsdata->tgid = task_tgid_nr_ns(task, pidns);
	return 0;
clear:
	memset((void *)nsdata, 0, (size_t) size);
	return err;
}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
	.func		= bpf_get_ns_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type      = ARG_PTR_TO_UNINIT_MEM,
	.arg4_type      = ARG_CONST_SIZE,
};
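
/* Illustrative usage (a sketch, not part of this file): user space
 * typically obtains dev/ino by stat()ing /proc/self/ns/pid and hands
 * them to the program, which then resolves namespace-local ids:
 *
 *	struct bpf_pidns_info ns;
 *	if (!bpf_get_ns_current_pid_tgid(dev, ino, &ns, sizeof(ns)))
 *		... ns.pid and ns.tgid are valid in that pid namespace ...
 */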

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= bpf_get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}

const struct bpf_func_proto bpf_event_output_data_proto =  {
	.func		= bpf_event_output_data,
	.gpl_only       = true,
	.ret_type       = RET_INTEGER,
	.arg1_type      = ARG_PTR_TO_CTX,
	.arg2_type      = ARG_CONST_MAP_PTR,
	.arg3_type      = ARG_ANYTHING,
	.arg4_type      = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
	   const void __user *, user_ptr)
{
	int ret = copy_from_user(dst, user_ptr, size);

	if (unlikely(ret)) {
		memset(dst, 0, size);
		ret = -EFAULT;
	}

	return ret;
}

const struct bpf_func_proto bpf_copy_from_user_proto = {
	.func		= bpf_copy_from_user,
	.gpl_only	= false,
	.might_sleep	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size,
	   const void __user *, user_ptr, struct task_struct *, tsk, u64, flags)
{
	int ret;

	/* flags is not used yet */
	if (unlikely(flags))
		return -EINVAL;

	if (unlikely(!size))
		return 0;

	ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0);
	if (ret == size)
		return 0;

	memset(dst, 0, size);
	/* Return -EFAULT for partial read */
	return ret < 0 ? ret : -EFAULT;
}

const struct bpf_func_proto bpf_copy_from_user_task_proto = {
	.func		= bpf_copy_from_user_task,
	.gpl_only	= true,
	.might_sleep	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_BTF_ID,
	.arg4_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg5_type	= ARG_ANYTHING
};

BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{
	if (cpu >= nr_cpu_ids)
		return (unsigned long)NULL;

	return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
}

const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
	.func		= bpf_per_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
	return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
}

const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
	.func		= bpf_this_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
};
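
/* Illustrative usage (a sketch, not part of this file): given a
 * BTF-typed percpu kernel symbol, a program can read a specific CPU's
 * copy; bpf_prog_active is one existing percpu variable used this way
 * in selftests:
 *
 *	extern const int bpf_prog_active __ksym;
 *
 *	const int *p = bpf_per_cpu_ptr(&bpf_prog_active, cpu);
 *	if (p)		// NULL when cpu >= nr_cpu_ids, see above
 *		val = *p;
 */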

static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
		size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE)
			return strncpy_from_user_nofault(buf, user_ptr, bufsz);
		fallthrough;
#endif
	case 'k':
		return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
	case 'u':
		return strncpy_from_user_nofault(buf, user_ptr, bufsz);
	}

	return -EINVAL;
}

/* Per-cpu temp buffers used by printf-like helpers to store the binary
 * representation of their arguments.
 */
#define MAX_BPRINTF_BIN_ARGS	512

/* Support executing three nested bprintf helper calls on a given CPU */
#define MAX_BPRINTF_NEST_LEVEL	3
struct bpf_bprintf_buffers {
	char bin_args[MAX_BPRINTF_BIN_ARGS];
	char buf[MAX_BPRINTF_BUF];
};

static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs);
static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);

static int try_get_buffers(struct bpf_bprintf_buffers **bufs)
{
	int nest_level;

	preempt_disable();
	nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
	if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		preempt_enable();
		return -EBUSY;
	}
	*bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]);

	return 0;
}

void bpf_bprintf_cleanup(struct bpf_bprintf_data *data)
{
	if (!data->bin_args && !data->buf)
		return;
	if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0))
		return;
	this_cpu_dec(bpf_bprintf_nest_level);
	preempt_enable();
}

/*
 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
 *
 * Returns a negative value if fmt is an invalid format string or 0 otherwise.
 *
 * This can be used in two ways:
 * - Format string verification only: when data->get_bin_args is false
 * - Arguments preparation: in addition to the above verification, it writes in
 *   data->bin_args a binary representation of arguments usable by bstr_printf
 *   where pointers from BPF have been sanitized.
 *
 * In argument preparation mode, if 0 is returned, safe temporary buffers are
 * allocated and bpf_bprintf_cleanup should be called to free them after use.
 */
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 num_args, struct bpf_bprintf_data *data)
{
	bool get_buffers = (data->get_bin_args && num_args) || data->get_buf;
	char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
	struct bpf_bprintf_buffers *buffers = NULL;
	size_t sizeof_cur_arg, sizeof_cur_ip;
	int err, i, num_spec = 0;
	u64 cur_arg;
	char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";

	fmt_end = strnchr(fmt, fmt_size, 0);
	if (!fmt_end)
		return -EINVAL;
	fmt_size = fmt_end - fmt;

	if (get_buffers && try_get_buffers(&buffers))
		return -EBUSY;

	if (data->get_bin_args) {
		if (num_args)
			tmp_buf = buffers->bin_args;
		tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS;
		data->bin_args = (u32 *)tmp_buf;
	}

	if (data->get_buf)
		data->buf = buffers->buf;

	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (num_spec >= num_args) {
			err = -EINVAL;
			goto out;
		}

		/* The string is zero-terminated, so if fmt[i] != 0 we can
		 * always access fmt[i + 1]; in the worst case it will be a 0.
		 */
		i++;

		/* skip optional "[0 +-][num]" width formatting field */
		while (fmt[i] == '0' || fmt[i] == '+'  || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 'p') {
			sizeof_cur_arg = sizeof(long);

			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
			    ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
			    fmt[i + 1] == 'S') {
				/* just kernel pointers */
				if (tmp_buf)
					cur_arg = raw_args[num_spec];
				i++;
				goto nocopy_fmt;
			}

			if (fmt[i + 1] == 'B') {
				if (tmp_buf)  {
					err = snprintf(tmp_buf,
						       (tmp_buf_end - tmp_buf),
						       "%pB",
						       (void *)(long)raw_args[num_spec]);
					tmp_buf += (err + 1);
				}

				i++;
				num_spec++;
				continue;
			}

			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
			if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
			    (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
				err = -EINVAL;
				goto out;
			}

			i += 2;
			if (!tmp_buf)
				goto nocopy_fmt;

			sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
			if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
						       sizeof_cur_ip);
			if (err < 0)
				memset(cur_ip, 0, sizeof_cur_ip);

			/* hack: bstr_printf expects IP addresses to be
			 * pre-formatted as strings; ironically, the easiest way
			 * to do that is to call snprintf.
			 */
			ip_spec[2] = fmt[i - 1];
			ip_spec[3] = fmt[i];
			err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
				       ip_spec, &cur_ip);

			tmp_buf += err + 1;
			num_spec++;

			continue;
		} else if (fmt[i] == 's') {
			fmt_ptype = fmt[i];
fmt_str:
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1])) {
				err = -EINVAL;
				goto out;
			}

			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
						    fmt_ptype,
						    tmp_buf_end - tmp_buf);
			if (err < 0) {
				tmp_buf[0] = '\0';
				err = 1;
			}

			tmp_buf += err;
			num_spec++;

			continue;
		} else if (fmt[i] == 'c') {
			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			*tmp_buf = raw_args[num_spec];
			tmp_buf++;
			num_spec++;

			continue;
		}

		sizeof_cur_arg = sizeof(int);

		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long);
			i++;
		}
		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long long);
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
		    fmt[i] != 'x' && fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		if (tmp_buf)
			cur_arg = raw_args[num_spec];
nocopy_fmt:
		if (tmp_buf) {
			tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
			if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
				err = -ENOSPC;
				goto out;
			}

			if (sizeof_cur_arg == 8) {
				*(u32 *)tmp_buf = *(u32 *)&cur_arg;
				*(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
			} else {
				*(u32 *)tmp_buf = (u32)(long)cur_arg;
			}
			tmp_buf += sizeof_cur_arg;
		}
		num_spec++;
	}

	err = 0;
out:
	if (err)
		bpf_bprintf_cleanup(data);
	return err;
}

BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
	   const void *, args, u32, data_len)
{
	struct bpf_bprintf_data data = {
		.get_bin_args	= true,
	};
	int err, num_args;

	if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !args))
		return -EINVAL;
	num_args = data_len / 8;

	/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
	 * can safely give an unbounded size.
	 */
	err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data);
	if (err < 0)
		return err;

	err = bstr_printf(str, str_size, fmt, data.bin_args);

	bpf_bprintf_cleanup(&data);

	return err + 1;
}

const struct bpf_func_proto bpf_snprintf_proto = {
	.func		= bpf_snprintf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_CONST_STR,
	.arg4_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
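
/* Illustrative usage (a sketch, not part of this file): BPF programs
 * pass the variadic arguments as a u64 array, with data_len a multiple
 * of 8; "pid" and "name_ptr" below are hypothetical program variables:
 *
 *	char out[64];
 *	u64 args[] = { (u64)pid, (u64)(long)name_ptr };
 *	long n = bpf_snprintf(out, sizeof(out), "pid=%d comm=%s",
 *			      args, sizeof(args));
 *
 * On success n is the number of bytes that would have been written
 * including the trailing NUL (the "err + 1" above), so n > sizeof(out)
 * signals truncation.
 */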

/* BPF map elements can contain 'struct bpf_timer'.
 * Such a map owns all of its BPF timers.
 * 'struct bpf_timer' is allocated as part of map element allocation
 * and it's zero initialized.
 * That space is used to keep 'struct bpf_timer_kern'.
 * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and
 * remembers the 'struct bpf_map *' pointer it's part of.
 * bpf_timer_set_callback() increments the prog refcnt and assigns the bpf
 * callback_fn.
 * bpf_timer_start() arms the timer.
 * If the user space reference to a map goes to zero at this point the
 * ops->map_release_uref callback is responsible for cancelling the timers,
 * freeing their memory, and decrementing the progs' refcnts.
 * bpf_timer_cancel() cancels the timer and decrements the prog's refcnt.
 * Inner maps can contain bpf timers as well. ops->map_release_uref frees
 * the timers when an inner map is replaced or deleted by user space.
 */
struct bpf_hrtimer {
	struct hrtimer timer;
	struct bpf_map *map;
	struct bpf_prog *prog;
	void __rcu *callback_fn;
	void *value;
};

/* the actual struct hidden inside uapi struct bpf_timer */
struct bpf_timer_kern {
	struct bpf_hrtimer *timer;
	/* bpf_spin_lock is used here instead of spinlock_t to make
	 * sure that it always fits into space reserved by struct bpf_timer
	 * regardless of LOCKDEP and spinlock debug flags.
	 */
	struct bpf_spin_lock lock;
} __attribute__((aligned(8)));

static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);

static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
{
	struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
	struct bpf_map *map = t->map;
	void *value = t->value;
	bpf_callback_t callback_fn;
	void *key;
	u32 idx;

	BTF_TYPE_EMIT(struct bpf_timer);
	callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held());
	if (!callback_fn)
		goto out;

	/* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
	 * cannot be preempted by another bpf_timer_cb() on the same cpu.
	 * Remember the timer this callback is servicing to prevent
	 * deadlock if callback_fn() calls bpf_timer_cancel() or
	 * bpf_map_delete_elem() on the same timer.
	 */
	this_cpu_write(hrtimer_running, t);
	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
		struct bpf_array *array = container_of(map, struct bpf_array, map);

		/* compute the key */
		idx = ((char *)value - array->value) / array->elem_size;
		key = &idx;
	} else { /* hash or lru */
		key = value - round_up(map->key_size, 8);
	}

	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
	/* The verifier checked that return value is zero. */

	this_cpu_write(hrtimer_running, NULL);
out:
	return HRTIMER_NORESTART;
}

BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map,
	   u64, flags)
{
	clockid_t clockid = flags & (MAX_CLOCKS - 1);
	struct bpf_hrtimer *t;
	int ret = 0;

	BUILD_BUG_ON(MAX_CLOCKS != 16);
	BUILD_BUG_ON(sizeof(struct bpf_timer_kern) > sizeof(struct bpf_timer));
	BUILD_BUG_ON(__alignof__(struct bpf_timer_kern) != __alignof__(struct bpf_timer));

	if (in_nmi())
		return -EOPNOTSUPP;

	if (flags >= MAX_CLOCKS ||
	    /* similar to timerfd except _ALARM variants are not supported */
	    (clockid != CLOCK_MONOTONIC &&
	     clockid != CLOCK_REALTIME &&
	     clockid != CLOCK_BOOTTIME))
		return -EINVAL;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (t) {
		ret = -EBUSY;
		goto out;
	}
	if (!atomic64_read(&map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs.
		 */
		ret = -EPERM;
		goto out;
	}
	/* allocate hrtimer via map_kmalloc to use memcg accounting */
	t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
	if (!t) {
		ret = -ENOMEM;
		goto out;
	}
	t->value = (void *)timer - map->record->timer_off;
	t->map = map;
	t->prog = NULL;
	rcu_assign_pointer(t->callback_fn, NULL);
	hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
	t->timer.function = bpf_timer_cb;
	timer->timer = t;
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_init_proto = {
	.func		= bpf_timer_init,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callback_fn,
	   struct bpf_prog_aux *, aux)
{
	struct bpf_prog *prev, *prog = aux->prog;
	struct bpf_hrtimer *t;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (!atomic64_read(&t->map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs. Otherwise timer might still be
		 * running even when bpf prog is detached and user space
		 * is gone, since map_release_uref won't ever be called.
		 */
		ret = -EPERM;
		goto out;
	}
	prev = t->prog;
	if (prev != prog) {
		/* Bump prog refcnt once. Every bpf_timer_set_callback()
		 * can pick different callback_fn-s within the same prog.
		 */
		prog = bpf_prog_inc_not_zero(prog);
		if (IS_ERR(prog)) {
			ret = PTR_ERR(prog);
			goto out;
		}
		if (prev)
			/* Drop prev prog refcnt when swapping with new prog */
			bpf_prog_put(prev);
		t->prog = prog;
	}
	rcu_assign_pointer(t->callback_fn, callback_fn);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_set_callback_proto = {
	.func		= bpf_timer_set_callback,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_PTR_TO_FUNC,
};

BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, flags)
{
	struct bpf_hrtimer *t;
	int ret = 0;
	enum hrtimer_mode mode;

	if (in_nmi())
		return -EOPNOTSUPP;
	if (flags > BPF_F_TIMER_ABS)
		return -EINVAL;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t || !t->prog) {
		ret = -EINVAL;
		goto out;
	}

	if (flags & BPF_F_TIMER_ABS)
		mode = HRTIMER_MODE_ABS_SOFT;
	else
		mode = HRTIMER_MODE_REL_SOFT;

	hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_start_proto = {
	.func		= bpf_timer_start,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

static void drop_prog_refcnt(struct bpf_hrtimer *t)
{
	struct bpf_prog *prog = t->prog;

	if (prog) {
		bpf_prog_put(prog);
		t->prog = NULL;
		rcu_assign_pointer(t->callback_fn, NULL);
	}
}

BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
{
	struct bpf_hrtimer *t;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (this_cpu_read(hrtimer_running) == t) {
		/* If bpf callback_fn is trying to bpf_timer_cancel()
		 * its own timer the hrtimer_cancel() will deadlock,
		 * since it waits for callback_fn to finish.
		 */
		ret = -EDEADLK;
		goto out;
	}
	drop_prog_refcnt(t);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	/* Cancel the timer and wait for associated callback to finish
	 * if it was running.
	 */
	ret = ret ?: hrtimer_cancel(&t->timer);
	return ret;
}

static const struct bpf_func_proto bpf_timer_cancel_proto = {
	.func		= bpf_timer_cancel,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
};
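
/* Illustrative lifecycle (a sketch, not part of this file; the map
 * "timers", its value type "struct map_val" with embedded field "t",
 * and timer_cb() are hypothetical):
 *
 *	static int timer_cb(void *map, int *key, struct map_val *val);
 *
 *	struct map_val *val = bpf_map_lookup_elem(&timers, &key);
 *	if (val) {
 *		bpf_timer_init(&val->t, &timers, CLOCK_MONOTONIC);
 *		bpf_timer_set_callback(&val->t, timer_cb);
 *		bpf_timer_start(&val->t, 1000000, 0);	// fire in 1 ms
 *	}
 */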

/* This function is called by map_delete/update_elem for an individual
 * element and by ops->map_release_uref when the user space reference to
 * a map reaches zero.
 */
void bpf_timer_cancel_and_free(void *val)
{
	struct bpf_timer_kern *timer = val;
	struct bpf_hrtimer *t;

	/* Performance optimization: read timer->timer without lock first. */
	if (!READ_ONCE(timer->timer))
		return;

	__bpf_spin_lock_irqsave(&timer->lock);
	/* re-read it under lock */
	t = timer->timer;
	if (!t)
		goto out;
	drop_prog_refcnt(t);
	/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
	 * this timer, since it won't be initialized.
	 */
	timer->timer = NULL;
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	if (!t)
		return;
	/* Cancel the timer and wait for callback to complete if it was running.
	 * If hrtimer_cancel() can be safely called it's safe to call kfree(t)
	 * right after for both preallocated and non-preallocated maps.
	 * The timer->timer = NULL was already done and no code path can
	 * see address 't' anymore.
	 *
	 * Check that bpf_map_delete/update_elem() wasn't called from timer
	 * callback_fn. In such case don't call hrtimer_cancel() (since it will
	 * deadlock) and don't call hrtimer_try_to_cancel() (since it will just
	 * return -1). Though callback_fn is still running on this cpu it's
	 * safe to do kfree(t) because bpf_timer_cb() read everything it needed
	 * from 't'. The bpf subprog callback_fn won't be able to access 't',
	 * since timer->timer = NULL was already done. The timer will be
	 * effectively cancelled because bpf_timer_cb() will return
	 * HRTIMER_NORESTART.
	 */
	if (this_cpu_read(hrtimer_running) != t)
		hrtimer_cancel(&t->timer);
	kfree(t);
}

BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
{
	unsigned long *kptr = map_value;

	return xchg(kptr, (unsigned long)ptr);
}

/* Unlike other PTR_TO_BTF_ID helpers, the btf_id of the bpf_kptr_xchg()
 * helper is determined dynamically by the verifier. Use BPF_PTR_POISON to
 * denote a type that the verifier will determine.
 */
static const struct bpf_func_proto bpf_kptr_xchg_proto = {
	.func         = bpf_kptr_xchg,
	.gpl_only     = false,
	.ret_type     = RET_PTR_TO_BTF_ID_OR_NULL,
	.ret_btf_id   = BPF_PTR_POISON,
	.arg1_type    = ARG_PTR_TO_KPTR,
	.arg2_type    = ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE,
	.arg2_btf_id  = BPF_PTR_POISON,
};
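
/* Illustrative usage (a sketch, not part of this file): a map value
 * declares a kptr field and the program atomically moves ownership in
 * and out of it; "struct map_val" is hypothetical and bpf_task_release()
 * is the matching release kfunc for task kptrs:
 *
 *	struct map_val {
 *		struct task_struct __kptr *task;
 *	};
 *
 *	old = bpf_kptr_xchg(&v->task, new);	// "new" ownership moves
 *	if (old)				// into the map; the old
 *		bpf_task_release(old);		// kptr is now owned here
 */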

/* Since the upper 8 bits of dynptr->size are reserved, the
 * maximum supported size is 2^24 - 1.
 */
#define DYNPTR_MAX_SIZE	((1UL << 24) - 1)
#define DYNPTR_TYPE_SHIFT	28
#define DYNPTR_SIZE_MASK	0xFFFFFF
#define DYNPTR_RDONLY_BIT	BIT(31)
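
/* Resulting layout of ptr->size, for reference (derived from the macros
 * above):
 *	bits  0..23	actual size (DYNPTR_SIZE_MASK)
 *	bits 24..27	currently unused
 *	bits 28..30	dynptr type (via DYNPTR_TYPE_SHIFT)
 *	bit  31		read-only flag (DYNPTR_RDONLY_BIT)
 */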

static bool bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
{
	return ptr->size & DYNPTR_RDONLY_BIT;
}

void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
{
	ptr->size |= DYNPTR_RDONLY_BIT;
}

static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type)
{
	ptr->size |= type << DYNPTR_TYPE_SHIFT;
}

static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
{
	return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT;
}

u32 bpf_dynptr_get_size(const struct bpf_dynptr_kern *ptr)
{
	return ptr->size & DYNPTR_SIZE_MASK;
}

int bpf_dynptr_check_size(u32 size)
{
	return size > DYNPTR_MAX_SIZE ? -E2BIG : 0;
}

void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
		     enum bpf_dynptr_type type, u32 offset, u32 size)
{
	ptr->data = data;
	ptr->offset = offset;
	ptr->size = size;
	bpf_dynptr_set_type(ptr, type);
}

void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
{
	memset(ptr, 0, sizeof(*ptr));
}

static int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len)
{
	u32 size = bpf_dynptr_get_size(ptr);

	if (len > size || offset > size - len)
		return -E2BIG;

	return 0;
}

BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr)
{
	int err;

	BTF_TYPE_EMIT(struct bpf_dynptr);

	err = bpf_dynptr_check_size(size);
	if (err)
		goto error;

	/* flags is currently unsupported */
	if (flags) {
		err = -EINVAL;
		goto error;
	}

	bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size);

	return 0;

error:
	bpf_dynptr_set_null(ptr);
	return err;
}

static const struct bpf_func_proto bpf_dynptr_from_mem_proto = {
	.func		= bpf_dynptr_from_mem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT,
};

BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src,
	   u32, offset, u64, flags)
{
	enum bpf_dynptr_type type;
	int err;

	if (!src->data || flags)
		return -EINVAL;

	err = bpf_dynptr_check_off_len(src, offset, len);
	if (err)
		return err;

	type = bpf_dynptr_get_type(src);

	switch (type) {
	case BPF_DYNPTR_TYPE_LOCAL:
	case BPF_DYNPTR_TYPE_RINGBUF:
		/* Source and destination may possibly overlap, hence use memmove to
		 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptrs
		 * pointing to overlapping PTR_TO_MAP_VALUE regions.
		 */
		memmove(dst, src->data + src->offset + offset, len);
		return 0;
	case BPF_DYNPTR_TYPE_SKB:
		return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len);
	case BPF_DYNPTR_TYPE_XDP:
		return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len);
	default:
		WARN_ONCE(true, "bpf_dynptr_read: unknown dynptr type %d\n", type);
		return -EFAULT;
	}
}

static const struct bpf_func_proto bpf_dynptr_read_proto = {
	.func		= bpf_dynptr_read,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_DYNPTR | MEM_RDONLY,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, void *, src,
	   u32, len, u64, flags)
{
	enum bpf_dynptr_type type;
	int err;

	if (!dst->data || bpf_dynptr_is_rdonly(dst))
		return -EINVAL;

	err = bpf_dynptr_check_off_len(dst, offset, len);
	if (err)
		return err;

	type = bpf_dynptr_get_type(dst);

	switch (type) {
	case BPF_DYNPTR_TYPE_LOCAL:
	case BPF_DYNPTR_TYPE_RINGBUF:
		if (flags)
			return -EINVAL;
		/* Source and destination may possibly overlap, hence use memmove to
		 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptrs
		 * pointing to overlapping PTR_TO_MAP_VALUE regions.
		 */
		memmove(dst->data + dst->offset + offset, src, len);
		return 0;
	case BPF_DYNPTR_TYPE_SKB:
		return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len,
					     flags);
	case BPF_DYNPTR_TYPE_XDP:
		if (flags)
			return -EINVAL;
		return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len);
	default:
		WARN_ONCE(true, "bpf_dynptr_write: unknown dynptr type %d\n", type);
		return -EFAULT;
	}
}

static const struct bpf_func_proto bpf_dynptr_write_proto = {
	.func		= bpf_dynptr_write,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_DYNPTR | MEM_RDONLY,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u32, len)
{
	enum bpf_dynptr_type type;
	int err;

	if (!ptr->data)
		return 0;

	err = bpf_dynptr_check_off_len(ptr, offset, len);
	if (err)
		return 0;

	if (bpf_dynptr_is_rdonly(ptr))
		return 0;

	type = bpf_dynptr_get_type(ptr);

	switch (type) {
	case BPF_DYNPTR_TYPE_LOCAL:
	case BPF_DYNPTR_TYPE_RINGBUF:
		return (unsigned long)(ptr->data + ptr->offset + offset);
	case BPF_DYNPTR_TYPE_SKB:
	case BPF_DYNPTR_TYPE_XDP:
		/* skb and xdp dynptrs should use bpf_dynptr_slice / bpf_dynptr_slice_rdwr */
		return 0;
	default:
		WARN_ONCE(true, "bpf_dynptr_data: unknown dynptr type %d\n", type);
		return 0;
	}
}

static const struct bpf_func_proto bpf_dynptr_data_proto = {
	.func		= bpf_dynptr_data,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_DYNPTR_MEM_OR_NULL,
	.arg1_type	= ARG_PTR_TO_DYNPTR | MEM_RDONLY,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_CONST_ALLOC_SIZE_OR_ZERO,
};
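
/* Illustrative usage (a sketch, not part of this file): a local dynptr
 * wraps memory the program already owns and is then accessed through
 * the read/write/data helpers above:
 *
 *	char raw[32];
 *	struct bpf_dynptr dp;
 *	__u8 bytes[8];
 *
 *	if (!bpf_dynptr_from_mem(raw, sizeof(raw), 0, &dp)) {
 *		bpf_dynptr_write(&dp, 0, bytes, sizeof(bytes), 0);
 *		void *p = bpf_dynptr_data(&dp, 0, 8);
 *		if (p)
 *			... 8 bytes at p are directly accessible ...
 *	}
 */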
1649 
1650 const struct bpf_func_proto bpf_get_current_task_proto __weak;
1651 const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
1652 const struct bpf_func_proto bpf_probe_read_user_proto __weak;
1653 const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
1654 const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
1655 const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
1656 const struct bpf_func_proto bpf_task_pt_regs_proto __weak;
1657 
1658 const struct bpf_func_proto *
1659 bpf_base_func_proto(enum bpf_func_id func_id)
1660 {
1661 	switch (func_id) {
1662 	case BPF_FUNC_map_lookup_elem:
1663 		return &bpf_map_lookup_elem_proto;
1664 	case BPF_FUNC_map_update_elem:
1665 		return &bpf_map_update_elem_proto;
1666 	case BPF_FUNC_map_delete_elem:
1667 		return &bpf_map_delete_elem_proto;
1668 	case BPF_FUNC_map_push_elem:
1669 		return &bpf_map_push_elem_proto;
1670 	case BPF_FUNC_map_pop_elem:
1671 		return &bpf_map_pop_elem_proto;
1672 	case BPF_FUNC_map_peek_elem:
1673 		return &bpf_map_peek_elem_proto;
1674 	case BPF_FUNC_map_lookup_percpu_elem:
1675 		return &bpf_map_lookup_percpu_elem_proto;
1676 	case BPF_FUNC_get_prandom_u32:
1677 		return &bpf_get_prandom_u32_proto;
1678 	case BPF_FUNC_get_smp_processor_id:
1679 		return &bpf_get_raw_smp_processor_id_proto;
1680 	case BPF_FUNC_get_numa_node_id:
1681 		return &bpf_get_numa_node_id_proto;
1682 	case BPF_FUNC_tail_call:
1683 		return &bpf_tail_call_proto;
1684 	case BPF_FUNC_ktime_get_ns:
1685 		return &bpf_ktime_get_ns_proto;
1686 	case BPF_FUNC_ktime_get_boot_ns:
1687 		return &bpf_ktime_get_boot_ns_proto;
1688 	case BPF_FUNC_ktime_get_tai_ns:
1689 		return &bpf_ktime_get_tai_ns_proto;
1690 	case BPF_FUNC_ringbuf_output:
1691 		return &bpf_ringbuf_output_proto;
1692 	case BPF_FUNC_ringbuf_reserve:
1693 		return &bpf_ringbuf_reserve_proto;
1694 	case BPF_FUNC_ringbuf_submit:
1695 		return &bpf_ringbuf_submit_proto;
1696 	case BPF_FUNC_ringbuf_discard:
1697 		return &bpf_ringbuf_discard_proto;
1698 	case BPF_FUNC_ringbuf_query:
1699 		return &bpf_ringbuf_query_proto;
1700 	case BPF_FUNC_strncmp:
1701 		return &bpf_strncmp_proto;
1702 	case BPF_FUNC_strtol:
1703 		return &bpf_strtol_proto;
1704 	case BPF_FUNC_strtoul:
1705 		return &bpf_strtoul_proto;
1706 	default:
1707 		break;
1708 	}
1709 
1710 	if (!bpf_capable())
1711 		return NULL;
1712 
1713 	switch (func_id) {
1714 	case BPF_FUNC_spin_lock:
1715 		return &bpf_spin_lock_proto;
1716 	case BPF_FUNC_spin_unlock:
1717 		return &bpf_spin_unlock_proto;
1718 	case BPF_FUNC_jiffies64:
1719 		return &bpf_jiffies64_proto;
1720 	case BPF_FUNC_per_cpu_ptr:
1721 		return &bpf_per_cpu_ptr_proto;
1722 	case BPF_FUNC_this_cpu_ptr:
1723 		return &bpf_this_cpu_ptr_proto;
1724 	case BPF_FUNC_timer_init:
1725 		return &bpf_timer_init_proto;
1726 	case BPF_FUNC_timer_set_callback:
1727 		return &bpf_timer_set_callback_proto;
1728 	case BPF_FUNC_timer_start:
1729 		return &bpf_timer_start_proto;
1730 	case BPF_FUNC_timer_cancel:
1731 		return &bpf_timer_cancel_proto;
1732 	case BPF_FUNC_kptr_xchg:
1733 		return &bpf_kptr_xchg_proto;
1734 	case BPF_FUNC_for_each_map_elem:
1735 		return &bpf_for_each_map_elem_proto;
1736 	case BPF_FUNC_loop:
1737 		return &bpf_loop_proto;
1738 	case BPF_FUNC_user_ringbuf_drain:
1739 		return &bpf_user_ringbuf_drain_proto;
1740 	case BPF_FUNC_ringbuf_reserve_dynptr:
1741 		return &bpf_ringbuf_reserve_dynptr_proto;
1742 	case BPF_FUNC_ringbuf_submit_dynptr:
1743 		return &bpf_ringbuf_submit_dynptr_proto;
1744 	case BPF_FUNC_ringbuf_discard_dynptr:
1745 		return &bpf_ringbuf_discard_dynptr_proto;
1746 	case BPF_FUNC_dynptr_from_mem:
1747 		return &bpf_dynptr_from_mem_proto;
1748 	case BPF_FUNC_dynptr_read:
1749 		return &bpf_dynptr_read_proto;
1750 	case BPF_FUNC_dynptr_write:
1751 		return &bpf_dynptr_write_proto;
1752 	case BPF_FUNC_dynptr_data:
1753 		return &bpf_dynptr_data_proto;
1754 #ifdef CONFIG_CGROUPS
1755 	case BPF_FUNC_cgrp_storage_get:
1756 		return &bpf_cgrp_storage_get_proto;
1757 	case BPF_FUNC_cgrp_storage_delete:
1758 		return &bpf_cgrp_storage_delete_proto;
1759 	case BPF_FUNC_get_current_cgroup_id:
1760 		return &bpf_get_current_cgroup_id_proto;
1761 	case BPF_FUNC_get_current_ancestor_cgroup_id:
1762 		return &bpf_get_current_ancestor_cgroup_id_proto;
1763 #endif
1764 	default:
1765 		break;
1766 	}
1767 
1768 	if (!perfmon_capable())
1769 		return NULL;
1770 
1771 	switch (func_id) {
1772 	case BPF_FUNC_trace_printk:
1773 		return bpf_get_trace_printk_proto();
1774 	case BPF_FUNC_get_current_task:
1775 		return &bpf_get_current_task_proto;
1776 	case BPF_FUNC_get_current_task_btf:
1777 		return &bpf_get_current_task_btf_proto;
1778 	case BPF_FUNC_probe_read_user:
1779 		return &bpf_probe_read_user_proto;
1780 	case BPF_FUNC_probe_read_kernel:
1781 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1782 		       NULL : &bpf_probe_read_kernel_proto;
1783 	case BPF_FUNC_probe_read_user_str:
1784 		return &bpf_probe_read_user_str_proto;
1785 	case BPF_FUNC_probe_read_kernel_str:
1786 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1787 		       NULL : &bpf_probe_read_kernel_str_proto;
1788 	case BPF_FUNC_snprintf_btf:
1789 		return &bpf_snprintf_btf_proto;
1790 	case BPF_FUNC_snprintf:
1791 		return &bpf_snprintf_proto;
1792 	case BPF_FUNC_task_pt_regs:
1793 		return &bpf_task_pt_regs_proto;
1794 	case BPF_FUNC_trace_vprintk:
1795 		return bpf_get_trace_vprintk_proto();
1796 	default:
1797 		return NULL;
1798 	}
1799 }
1800 
1801 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec);
1802 
1803 void bpf_list_head_free(const struct btf_field *field, void *list_head,
1804 			struct bpf_spin_lock *spin_lock)
1805 {
1806 	struct list_head *head = list_head, *orig_head = list_head;
1807 
1808 	BUILD_BUG_ON(sizeof(struct list_head) > sizeof(struct bpf_list_head));
1809 	BUILD_BUG_ON(__alignof__(struct list_head) > __alignof__(struct bpf_list_head));
1810 
1811 	/* Do the actual list draining outside the lock to not hold the lock for
1812 	 * too long, and also prevent deadlocks if tracing programs end up
1813 	 * executing on entry/exit of functions called inside the critical
1814 	 * section, and end up doing map ops that call bpf_list_head_free for
1815 	 * the same map value again.
1816 	 */
1817 	__bpf_spin_lock_irqsave(spin_lock);
1818 	if (!head->next || list_empty(head))
1819 		goto unlock;
1820 	head = head->next;
1821 unlock:
1822 	INIT_LIST_HEAD(orig_head);
1823 	__bpf_spin_unlock_irqrestore(spin_lock);
1824 
1825 	while (head != orig_head) {
1826 		void *obj = head;
1827 
1828 		obj -= field->graph_root.node_offset;
1829 		head = head->next;
1830 		/* The contained type can also have resources, including a
1831 		 * bpf_list_head which needs to be freed.
1832 		 */
1833 		migrate_disable();
1834 		__bpf_obj_drop_impl(obj, field->graph_root.value_rec);
1835 		migrate_enable();
1836 	}
1837 }
1838 
1839 /* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are
1840  * 'rb_node *', so field name of rb_node within containing struct is not
1841  * needed.
1842  *
1843  * Since bpf_rb_tree's node type has a corresponding struct btf_field with
1844  * graph_root.node_offset, it's not necessary to know field name
1845  * or type of node struct
1846  */
1847 #define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \
1848 	for (pos = rb_first_postorder(root); \
1849 	    pos && ({ n = rb_next_postorder(pos); 1; }); \
1850 	    pos = n)
1851 
1852 void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
1853 		      struct bpf_spin_lock *spin_lock)
1854 {
1855 	struct rb_root_cached orig_root, *root = rb_root;
1856 	struct rb_node *pos, *n;
1857 	void *obj;
1858 
1859 	BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root));
1860 	BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root));
1861 
1862 	__bpf_spin_lock_irqsave(spin_lock);
1863 	orig_root = *root;
1864 	*root = RB_ROOT_CACHED;
1865 	__bpf_spin_unlock_irqrestore(spin_lock);
1866 
1867 	bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) {
1868 		obj = pos;
1869 		obj -= field->graph_root.node_offset;
1870 
1871 
1872 		migrate_disable();
1873 		__bpf_obj_drop_impl(obj, field->graph_root.value_rec);
1874 		migrate_enable();
1875 	}
1876 }
1877 
1878 __diag_push();
1879 __diag_ignore_all("-Wmissing-prototypes",
1880 		  "Global functions as their definitions will be in vmlinux BTF");
1881 
1882 __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
1883 {
1884 	struct btf_struct_meta *meta = meta__ign;
1885 	u64 size = local_type_id__k;
1886 	void *p;
1887 
1888 	p = bpf_mem_alloc(&bpf_global_ma, size);
1889 	if (!p)
1890 		return NULL;
1891 	if (meta)
1892 		bpf_obj_init(meta->record, p);
1893 	return p;
1894 }
1895 
1896 /* Must be called under migrate_disable(), as required by bpf_mem_free */
1897 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec)
1898 {
1899 	if (rec && rec->refcount_off >= 0 &&
1900 	    !refcount_dec_and_test((refcount_t *)(p + rec->refcount_off))) {
1901 		/* Object is refcounted and refcount_dec didn't result in 0
1902 		 * refcount. Return without freeing the object.
1903 		 */
1904 		return;
1905 	}
1906 
1907 	if (rec)
1908 		bpf_obj_free_fields(rec, p);
1909 	bpf_mem_free(&bpf_global_ma, p);
1910 }
1911 
1912 __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
1913 {
1914 	struct btf_struct_meta *meta = meta__ign;
1915 	void *p = p__alloc;
1916 
1917 	__bpf_obj_drop_impl(p, meta ? meta->record : NULL);
1918 }
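
/* Example (illustrative sketch, not part of this file): allocating and
 * freeing a local kptr from a BPF program, assuming the bpf_obj_new()/
 * bpf_obj_drop() convenience macros from the BPF selftests'
 * bpf_experimental.h, which lower to the _impl kfuncs above. 'struct foo'
 * is hypothetical.
 *
 *	struct foo {
 *		long data;
 *	};
 *
 *	struct foo *f = bpf_obj_new(typeof(*f));
 *
 *	if (!f)
 *		return 0;
 *	f->data = 42;
 *	bpf_obj_drop(f);
 */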
1919 
1920 __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign)
1921 {
1922 	struct btf_struct_meta *meta = meta__ign;
1923 	struct bpf_refcount *ref;
1924 
1925 	/* Could just cast directly to refcount_t *, but need some code using
1926 	 * bpf_refcount type so that it is emitted in vmlinux BTF
1927 	 */
1928 	ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off);
1929 
1930 	refcount_inc((refcount_t *)ref);
1931 	return (void *)p__refcounted_kptr;
1932 }
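
/* Example (illustrative sketch): taking an additional owned reference to a
 * refcounted local kptr via the bpf_refcount_acquire() wrapper from the
 * selftests' bpf_experimental.h. 'struct node_data' is hypothetical and must
 * contain a struct bpf_refcount field for meta->record->refcount_off to be
 * valid.
 *
 *	struct node_data {
 *		long key;
 *		struct bpf_refcount ref;
 *		struct bpf_rb_node node;
 *	};
 *
 *	struct node_data *n, *m;
 *
 *	n = bpf_obj_new(typeof(*n));
 *	if (!n)
 *		return 0;
 *	m = bpf_refcount_acquire(n);
 *
 * after which n and m are independently owned references that must each be
 * released, e.g. via bpf_obj_drop() or by handing ownership to a collection.
 */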
1933 
1934 static int __bpf_list_add(struct bpf_list_node *node, struct bpf_list_head *head,
1935 			  bool tail, struct btf_record *rec, u64 off)
1936 {
1937 	struct list_head *n = (void *)node, *h = (void *)head;
1938 
1939 	/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
1940 	 * called on its fields, so init here
1941 	 */
1942 	if (unlikely(!h->next))
1943 		INIT_LIST_HEAD(h);
1944 	if (!list_empty(n)) {
1945 		/* Only called from BPF prog, no need to migrate_disable */
1946 		__bpf_obj_drop_impl(n - off, rec);
1947 		return -EINVAL;
1948 	}
1949 
1950 	tail ? list_add_tail(n, h) : list_add(n, h);
1951 
1952 	return 0;
1953 }
1954 
1955 __bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head,
1956 					 struct bpf_list_node *node,
1957 					 void *meta__ign, u64 off)
1958 {
1959 	struct btf_struct_meta *meta = meta__ign;
1960 
1961 	return __bpf_list_add(node, head, false,
1962 			      meta ? meta->record : NULL, off);
1963 }
1964 
1965 __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
1966 					struct bpf_list_node *node,
1967 					void *meta__ign, u64 off)
1968 {
1969 	struct btf_struct_meta *meta = meta__ign;
1970 
1971 	return __bpf_list_add(node, head, true,
1972 			      meta ? meta->record : NULL, off);
1973 }
1974 
1975 static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail)
1976 {
1977 	struct list_head *n, *h = (void *)head;
1978 
1979 	/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
1980 	 * called on its fields, so init here
1981 	 */
1982 	if (unlikely(!h->next))
1983 		INIT_LIST_HEAD(h);
1984 	if (list_empty(h))
1985 		return NULL;
1986 	n = tail ? h->prev : h->next;
1987 	list_del_init(n);
1988 	return (struct bpf_list_node *)n;
1989 }
1990 
1991 __bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
1992 {
1993 	return __bpf_list_del(head, false);
1994 }
1995 
1996 __bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
1997 {
1998 	return __bpf_list_del(head, true);
1999 }
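
/* Example (illustrative sketch): pushing to and popping from a bpf_list_head
 * under its associated bpf_spin_lock, using the wrappers and the __contains()
 * annotation from the BPF selftests' bpf_experimental.h/bpf_helpers.h.
 * 'struct foo', 'lock' and 'head' are hypothetical and must live in the same
 * map value or global data section:
 *
 *	struct foo {
 *		long data;
 *		struct bpf_list_node node;
 *	};
 *
 *	struct bpf_spin_lock lock;
 *	struct bpf_list_head head __contains(foo, node);
 *
 *	struct foo *f = bpf_obj_new(typeof(*f));
 *	struct bpf_list_node *n;
 *
 *	if (!f)
 *		return 0;
 *	bpf_spin_lock(&lock);
 *	bpf_list_push_front(&head, &f->node);
 *	n = bpf_list_pop_front(&head);
 *	bpf_spin_unlock(&lock);
 *	if (n)
 *		bpf_obj_drop(container_of(n, struct foo, node));
 */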
2000 
2001 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
2002 						  struct bpf_rb_node *node)
2003 {
2004 	struct rb_root_cached *r = (struct rb_root_cached *)root;
2005 	struct rb_node *n = (struct rb_node *)node;
2006 
2007 	if (RB_EMPTY_NODE(n))
2008 		return NULL;
2009 
2010 	rb_erase_cached(n, r);
2011 	RB_CLEAR_NODE(n);
2012 	return (struct bpf_rb_node *)n;
2013 }
2014 
2015 /* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF
2016  * program
2017  */
2018 static int __bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node,
2019 			    void *less, struct btf_record *rec, u64 off)
2020 {
2021 	struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node;
2022 	struct rb_node *parent = NULL, *n = (struct rb_node *)node;
2023 	bpf_callback_t cb = (bpf_callback_t)less;
2024 	bool leftmost = true;
2025 
2026 	if (!RB_EMPTY_NODE(n)) {
2027 		/* Only called from BPF prog, no need to migrate_disable */
2028 		__bpf_obj_drop_impl(n - off, rec);
2029 		return -EINVAL;
2030 	}
2031 
2032 	while (*link) {
2033 		parent = *link;
2034 		if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) {
2035 			link = &parent->rb_left;
2036 		} else {
2037 			link = &parent->rb_right;
2038 			leftmost = false;
2039 		}
2040 	}
2041 
2042 	rb_link_node(n, parent, link);
2043 	rb_insert_color_cached(n, (struct rb_root_cached *)root, leftmost);
2044 	return 0;
2045 }
2046 
2047 __bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
2048 				    bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
2049 				    void *meta__ign, u64 off)
2050 {
2051 	struct btf_struct_meta *meta = meta__ign;
2052 
2053 	return __bpf_rbtree_add(root, node, (void *)less, meta ? meta->record : NULL, off);
2054 }
2055 
2056 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
2057 {
2058 	struct rb_root_cached *r = (struct rb_root_cached *)root;
2059 
2060 	return (struct bpf_rb_node *)rb_first_cached(r);
2061 }
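
/* Example (illustrative sketch): inserting a node into a bpf_rb_root under
 * its bpf_spin_lock, using the bpf_rbtree_add() wrapper from the selftests'
 * bpf_experimental.h. 'struct node_data', 'less', 'lock' and 'root' are
 * hypothetical; less() is the BPF-side comparator passed through to
 * __bpf_rbtree_add() above.
 *
 *	struct node_data {
 *		long key;
 *		struct bpf_rb_node node;
 *	};
 *
 *	static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *	{
 *		struct node_data *na = container_of(a, struct node_data, node);
 *		struct node_data *nb = container_of(b, struct node_data, node);
 *
 *		return na->key < nb->key;
 *	}
 *
 *	struct node_data *n = bpf_obj_new(typeof(*n));
 *
 *	if (!n)
 *		return 0;
 *	n->key = 5;
 *	bpf_spin_lock(&lock);
 *	bpf_rbtree_add(&root, &n->node, less);
 *	bpf_spin_unlock(&lock);
 */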
2062 
2063 /**
2064  * bpf_task_acquire - Acquire a reference to a task. A task acquired by this
2065  * kfunc that is not stored in a map as a kptr must be released by calling
2066  * bpf_task_release().
2067  * @p: The task on which a reference is being acquired.
2068  */
2069 __bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p)
2070 {
2071 	if (refcount_inc_not_zero(&p->rcu_users))
2072 		return p;
2073 	return NULL;
2074 }
2075 
2076 /**
2077  * bpf_task_release - Release the reference acquired on a task.
2078  * @p: The task on which a reference is being released.
2079  */
2080 __bpf_kfunc void bpf_task_release(struct task_struct *p)
2081 {
2082 	put_task_struct_rcu_user(p);
2083 }
2084 
2085 #ifdef CONFIG_CGROUPS
2086 /**
2087  * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by
2088  * this kfunc that is not stored in a map as a kptr must be released by
2089  * calling bpf_cgroup_release().
2090  * @cgrp: The cgroup on which a reference is being acquired.
2091  */
2092 __bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
2093 {
2094 	return cgroup_tryget(cgrp) ? cgrp : NULL;
2095 }
2096 
2097 /**
2098  * bpf_cgroup_release - Release the reference acquired on a cgroup.
2099  * If this kfunc is invoked in an RCU read region, the cgroup is guaranteed
2100  * not to be freed until the current grace period has ended, even if its
2101  * refcount drops to 0.
2102  * @cgrp: The cgroup on which a reference is being released.
2103  */
2104 __bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
2105 {
2106 	cgroup_put(cgrp);
2107 }
2108 
2109 /**
2110  * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor
2111  * array. A cgroup returned by this kfunc that is not subsequently stored in a
2112  * map must be released by calling bpf_cgroup_release().
2113  * @cgrp: The cgroup for which we're performing a lookup.
2114  * @level: The level of ancestor to look up.
2115  */
2116 __bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
2117 {
2118 	struct cgroup *ancestor;
2119 
2120 	if (level > cgrp->level || level < 0)
2121 		return NULL;
2122 
2123 	/* cgrp's refcnt could be 0 here, but ancestors can still be accessed */
2124 	ancestor = cgrp->ancestors[level];
2125 	if (!cgroup_tryget(ancestor))
2126 		return NULL;
2127 	return ancestor;
2128 }
2129 
2130 /**
2131  * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this
2132  * kfunc that is not subsequently stored in a map must be released by calling
2133  * bpf_cgroup_release().
2134  * @cgid: cgroup id.
2135  */
2136 __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid)
2137 {
2138 	struct cgroup *cgrp;
2139 
2140 	cgrp = cgroup_get_from_id(cgid);
2141 	if (IS_ERR(cgrp))
2142 		return NULL;
2143 	return cgrp;
2144 }
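
/* Example (illustrative sketch): looking up a cgroup by id, walking to its
 * parent, and releasing both acquired references. 'cgid' is hypothetical.
 *
 *	struct cgroup *cgrp, *parent;
 *
 *	cgrp = bpf_cgroup_from_id(cgid);
 *	if (!cgrp)
 *		return 0;
 *	parent = bpf_cgroup_ancestor(cgrp, cgrp->level - 1);
 *	bpf_cgroup_release(cgrp);
 *	if (parent)
 *		bpf_cgroup_release(parent);
 */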
2145 #endif /* CONFIG_CGROUPS */
2146 
2147 /**
2148  * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up
2149  * in the root pid namespace idr. If a task is returned, it must either be
2150  * stored in a map, or released with bpf_task_release().
2151  * stored in a map or released with bpf_task_release().
2152  */
2153 __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
2154 {
2155 	struct task_struct *p;
2156 
2157 	rcu_read_lock();
2158 	p = find_task_by_pid_ns(pid, &init_pid_ns);
2159 	if (p)
2160 		p = bpf_task_acquire(p);
2161 	rcu_read_unlock();
2162 
2163 	return p;
2164 }
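
/* Example (illustrative sketch): looking up a task by pid from a BPF program
 * and releasing the acquired reference; the NULL check follows the
 * KF_RET_NULL contract.
 *
 *	struct task_struct *t = bpf_task_from_pid(1);
 *
 *	if (!t)
 *		return 0;
 *	bpf_printk("comm: %s", t->comm);
 *	bpf_task_release(t);
 */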
2165 
2166 /**
2167  * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data.
2168  * @ptr: The dynptr whose data slice to retrieve
2169  * @offset: Offset into the dynptr
2170  * @buffer: User-provided buffer to copy contents into
2171  * @buffer__szk: Size (in bytes) of the buffer. This is the length of the
2172  *		 requested slice. This must be a constant.
2173  *
2174  * For non-skb and non-xdp type dynptrs, there is no difference between
2175  * bpf_dynptr_slice and bpf_dynptr_data.
2176  *
2177  * If the intention is to write to the data slice, please use
2178  * bpf_dynptr_slice_rdwr.
2179  *
2180  * The user must check that the returned pointer is not null before using it.
2181  *
2182  * Please note that in the case of skb and xdp dynptrs, bpf_dynptr_slice
2183  * does not change the underlying packet data pointers, so a call to
2184  * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in
2185  * the bpf program.
2186  *
2187  * Return: NULL if the call failed (e.g. invalid dynptr), pointer to a read-only
2188  * data slice (can be either direct pointer to the data or a pointer to the user
2189  * provided buffer, with its contents containing the data, if unable to obtain
2190  * direct pointer)
2191  */
2192 __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset,
2193 				   void *buffer, u32 buffer__szk)
2194 {
2195 	enum bpf_dynptr_type type;
2196 	u32 len = buffer__szk;
2197 	int err;
2198 
2199 	if (!ptr->data)
2200 		return NULL;
2201 
2202 	err = bpf_dynptr_check_off_len(ptr, offset, len);
2203 	if (err)
2204 		return NULL;
2205 
2206 	type = bpf_dynptr_get_type(ptr);
2207 
2208 	switch (type) {
2209 	case BPF_DYNPTR_TYPE_LOCAL:
2210 	case BPF_DYNPTR_TYPE_RINGBUF:
2211 		return ptr->data + ptr->offset + offset;
2212 	case BPF_DYNPTR_TYPE_SKB:
2213 		return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer);
2214 	case BPF_DYNPTR_TYPE_XDP:
2215 	{
2216 		void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len);
2217 		if (xdp_ptr)
2218 			return xdp_ptr;
2219 
2220 		bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer, len, false);
2221 		return buffer;
2222 	}
2223 	default:
2224 		WARN_ONCE(true, "unknown dynptr type %d\n", type);
2225 		return NULL;
2226 	}
2227 }
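
/* Example (illustrative sketch): reading the ethernet header of an skb-type
 * dynptr read-only. 'ptr' is assumed to have been initialized with
 * bpf_dynptr_from_skb(); the returned pointer may be into the packet or into
 * the stack buffer.
 *
 *	unsigned char buffer[sizeof(struct ethhdr)];
 *	struct ethhdr *eth;
 *
 *	eth = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
 *	if (!eth)
 *		return TC_ACT_SHOT;
 *	if (eth->h_proto != bpf_htons(ETH_P_IP))
 *		return TC_ACT_OK;
 */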
2228 
2229 /**
2230  * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data.
2231  * @ptr: The dynptr whose data slice to retrieve
2232  * @offset: Offset into the dynptr
2233  * @buffer: User-provided buffer to copy contents into
2234  * @buffer__szk: Size (in bytes) of the buffer. This is the length of the
2235  *		 requested slice. This must be a constant.
2236  *
2237  * For non-skb and non-xdp type dynptrs, there is no difference between
2238  * bpf_dynptr_slice_rdwr and bpf_dynptr_data.
2239  *
2240  * The returned pointer is writable and may point either directly to the
2241  * dynptr data at the requested offset or to the buffer if a direct data
2242  * pointer cannot be obtained (for example, when the requested slice is in
2243  * the paged area of an skb packet). If the returned pointer is to the
2244  * buffer, the user is responsible for persisting writes by calling
2245  * bpf_dynptr_write(). The usual pattern looks like this:
2246  *
2247  * struct eth_hdr *eth = bpf_dynptr_slice_rdwr(&dynptr, 0, buffer, sizeof(buffer));
2248  * if (!eth)
2249  *	return TC_ACT_SHOT;
2250  *
2251  * // mutate eth header //
2252  *
2253  * if (eth == buffer)
2254  *	bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0);
2255  *
2256  * Please note that, as in the example above, the user must check that the
2257  * returned pointer is not null before using it.
2258  *
2259  * Please also note that in the case of skb and xdp dynptrs, bpf_dynptr_slice_rdwr
2260  * does not change the underlying packet data pointers, so a call to
2261  * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in
2262  * the bpf program.
2263  *
2264  * Return: NULL if the call failed (e.g. invalid dynptr), pointer to a
2265  * data slice (can be either direct pointer to the data or a pointer to the user
2266  * provided buffer, with its contents containing the data, if unable to obtain
2267  * direct pointer)
2268  */
2269 __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr_kern *ptr, u32 offset,
2270 					void *buffer, u32 buffer__szk)
2271 {
2272 	if (!ptr->data || bpf_dynptr_is_rdonly(ptr))
2273 		return NULL;
2274 
2275 	/* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice.
2276 	 *
2277 	 * For skb-type dynptrs, it is safe to write into the returned pointer
2278 	 * if the bpf program allows skb data writes. There are two possibilities
2279 	 * that may occur when calling bpf_dynptr_slice_rdwr:
2280 	 *
2281 	 * 1) The requested slice is in the head of the skb. In this case, the
2282 	 * returned pointer is directly to skb data, and if the skb is cloned, the
2283 	 * verifier will have uncloned it (see bpf_unclone_prologue()) already.
2284 	 * verifier will already have uncloned it (see bpf_unclone_prologue()).
2285 	 *
2286 	 * 2) Some portion of the requested slice is in the paged buffer area.
2287 	 * In this case, the requested data will be copied out into the buffer
2288 	 * and the returned pointer will be a pointer to the buffer. The skb
2289 	 * will not be pulled. To persist the write, the user will need to call
2290 	 * bpf_dynptr_write(), which will pull the skb and commit the write.
2291 	 *
2292 	 * Similarly for xdp programs, if the requested slice is not across xdp
2293 	 * fragments, then a direct pointer will be returned, otherwise the data
2294 	 * will be copied out into the buffer and the user will need to call
2295 	 * bpf_dynptr_write() to commit changes.
2296 	 */
2297 	return bpf_dynptr_slice(ptr, offset, buffer, buffer__szk);
2298 }
2299 
2300 __bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
2301 {
2302 	return obj;
2303 }
2304 
2305 __bpf_kfunc void *bpf_rdonly_cast(void *obj__ign, u32 btf_id__k)
2306 {
2307 	return obj__ign;
2308 }
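
/* Example (illustrative sketch): bpf_cast_to_kern_ctx() re-types a program's
 * ctx to the corresponding kernel struct, while bpf_rdonly_cast() re-types an
 * arbitrary pointer for read-only access. A sketch of the latter, with
 * 'untrusted' hypothetical and the kfunc declared __ksym on the program side:
 *
 *	struct task_struct *t;
 *
 *	t = bpf_rdonly_cast(untrusted, bpf_core_type_id_kernel(struct task_struct));
 *	bpf_printk("pid: %d", t->pid);
 */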
2309 
2310 __bpf_kfunc void bpf_rcu_read_lock(void)
2311 {
2312 	rcu_read_lock();
2313 }
2314 
2315 __bpf_kfunc void bpf_rcu_read_unlock(void)
2316 {
2317 	rcu_read_unlock();
2318 }
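
/* Example (illustrative sketch): accessing an RCU-protected pointer from a
 * BPF program under an explicit RCU read-side critical section, modeled on
 * the BPF selftests. 'task' is assumed to be a trusted tracepoint argument;
 * both kfuncs are declared __ksym on the program side.
 *
 *	struct task_struct *parent;
 *
 *	bpf_rcu_read_lock();
 *	parent = task->real_parent;
 *	if (parent)
 *		bpf_printk("parent pid: %d", parent->pid);
 *	bpf_rcu_read_unlock();
 */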
2319 
2320 __diag_pop();
2321 
2322 BTF_SET8_START(generic_btf_ids)
2323 #ifdef CONFIG_KEXEC_CORE
2324 BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
2325 #endif
2326 BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
2327 BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE)
2328 BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE)
2329 BTF_ID_FLAGS(func, bpf_list_push_front_impl)
2330 BTF_ID_FLAGS(func, bpf_list_push_back_impl)
2331 BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL)
2332 BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
2333 BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
2334 BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
2335 BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL)
2336 BTF_ID_FLAGS(func, bpf_rbtree_add_impl)
2337 BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL)
2338 
2339 #ifdef CONFIG_CGROUPS
2340 BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
2341 BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE)
2342 BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
2343 BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL)
2344 #endif
2345 BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
2346 BTF_SET8_END(generic_btf_ids)
2347 
2348 static const struct btf_kfunc_id_set generic_kfunc_set = {
2349 	.owner = THIS_MODULE,
2350 	.set   = &generic_btf_ids,
2351 };
2352 
2354 BTF_ID_LIST(generic_dtor_ids)
2355 BTF_ID(struct, task_struct)
2356 BTF_ID(func, bpf_task_release)
2357 #ifdef CONFIG_CGROUPS
2358 BTF_ID(struct, cgroup)
2359 BTF_ID(func, bpf_cgroup_release)
2360 #endif
2361 
2362 BTF_SET8_START(common_btf_ids)
2363 BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx)
2364 BTF_ID_FLAGS(func, bpf_rdonly_cast)
2365 BTF_ID_FLAGS(func, bpf_rcu_read_lock)
2366 BTF_ID_FLAGS(func, bpf_rcu_read_unlock)
2367 BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL)
2368 BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL)
2369 BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW)
2370 BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL)
2371 BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY)
2372 BTF_SET8_END(common_btf_ids)
2373 
2374 static const struct btf_kfunc_id_set common_kfunc_set = {
2375 	.owner = THIS_MODULE,
2376 	.set   = &common_btf_ids,
2377 };
2378 
2379 static int __init kfunc_init(void)
2380 {
2381 	int ret;
2382 	const struct btf_id_dtor_kfunc generic_dtors[] = {
2383 		{
2384 			.btf_id       = generic_dtor_ids[0],
2385 			.kfunc_btf_id = generic_dtor_ids[1]
2386 		},
2387 #ifdef CONFIG_CGROUPS
2388 		{
2389 			.btf_id       = generic_dtor_ids[2],
2390 			.kfunc_btf_id = generic_dtor_ids[3]
2391 		},
2392 #endif
2393 	};
2394 
2395 	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set);
2396 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set);
2397 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set);
2398 	ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors,
2399 						  ARRAY_SIZE(generic_dtors),
2400 						  THIS_MODULE);
2401 	return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set);
2402 }
2403 
2404 late_initcall(kfunc_init);
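
/* Example (illustrative sketch): how a kernel module would register its own
 * kfunc set, mirroring kfunc_init() above. 'bpf_testmod_test_kfunc' and the
 * set/ID names are hypothetical.
 *
 *	BTF_SET8_START(bpf_testmod_kfunc_ids)
 *	BTF_ID_FLAGS(func, bpf_testmod_test_kfunc)
 *	BTF_SET8_END(bpf_testmod_kfunc_ids)
 *
 *	static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
 *		.owner = THIS_MODULE,
 *		.set   = &bpf_testmod_kfunc_ids,
 *	};
 *
 *	static int __init bpf_testmod_init(void)
 *	{
 *		return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
 *						 &bpf_testmod_kfunc_set);
 *	}
 */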
2405