xref: /linux/kernel/bpf/helpers.c (revision 3f1c07fc21c68bd3bd2df9d2c9441f6485e934d9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  */
4 #include <linux/bpf.h>
5 #include <linux/btf.h>
6 #include <linux/bpf-cgroup.h>
7 #include <linux/cgroup.h>
8 #include <linux/rcupdate.h>
9 #include <linux/random.h>
10 #include <linux/smp.h>
11 #include <linux/topology.h>
12 #include <linux/ktime.h>
13 #include <linux/sched.h>
14 #include <linux/uidgid.h>
15 #include <linux/filter.h>
16 #include <linux/ctype.h>
17 #include <linux/jiffies.h>
18 #include <linux/pid_namespace.h>
19 #include <linux/poison.h>
20 #include <linux/proc_ns.h>
21 #include <linux/sched/task.h>
22 #include <linux/security.h>
23 #include <linux/btf_ids.h>
24 #include <linux/bpf_mem_alloc.h>
25 #include <linux/kasan.h>
26 #include <linux/bpf_verifier.h>
27 #include <linux/uaccess.h>
28 #include <linux/verification.h>
29 #include <linux/task_work.h>
30 #include <linux/irq_work.h>
31 #include <linux/buildid.h>
32 
33 #include "../../lib/kstrtox.h"
34 
35 /* If kernel subsystem is allowing eBPF programs to call this function,
36  * inside its own verifier_ops->get_func_proto() callback it should return
37  * bpf_map_lookup_elem_proto, so that verifier can properly check the arguments
38  *
39  * Different map implementations will rely on rcu in map methods
40  * lookup/update/delete, therefore eBPF programs must run under rcu lock
41  * if program is allowed to access maps, so check rcu_read_lock_held() or
42  * rcu_read_lock_trace_held() in all three functions.
43  */
BPF_CALL_2(bpf_map_lookup_elem,struct bpf_map *,map,void *,key)44 BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
45 {
46 	WARN_ON_ONCE(!bpf_rcu_lock_held());
47 	return (unsigned long) map->ops->map_lookup_elem(map, key);
48 }
49 
50 const struct bpf_func_proto bpf_map_lookup_elem_proto = {
51 	.func		= bpf_map_lookup_elem,
52 	.gpl_only	= false,
53 	.pkt_access	= true,
54 	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
55 	.arg1_type	= ARG_CONST_MAP_PTR,
56 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
57 };
58 
BPF_CALL_4(bpf_map_update_elem,struct bpf_map *,map,void *,key,void *,value,u64,flags)59 BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
60 	   void *, value, u64, flags)
61 {
62 	WARN_ON_ONCE(!bpf_rcu_lock_held());
63 	return map->ops->map_update_elem(map, key, value, flags);
64 }
65 
66 const struct bpf_func_proto bpf_map_update_elem_proto = {
67 	.func		= bpf_map_update_elem,
68 	.gpl_only	= false,
69 	.pkt_access	= true,
70 	.ret_type	= RET_INTEGER,
71 	.arg1_type	= ARG_CONST_MAP_PTR,
72 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
73 	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
74 	.arg4_type	= ARG_ANYTHING,
75 };
76 
BPF_CALL_2(bpf_map_delete_elem,struct bpf_map *,map,void *,key)77 BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
78 {
79 	WARN_ON_ONCE(!bpf_rcu_lock_held());
80 	return map->ops->map_delete_elem(map, key);
81 }
82 
83 const struct bpf_func_proto bpf_map_delete_elem_proto = {
84 	.func		= bpf_map_delete_elem,
85 	.gpl_only	= false,
86 	.pkt_access	= true,
87 	.ret_type	= RET_INTEGER,
88 	.arg1_type	= ARG_CONST_MAP_PTR,
89 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
90 };
91 
BPF_CALL_3(bpf_map_push_elem,struct bpf_map *,map,void *,value,u64,flags)92 BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
93 {
94 	return map->ops->map_push_elem(map, value, flags);
95 }
96 
97 const struct bpf_func_proto bpf_map_push_elem_proto = {
98 	.func		= bpf_map_push_elem,
99 	.gpl_only	= false,
100 	.pkt_access	= true,
101 	.ret_type	= RET_INTEGER,
102 	.arg1_type	= ARG_CONST_MAP_PTR,
103 	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
104 	.arg3_type	= ARG_ANYTHING,
105 };
106 
BPF_CALL_2(bpf_map_pop_elem,struct bpf_map *,map,void *,value)107 BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
108 {
109 	return map->ops->map_pop_elem(map, value);
110 }
111 
112 const struct bpf_func_proto bpf_map_pop_elem_proto = {
113 	.func		= bpf_map_pop_elem,
114 	.gpl_only	= false,
115 	.ret_type	= RET_INTEGER,
116 	.arg1_type	= ARG_CONST_MAP_PTR,
117 	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
118 };
119 
BPF_CALL_2(bpf_map_peek_elem,struct bpf_map *,map,void *,value)120 BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
121 {
122 	return map->ops->map_peek_elem(map, value);
123 }
124 
125 const struct bpf_func_proto bpf_map_peek_elem_proto = {
126 	.func		= bpf_map_peek_elem,
127 	.gpl_only	= false,
128 	.ret_type	= RET_INTEGER,
129 	.arg1_type	= ARG_CONST_MAP_PTR,
130 	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
131 };
132 
BPF_CALL_3(bpf_map_lookup_percpu_elem,struct bpf_map *,map,void *,key,u32,cpu)133 BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
134 {
135 	WARN_ON_ONCE(!bpf_rcu_lock_held());
136 	return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
137 }
138 
139 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = {
140 	.func		= bpf_map_lookup_percpu_elem,
141 	.gpl_only	= false,
142 	.pkt_access	= true,
143 	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
144 	.arg1_type	= ARG_CONST_MAP_PTR,
145 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
146 	.arg3_type	= ARG_ANYTHING,
147 };
148 
149 const struct bpf_func_proto bpf_get_prandom_u32_proto = {
150 	.func		= bpf_user_rnd_u32,
151 	.gpl_only	= false,
152 	.ret_type	= RET_INTEGER,
153 };
154 
BPF_CALL_0(bpf_get_smp_processor_id)155 BPF_CALL_0(bpf_get_smp_processor_id)
156 {
157 	return smp_processor_id();
158 }
159 
160 const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
161 	.func		= bpf_get_smp_processor_id,
162 	.gpl_only	= false,
163 	.ret_type	= RET_INTEGER,
164 	.allow_fastcall	= true,
165 };
166 
BPF_CALL_0(bpf_get_numa_node_id)167 BPF_CALL_0(bpf_get_numa_node_id)
168 {
169 	return numa_node_id();
170 }
171 
172 const struct bpf_func_proto bpf_get_numa_node_id_proto = {
173 	.func		= bpf_get_numa_node_id,
174 	.gpl_only	= false,
175 	.ret_type	= RET_INTEGER,
176 };
177 
BPF_CALL_0(bpf_ktime_get_ns)178 BPF_CALL_0(bpf_ktime_get_ns)
179 {
180 	/* NMI safe access to clock monotonic */
181 	return ktime_get_mono_fast_ns();
182 }
183 
184 const struct bpf_func_proto bpf_ktime_get_ns_proto = {
185 	.func		= bpf_ktime_get_ns,
186 	.gpl_only	= false,
187 	.ret_type	= RET_INTEGER,
188 };
189 
BPF_CALL_0(bpf_ktime_get_boot_ns)190 BPF_CALL_0(bpf_ktime_get_boot_ns)
191 {
192 	/* NMI safe access to clock boottime */
193 	return ktime_get_boot_fast_ns();
194 }
195 
196 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
197 	.func		= bpf_ktime_get_boot_ns,
198 	.gpl_only	= false,
199 	.ret_type	= RET_INTEGER,
200 };
201 
BPF_CALL_0(bpf_ktime_get_coarse_ns)202 BPF_CALL_0(bpf_ktime_get_coarse_ns)
203 {
204 	return ktime_get_coarse_ns();
205 }
206 
207 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
208 	.func		= bpf_ktime_get_coarse_ns,
209 	.gpl_only	= false,
210 	.ret_type	= RET_INTEGER,
211 };
212 
BPF_CALL_0(bpf_ktime_get_tai_ns)213 BPF_CALL_0(bpf_ktime_get_tai_ns)
214 {
215 	/* NMI safe access to clock tai */
216 	return ktime_get_tai_fast_ns();
217 }
218 
219 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto = {
220 	.func		= bpf_ktime_get_tai_ns,
221 	.gpl_only	= false,
222 	.ret_type	= RET_INTEGER,
223 };
224 
BPF_CALL_0(bpf_get_current_pid_tgid)225 BPF_CALL_0(bpf_get_current_pid_tgid)
226 {
227 	struct task_struct *task = current;
228 
229 	if (unlikely(!task))
230 		return -EINVAL;
231 
232 	return (u64) task->tgid << 32 | task->pid;
233 }
234 
235 const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
236 	.func		= bpf_get_current_pid_tgid,
237 	.gpl_only	= false,
238 	.ret_type	= RET_INTEGER,
239 };
240 
BPF_CALL_0(bpf_get_current_uid_gid)241 BPF_CALL_0(bpf_get_current_uid_gid)
242 {
243 	struct task_struct *task = current;
244 	kuid_t uid;
245 	kgid_t gid;
246 
247 	if (unlikely(!task))
248 		return -EINVAL;
249 
250 	current_uid_gid(&uid, &gid);
251 	return (u64) from_kgid(&init_user_ns, gid) << 32 |
252 		     from_kuid(&init_user_ns, uid);
253 }
254 
255 const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
256 	.func		= bpf_get_current_uid_gid,
257 	.gpl_only	= false,
258 	.ret_type	= RET_INTEGER,
259 };
260 
BPF_CALL_2(bpf_get_current_comm,char *,buf,u32,size)261 BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
262 {
263 	struct task_struct *task = current;
264 
265 	if (unlikely(!task))
266 		goto err_clear;
267 
268 	/* Verifier guarantees that size > 0 */
269 	strscpy_pad(buf, task->comm, size);
270 	return 0;
271 err_clear:
272 	memset(buf, 0, size);
273 	return -EINVAL;
274 }
275 
276 const struct bpf_func_proto bpf_get_current_comm_proto = {
277 	.func		= bpf_get_current_comm,
278 	.gpl_only	= false,
279 	.ret_type	= RET_INTEGER,
280 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
281 	.arg2_type	= ARG_CONST_SIZE,
282 };
283 
284 #if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)
285 
__bpf_spin_lock(struct bpf_spin_lock * lock)286 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
287 {
288 	arch_spinlock_t *l = (void *)lock;
289 	union {
290 		__u32 val;
291 		arch_spinlock_t lock;
292 	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };
293 
294 	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
295 	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
296 	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
297 	preempt_disable();
298 	arch_spin_lock(l);
299 }
300 
__bpf_spin_unlock(struct bpf_spin_lock * lock)301 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
302 {
303 	arch_spinlock_t *l = (void *)lock;
304 
305 	arch_spin_unlock(l);
306 	preempt_enable();
307 }
308 
309 #else
310 
__bpf_spin_lock(struct bpf_spin_lock * lock)311 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
312 {
313 	atomic_t *l = (void *)lock;
314 
315 	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
316 	do {
317 		atomic_cond_read_relaxed(l, !VAL);
318 	} while (atomic_xchg(l, 1));
319 }
320 
__bpf_spin_unlock(struct bpf_spin_lock * lock)321 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
322 {
323 	atomic_t *l = (void *)lock;
324 
325 	atomic_set_release(l, 0);
326 }
327 
328 #endif
329 
330 static DEFINE_PER_CPU(unsigned long, irqsave_flags);
331 
__bpf_spin_lock_irqsave(struct bpf_spin_lock * lock)332 static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
333 {
334 	unsigned long flags;
335 
336 	local_irq_save(flags);
337 	__bpf_spin_lock(lock);
338 	__this_cpu_write(irqsave_flags, flags);
339 }
340 
NOTRACE_BPF_CALL_1(bpf_spin_lock,struct bpf_spin_lock *,lock)341 NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
342 {
343 	__bpf_spin_lock_irqsave(lock);
344 	return 0;
345 }
346 
347 const struct bpf_func_proto bpf_spin_lock_proto = {
348 	.func		= bpf_spin_lock,
349 	.gpl_only	= false,
350 	.ret_type	= RET_VOID,
351 	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
352 	.arg1_btf_id    = BPF_PTR_POISON,
353 };
354 
__bpf_spin_unlock_irqrestore(struct bpf_spin_lock * lock)355 static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
356 {
357 	unsigned long flags;
358 
359 	flags = __this_cpu_read(irqsave_flags);
360 	__bpf_spin_unlock(lock);
361 	local_irq_restore(flags);
362 }
363 
NOTRACE_BPF_CALL_1(bpf_spin_unlock,struct bpf_spin_lock *,lock)364 NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
365 {
366 	__bpf_spin_unlock_irqrestore(lock);
367 	return 0;
368 }
369 
370 const struct bpf_func_proto bpf_spin_unlock_proto = {
371 	.func		= bpf_spin_unlock,
372 	.gpl_only	= false,
373 	.ret_type	= RET_VOID,
374 	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
375 	.arg1_btf_id    = BPF_PTR_POISON,
376 };
377 
copy_map_value_locked(struct bpf_map * map,void * dst,void * src,bool lock_src)378 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
379 			   bool lock_src)
380 {
381 	struct bpf_spin_lock *lock;
382 
383 	if (lock_src)
384 		lock = src + map->record->spin_lock_off;
385 	else
386 		lock = dst + map->record->spin_lock_off;
387 	preempt_disable();
388 	__bpf_spin_lock_irqsave(lock);
389 	copy_map_value(map, dst, src);
390 	__bpf_spin_unlock_irqrestore(lock);
391 	preempt_enable();
392 }
393 
BPF_CALL_0(bpf_jiffies64)394 BPF_CALL_0(bpf_jiffies64)
395 {
396 	return get_jiffies_64();
397 }
398 
399 const struct bpf_func_proto bpf_jiffies64_proto = {
400 	.func		= bpf_jiffies64,
401 	.gpl_only	= false,
402 	.ret_type	= RET_INTEGER,
403 };
404 
405 #ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)406 BPF_CALL_0(bpf_get_current_cgroup_id)
407 {
408 	struct cgroup *cgrp;
409 	u64 cgrp_id;
410 
411 	rcu_read_lock();
412 	cgrp = task_dfl_cgroup(current);
413 	cgrp_id = cgroup_id(cgrp);
414 	rcu_read_unlock();
415 
416 	return cgrp_id;
417 }
418 
419 const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
420 	.func		= bpf_get_current_cgroup_id,
421 	.gpl_only	= false,
422 	.ret_type	= RET_INTEGER,
423 };
424 
BPF_CALL_1(bpf_get_current_ancestor_cgroup_id,int,ancestor_level)425 BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
426 {
427 	struct cgroup *cgrp;
428 	struct cgroup *ancestor;
429 	u64 cgrp_id;
430 
431 	rcu_read_lock();
432 	cgrp = task_dfl_cgroup(current);
433 	ancestor = cgroup_ancestor(cgrp, ancestor_level);
434 	cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
435 	rcu_read_unlock();
436 
437 	return cgrp_id;
438 }
439 
440 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
441 	.func		= bpf_get_current_ancestor_cgroup_id,
442 	.gpl_only	= false,
443 	.ret_type	= RET_INTEGER,
444 	.arg1_type	= ARG_ANYTHING,
445 };
446 #endif /* CONFIG_CGROUPS */
447 
448 #define BPF_STRTOX_BASE_MASK 0x1F
449 
__bpf_strtoull(const char * buf,size_t buf_len,u64 flags,unsigned long long * res,bool * is_negative)450 static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
451 			  unsigned long long *res, bool *is_negative)
452 {
453 	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
454 	const char *cur_buf = buf;
455 	size_t cur_len = buf_len;
456 	unsigned int consumed;
457 	size_t val_len;
458 	char str[64];
459 
460 	if (!buf || !buf_len || !res || !is_negative)
461 		return -EINVAL;
462 
463 	if (base != 0 && base != 8 && base != 10 && base != 16)
464 		return -EINVAL;
465 
466 	if (flags & ~BPF_STRTOX_BASE_MASK)
467 		return -EINVAL;
468 
469 	while (cur_buf < buf + buf_len && isspace(*cur_buf))
470 		++cur_buf;
471 
472 	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
473 	if (*is_negative)
474 		++cur_buf;
475 
476 	consumed = cur_buf - buf;
477 	cur_len -= consumed;
478 	if (!cur_len)
479 		return -EINVAL;
480 
481 	cur_len = min(cur_len, sizeof(str) - 1);
482 	memcpy(str, cur_buf, cur_len);
483 	str[cur_len] = '\0';
484 	cur_buf = str;
485 
486 	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
487 	val_len = _parse_integer(cur_buf, base, res);
488 
489 	if (val_len & KSTRTOX_OVERFLOW)
490 		return -ERANGE;
491 
492 	if (val_len == 0)
493 		return -EINVAL;
494 
495 	cur_buf += val_len;
496 	consumed += cur_buf - str;
497 
498 	return consumed;
499 }
500 
__bpf_strtoll(const char * buf,size_t buf_len,u64 flags,long long * res)501 static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
502 			 long long *res)
503 {
504 	unsigned long long _res;
505 	bool is_negative;
506 	int err;
507 
508 	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
509 	if (err < 0)
510 		return err;
511 	if (is_negative) {
512 		if ((long long)-_res > 0)
513 			return -ERANGE;
514 		*res = -_res;
515 	} else {
516 		if ((long long)_res < 0)
517 			return -ERANGE;
518 		*res = _res;
519 	}
520 	return err;
521 }
522 
BPF_CALL_4(bpf_strtol,const char *,buf,size_t,buf_len,u64,flags,s64 *,res)523 BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
524 	   s64 *, res)
525 {
526 	long long _res;
527 	int err;
528 
529 	*res = 0;
530 	err = __bpf_strtoll(buf, buf_len, flags, &_res);
531 	if (err < 0)
532 		return err;
533 	*res = _res;
534 	return err;
535 }
536 
537 const struct bpf_func_proto bpf_strtol_proto = {
538 	.func		= bpf_strtol,
539 	.gpl_only	= false,
540 	.ret_type	= RET_INTEGER,
541 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
542 	.arg2_type	= ARG_CONST_SIZE,
543 	.arg3_type	= ARG_ANYTHING,
544 	.arg4_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
545 	.arg4_size	= sizeof(s64),
546 };
547 
BPF_CALL_4(bpf_strtoul,const char *,buf,size_t,buf_len,u64,flags,u64 *,res)548 BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
549 	   u64 *, res)
550 {
551 	unsigned long long _res;
552 	bool is_negative;
553 	int err;
554 
555 	*res = 0;
556 	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
557 	if (err < 0)
558 		return err;
559 	if (is_negative)
560 		return -EINVAL;
561 	*res = _res;
562 	return err;
563 }
564 
565 const struct bpf_func_proto bpf_strtoul_proto = {
566 	.func		= bpf_strtoul,
567 	.gpl_only	= false,
568 	.ret_type	= RET_INTEGER,
569 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
570 	.arg2_type	= ARG_CONST_SIZE,
571 	.arg3_type	= ARG_ANYTHING,
572 	.arg4_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
573 	.arg4_size	= sizeof(u64),
574 };
575 
BPF_CALL_3(bpf_strncmp,const char *,s1,u32,s1_sz,const char *,s2)576 BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
577 {
578 	return strncmp(s1, s2, s1_sz);
579 }
580 
581 static const struct bpf_func_proto bpf_strncmp_proto = {
582 	.func		= bpf_strncmp,
583 	.gpl_only	= false,
584 	.ret_type	= RET_INTEGER,
585 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
586 	.arg2_type	= ARG_CONST_SIZE,
587 	.arg3_type	= ARG_PTR_TO_CONST_STR,
588 };
589 
BPF_CALL_4(bpf_get_ns_current_pid_tgid,u64,dev,u64,ino,struct bpf_pidns_info *,nsdata,u32,size)590 BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
591 	   struct bpf_pidns_info *, nsdata, u32, size)
592 {
593 	struct task_struct *task = current;
594 	struct pid_namespace *pidns;
595 	int err = -EINVAL;
596 
597 	if (unlikely(size != sizeof(struct bpf_pidns_info)))
598 		goto clear;
599 
600 	if (unlikely((u64)(dev_t)dev != dev))
601 		goto clear;
602 
603 	if (unlikely(!task))
604 		goto clear;
605 
606 	pidns = task_active_pid_ns(task);
607 	if (unlikely(!pidns)) {
608 		err = -ENOENT;
609 		goto clear;
610 	}
611 
612 	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
613 		goto clear;
614 
615 	nsdata->pid = task_pid_nr_ns(task, pidns);
616 	nsdata->tgid = task_tgid_nr_ns(task, pidns);
617 	return 0;
618 clear:
619 	memset((void *)nsdata, 0, (size_t) size);
620 	return err;
621 }
622 
623 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
624 	.func		= bpf_get_ns_current_pid_tgid,
625 	.gpl_only	= false,
626 	.ret_type	= RET_INTEGER,
627 	.arg1_type	= ARG_ANYTHING,
628 	.arg2_type	= ARG_ANYTHING,
629 	.arg3_type      = ARG_PTR_TO_UNINIT_MEM,
630 	.arg4_type      = ARG_CONST_SIZE,
631 };
632 
633 static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
634 	.func		= bpf_get_raw_cpu_id,
635 	.gpl_only	= false,
636 	.ret_type	= RET_INTEGER,
637 };
638 
BPF_CALL_5(bpf_event_output_data,void *,ctx,struct bpf_map *,map,u64,flags,void *,data,u64,size)639 BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
640 	   u64, flags, void *, data, u64, size)
641 {
642 	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
643 		return -EINVAL;
644 
645 	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
646 }
647 
648 const struct bpf_func_proto bpf_event_output_data_proto =  {
649 	.func		= bpf_event_output_data,
650 	.gpl_only       = true,
651 	.ret_type       = RET_INTEGER,
652 	.arg1_type      = ARG_PTR_TO_CTX,
653 	.arg2_type      = ARG_CONST_MAP_PTR,
654 	.arg3_type      = ARG_ANYTHING,
655 	.arg4_type      = ARG_PTR_TO_MEM | MEM_RDONLY,
656 	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
657 };
658 
BPF_CALL_3(bpf_copy_from_user,void *,dst,u32,size,const void __user *,user_ptr)659 BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
660 	   const void __user *, user_ptr)
661 {
662 	int ret = copy_from_user(dst, user_ptr, size);
663 
664 	if (unlikely(ret)) {
665 		memset(dst, 0, size);
666 		ret = -EFAULT;
667 	}
668 
669 	return ret;
670 }
671 
672 const struct bpf_func_proto bpf_copy_from_user_proto = {
673 	.func		= bpf_copy_from_user,
674 	.gpl_only	= false,
675 	.might_sleep	= true,
676 	.ret_type	= RET_INTEGER,
677 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
678 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
679 	.arg3_type	= ARG_ANYTHING,
680 };
681 
BPF_CALL_5(bpf_copy_from_user_task,void *,dst,u32,size,const void __user *,user_ptr,struct task_struct *,tsk,u64,flags)682 BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size,
683 	   const void __user *, user_ptr, struct task_struct *, tsk, u64, flags)
684 {
685 	int ret;
686 
687 	/* flags is not used yet */
688 	if (unlikely(flags))
689 		return -EINVAL;
690 
691 	if (unlikely(!size))
692 		return 0;
693 
694 	ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0);
695 	if (ret == size)
696 		return 0;
697 
698 	memset(dst, 0, size);
699 	/* Return -EFAULT for partial read */
700 	return ret < 0 ? ret : -EFAULT;
701 }
702 
703 const struct bpf_func_proto bpf_copy_from_user_task_proto = {
704 	.func		= bpf_copy_from_user_task,
705 	.gpl_only	= true,
706 	.might_sleep	= true,
707 	.ret_type	= RET_INTEGER,
708 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
709 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
710 	.arg3_type	= ARG_ANYTHING,
711 	.arg4_type	= ARG_PTR_TO_BTF_ID,
712 	.arg4_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
713 	.arg5_type	= ARG_ANYTHING
714 };
715 
BPF_CALL_2(bpf_per_cpu_ptr,const void *,ptr,u32,cpu)716 BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
717 {
718 	if (cpu >= nr_cpu_ids)
719 		return (unsigned long)NULL;
720 
721 	return (unsigned long)per_cpu_ptr((const void __percpu *)(const uintptr_t)ptr, cpu);
722 }
723 
724 const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
725 	.func		= bpf_per_cpu_ptr,
726 	.gpl_only	= false,
727 	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
728 	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
729 	.arg2_type	= ARG_ANYTHING,
730 };
731 
BPF_CALL_1(bpf_this_cpu_ptr,const void *,percpu_ptr)732 BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
733 {
734 	return (unsigned long)this_cpu_ptr((const void __percpu *)(const uintptr_t)percpu_ptr);
735 }
736 
737 const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
738 	.func		= bpf_this_cpu_ptr,
739 	.gpl_only	= false,
740 	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
741 	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
742 };
743 
bpf_trace_copy_string(char * buf,void * unsafe_ptr,char fmt_ptype,size_t bufsz)744 static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
745 		size_t bufsz)
746 {
747 	void __user *user_ptr = (__force void __user *)unsafe_ptr;
748 
749 	buf[0] = 0;
750 
751 	switch (fmt_ptype) {
752 	case 's':
753 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
754 		if ((unsigned long)unsafe_ptr < TASK_SIZE)
755 			return strncpy_from_user_nofault(buf, user_ptr, bufsz);
756 		fallthrough;
757 #endif
758 	case 'k':
759 		return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
760 	case 'u':
761 		return strncpy_from_user_nofault(buf, user_ptr, bufsz);
762 	}
763 
764 	return -EINVAL;
765 }
766 
767 /* Support executing three nested bprintf helper calls on a given CPU */
768 #define MAX_BPRINTF_NEST_LEVEL	3
769 
770 static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs);
771 static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);
772 
bpf_try_get_buffers(struct bpf_bprintf_buffers ** bufs)773 int bpf_try_get_buffers(struct bpf_bprintf_buffers **bufs)
774 {
775 	int nest_level;
776 
777 	preempt_disable();
778 	nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
779 	if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
780 		this_cpu_dec(bpf_bprintf_nest_level);
781 		preempt_enable();
782 		return -EBUSY;
783 	}
784 	*bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]);
785 
786 	return 0;
787 }
788 
bpf_put_buffers(void)789 void bpf_put_buffers(void)
790 {
791 	if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0))
792 		return;
793 	this_cpu_dec(bpf_bprintf_nest_level);
794 	preempt_enable();
795 }
796 
bpf_bprintf_cleanup(struct bpf_bprintf_data * data)797 void bpf_bprintf_cleanup(struct bpf_bprintf_data *data)
798 {
799 	if (!data->bin_args && !data->buf)
800 		return;
801 	bpf_put_buffers();
802 }
803 
804 /*
805  * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
806  *
807  * Returns a negative value if fmt is an invalid format string or 0 otherwise.
808  *
809  * This can be used in two ways:
810  * - Format string verification only: when data->get_bin_args is false
811  * - Arguments preparation: in addition to the above verification, it writes in
812  *   data->bin_args a binary representation of arguments usable by bstr_printf
813  *   where pointers from BPF have been sanitized.
814  *
815  * In argument preparation mode, if 0 is returned, safe temporary buffers are
816  * allocated and bpf_bprintf_cleanup should be called to free them after use.
817  */
bpf_bprintf_prepare(const char * fmt,u32 fmt_size,const u64 * raw_args,u32 num_args,struct bpf_bprintf_data * data)818 int bpf_bprintf_prepare(const char *fmt, u32 fmt_size, const u64 *raw_args,
819 			u32 num_args, struct bpf_bprintf_data *data)
820 {
821 	bool get_buffers = (data->get_bin_args && num_args) || data->get_buf;
822 	char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
823 	struct bpf_bprintf_buffers *buffers = NULL;
824 	size_t sizeof_cur_arg, sizeof_cur_ip;
825 	int err, i, num_spec = 0;
826 	u64 cur_arg;
827 	char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";
828 
829 	fmt_end = strnchr(fmt, fmt_size, 0);
830 	if (!fmt_end)
831 		return -EINVAL;
832 	fmt_size = fmt_end - fmt;
833 
834 	if (get_buffers && bpf_try_get_buffers(&buffers))
835 		return -EBUSY;
836 
837 	if (data->get_bin_args) {
838 		if (num_args)
839 			tmp_buf = buffers->bin_args;
840 		tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS;
841 		data->bin_args = (u32 *)tmp_buf;
842 	}
843 
844 	if (data->get_buf)
845 		data->buf = buffers->buf;
846 
847 	for (i = 0; i < fmt_size; i++) {
848 		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
849 			err = -EINVAL;
850 			goto out;
851 		}
852 
853 		if (fmt[i] != '%')
854 			continue;
855 
856 		if (fmt[i + 1] == '%') {
857 			i++;
858 			continue;
859 		}
860 
861 		if (num_spec >= num_args) {
862 			err = -EINVAL;
863 			goto out;
864 		}
865 
866 		/* The string is zero-terminated so if fmt[i] != 0, we can
867 		 * always access fmt[i + 1], in the worst case it will be a 0
868 		 */
869 		i++;
870 
871 		/* skip optional "[0 +-][num]" width formatting field */
872 		while (fmt[i] == '0' || fmt[i] == '+'  || fmt[i] == '-' ||
873 		       fmt[i] == ' ')
874 			i++;
875 		if (fmt[i] >= '1' && fmt[i] <= '9') {
876 			i++;
877 			while (fmt[i] >= '0' && fmt[i] <= '9')
878 				i++;
879 		}
880 
881 		if (fmt[i] == 'p') {
882 			sizeof_cur_arg = sizeof(long);
883 
884 			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
885 			    ispunct(fmt[i + 1])) {
886 				if (tmp_buf)
887 					cur_arg = raw_args[num_spec];
888 				goto nocopy_fmt;
889 			}
890 
891 			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
892 			    fmt[i + 2] == 's') {
893 				fmt_ptype = fmt[i + 1];
894 				i += 2;
895 				goto fmt_str;
896 			}
897 
898 			if (fmt[i + 1] == 'K' ||
899 			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
900 			    fmt[i + 1] == 'S') {
901 				if (tmp_buf)
902 					cur_arg = raw_args[num_spec];
903 				i++;
904 				goto nocopy_fmt;
905 			}
906 
907 			if (fmt[i + 1] == 'B') {
908 				if (tmp_buf)  {
909 					err = snprintf(tmp_buf,
910 						       (tmp_buf_end - tmp_buf),
911 						       "%pB",
912 						       (void *)(long)raw_args[num_spec]);
913 					tmp_buf += (err + 1);
914 				}
915 
916 				i++;
917 				num_spec++;
918 				continue;
919 			}
920 
921 			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
922 			if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
923 			    (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
924 				err = -EINVAL;
925 				goto out;
926 			}
927 
928 			i += 2;
929 			if (!tmp_buf)
930 				goto nocopy_fmt;
931 
932 			sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
933 			if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
934 				err = -ENOSPC;
935 				goto out;
936 			}
937 
938 			unsafe_ptr = (char *)(long)raw_args[num_spec];
939 			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
940 						       sizeof_cur_ip);
941 			if (err < 0)
942 				memset(cur_ip, 0, sizeof_cur_ip);
943 
944 			/* hack: bstr_printf expects IP addresses to be
945 			 * pre-formatted as strings, ironically, the easiest way
946 			 * to do that is to call snprintf.
947 			 */
948 			ip_spec[2] = fmt[i - 1];
949 			ip_spec[3] = fmt[i];
950 			err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
951 				       ip_spec, &cur_ip);
952 
953 			tmp_buf += err + 1;
954 			num_spec++;
955 
956 			continue;
957 		} else if (fmt[i] == 's') {
958 			fmt_ptype = fmt[i];
959 fmt_str:
960 			if (fmt[i + 1] != 0 &&
961 			    !isspace(fmt[i + 1]) &&
962 			    !ispunct(fmt[i + 1])) {
963 				err = -EINVAL;
964 				goto out;
965 			}
966 
967 			if (!tmp_buf)
968 				goto nocopy_fmt;
969 
970 			if (tmp_buf_end == tmp_buf) {
971 				err = -ENOSPC;
972 				goto out;
973 			}
974 
975 			unsafe_ptr = (char *)(long)raw_args[num_spec];
976 			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
977 						    fmt_ptype,
978 						    tmp_buf_end - tmp_buf);
979 			if (err < 0) {
980 				tmp_buf[0] = '\0';
981 				err = 1;
982 			}
983 
984 			tmp_buf += err;
985 			num_spec++;
986 
987 			continue;
988 		} else if (fmt[i] == 'c') {
989 			if (!tmp_buf)
990 				goto nocopy_fmt;
991 
992 			if (tmp_buf_end == tmp_buf) {
993 				err = -ENOSPC;
994 				goto out;
995 			}
996 
997 			*tmp_buf = raw_args[num_spec];
998 			tmp_buf++;
999 			num_spec++;
1000 
1001 			continue;
1002 		}
1003 
1004 		sizeof_cur_arg = sizeof(int);
1005 
1006 		if (fmt[i] == 'l') {
1007 			sizeof_cur_arg = sizeof(long);
1008 			i++;
1009 		}
1010 		if (fmt[i] == 'l') {
1011 			sizeof_cur_arg = sizeof(long long);
1012 			i++;
1013 		}
1014 
1015 		if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
1016 		    fmt[i] != 'x' && fmt[i] != 'X') {
1017 			err = -EINVAL;
1018 			goto out;
1019 		}
1020 
1021 		if (tmp_buf)
1022 			cur_arg = raw_args[num_spec];
1023 nocopy_fmt:
1024 		if (tmp_buf) {
1025 			tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
1026 			if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
1027 				err = -ENOSPC;
1028 				goto out;
1029 			}
1030 
1031 			if (sizeof_cur_arg == 8) {
1032 				*(u32 *)tmp_buf = *(u32 *)&cur_arg;
1033 				*(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
1034 			} else {
1035 				*(u32 *)tmp_buf = (u32)(long)cur_arg;
1036 			}
1037 			tmp_buf += sizeof_cur_arg;
1038 		}
1039 		num_spec++;
1040 	}
1041 
1042 	err = 0;
1043 out:
1044 	if (err)
1045 		bpf_bprintf_cleanup(data);
1046 	return err;
1047 }
1048 
BPF_CALL_5(bpf_snprintf,char *,str,u32,str_size,char *,fmt,const void *,args,u32,data_len)1049 BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
1050 	   const void *, args, u32, data_len)
1051 {
1052 	struct bpf_bprintf_data data = {
1053 		.get_bin_args	= true,
1054 	};
1055 	int err, num_args;
1056 
1057 	if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
1058 	    (data_len && !args))
1059 		return -EINVAL;
1060 	num_args = data_len / 8;
1061 
1062 	/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
1063 	 * can safely give an unbounded size.
1064 	 */
1065 	err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data);
1066 	if (err < 0)
1067 		return err;
1068 
1069 	err = bstr_printf(str, str_size, fmt, data.bin_args);
1070 
1071 	bpf_bprintf_cleanup(&data);
1072 
1073 	return err + 1;
1074 }
1075 
1076 const struct bpf_func_proto bpf_snprintf_proto = {
1077 	.func		= bpf_snprintf,
1078 	.gpl_only	= true,
1079 	.ret_type	= RET_INTEGER,
1080 	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
1081 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1082 	.arg3_type	= ARG_PTR_TO_CONST_STR,
1083 	.arg4_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
1084 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1085 };
1086 
map_key_from_value(struct bpf_map * map,void * value,u32 * arr_idx)1087 static void *map_key_from_value(struct bpf_map *map, void *value, u32 *arr_idx)
1088 {
1089 	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
1090 		struct bpf_array *array = container_of(map, struct bpf_array, map);
1091 
1092 		*arr_idx = ((char *)value - array->value) / array->elem_size;
1093 		return arr_idx;
1094 	}
1095 	return (void *)value - round_up(map->key_size, 8);
1096 }
1097 
1098 struct bpf_async_cb {
1099 	struct bpf_map *map;
1100 	struct bpf_prog *prog;
1101 	void __rcu *callback_fn;
1102 	void *value;
1103 	union {
1104 		struct rcu_head rcu;
1105 		struct work_struct delete_work;
1106 	};
1107 	u64 flags;
1108 };
1109 
1110 /* BPF map elements can contain 'struct bpf_timer'.
1111  * Such map owns all of its BPF timers.
1112  * 'struct bpf_timer' is allocated as part of map element allocation
1113  * and it's zero initialized.
1114  * That space is used to keep 'struct bpf_async_kern'.
1115  * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and
1116  * remembers 'struct bpf_map *' pointer it's part of.
1117  * bpf_timer_set_callback() increments prog refcnt and assign bpf callback_fn.
1118  * bpf_timer_start() arms the timer.
1119  * If user space reference to a map goes to zero at this point
1120  * ops->map_release_uref callback is responsible for cancelling the timers,
1121  * freeing their memory, and decrementing prog's refcnts.
1122  * bpf_timer_cancel() cancels the timer and decrements prog's refcnt.
1123  * Inner maps can contain bpf timers as well. ops->map_release_uref is
1124  * freeing the timers when inner map is replaced or deleted by user space.
1125  */
1126 struct bpf_hrtimer {
1127 	struct bpf_async_cb cb;
1128 	struct hrtimer timer;
1129 	atomic_t cancelling;
1130 };
1131 
1132 struct bpf_work {
1133 	struct bpf_async_cb cb;
1134 	struct work_struct work;
1135 	struct work_struct delete_work;
1136 };
1137 
1138 /* the actual struct hidden inside uapi struct bpf_timer and bpf_wq */
1139 struct bpf_async_kern {
1140 	union {
1141 		struct bpf_async_cb *cb;
1142 		struct bpf_hrtimer *timer;
1143 		struct bpf_work *work;
1144 	};
1145 	/* bpf_spin_lock is used here instead of spinlock_t to make
1146 	 * sure that it always fits into space reserved by struct bpf_timer
1147 	 * regardless of LOCKDEP and spinlock debug flags.
1148 	 */
1149 	struct bpf_spin_lock lock;
1150 } __attribute__((aligned(8)));
1151 
1152 enum bpf_async_type {
1153 	BPF_ASYNC_TYPE_TIMER = 0,
1154 	BPF_ASYNC_TYPE_WQ,
1155 };
1156 
1157 static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);
1158 
bpf_timer_cb(struct hrtimer * hrtimer)1159 static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
1160 {
1161 	struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
1162 	struct bpf_map *map = t->cb.map;
1163 	void *value = t->cb.value;
1164 	bpf_callback_t callback_fn;
1165 	void *key;
1166 	u32 idx;
1167 
1168 	BTF_TYPE_EMIT(struct bpf_timer);
1169 	callback_fn = rcu_dereference_check(t->cb.callback_fn, rcu_read_lock_bh_held());
1170 	if (!callback_fn)
1171 		goto out;
1172 
1173 	/* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
1174 	 * cannot be preempted by another bpf_timer_cb() on the same cpu.
1175 	 * Remember the timer this callback is servicing to prevent
1176 	 * deadlock if callback_fn() calls bpf_timer_cancel() or
1177 	 * bpf_map_delete_elem() on the same timer.
1178 	 */
1179 	this_cpu_write(hrtimer_running, t);
1180 
1181 	key = map_key_from_value(map, value, &idx);
1182 
1183 	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
1184 	/* The verifier checked that return value is zero. */
1185 
1186 	this_cpu_write(hrtimer_running, NULL);
1187 out:
1188 	return HRTIMER_NORESTART;
1189 }
1190 
bpf_wq_work(struct work_struct * work)1191 static void bpf_wq_work(struct work_struct *work)
1192 {
1193 	struct bpf_work *w = container_of(work, struct bpf_work, work);
1194 	struct bpf_async_cb *cb = &w->cb;
1195 	struct bpf_map *map = cb->map;
1196 	bpf_callback_t callback_fn;
1197 	void *value = cb->value;
1198 	void *key;
1199 	u32 idx;
1200 
1201 	BTF_TYPE_EMIT(struct bpf_wq);
1202 
1203 	callback_fn = READ_ONCE(cb->callback_fn);
1204 	if (!callback_fn)
1205 		return;
1206 
1207 	key = map_key_from_value(map, value, &idx);
1208 
1209         rcu_read_lock_trace();
1210         migrate_disable();
1211 
1212 	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
1213 
1214 	migrate_enable();
1215 	rcu_read_unlock_trace();
1216 }
1217 
bpf_async_cb_rcu_free(struct rcu_head * rcu)1218 static void bpf_async_cb_rcu_free(struct rcu_head *rcu)
1219 {
1220 	struct bpf_async_cb *cb = container_of(rcu, struct bpf_async_cb, rcu);
1221 
1222 	kfree_nolock(cb);
1223 }
1224 
bpf_wq_delete_work(struct work_struct * work)1225 static void bpf_wq_delete_work(struct work_struct *work)
1226 {
1227 	struct bpf_work *w = container_of(work, struct bpf_work, delete_work);
1228 
1229 	cancel_work_sync(&w->work);
1230 
1231 	call_rcu(&w->cb.rcu, bpf_async_cb_rcu_free);
1232 }
1233 
bpf_timer_delete_work(struct work_struct * work)1234 static void bpf_timer_delete_work(struct work_struct *work)
1235 {
1236 	struct bpf_hrtimer *t = container_of(work, struct bpf_hrtimer, cb.delete_work);
1237 
1238 	/* Cancel the timer and wait for callback to complete if it was running.
1239 	 * If hrtimer_cancel() can be safely called it's safe to call
1240 	 * call_rcu() right after for both preallocated and non-preallocated
1241 	 * maps.  The async->cb = NULL was already done and no code path can see
1242 	 * address 't' anymore. Timer if armed for existing bpf_hrtimer before
1243 	 * bpf_timer_cancel_and_free will have been cancelled.
1244 	 */
1245 	hrtimer_cancel(&t->timer);
1246 	call_rcu(&t->cb.rcu, bpf_async_cb_rcu_free);
1247 }
1248 
__bpf_async_init(struct bpf_async_kern * async,struct bpf_map * map,u64 flags,enum bpf_async_type type)1249 static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags,
1250 			    enum bpf_async_type type)
1251 {
1252 	struct bpf_async_cb *cb;
1253 	struct bpf_hrtimer *t;
1254 	struct bpf_work *w;
1255 	clockid_t clockid;
1256 	size_t size;
1257 	int ret = 0;
1258 
1259 	if (in_nmi())
1260 		return -EOPNOTSUPP;
1261 
1262 	switch (type) {
1263 	case BPF_ASYNC_TYPE_TIMER:
1264 		size = sizeof(struct bpf_hrtimer);
1265 		break;
1266 	case BPF_ASYNC_TYPE_WQ:
1267 		size = sizeof(struct bpf_work);
1268 		break;
1269 	default:
1270 		return -EINVAL;
1271 	}
1272 
1273 	__bpf_spin_lock_irqsave(&async->lock);
1274 	t = async->timer;
1275 	if (t) {
1276 		ret = -EBUSY;
1277 		goto out;
1278 	}
1279 
1280 	cb = bpf_map_kmalloc_nolock(map, size, 0, map->numa_node);
1281 	if (!cb) {
1282 		ret = -ENOMEM;
1283 		goto out;
1284 	}
1285 
1286 	switch (type) {
1287 	case BPF_ASYNC_TYPE_TIMER:
1288 		clockid = flags & (MAX_CLOCKS - 1);
1289 		t = (struct bpf_hrtimer *)cb;
1290 
1291 		atomic_set(&t->cancelling, 0);
1292 		INIT_WORK(&t->cb.delete_work, bpf_timer_delete_work);
1293 		hrtimer_setup(&t->timer, bpf_timer_cb, clockid, HRTIMER_MODE_REL_SOFT);
1294 		cb->value = (void *)async - map->record->timer_off;
1295 		break;
1296 	case BPF_ASYNC_TYPE_WQ:
1297 		w = (struct bpf_work *)cb;
1298 
1299 		INIT_WORK(&w->work, bpf_wq_work);
1300 		INIT_WORK(&w->delete_work, bpf_wq_delete_work);
1301 		cb->value = (void *)async - map->record->wq_off;
1302 		break;
1303 	}
1304 	cb->map = map;
1305 	cb->prog = NULL;
1306 	cb->flags = flags;
1307 	rcu_assign_pointer(cb->callback_fn, NULL);
1308 
1309 	WRITE_ONCE(async->cb, cb);
1310 	/* Guarantee the order between async->cb and map->usercnt. So
1311 	 * when there are concurrent uref release and bpf timer init, either
1312 	 * bpf_timer_cancel_and_free() called by uref release reads a no-NULL
1313 	 * timer or atomic64_read() below returns a zero usercnt.
1314 	 */
1315 	smp_mb();
1316 	if (!atomic64_read(&map->usercnt)) {
1317 		/* maps with timers must be either held by user space
1318 		 * or pinned in bpffs.
1319 		 */
1320 		WRITE_ONCE(async->cb, NULL);
1321 		kfree_nolock(cb);
1322 		ret = -EPERM;
1323 	}
1324 out:
1325 	__bpf_spin_unlock_irqrestore(&async->lock);
1326 	return ret;
1327 }
1328 
BPF_CALL_3(bpf_timer_init,struct bpf_async_kern *,timer,struct bpf_map *,map,u64,flags)1329 BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map,
1330 	   u64, flags)
1331 {
1332 	clock_t clockid = flags & (MAX_CLOCKS - 1);
1333 
1334 	BUILD_BUG_ON(MAX_CLOCKS != 16);
1335 	BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_timer));
1336 	BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_timer));
1337 
1338 	if (flags >= MAX_CLOCKS ||
1339 	    /* similar to timerfd except _ALARM variants are not supported */
1340 	    (clockid != CLOCK_MONOTONIC &&
1341 	     clockid != CLOCK_REALTIME &&
1342 	     clockid != CLOCK_BOOTTIME))
1343 		return -EINVAL;
1344 
1345 	return __bpf_async_init(timer, map, flags, BPF_ASYNC_TYPE_TIMER);
1346 }
1347 
1348 static const struct bpf_func_proto bpf_timer_init_proto = {
1349 	.func		= bpf_timer_init,
1350 	.gpl_only	= true,
1351 	.ret_type	= RET_INTEGER,
1352 	.arg1_type	= ARG_PTR_TO_TIMER,
1353 	.arg2_type	= ARG_CONST_MAP_PTR,
1354 	.arg3_type	= ARG_ANYTHING,
1355 };
1356 
__bpf_async_set_callback(struct bpf_async_kern * async,void * callback_fn,struct bpf_prog_aux * aux,unsigned int flags,enum bpf_async_type type)1357 static int __bpf_async_set_callback(struct bpf_async_kern *async, void *callback_fn,
1358 				    struct bpf_prog_aux *aux, unsigned int flags,
1359 				    enum bpf_async_type type)
1360 {
1361 	struct bpf_prog *prev, *prog = aux->prog;
1362 	struct bpf_async_cb *cb;
1363 	int ret = 0;
1364 
1365 	if (in_nmi())
1366 		return -EOPNOTSUPP;
1367 	__bpf_spin_lock_irqsave(&async->lock);
1368 	cb = async->cb;
1369 	if (!cb) {
1370 		ret = -EINVAL;
1371 		goto out;
1372 	}
1373 	if (!atomic64_read(&cb->map->usercnt)) {
1374 		/* maps with timers must be either held by user space
1375 		 * or pinned in bpffs. Otherwise timer might still be
1376 		 * running even when bpf prog is detached and user space
1377 		 * is gone, since map_release_uref won't ever be called.
1378 		 */
1379 		ret = -EPERM;
1380 		goto out;
1381 	}
1382 	prev = cb->prog;
1383 	if (prev != prog) {
1384 		/* Bump prog refcnt once. Every bpf_timer_set_callback()
1385 		 * can pick different callback_fn-s within the same prog.
1386 		 */
1387 		prog = bpf_prog_inc_not_zero(prog);
1388 		if (IS_ERR(prog)) {
1389 			ret = PTR_ERR(prog);
1390 			goto out;
1391 		}
1392 		if (prev)
1393 			/* Drop prev prog refcnt when swapping with new prog */
1394 			bpf_prog_put(prev);
1395 		cb->prog = prog;
1396 	}
1397 	rcu_assign_pointer(cb->callback_fn, callback_fn);
1398 out:
1399 	__bpf_spin_unlock_irqrestore(&async->lock);
1400 	return ret;
1401 }
1402 
BPF_CALL_3(bpf_timer_set_callback,struct bpf_async_kern *,timer,void *,callback_fn,struct bpf_prog_aux *,aux)1403 BPF_CALL_3(bpf_timer_set_callback, struct bpf_async_kern *, timer, void *, callback_fn,
1404 	   struct bpf_prog_aux *, aux)
1405 {
1406 	return __bpf_async_set_callback(timer, callback_fn, aux, 0, BPF_ASYNC_TYPE_TIMER);
1407 }
1408 
1409 static const struct bpf_func_proto bpf_timer_set_callback_proto = {
1410 	.func		= bpf_timer_set_callback,
1411 	.gpl_only	= true,
1412 	.ret_type	= RET_INTEGER,
1413 	.arg1_type	= ARG_PTR_TO_TIMER,
1414 	.arg2_type	= ARG_PTR_TO_FUNC,
1415 };
1416 
BPF_CALL_3(bpf_timer_start,struct bpf_async_kern *,timer,u64,nsecs,u64,flags)1417 BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, timer, u64, nsecs, u64, flags)
1418 {
1419 	struct bpf_hrtimer *t;
1420 	int ret = 0;
1421 	enum hrtimer_mode mode;
1422 
1423 	if (in_nmi())
1424 		return -EOPNOTSUPP;
1425 	if (flags & ~(BPF_F_TIMER_ABS | BPF_F_TIMER_CPU_PIN))
1426 		return -EINVAL;
1427 	__bpf_spin_lock_irqsave(&timer->lock);
1428 	t = timer->timer;
1429 	if (!t || !t->cb.prog) {
1430 		ret = -EINVAL;
1431 		goto out;
1432 	}
1433 
1434 	if (flags & BPF_F_TIMER_ABS)
1435 		mode = HRTIMER_MODE_ABS_SOFT;
1436 	else
1437 		mode = HRTIMER_MODE_REL_SOFT;
1438 
1439 	if (flags & BPF_F_TIMER_CPU_PIN)
1440 		mode |= HRTIMER_MODE_PINNED;
1441 
1442 	hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
1443 out:
1444 	__bpf_spin_unlock_irqrestore(&timer->lock);
1445 	return ret;
1446 }
1447 
1448 static const struct bpf_func_proto bpf_timer_start_proto = {
1449 	.func		= bpf_timer_start,
1450 	.gpl_only	= true,
1451 	.ret_type	= RET_INTEGER,
1452 	.arg1_type	= ARG_PTR_TO_TIMER,
1453 	.arg2_type	= ARG_ANYTHING,
1454 	.arg3_type	= ARG_ANYTHING,
1455 };
1456 
drop_prog_refcnt(struct bpf_async_cb * async)1457 static void drop_prog_refcnt(struct bpf_async_cb *async)
1458 {
1459 	struct bpf_prog *prog = async->prog;
1460 
1461 	if (prog) {
1462 		bpf_prog_put(prog);
1463 		async->prog = NULL;
1464 		rcu_assign_pointer(async->callback_fn, NULL);
1465 	}
1466 }
1467 
BPF_CALL_1(bpf_timer_cancel,struct bpf_async_kern *,timer)1468 BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer)
1469 {
1470 	struct bpf_hrtimer *t, *cur_t;
1471 	bool inc = false;
1472 	int ret = 0;
1473 
1474 	if (in_nmi())
1475 		return -EOPNOTSUPP;
1476 	rcu_read_lock();
1477 	__bpf_spin_lock_irqsave(&timer->lock);
1478 	t = timer->timer;
1479 	if (!t) {
1480 		ret = -EINVAL;
1481 		goto out;
1482 	}
1483 
1484 	cur_t = this_cpu_read(hrtimer_running);
1485 	if (cur_t == t) {
1486 		/* If bpf callback_fn is trying to bpf_timer_cancel()
1487 		 * its own timer the hrtimer_cancel() will deadlock
1488 		 * since it waits for callback_fn to finish.
1489 		 */
1490 		ret = -EDEADLK;
1491 		goto out;
1492 	}
1493 
1494 	/* Only account in-flight cancellations when invoked from a timer
1495 	 * callback, since we want to avoid waiting only if other _callbacks_
1496 	 * are waiting on us, to avoid introducing lockups. Non-callback paths
1497 	 * are ok, since nobody would synchronously wait for their completion.
1498 	 */
1499 	if (!cur_t)
1500 		goto drop;
1501 	atomic_inc(&t->cancelling);
1502 	/* Need full barrier after relaxed atomic_inc */
1503 	smp_mb__after_atomic();
1504 	inc = true;
1505 	if (atomic_read(&cur_t->cancelling)) {
1506 		/* We're cancelling timer t, while some other timer callback is
1507 		 * attempting to cancel us. In such a case, it might be possible
1508 		 * that timer t belongs to the other callback, or some other
1509 		 * callback waiting upon it (creating transitive dependencies
1510 		 * upon us), and we will enter a deadlock if we continue
1511 		 * cancelling and waiting for it synchronously, since it might
1512 		 * do the same. Bail!
1513 		 */
1514 		ret = -EDEADLK;
1515 		goto out;
1516 	}
1517 drop:
1518 	drop_prog_refcnt(&t->cb);
1519 out:
1520 	__bpf_spin_unlock_irqrestore(&timer->lock);
1521 	/* Cancel the timer and wait for associated callback to finish
1522 	 * if it was running.
1523 	 */
1524 	ret = ret ?: hrtimer_cancel(&t->timer);
1525 	if (inc)
1526 		atomic_dec(&t->cancelling);
1527 	rcu_read_unlock();
1528 	return ret;
1529 }
1530 
1531 static const struct bpf_func_proto bpf_timer_cancel_proto = {
1532 	.func		= bpf_timer_cancel,
1533 	.gpl_only	= true,
1534 	.ret_type	= RET_INTEGER,
1535 	.arg1_type	= ARG_PTR_TO_TIMER,
1536 };
1537 
__bpf_async_cancel_and_free(struct bpf_async_kern * async)1538 static struct bpf_async_cb *__bpf_async_cancel_and_free(struct bpf_async_kern *async)
1539 {
1540 	struct bpf_async_cb *cb;
1541 
1542 	/* Performance optimization: read async->cb without lock first. */
1543 	if (!READ_ONCE(async->cb))
1544 		return NULL;
1545 
1546 	__bpf_spin_lock_irqsave(&async->lock);
1547 	/* re-read it under lock */
1548 	cb = async->cb;
1549 	if (!cb)
1550 		goto out;
1551 	drop_prog_refcnt(cb);
1552 	/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
1553 	 * this timer, since it won't be initialized.
1554 	 */
1555 	WRITE_ONCE(async->cb, NULL);
1556 out:
1557 	__bpf_spin_unlock_irqrestore(&async->lock);
1558 	return cb;
1559 }
1560 
1561 /* This function is called by map_delete/update_elem for individual element and
1562  * by ops->map_release_uref when the user space reference to a map reaches zero.
1563  */
bpf_timer_cancel_and_free(void * val)1564 void bpf_timer_cancel_and_free(void *val)
1565 {
1566 	struct bpf_hrtimer *t;
1567 
1568 	t = (struct bpf_hrtimer *)__bpf_async_cancel_and_free(val);
1569 
1570 	if (!t)
1571 		return;
1572 	/* We check that bpf_map_delete/update_elem() was called from timer
1573 	 * callback_fn. In such case we don't call hrtimer_cancel() (since it
1574 	 * will deadlock) and don't call hrtimer_try_to_cancel() (since it will
1575 	 * just return -1). Though callback_fn is still running on this cpu it's
1576 	 * safe to do kfree(t) because bpf_timer_cb() read everything it needed
1577 	 * from 't'. The bpf subprog callback_fn won't be able to access 't',
1578 	 * since async->cb = NULL was already done. The timer will be
1579 	 * effectively cancelled because bpf_timer_cb() will return
1580 	 * HRTIMER_NORESTART.
1581 	 *
1582 	 * However, it is possible the timer callback_fn calling us armed the
1583 	 * timer _before_ calling us, such that failing to cancel it here will
1584 	 * cause it to possibly use struct hrtimer after freeing bpf_hrtimer.
1585 	 * Therefore, we _need_ to cancel any outstanding timers before we do
1586 	 * call_rcu, even though no more timers can be armed.
1587 	 *
1588 	 * Moreover, we need to schedule work even if timer does not belong to
1589 	 * the calling callback_fn, as on two different CPUs, we can end up in a
1590 	 * situation where both sides run in parallel, try to cancel one
1591 	 * another, and we end up waiting on both sides in hrtimer_cancel
1592 	 * without making forward progress, since timer1 depends on time2
1593 	 * callback to finish, and vice versa.
1594 	 *
1595 	 *  CPU 1 (timer1_cb)			CPU 2 (timer2_cb)
1596 	 *  bpf_timer_cancel_and_free(timer2)	bpf_timer_cancel_and_free(timer1)
1597 	 *
1598 	 * To avoid these issues, punt to workqueue context when we are in a
1599 	 * timer callback.
1600 	 */
1601 	if (this_cpu_read(hrtimer_running)) {
1602 		queue_work(system_dfl_wq, &t->cb.delete_work);
1603 		return;
1604 	}
1605 
1606 	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
1607 		/* If the timer is running on other CPU, also use a kworker to
1608 		 * wait for the completion of the timer instead of trying to
1609 		 * acquire a sleepable lock in hrtimer_cancel() to wait for its
1610 		 * completion.
1611 		 */
1612 		if (hrtimer_try_to_cancel(&t->timer) >= 0)
1613 			call_rcu(&t->cb.rcu, bpf_async_cb_rcu_free);
1614 		else
1615 			queue_work(system_dfl_wq, &t->cb.delete_work);
1616 	} else {
1617 		bpf_timer_delete_work(&t->cb.delete_work);
1618 	}
1619 }
1620 
1621 /* This function is called by map_delete/update_elem for individual element and
1622  * by ops->map_release_uref when the user space reference to a map reaches zero.
1623  */
bpf_wq_cancel_and_free(void * val)1624 void bpf_wq_cancel_and_free(void *val)
1625 {
1626 	struct bpf_work *work;
1627 
1628 	BTF_TYPE_EMIT(struct bpf_wq);
1629 
1630 	work = (struct bpf_work *)__bpf_async_cancel_and_free(val);
1631 	if (!work)
1632 		return;
1633 	/* Trigger cancel of the sleepable work, but *do not* wait for
1634 	 * it to finish if it was running as we might not be in a
1635 	 * sleepable context.
1636 	 * kfree will be called once the work has finished.
1637 	 */
1638 	schedule_work(&work->delete_work);
1639 }
1640 
BPF_CALL_2(bpf_kptr_xchg,void *,dst,void *,ptr)1641 BPF_CALL_2(bpf_kptr_xchg, void *, dst, void *, ptr)
1642 {
1643 	unsigned long *kptr = dst;
1644 
1645 	/* This helper may be inlined by verifier. */
1646 	return xchg(kptr, (unsigned long)ptr);
1647 }
1648 
1649 /* Unlike other PTR_TO_BTF_ID helpers the btf_id in bpf_kptr_xchg()
1650  * helper is determined dynamically by the verifier. Use BPF_PTR_POISON to
1651  * denote type that verifier will determine.
1652  */
1653 static const struct bpf_func_proto bpf_kptr_xchg_proto = {
1654 	.func         = bpf_kptr_xchg,
1655 	.gpl_only     = false,
1656 	.ret_type     = RET_PTR_TO_BTF_ID_OR_NULL,
1657 	.ret_btf_id   = BPF_PTR_POISON,
1658 	.arg1_type    = ARG_KPTR_XCHG_DEST,
1659 	.arg2_type    = ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE,
1660 	.arg2_btf_id  = BPF_PTR_POISON,
1661 };
1662 
1663 struct bpf_dynptr_file_impl {
1664 	struct freader freader;
1665 	/* 64 bit offset and size overriding 32 bit ones in bpf_dynptr_kern */
1666 	u64 offset;
1667 	u64 size;
1668 };
1669 
1670 /* Since the upper 8 bits of dynptr->size is reserved, the
1671  * maximum supported size is 2^24 - 1.
1672  */
1673 #define DYNPTR_MAX_SIZE	((1UL << 24) - 1)
1674 #define DYNPTR_TYPE_SHIFT	28
1675 #define DYNPTR_SIZE_MASK	0xFFFFFF
1676 #define DYNPTR_RDONLY_BIT	BIT(31)
1677 
__bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern * ptr)1678 bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
1679 {
1680 	return ptr->size & DYNPTR_RDONLY_BIT;
1681 }
1682 
bpf_dynptr_set_rdonly(struct bpf_dynptr_kern * ptr)1683 void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
1684 {
1685 	ptr->size |= DYNPTR_RDONLY_BIT;
1686 }
1687 
bpf_dynptr_set_type(struct bpf_dynptr_kern * ptr,enum bpf_dynptr_type type)1688 static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type)
1689 {
1690 	ptr->size |= type << DYNPTR_TYPE_SHIFT;
1691 }
1692 
bpf_dynptr_get_type(const struct bpf_dynptr_kern * ptr)1693 static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
1694 {
1695 	return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT;
1696 }
1697 
__bpf_dynptr_size(const struct bpf_dynptr_kern * ptr)1698 u64 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
1699 {
1700 	if (bpf_dynptr_get_type(ptr) == BPF_DYNPTR_TYPE_FILE) {
1701 		struct bpf_dynptr_file_impl *df = ptr->data;
1702 
1703 		return df->size;
1704 	}
1705 
1706 	return ptr->size & DYNPTR_SIZE_MASK;
1707 }
1708 
bpf_dynptr_advance_offset(struct bpf_dynptr_kern * ptr,u64 off)1709 static void bpf_dynptr_advance_offset(struct bpf_dynptr_kern *ptr, u64 off)
1710 {
1711 	if (bpf_dynptr_get_type(ptr) == BPF_DYNPTR_TYPE_FILE) {
1712 		struct bpf_dynptr_file_impl *df = ptr->data;
1713 
1714 		df->offset += off;
1715 		return;
1716 	}
1717 	ptr->offset += off;
1718 }
1719 
bpf_dynptr_set_size(struct bpf_dynptr_kern * ptr,u64 new_size)1720 static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u64 new_size)
1721 {
1722 	u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK;
1723 
1724 	if (bpf_dynptr_get_type(ptr) == BPF_DYNPTR_TYPE_FILE) {
1725 		struct bpf_dynptr_file_impl *df = ptr->data;
1726 
1727 		df->size = new_size;
1728 		return;
1729 	}
1730 	ptr->size = (u32)new_size | metadata;
1731 }
1732 
bpf_dynptr_check_size(u64 size)1733 int bpf_dynptr_check_size(u64 size)
1734 {
1735 	return size > DYNPTR_MAX_SIZE ? -E2BIG : 0;
1736 }
1737 
bpf_file_fetch_bytes(struct bpf_dynptr_file_impl * df,u64 offset,void * buf,u64 len)1738 static int bpf_file_fetch_bytes(struct bpf_dynptr_file_impl *df, u64 offset, void *buf, u64 len)
1739 {
1740 	const void *ptr;
1741 
1742 	if (!buf)
1743 		return -EINVAL;
1744 
1745 	df->freader.buf = buf;
1746 	df->freader.buf_sz = len;
1747 	ptr = freader_fetch(&df->freader, offset + df->offset, len);
1748 	if (!ptr)
1749 		return df->freader.err;
1750 
1751 	if (ptr != buf) /* Force copying into the buffer */
1752 		memcpy(buf, ptr, len);
1753 
1754 	return 0;
1755 }
1756 
bpf_dynptr_init(struct bpf_dynptr_kern * ptr,void * data,enum bpf_dynptr_type type,u32 offset,u32 size)1757 void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
1758 		     enum bpf_dynptr_type type, u32 offset, u32 size)
1759 {
1760 	ptr->data = data;
1761 	ptr->offset = offset;
1762 	ptr->size = size;
1763 	bpf_dynptr_set_type(ptr, type);
1764 }
1765 
bpf_dynptr_set_null(struct bpf_dynptr_kern * ptr)1766 void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
1767 {
1768 	memset(ptr, 0, sizeof(*ptr));
1769 }
1770 
BPF_CALL_4(bpf_dynptr_from_mem,void *,data,u64,size,u64,flags,struct bpf_dynptr_kern *,ptr)1771 BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u64, size, u64, flags, struct bpf_dynptr_kern *, ptr)
1772 {
1773 	int err;
1774 
1775 	BTF_TYPE_EMIT(struct bpf_dynptr);
1776 
1777 	err = bpf_dynptr_check_size(size);
1778 	if (err)
1779 		goto error;
1780 
1781 	/* flags is currently unsupported */
1782 	if (flags) {
1783 		err = -EINVAL;
1784 		goto error;
1785 	}
1786 
1787 	bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size);
1788 
1789 	return 0;
1790 
1791 error:
1792 	bpf_dynptr_set_null(ptr);
1793 	return err;
1794 }
1795 
1796 static const struct bpf_func_proto bpf_dynptr_from_mem_proto = {
1797 	.func		= bpf_dynptr_from_mem,
1798 	.gpl_only	= false,
1799 	.ret_type	= RET_INTEGER,
1800 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
1801 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1802 	.arg3_type	= ARG_ANYTHING,
1803 	.arg4_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT | MEM_WRITE,
1804 };
1805 
__bpf_dynptr_read(void * dst,u64 len,const struct bpf_dynptr_kern * src,u64 offset,u64 flags)1806 static int __bpf_dynptr_read(void *dst, u64 len, const struct bpf_dynptr_kern *src,
1807 			     u64 offset, u64 flags)
1808 {
1809 	enum bpf_dynptr_type type;
1810 	int err;
1811 
1812 	if (!src->data || flags)
1813 		return -EINVAL;
1814 
1815 	err = bpf_dynptr_check_off_len(src, offset, len);
1816 	if (err)
1817 		return err;
1818 
1819 	type = bpf_dynptr_get_type(src);
1820 
1821 	switch (type) {
1822 	case BPF_DYNPTR_TYPE_LOCAL:
1823 	case BPF_DYNPTR_TYPE_RINGBUF:
1824 		/* Source and destination may possibly overlap, hence use memmove to
1825 		 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr
1826 		 * pointing to overlapping PTR_TO_MAP_VALUE regions.
1827 		 */
1828 		memmove(dst, src->data + src->offset + offset, len);
1829 		return 0;
1830 	case BPF_DYNPTR_TYPE_SKB:
1831 		return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len);
1832 	case BPF_DYNPTR_TYPE_XDP:
1833 		return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len);
1834 	case BPF_DYNPTR_TYPE_SKB_META:
1835 		memmove(dst, bpf_skb_meta_pointer(src->data, src->offset + offset), len);
1836 		return 0;
1837 	case BPF_DYNPTR_TYPE_FILE:
1838 		return bpf_file_fetch_bytes(src->data, offset, dst, len);
1839 	default:
1840 		WARN_ONCE(true, "bpf_dynptr_read: unknown dynptr type %d\n", type);
1841 		return -EFAULT;
1842 	}
1843 }
1844 
BPF_CALL_5(bpf_dynptr_read,void *,dst,u64,len,const struct bpf_dynptr_kern *,src,u64,offset,u64,flags)1845 BPF_CALL_5(bpf_dynptr_read, void *, dst, u64, len, const struct bpf_dynptr_kern *, src,
1846 	   u64, offset, u64, flags)
1847 {
1848 	return __bpf_dynptr_read(dst, len, src, offset, flags);
1849 }
1850 
1851 static const struct bpf_func_proto bpf_dynptr_read_proto = {
1852 	.func		= bpf_dynptr_read,
1853 	.gpl_only	= false,
1854 	.ret_type	= RET_INTEGER,
1855 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
1856 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1857 	.arg3_type	= ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1858 	.arg4_type	= ARG_ANYTHING,
1859 	.arg5_type	= ARG_ANYTHING,
1860 };
1861 
__bpf_dynptr_write(const struct bpf_dynptr_kern * dst,u64 offset,void * src,u64 len,u64 flags)1862 int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u64 offset, void *src,
1863 		       u64 len, u64 flags)
1864 {
1865 	enum bpf_dynptr_type type;
1866 	int err;
1867 
1868 	if (!dst->data || __bpf_dynptr_is_rdonly(dst))
1869 		return -EINVAL;
1870 
1871 	err = bpf_dynptr_check_off_len(dst, offset, len);
1872 	if (err)
1873 		return err;
1874 
1875 	type = bpf_dynptr_get_type(dst);
1876 
1877 	switch (type) {
1878 	case BPF_DYNPTR_TYPE_LOCAL:
1879 	case BPF_DYNPTR_TYPE_RINGBUF:
1880 		if (flags)
1881 			return -EINVAL;
1882 		/* Source and destination may possibly overlap, hence use memmove to
1883 		 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr
1884 		 * pointing to overlapping PTR_TO_MAP_VALUE regions.
1885 		 */
1886 		memmove(dst->data + dst->offset + offset, src, len);
1887 		return 0;
1888 	case BPF_DYNPTR_TYPE_SKB:
1889 		return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len,
1890 					     flags);
1891 	case BPF_DYNPTR_TYPE_XDP:
1892 		if (flags)
1893 			return -EINVAL;
1894 		return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len);
1895 	case BPF_DYNPTR_TYPE_SKB_META:
1896 		return __bpf_skb_meta_store_bytes(dst->data, dst->offset + offset, src,
1897 						  len, flags);
1898 	default:
1899 		WARN_ONCE(true, "bpf_dynptr_write: unknown dynptr type %d\n", type);
1900 		return -EFAULT;
1901 	}
1902 }
1903 
BPF_CALL_5(bpf_dynptr_write,const struct bpf_dynptr_kern *,dst,u64,offset,void *,src,u64,len,u64,flags)1904 BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u64, offset, void *, src,
1905 	   u64, len, u64, flags)
1906 {
1907 	return __bpf_dynptr_write(dst, offset, src, len, flags);
1908 }
1909 
1910 static const struct bpf_func_proto bpf_dynptr_write_proto = {
1911 	.func		= bpf_dynptr_write,
1912 	.gpl_only	= false,
1913 	.ret_type	= RET_INTEGER,
1914 	.arg1_type	= ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1915 	.arg2_type	= ARG_ANYTHING,
1916 	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1917 	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
1918 	.arg5_type	= ARG_ANYTHING,
1919 };
1920 
BPF_CALL_3(bpf_dynptr_data,const struct bpf_dynptr_kern *,ptr,u64,offset,u64,len)1921 BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u64, offset, u64, len)
1922 {
1923 	enum bpf_dynptr_type type;
1924 	int err;
1925 
1926 	if (!ptr->data)
1927 		return 0;
1928 
1929 	err = bpf_dynptr_check_off_len(ptr, offset, len);
1930 	if (err)
1931 		return 0;
1932 
1933 	if (__bpf_dynptr_is_rdonly(ptr))
1934 		return 0;
1935 
1936 	type = bpf_dynptr_get_type(ptr);
1937 
1938 	switch (type) {
1939 	case BPF_DYNPTR_TYPE_LOCAL:
1940 	case BPF_DYNPTR_TYPE_RINGBUF:
1941 		return (unsigned long)(ptr->data + ptr->offset + offset);
1942 	case BPF_DYNPTR_TYPE_SKB:
1943 	case BPF_DYNPTR_TYPE_XDP:
1944 	case BPF_DYNPTR_TYPE_SKB_META:
1945 		/* skb and xdp dynptrs should use bpf_dynptr_slice / bpf_dynptr_slice_rdwr */
1946 		return 0;
1947 	default:
1948 		WARN_ONCE(true, "bpf_dynptr_data: unknown dynptr type %d\n", type);
1949 		return 0;
1950 	}
1951 }
1952 
1953 static const struct bpf_func_proto bpf_dynptr_data_proto = {
1954 	.func		= bpf_dynptr_data,
1955 	.gpl_only	= false,
1956 	.ret_type	= RET_PTR_TO_DYNPTR_MEM_OR_NULL,
1957 	.arg1_type	= ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1958 	.arg2_type	= ARG_ANYTHING,
1959 	.arg3_type	= ARG_CONST_ALLOC_SIZE_OR_ZERO,
1960 };
1961 
1962 const struct bpf_func_proto bpf_get_current_task_proto __weak;
1963 const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
1964 const struct bpf_func_proto bpf_probe_read_user_proto __weak;
1965 const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
1966 const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
1967 const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
1968 const struct bpf_func_proto bpf_task_pt_regs_proto __weak;
1969 const struct bpf_func_proto bpf_perf_event_read_proto __weak;
1970 const struct bpf_func_proto bpf_send_signal_proto __weak;
1971 const struct bpf_func_proto bpf_send_signal_thread_proto __weak;
1972 const struct bpf_func_proto bpf_get_task_stack_sleepable_proto __weak;
1973 const struct bpf_func_proto bpf_get_task_stack_proto __weak;
1974 const struct bpf_func_proto bpf_get_branch_snapshot_proto __weak;
1975 
1976 const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id,const struct bpf_prog * prog)1977 bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1978 {
1979 	switch (func_id) {
1980 	case BPF_FUNC_map_lookup_elem:
1981 		return &bpf_map_lookup_elem_proto;
1982 	case BPF_FUNC_map_update_elem:
1983 		return &bpf_map_update_elem_proto;
1984 	case BPF_FUNC_map_delete_elem:
1985 		return &bpf_map_delete_elem_proto;
1986 	case BPF_FUNC_map_push_elem:
1987 		return &bpf_map_push_elem_proto;
1988 	case BPF_FUNC_map_pop_elem:
1989 		return &bpf_map_pop_elem_proto;
1990 	case BPF_FUNC_map_peek_elem:
1991 		return &bpf_map_peek_elem_proto;
1992 	case BPF_FUNC_map_lookup_percpu_elem:
1993 		return &bpf_map_lookup_percpu_elem_proto;
1994 	case BPF_FUNC_get_prandom_u32:
1995 		return &bpf_get_prandom_u32_proto;
1996 	case BPF_FUNC_get_smp_processor_id:
1997 		return &bpf_get_raw_smp_processor_id_proto;
1998 	case BPF_FUNC_get_numa_node_id:
1999 		return &bpf_get_numa_node_id_proto;
2000 	case BPF_FUNC_tail_call:
2001 		return &bpf_tail_call_proto;
2002 	case BPF_FUNC_ktime_get_ns:
2003 		return &bpf_ktime_get_ns_proto;
2004 	case BPF_FUNC_ktime_get_boot_ns:
2005 		return &bpf_ktime_get_boot_ns_proto;
2006 	case BPF_FUNC_ktime_get_tai_ns:
2007 		return &bpf_ktime_get_tai_ns_proto;
2008 	case BPF_FUNC_ringbuf_output:
2009 		return &bpf_ringbuf_output_proto;
2010 	case BPF_FUNC_ringbuf_reserve:
2011 		return &bpf_ringbuf_reserve_proto;
2012 	case BPF_FUNC_ringbuf_submit:
2013 		return &bpf_ringbuf_submit_proto;
2014 	case BPF_FUNC_ringbuf_discard:
2015 		return &bpf_ringbuf_discard_proto;
2016 	case BPF_FUNC_ringbuf_query:
2017 		return &bpf_ringbuf_query_proto;
2018 	case BPF_FUNC_strncmp:
2019 		return &bpf_strncmp_proto;
2020 	case BPF_FUNC_strtol:
2021 		return &bpf_strtol_proto;
2022 	case BPF_FUNC_strtoul:
2023 		return &bpf_strtoul_proto;
2024 	case BPF_FUNC_get_current_pid_tgid:
2025 		return &bpf_get_current_pid_tgid_proto;
2026 	case BPF_FUNC_get_ns_current_pid_tgid:
2027 		return &bpf_get_ns_current_pid_tgid_proto;
2028 	case BPF_FUNC_get_current_uid_gid:
2029 		return &bpf_get_current_uid_gid_proto;
2030 	default:
2031 		break;
2032 	}
2033 
2034 	if (!bpf_token_capable(prog->aux->token, CAP_BPF))
2035 		return NULL;
2036 
2037 	switch (func_id) {
2038 	case BPF_FUNC_spin_lock:
2039 		return &bpf_spin_lock_proto;
2040 	case BPF_FUNC_spin_unlock:
2041 		return &bpf_spin_unlock_proto;
2042 	case BPF_FUNC_jiffies64:
2043 		return &bpf_jiffies64_proto;
2044 	case BPF_FUNC_per_cpu_ptr:
2045 		return &bpf_per_cpu_ptr_proto;
2046 	case BPF_FUNC_this_cpu_ptr:
2047 		return &bpf_this_cpu_ptr_proto;
2048 	case BPF_FUNC_timer_init:
2049 		return &bpf_timer_init_proto;
2050 	case BPF_FUNC_timer_set_callback:
2051 		return &bpf_timer_set_callback_proto;
2052 	case BPF_FUNC_timer_start:
2053 		return &bpf_timer_start_proto;
2054 	case BPF_FUNC_timer_cancel:
2055 		return &bpf_timer_cancel_proto;
2056 	case BPF_FUNC_kptr_xchg:
2057 		return &bpf_kptr_xchg_proto;
2058 	case BPF_FUNC_for_each_map_elem:
2059 		return &bpf_for_each_map_elem_proto;
2060 	case BPF_FUNC_loop:
2061 		return &bpf_loop_proto;
2062 	case BPF_FUNC_user_ringbuf_drain:
2063 		return &bpf_user_ringbuf_drain_proto;
2064 	case BPF_FUNC_ringbuf_reserve_dynptr:
2065 		return &bpf_ringbuf_reserve_dynptr_proto;
2066 	case BPF_FUNC_ringbuf_submit_dynptr:
2067 		return &bpf_ringbuf_submit_dynptr_proto;
2068 	case BPF_FUNC_ringbuf_discard_dynptr:
2069 		return &bpf_ringbuf_discard_dynptr_proto;
2070 	case BPF_FUNC_dynptr_from_mem:
2071 		return &bpf_dynptr_from_mem_proto;
2072 	case BPF_FUNC_dynptr_read:
2073 		return &bpf_dynptr_read_proto;
2074 	case BPF_FUNC_dynptr_write:
2075 		return &bpf_dynptr_write_proto;
2076 	case BPF_FUNC_dynptr_data:
2077 		return &bpf_dynptr_data_proto;
2078 #ifdef CONFIG_CGROUPS
2079 	case BPF_FUNC_cgrp_storage_get:
2080 		return &bpf_cgrp_storage_get_proto;
2081 	case BPF_FUNC_cgrp_storage_delete:
2082 		return &bpf_cgrp_storage_delete_proto;
2083 	case BPF_FUNC_get_current_cgroup_id:
2084 		return &bpf_get_current_cgroup_id_proto;
2085 	case BPF_FUNC_get_current_ancestor_cgroup_id:
2086 		return &bpf_get_current_ancestor_cgroup_id_proto;
2087 	case BPF_FUNC_current_task_under_cgroup:
2088 		return &bpf_current_task_under_cgroup_proto;
2089 #endif
2090 #ifdef CONFIG_CGROUP_NET_CLASSID
2091 	case BPF_FUNC_get_cgroup_classid:
2092 		return &bpf_get_cgroup_classid_curr_proto;
2093 #endif
2094 	case BPF_FUNC_task_storage_get:
2095 		if (bpf_prog_check_recur(prog))
2096 			return &bpf_task_storage_get_recur_proto;
2097 		return &bpf_task_storage_get_proto;
2098 	case BPF_FUNC_task_storage_delete:
2099 		if (bpf_prog_check_recur(prog))
2100 			return &bpf_task_storage_delete_recur_proto;
2101 		return &bpf_task_storage_delete_proto;
2102 	default:
2103 		break;
2104 	}
2105 
2106 	if (!bpf_token_capable(prog->aux->token, CAP_PERFMON))
2107 		return NULL;
2108 
2109 	switch (func_id) {
2110 	case BPF_FUNC_trace_printk:
2111 		return bpf_get_trace_printk_proto();
2112 	case BPF_FUNC_get_current_task:
2113 		return &bpf_get_current_task_proto;
2114 	case BPF_FUNC_get_current_task_btf:
2115 		return &bpf_get_current_task_btf_proto;
2116 	case BPF_FUNC_get_current_comm:
2117 		return &bpf_get_current_comm_proto;
2118 	case BPF_FUNC_probe_read_user:
2119 		return &bpf_probe_read_user_proto;
2120 	case BPF_FUNC_probe_read_kernel:
2121 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
2122 		       NULL : &bpf_probe_read_kernel_proto;
2123 	case BPF_FUNC_probe_read_user_str:
2124 		return &bpf_probe_read_user_str_proto;
2125 	case BPF_FUNC_probe_read_kernel_str:
2126 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
2127 		       NULL : &bpf_probe_read_kernel_str_proto;
2128 	case BPF_FUNC_copy_from_user:
2129 		return &bpf_copy_from_user_proto;
2130 	case BPF_FUNC_copy_from_user_task:
2131 		return &bpf_copy_from_user_task_proto;
2132 	case BPF_FUNC_snprintf_btf:
2133 		return &bpf_snprintf_btf_proto;
2134 	case BPF_FUNC_snprintf:
2135 		return &bpf_snprintf_proto;
2136 	case BPF_FUNC_task_pt_regs:
2137 		return &bpf_task_pt_regs_proto;
2138 	case BPF_FUNC_trace_vprintk:
2139 		return bpf_get_trace_vprintk_proto();
2140 	case BPF_FUNC_perf_event_read_value:
2141 		return bpf_get_perf_event_read_value_proto();
2142 	case BPF_FUNC_perf_event_read:
2143 		return &bpf_perf_event_read_proto;
2144 	case BPF_FUNC_send_signal:
2145 		return &bpf_send_signal_proto;
2146 	case BPF_FUNC_send_signal_thread:
2147 		return &bpf_send_signal_thread_proto;
2148 	case BPF_FUNC_get_task_stack:
2149 		return prog->sleepable ? &bpf_get_task_stack_sleepable_proto
2150 				       : &bpf_get_task_stack_proto;
2151 	case BPF_FUNC_get_branch_snapshot:
2152 		return &bpf_get_branch_snapshot_proto;
2153 	case BPF_FUNC_find_vma:
2154 		return &bpf_find_vma_proto;
2155 	default:
2156 		return NULL;
2157 	}
2158 }
2159 EXPORT_SYMBOL_GPL(bpf_base_func_proto);
2160 
bpf_list_head_free(const struct btf_field * field,void * list_head,struct bpf_spin_lock * spin_lock)2161 void bpf_list_head_free(const struct btf_field *field, void *list_head,
2162 			struct bpf_spin_lock *spin_lock)
2163 {
2164 	struct list_head *head = list_head, *orig_head = list_head;
2165 
2166 	BUILD_BUG_ON(sizeof(struct list_head) > sizeof(struct bpf_list_head));
2167 	BUILD_BUG_ON(__alignof__(struct list_head) > __alignof__(struct bpf_list_head));
2168 
2169 	/* Do the actual list draining outside the lock to not hold the lock for
2170 	 * too long, and also prevent deadlocks if tracing programs end up
2171 	 * executing on entry/exit of functions called inside the critical
2172 	 * section, and end up doing map ops that call bpf_list_head_free for
2173 	 * the same map value again.
2174 	 */
2175 	__bpf_spin_lock_irqsave(spin_lock);
2176 	if (!head->next || list_empty(head))
2177 		goto unlock;
2178 	head = head->next;
2179 unlock:
2180 	INIT_LIST_HEAD(orig_head);
2181 	__bpf_spin_unlock_irqrestore(spin_lock);
2182 
2183 	while (head != orig_head) {
2184 		void *obj = head;
2185 
2186 		obj -= field->graph_root.node_offset;
2187 		head = head->next;
2188 		/* The contained type can also have resources, including a
2189 		 * bpf_list_head which needs to be freed.
2190 		 */
2191 		__bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
2192 	}
2193 }
2194 
2195 /* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are
2196  * 'rb_node *', so field name of rb_node within containing struct is not
2197  * needed.
2198  *
2199  * Since bpf_rb_tree's node type has a corresponding struct btf_field with
2200  * graph_root.node_offset, it's not necessary to know field name
2201  * or type of node struct
2202  */
2203 #define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \
2204 	for (pos = rb_first_postorder(root); \
2205 	    pos && ({ n = rb_next_postorder(pos); 1; }); \
2206 	    pos = n)
2207 
bpf_rb_root_free(const struct btf_field * field,void * rb_root,struct bpf_spin_lock * spin_lock)2208 void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
2209 		      struct bpf_spin_lock *spin_lock)
2210 {
2211 	struct rb_root_cached orig_root, *root = rb_root;
2212 	struct rb_node *pos, *n;
2213 	void *obj;
2214 
2215 	BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root));
2216 	BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root));
2217 
2218 	__bpf_spin_lock_irqsave(spin_lock);
2219 	orig_root = *root;
2220 	*root = RB_ROOT_CACHED;
2221 	__bpf_spin_unlock_irqrestore(spin_lock);
2222 
2223 	bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) {
2224 		obj = pos;
2225 		obj -= field->graph_root.node_offset;
2226 
2227 
2228 		__bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
2229 	}
2230 }
2231 
2232 __bpf_kfunc_start_defs();
2233 
bpf_obj_new_impl(u64 local_type_id__k,void * meta__ign)2234 __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
2235 {
2236 	struct btf_struct_meta *meta = meta__ign;
2237 	u64 size = local_type_id__k;
2238 	void *p;
2239 
2240 	p = bpf_mem_alloc(&bpf_global_ma, size);
2241 	if (!p)
2242 		return NULL;
2243 	if (meta)
2244 		bpf_obj_init(meta->record, p);
2245 	return p;
2246 }
2247 
bpf_percpu_obj_new_impl(u64 local_type_id__k,void * meta__ign)2248 __bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign)
2249 {
2250 	u64 size = local_type_id__k;
2251 
2252 	/* The verifier has ensured that meta__ign must be NULL */
2253 	return bpf_mem_alloc(&bpf_global_percpu_ma, size);
2254 }
2255 
2256 /* Must be called under migrate_disable(), as required by bpf_mem_free */
__bpf_obj_drop_impl(void * p,const struct btf_record * rec,bool percpu)2257 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu)
2258 {
2259 	struct bpf_mem_alloc *ma;
2260 
2261 	if (rec && rec->refcount_off >= 0 &&
2262 	    !refcount_dec_and_test((refcount_t *)(p + rec->refcount_off))) {
2263 		/* Object is refcounted and refcount_dec didn't result in 0
2264 		 * refcount. Return without freeing the object
2265 		 */
2266 		return;
2267 	}
2268 
2269 	if (rec)
2270 		bpf_obj_free_fields(rec, p);
2271 
2272 	if (percpu)
2273 		ma = &bpf_global_percpu_ma;
2274 	else
2275 		ma = &bpf_global_ma;
2276 	bpf_mem_free_rcu(ma, p);
2277 }
2278 
bpf_obj_drop_impl(void * p__alloc,void * meta__ign)2279 __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
2280 {
2281 	struct btf_struct_meta *meta = meta__ign;
2282 	void *p = p__alloc;
2283 
2284 	__bpf_obj_drop_impl(p, meta ? meta->record : NULL, false);
2285 }
2286 
bpf_percpu_obj_drop_impl(void * p__alloc,void * meta__ign)2287 __bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign)
2288 {
2289 	/* The verifier has ensured that meta__ign must be NULL */
2290 	bpf_mem_free_rcu(&bpf_global_percpu_ma, p__alloc);
2291 }
2292 
bpf_refcount_acquire_impl(void * p__refcounted_kptr,void * meta__ign)2293 __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign)
2294 {
2295 	struct btf_struct_meta *meta = meta__ign;
2296 	struct bpf_refcount *ref;
2297 
2298 	/* Could just cast directly to refcount_t *, but need some code using
2299 	 * bpf_refcount type so that it is emitted in vmlinux BTF
2300 	 */
2301 	ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off);
2302 	if (!refcount_inc_not_zero((refcount_t *)ref))
2303 		return NULL;
2304 
2305 	/* Verifier strips KF_RET_NULL if input is owned ref, see is_kfunc_ret_null
2306 	 * in verifier.c
2307 	 */
2308 	return (void *)p__refcounted_kptr;
2309 }
2310 
__bpf_list_add(struct bpf_list_node_kern * node,struct bpf_list_head * head,bool tail,struct btf_record * rec,u64 off)2311 static int __bpf_list_add(struct bpf_list_node_kern *node,
2312 			  struct bpf_list_head *head,
2313 			  bool tail, struct btf_record *rec, u64 off)
2314 {
2315 	struct list_head *n = &node->list_head, *h = (void *)head;
2316 
2317 	/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
2318 	 * called on its fields, so init here
2319 	 */
2320 	if (unlikely(!h->next))
2321 		INIT_LIST_HEAD(h);
2322 
2323 	/* node->owner != NULL implies !list_empty(n), no need to separately
2324 	 * check the latter
2325 	 */
2326 	if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
2327 		/* Only called from BPF prog, no need to migrate_disable */
2328 		__bpf_obj_drop_impl((void *)n - off, rec, false);
2329 		return -EINVAL;
2330 	}
2331 
2332 	tail ? list_add_tail(n, h) : list_add(n, h);
2333 	WRITE_ONCE(node->owner, head);
2334 
2335 	return 0;
2336 }
2337 
bpf_list_push_front_impl(struct bpf_list_head * head,struct bpf_list_node * node,void * meta__ign,u64 off)2338 __bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head,
2339 					 struct bpf_list_node *node,
2340 					 void *meta__ign, u64 off)
2341 {
2342 	struct bpf_list_node_kern *n = (void *)node;
2343 	struct btf_struct_meta *meta = meta__ign;
2344 
2345 	return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off);
2346 }
2347 
bpf_list_push_back_impl(struct bpf_list_head * head,struct bpf_list_node * node,void * meta__ign,u64 off)2348 __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
2349 					struct bpf_list_node *node,
2350 					void *meta__ign, u64 off)
2351 {
2352 	struct bpf_list_node_kern *n = (void *)node;
2353 	struct btf_struct_meta *meta = meta__ign;
2354 
2355 	return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off);
2356 }
2357 
__bpf_list_del(struct bpf_list_head * head,bool tail)2358 static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail)
2359 {
2360 	struct list_head *n, *h = (void *)head;
2361 	struct bpf_list_node_kern *node;
2362 
2363 	/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
2364 	 * called on its fields, so init here
2365 	 */
2366 	if (unlikely(!h->next))
2367 		INIT_LIST_HEAD(h);
2368 	if (list_empty(h))
2369 		return NULL;
2370 
2371 	n = tail ? h->prev : h->next;
2372 	node = container_of(n, struct bpf_list_node_kern, list_head);
2373 	if (WARN_ON_ONCE(READ_ONCE(node->owner) != head))
2374 		return NULL;
2375 
2376 	list_del_init(n);
2377 	WRITE_ONCE(node->owner, NULL);
2378 	return (struct bpf_list_node *)n;
2379 }
2380 
bpf_list_pop_front(struct bpf_list_head * head)2381 __bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
2382 {
2383 	return __bpf_list_del(head, false);
2384 }
2385 
bpf_list_pop_back(struct bpf_list_head * head)2386 __bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
2387 {
2388 	return __bpf_list_del(head, true);
2389 }
2390 
bpf_list_front(struct bpf_list_head * head)2391 __bpf_kfunc struct bpf_list_node *bpf_list_front(struct bpf_list_head *head)
2392 {
2393 	struct list_head *h = (struct list_head *)head;
2394 
2395 	if (list_empty(h) || unlikely(!h->next))
2396 		return NULL;
2397 
2398 	return (struct bpf_list_node *)h->next;
2399 }
2400 
bpf_list_back(struct bpf_list_head * head)2401 __bpf_kfunc struct bpf_list_node *bpf_list_back(struct bpf_list_head *head)
2402 {
2403 	struct list_head *h = (struct list_head *)head;
2404 
2405 	if (list_empty(h) || unlikely(!h->next))
2406 		return NULL;
2407 
2408 	return (struct bpf_list_node *)h->prev;
2409 }
2410 
bpf_rbtree_remove(struct bpf_rb_root * root,struct bpf_rb_node * node)2411 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
2412 						  struct bpf_rb_node *node)
2413 {
2414 	struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
2415 	struct rb_root_cached *r = (struct rb_root_cached *)root;
2416 	struct rb_node *n = &node_internal->rb_node;
2417 
2418 	/* node_internal->owner != root implies either RB_EMPTY_NODE(n) or
2419 	 * n is owned by some other tree. No need to check RB_EMPTY_NODE(n)
2420 	 */
2421 	if (READ_ONCE(node_internal->owner) != root)
2422 		return NULL;
2423 
2424 	rb_erase_cached(n, r);
2425 	RB_CLEAR_NODE(n);
2426 	WRITE_ONCE(node_internal->owner, NULL);
2427 	return (struct bpf_rb_node *)n;
2428 }
2429 
2430 /* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF
2431  * program
2432  */
__bpf_rbtree_add(struct bpf_rb_root * root,struct bpf_rb_node_kern * node,void * less,struct btf_record * rec,u64 off)2433 static int __bpf_rbtree_add(struct bpf_rb_root *root,
2434 			    struct bpf_rb_node_kern *node,
2435 			    void *less, struct btf_record *rec, u64 off)
2436 {
2437 	struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node;
2438 	struct rb_node *parent = NULL, *n = &node->rb_node;
2439 	bpf_callback_t cb = (bpf_callback_t)less;
2440 	bool leftmost = true;
2441 
2442 	/* node->owner != NULL implies !RB_EMPTY_NODE(n), no need to separately
2443 	 * check the latter
2444 	 */
2445 	if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
2446 		/* Only called from BPF prog, no need to migrate_disable */
2447 		__bpf_obj_drop_impl((void *)n - off, rec, false);
2448 		return -EINVAL;
2449 	}
2450 
2451 	while (*link) {
2452 		parent = *link;
2453 		if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) {
2454 			link = &parent->rb_left;
2455 		} else {
2456 			link = &parent->rb_right;
2457 			leftmost = false;
2458 		}
2459 	}
2460 
2461 	rb_link_node(n, parent, link);
2462 	rb_insert_color_cached(n, (struct rb_root_cached *)root, leftmost);
2463 	WRITE_ONCE(node->owner, root);
2464 	return 0;
2465 }
2466 
bpf_rbtree_add_impl(struct bpf_rb_root * root,struct bpf_rb_node * node,bool (less)(struct bpf_rb_node * a,const struct bpf_rb_node * b),void * meta__ign,u64 off)2467 __bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
2468 				    bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
2469 				    void *meta__ign, u64 off)
2470 {
2471 	struct btf_struct_meta *meta = meta__ign;
2472 	struct bpf_rb_node_kern *n = (void *)node;
2473 
2474 	return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off);
2475 }
2476 
bpf_rbtree_first(struct bpf_rb_root * root)2477 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
2478 {
2479 	struct rb_root_cached *r = (struct rb_root_cached *)root;
2480 
2481 	return (struct bpf_rb_node *)rb_first_cached(r);
2482 }
2483 
bpf_rbtree_root(struct bpf_rb_root * root)2484 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_root(struct bpf_rb_root *root)
2485 {
2486 	struct rb_root_cached *r = (struct rb_root_cached *)root;
2487 
2488 	return (struct bpf_rb_node *)r->rb_root.rb_node;
2489 }
2490 
bpf_rbtree_left(struct bpf_rb_root * root,struct bpf_rb_node * node)2491 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_left(struct bpf_rb_root *root, struct bpf_rb_node *node)
2492 {
2493 	struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
2494 
2495 	if (READ_ONCE(node_internal->owner) != root)
2496 		return NULL;
2497 
2498 	return (struct bpf_rb_node *)node_internal->rb_node.rb_left;
2499 }
2500 
bpf_rbtree_right(struct bpf_rb_root * root,struct bpf_rb_node * node)2501 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_right(struct bpf_rb_root *root, struct bpf_rb_node *node)
2502 {
2503 	struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
2504 
2505 	if (READ_ONCE(node_internal->owner) != root)
2506 		return NULL;
2507 
2508 	return (struct bpf_rb_node *)node_internal->rb_node.rb_right;
2509 }
2510 
2511 /**
2512  * bpf_task_acquire - Acquire a reference to a task. A task acquired by this
2513  * kfunc which is not stored in a map as a kptr, must be released by calling
2514  * bpf_task_release().
2515  * @p: The task on which a reference is being acquired.
2516  */
bpf_task_acquire(struct task_struct * p)2517 __bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p)
2518 {
2519 	if (refcount_inc_not_zero(&p->rcu_users))
2520 		return p;
2521 	return NULL;
2522 }
2523 
2524 /**
2525  * bpf_task_release - Release the reference acquired on a task.
2526  * @p: The task on which a reference is being released.
2527  */
bpf_task_release(struct task_struct * p)2528 __bpf_kfunc void bpf_task_release(struct task_struct *p)
2529 {
2530 	put_task_struct_rcu_user(p);
2531 }
2532 
bpf_task_release_dtor(void * p)2533 __bpf_kfunc void bpf_task_release_dtor(void *p)
2534 {
2535 	put_task_struct_rcu_user(p);
2536 }
2537 CFI_NOSEAL(bpf_task_release_dtor);
2538 
2539 #ifdef CONFIG_CGROUPS
2540 /**
2541  * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by
2542  * this kfunc which is not stored in a map as a kptr, must be released by
2543  * calling bpf_cgroup_release().
2544  * @cgrp: The cgroup on which a reference is being acquired.
2545  */
bpf_cgroup_acquire(struct cgroup * cgrp)2546 __bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
2547 {
2548 	return cgroup_tryget(cgrp) ? cgrp : NULL;
2549 }
2550 
2551 /**
2552  * bpf_cgroup_release - Release the reference acquired on a cgroup.
2553  * If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to
2554  * not be freed until the current grace period has ended, even if its refcount
2555  * drops to 0.
2556  * @cgrp: The cgroup on which a reference is being released.
2557  */
bpf_cgroup_release(struct cgroup * cgrp)2558 __bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
2559 {
2560 	cgroup_put(cgrp);
2561 }
2562 
bpf_cgroup_release_dtor(void * cgrp)2563 __bpf_kfunc void bpf_cgroup_release_dtor(void *cgrp)
2564 {
2565 	cgroup_put(cgrp);
2566 }
2567 CFI_NOSEAL(bpf_cgroup_release_dtor);
2568 
2569 /**
2570  * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor
2571  * array. A cgroup returned by this kfunc which is not subsequently stored in a
2572  * map, must be released by calling bpf_cgroup_release().
2573  * @cgrp: The cgroup for which we're performing a lookup.
2574  * @level: The level of ancestor to look up.
2575  */
bpf_cgroup_ancestor(struct cgroup * cgrp,int level)2576 __bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
2577 {
2578 	struct cgroup *ancestor;
2579 
2580 	if (level > cgrp->level || level < 0)
2581 		return NULL;
2582 
2583 	/* cgrp's refcnt could be 0 here, but ancestors can still be accessed */
2584 	ancestor = cgrp->ancestors[level];
2585 	if (!cgroup_tryget(ancestor))
2586 		return NULL;
2587 	return ancestor;
2588 }
2589 
2590 /**
2591  * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this
2592  * kfunc which is not subsequently stored in a map, must be released by calling
2593  * bpf_cgroup_release().
2594  * @cgid: cgroup id.
2595  */
bpf_cgroup_from_id(u64 cgid)2596 __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid)
2597 {
2598 	struct cgroup *cgrp;
2599 
2600 	cgrp = __cgroup_get_from_id(cgid);
2601 	if (IS_ERR(cgrp))
2602 		return NULL;
2603 	return cgrp;
2604 }
2605 
2606 /**
2607  * bpf_task_under_cgroup - wrap task_under_cgroup_hierarchy() as a kfunc, test
2608  * task's membership of cgroup ancestry.
2609  * @task: the task to be tested
2610  * @ancestor: possible ancestor of @task's cgroup
2611  *
2612  * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
2613  * It follows all the same rules as cgroup_is_descendant, and only applies
2614  * to the default hierarchy.
2615  */
bpf_task_under_cgroup(struct task_struct * task,struct cgroup * ancestor)2616 __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
2617 				       struct cgroup *ancestor)
2618 {
2619 	long ret;
2620 
2621 	rcu_read_lock();
2622 	ret = task_under_cgroup_hierarchy(task, ancestor);
2623 	rcu_read_unlock();
2624 	return ret;
2625 }
2626 
BPF_CALL_2(bpf_current_task_under_cgroup,struct bpf_map *,map,u32,idx)2627 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
2628 {
2629 	struct bpf_array *array = container_of(map, struct bpf_array, map);
2630 	struct cgroup *cgrp;
2631 
2632 	if (unlikely(idx >= array->map.max_entries))
2633 		return -E2BIG;
2634 
2635 	cgrp = READ_ONCE(array->ptrs[idx]);
2636 	if (unlikely(!cgrp))
2637 		return -EAGAIN;
2638 
2639 	return task_under_cgroup_hierarchy(current, cgrp);
2640 }
2641 
2642 const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
2643 	.func           = bpf_current_task_under_cgroup,
2644 	.gpl_only       = false,
2645 	.ret_type       = RET_INTEGER,
2646 	.arg1_type      = ARG_CONST_MAP_PTR,
2647 	.arg2_type      = ARG_ANYTHING,
2648 };
2649 
2650 /**
2651  * bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a
2652  * specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its
2653  * hierarchy ID.
2654  * @task: The target task
2655  * @hierarchy_id: The ID of a cgroup1 hierarchy
2656  *
2657  * On success, the cgroup is returen. On failure, NULL is returned.
2658  */
2659 __bpf_kfunc struct cgroup *
bpf_task_get_cgroup1(struct task_struct * task,int hierarchy_id)2660 bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id)
2661 {
2662 	struct cgroup *cgrp = task_get_cgroup1(task, hierarchy_id);
2663 
2664 	if (IS_ERR(cgrp))
2665 		return NULL;
2666 	return cgrp;
2667 }
2668 #endif /* CONFIG_CGROUPS */
2669 
2670 /**
2671  * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up
2672  * in the root pid namespace idr. If a task is returned, it must either be
2673  * stored in a map, or released with bpf_task_release().
2674  * @pid: The pid of the task being looked up.
2675  */
bpf_task_from_pid(s32 pid)2676 __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
2677 {
2678 	struct task_struct *p;
2679 
2680 	rcu_read_lock();
2681 	p = find_task_by_pid_ns(pid, &init_pid_ns);
2682 	if (p)
2683 		p = bpf_task_acquire(p);
2684 	rcu_read_unlock();
2685 
2686 	return p;
2687 }
2688 
2689 /**
2690  * bpf_task_from_vpid - Find a struct task_struct from its vpid by looking it up
2691  * in the pid namespace of the current task. If a task is returned, it must
2692  * either be stored in a map, or released with bpf_task_release().
2693  * @vpid: The vpid of the task being looked up.
2694  */
bpf_task_from_vpid(s32 vpid)2695 __bpf_kfunc struct task_struct *bpf_task_from_vpid(s32 vpid)
2696 {
2697 	struct task_struct *p;
2698 
2699 	rcu_read_lock();
2700 	p = find_task_by_vpid(vpid);
2701 	if (p)
2702 		p = bpf_task_acquire(p);
2703 	rcu_read_unlock();
2704 
2705 	return p;
2706 }
2707 
2708 /**
2709  * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data.
2710  * @p: The dynptr whose data slice to retrieve
2711  * @offset: Offset into the dynptr
2712  * @buffer__opt: User-provided buffer to copy contents into.  May be NULL
2713  * @buffer__szk: Size (in bytes) of the buffer if present. This is the
2714  *               length of the requested slice. This must be a constant.
2715  *
2716  * For non-skb and non-xdp type dynptrs, there is no difference between
2717  * bpf_dynptr_slice and bpf_dynptr_data.
2718  *
2719  *  If buffer__opt is NULL, the call will fail if buffer_opt was needed.
2720  *
2721  * If the intention is to write to the data slice, please use
2722  * bpf_dynptr_slice_rdwr.
2723  *
2724  * The user must check that the returned pointer is not null before using it.
2725  *
2726  * Please note that in the case of skb and xdp dynptrs, bpf_dynptr_slice
2727  * does not change the underlying packet data pointers, so a call to
2728  * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in
2729  * the bpf program.
2730  *
2731  * Return: NULL if the call failed (eg invalid dynptr), pointer to a read-only
2732  * data slice (can be either direct pointer to the data or a pointer to the user
2733  * provided buffer, with its contents containing the data, if unable to obtain
2734  * direct pointer)
2735  */
bpf_dynptr_slice(const struct bpf_dynptr * p,u64 offset,void * buffer__opt,u64 buffer__szk)2736 __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr *p, u64 offset,
2737 				   void *buffer__opt, u64 buffer__szk)
2738 {
2739 	const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2740 	enum bpf_dynptr_type type;
2741 	u64 len = buffer__szk;
2742 	int err;
2743 
2744 	if (!ptr->data)
2745 		return NULL;
2746 
2747 	err = bpf_dynptr_check_off_len(ptr, offset, len);
2748 	if (err)
2749 		return NULL;
2750 
2751 	type = bpf_dynptr_get_type(ptr);
2752 
2753 	switch (type) {
2754 	case BPF_DYNPTR_TYPE_LOCAL:
2755 	case BPF_DYNPTR_TYPE_RINGBUF:
2756 		return ptr->data + ptr->offset + offset;
2757 	case BPF_DYNPTR_TYPE_SKB:
2758 		if (buffer__opt)
2759 			return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer__opt);
2760 		else
2761 			return skb_pointer_if_linear(ptr->data, ptr->offset + offset, len);
2762 	case BPF_DYNPTR_TYPE_XDP:
2763 	{
2764 		void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len);
2765 		if (!IS_ERR_OR_NULL(xdp_ptr))
2766 			return xdp_ptr;
2767 
2768 		if (!buffer__opt)
2769 			return NULL;
2770 		bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer__opt, len, false);
2771 		return buffer__opt;
2772 	}
2773 	case BPF_DYNPTR_TYPE_SKB_META:
2774 		return bpf_skb_meta_pointer(ptr->data, ptr->offset + offset);
2775 	case BPF_DYNPTR_TYPE_FILE:
2776 		err = bpf_file_fetch_bytes(ptr->data, offset, buffer__opt, buffer__szk);
2777 		return err ? NULL : buffer__opt;
2778 	default:
2779 		WARN_ONCE(true, "unknown dynptr type %d\n", type);
2780 		return NULL;
2781 	}
2782 }
2783 
2784 /**
2785  * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data.
2786  * @p: The dynptr whose data slice to retrieve
2787  * @offset: Offset into the dynptr
2788  * @buffer__opt: User-provided buffer to copy contents into. May be NULL
2789  * @buffer__szk: Size (in bytes) of the buffer if present. This is the
2790  *               length of the requested slice. This must be a constant.
2791  *
2792  * For non-skb and non-xdp type dynptrs, there is no difference between
2793  * bpf_dynptr_slice and bpf_dynptr_data.
2794  *
2795  * If buffer__opt is NULL, the call will fail if buffer_opt was needed.
2796  *
2797  * The returned pointer is writable and may point to either directly the dynptr
2798  * data at the requested offset or to the buffer if unable to obtain a direct
2799  * data pointer to (example: the requested slice is to the paged area of an skb
2800  * packet). In the case where the returned pointer is to the buffer, the user
2801  * is responsible for persisting writes through calling bpf_dynptr_write(). This
2802  * usually looks something like this pattern:
2803  *
2804  * struct eth_hdr *eth = bpf_dynptr_slice_rdwr(&dynptr, 0, buffer, sizeof(buffer));
2805  * if (!eth)
2806  *	return TC_ACT_SHOT;
2807  *
2808  * // mutate eth header //
2809  *
2810  * if (eth == buffer)
2811  *	bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0);
2812  *
2813  * Please note that, as in the example above, the user must check that the
2814  * returned pointer is not null before using it.
2815  *
2816  * Please also note that in the case of skb and xdp dynptrs, bpf_dynptr_slice_rdwr
2817  * does not change the underlying packet data pointers, so a call to
2818  * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in
2819  * the bpf program.
2820  *
2821  * Return: NULL if the call failed (eg invalid dynptr), pointer to a
2822  * data slice (can be either direct pointer to the data or a pointer to the user
2823  * provided buffer, with its contents containing the data, if unable to obtain
2824  * direct pointer)
2825  */
bpf_dynptr_slice_rdwr(const struct bpf_dynptr * p,u64 offset,void * buffer__opt,u64 buffer__szk)2826 __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u64 offset,
2827 					void *buffer__opt, u64 buffer__szk)
2828 {
2829 	const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2830 
2831 	if (!ptr->data || __bpf_dynptr_is_rdonly(ptr))
2832 		return NULL;
2833 
2834 	/* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice.
2835 	 *
2836 	 * For skb-type dynptrs, it is safe to write into the returned pointer
2837 	 * if the bpf program allows skb data writes. There are two possibilities
2838 	 * that may occur when calling bpf_dynptr_slice_rdwr:
2839 	 *
2840 	 * 1) The requested slice is in the head of the skb. In this case, the
2841 	 * returned pointer is directly to skb data, and if the skb is cloned, the
2842 	 * verifier will have uncloned it (see bpf_unclone_prologue()) already.
2843 	 * The pointer can be directly written into.
2844 	 *
2845 	 * 2) Some portion of the requested slice is in the paged buffer area.
2846 	 * In this case, the requested data will be copied out into the buffer
2847 	 * and the returned pointer will be a pointer to the buffer. The skb
2848 	 * will not be pulled. To persist the write, the user will need to call
2849 	 * bpf_dynptr_write(), which will pull the skb and commit the write.
2850 	 *
2851 	 * Similarly for xdp programs, if the requested slice is not across xdp
2852 	 * fragments, then a direct pointer will be returned, otherwise the data
2853 	 * will be copied out into the buffer and the user will need to call
2854 	 * bpf_dynptr_write() to commit changes.
2855 	 */
2856 	return bpf_dynptr_slice(p, offset, buffer__opt, buffer__szk);
2857 }
2858 
bpf_dynptr_adjust(const struct bpf_dynptr * p,u64 start,u64 end)2859 __bpf_kfunc int bpf_dynptr_adjust(const struct bpf_dynptr *p, u64 start, u64 end)
2860 {
2861 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2862 	u64 size;
2863 
2864 	if (!ptr->data || start > end)
2865 		return -EINVAL;
2866 
2867 	size = __bpf_dynptr_size(ptr);
2868 
2869 	if (start > size || end > size)
2870 		return -ERANGE;
2871 
2872 	bpf_dynptr_advance_offset(ptr, start);
2873 	bpf_dynptr_set_size(ptr, end - start);
2874 
2875 	return 0;
2876 }
2877 
bpf_dynptr_is_null(const struct bpf_dynptr * p)2878 __bpf_kfunc bool bpf_dynptr_is_null(const struct bpf_dynptr *p)
2879 {
2880 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2881 
2882 	return !ptr->data;
2883 }
2884 
bpf_dynptr_is_rdonly(const struct bpf_dynptr * p)2885 __bpf_kfunc bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *p)
2886 {
2887 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2888 
2889 	if (!ptr->data)
2890 		return false;
2891 
2892 	return __bpf_dynptr_is_rdonly(ptr);
2893 }
2894 
bpf_dynptr_size(const struct bpf_dynptr * p)2895 __bpf_kfunc u64 bpf_dynptr_size(const struct bpf_dynptr *p)
2896 {
2897 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2898 
2899 	if (!ptr->data)
2900 		return -EINVAL;
2901 
2902 	return __bpf_dynptr_size(ptr);
2903 }
2904 
bpf_dynptr_clone(const struct bpf_dynptr * p,struct bpf_dynptr * clone__uninit)2905 __bpf_kfunc int bpf_dynptr_clone(const struct bpf_dynptr *p,
2906 				 struct bpf_dynptr *clone__uninit)
2907 {
2908 	struct bpf_dynptr_kern *clone = (struct bpf_dynptr_kern *)clone__uninit;
2909 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2910 
2911 	if (!ptr->data) {
2912 		bpf_dynptr_set_null(clone);
2913 		return -EINVAL;
2914 	}
2915 
2916 	*clone = *ptr;
2917 
2918 	return 0;
2919 }
2920 
2921 /**
2922  * bpf_dynptr_copy() - Copy data from one dynptr to another.
2923  * @dst_ptr: Destination dynptr - where data should be copied to
2924  * @dst_off: Offset into the destination dynptr
2925  * @src_ptr: Source dynptr - where data should be copied from
2926  * @src_off: Offset into the source dynptr
2927  * @size: Length of the data to copy from source to destination
2928  *
2929  * Copies data from source dynptr to destination dynptr.
2930  * Returns 0 on success; negative error, otherwise.
2931  */
bpf_dynptr_copy(struct bpf_dynptr * dst_ptr,u64 dst_off,struct bpf_dynptr * src_ptr,u64 src_off,u64 size)2932 __bpf_kfunc int bpf_dynptr_copy(struct bpf_dynptr *dst_ptr, u64 dst_off,
2933 				struct bpf_dynptr *src_ptr, u64 src_off, u64 size)
2934 {
2935 	struct bpf_dynptr_kern *dst = (struct bpf_dynptr_kern *)dst_ptr;
2936 	struct bpf_dynptr_kern *src = (struct bpf_dynptr_kern *)src_ptr;
2937 	void *src_slice, *dst_slice;
2938 	char buf[256];
2939 	u64 off;
2940 
2941 	src_slice = bpf_dynptr_slice(src_ptr, src_off, NULL, size);
2942 	dst_slice = bpf_dynptr_slice_rdwr(dst_ptr, dst_off, NULL, size);
2943 
2944 	if (src_slice && dst_slice) {
2945 		memmove(dst_slice, src_slice, size);
2946 		return 0;
2947 	}
2948 
2949 	if (src_slice)
2950 		return __bpf_dynptr_write(dst, dst_off, src_slice, size, 0);
2951 
2952 	if (dst_slice)
2953 		return __bpf_dynptr_read(dst_slice, size, src, src_off, 0);
2954 
2955 	if (bpf_dynptr_check_off_len(dst, dst_off, size) ||
2956 	    bpf_dynptr_check_off_len(src, src_off, size))
2957 		return -E2BIG;
2958 
2959 	off = 0;
2960 	while (off < size) {
2961 		u64 chunk_sz = min_t(u64, sizeof(buf), size - off);
2962 		int err;
2963 
2964 		err = __bpf_dynptr_read(buf, chunk_sz, src, src_off + off, 0);
2965 		if (err)
2966 			return err;
2967 		err = __bpf_dynptr_write(dst, dst_off + off, buf, chunk_sz, 0);
2968 		if (err)
2969 			return err;
2970 
2971 		off += chunk_sz;
2972 	}
2973 	return 0;
2974 }
2975 
2976 /**
2977  * bpf_dynptr_memset() - Fill dynptr memory with a constant byte.
2978  * @p: Destination dynptr - where data will be filled
2979  * @offset: Offset into the dynptr to start filling from
2980  * @size: Number of bytes to fill
2981  * @val: Constant byte to fill the memory with
2982  *
2983  * Fills the @size bytes of the memory area pointed to by @p
2984  * at @offset with the constant byte @val.
2985  * Returns 0 on success; negative error, otherwise.
2986  */
bpf_dynptr_memset(struct bpf_dynptr * p,u64 offset,u64 size,u8 val)2987 __bpf_kfunc int bpf_dynptr_memset(struct bpf_dynptr *p, u64 offset, u64 size, u8 val)
2988 {
2989 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2990 	u64 chunk_sz, write_off;
2991 	char buf[256];
2992 	void* slice;
2993 	int err;
2994 
2995 	slice = bpf_dynptr_slice_rdwr(p, offset, NULL, size);
2996 	if (likely(slice)) {
2997 		memset(slice, val, size);
2998 		return 0;
2999 	}
3000 
3001 	if (__bpf_dynptr_is_rdonly(ptr))
3002 		return -EINVAL;
3003 
3004 	err = bpf_dynptr_check_off_len(ptr, offset, size);
3005 	if (err)
3006 		return err;
3007 
3008 	/* Non-linear data under the dynptr, write from a local buffer */
3009 	chunk_sz = min_t(u64, sizeof(buf), size);
3010 	memset(buf, val, chunk_sz);
3011 
3012 	for (write_off = 0; write_off < size; write_off += chunk_sz) {
3013 		chunk_sz = min_t(u64, sizeof(buf), size - write_off);
3014 		err = __bpf_dynptr_write(ptr, offset + write_off, buf, chunk_sz, 0);
3015 		if (err)
3016 			return err;
3017 	}
3018 
3019 	return 0;
3020 }
3021 
bpf_cast_to_kern_ctx(void * obj)3022 __bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
3023 {
3024 	return obj;
3025 }
3026 
bpf_rdonly_cast(const void * obj__ign,u32 btf_id__k)3027 __bpf_kfunc void *bpf_rdonly_cast(const void *obj__ign, u32 btf_id__k)
3028 {
3029 	return (void *)obj__ign;
3030 }
3031 
bpf_rcu_read_lock(void)3032 __bpf_kfunc void bpf_rcu_read_lock(void)
3033 {
3034 	rcu_read_lock();
3035 }
3036 
bpf_rcu_read_unlock(void)3037 __bpf_kfunc void bpf_rcu_read_unlock(void)
3038 {
3039 	rcu_read_unlock();
3040 }
3041 
3042 struct bpf_throw_ctx {
3043 	struct bpf_prog_aux *aux;
3044 	u64 sp;
3045 	u64 bp;
3046 	int cnt;
3047 };
3048 
bpf_stack_walker(void * cookie,u64 ip,u64 sp,u64 bp)3049 static bool bpf_stack_walker(void *cookie, u64 ip, u64 sp, u64 bp)
3050 {
3051 	struct bpf_throw_ctx *ctx = cookie;
3052 	struct bpf_prog *prog;
3053 
3054 	/*
3055 	 * The RCU read lock is held to safely traverse the latch tree, but we
3056 	 * don't need its protection when accessing the prog, since it has an
3057 	 * active stack frame on the current stack trace, and won't disappear.
3058 	 */
3059 	rcu_read_lock();
3060 	prog = bpf_prog_ksym_find(ip);
3061 	rcu_read_unlock();
3062 	if (!prog)
3063 		return !ctx->cnt;
3064 	ctx->cnt++;
3065 	if (bpf_is_subprog(prog))
3066 		return true;
3067 	ctx->aux = prog->aux;
3068 	ctx->sp = sp;
3069 	ctx->bp = bp;
3070 	return false;
3071 }
3072 
bpf_throw(u64 cookie)3073 __bpf_kfunc void bpf_throw(u64 cookie)
3074 {
3075 	struct bpf_throw_ctx ctx = {};
3076 
3077 	arch_bpf_stack_walk(bpf_stack_walker, &ctx);
3078 	WARN_ON_ONCE(!ctx.aux);
3079 	if (ctx.aux)
3080 		WARN_ON_ONCE(!ctx.aux->exception_boundary);
3081 	WARN_ON_ONCE(!ctx.bp);
3082 	WARN_ON_ONCE(!ctx.cnt);
3083 	/* Prevent KASAN false positives for CONFIG_KASAN_STACK by unpoisoning
3084 	 * deeper stack depths than ctx.sp as we do not return from bpf_throw,
3085 	 * which skips compiler generated instrumentation to do the same.
3086 	 */
3087 	kasan_unpoison_task_stack_below((void *)(long)ctx.sp);
3088 	ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp, 0, 0);
3089 	WARN(1, "A call to BPF exception callback should never return\n");
3090 }
3091 
bpf_wq_init(struct bpf_wq * wq,void * p__map,unsigned int flags)3092 __bpf_kfunc int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags)
3093 {
3094 	struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
3095 	struct bpf_map *map = p__map;
3096 
3097 	BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_wq));
3098 	BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_wq));
3099 
3100 	if (flags)
3101 		return -EINVAL;
3102 
3103 	return __bpf_async_init(async, map, flags, BPF_ASYNC_TYPE_WQ);
3104 }
3105 
bpf_wq_start(struct bpf_wq * wq,unsigned int flags)3106 __bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags)
3107 {
3108 	struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
3109 	struct bpf_work *w;
3110 
3111 	if (in_nmi())
3112 		return -EOPNOTSUPP;
3113 	if (flags)
3114 		return -EINVAL;
3115 	w = READ_ONCE(async->work);
3116 	if (!w || !READ_ONCE(w->cb.prog))
3117 		return -EINVAL;
3118 
3119 	schedule_work(&w->work);
3120 	return 0;
3121 }
3122 
bpf_wq_set_callback_impl(struct bpf_wq * wq,int (callback_fn)(void * map,int * key,void * value),unsigned int flags,void * aux__prog)3123 __bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq,
3124 					 int (callback_fn)(void *map, int *key, void *value),
3125 					 unsigned int flags,
3126 					 void *aux__prog)
3127 {
3128 	struct bpf_prog_aux *aux = (struct bpf_prog_aux *)aux__prog;
3129 	struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
3130 
3131 	if (flags)
3132 		return -EINVAL;
3133 
3134 	return __bpf_async_set_callback(async, callback_fn, aux, flags, BPF_ASYNC_TYPE_WQ);
3135 }
3136 
bpf_preempt_disable(void)3137 __bpf_kfunc void bpf_preempt_disable(void)
3138 {
3139 	preempt_disable();
3140 }
3141 
bpf_preempt_enable(void)3142 __bpf_kfunc void bpf_preempt_enable(void)
3143 {
3144 	preempt_enable();
3145 }
3146 
3147 struct bpf_iter_bits {
3148 	__u64 __opaque[2];
3149 } __aligned(8);
3150 
3151 #define BITS_ITER_NR_WORDS_MAX 511
3152 
3153 struct bpf_iter_bits_kern {
3154 	union {
3155 		__u64 *bits;
3156 		__u64 bits_copy;
3157 	};
3158 	int nr_bits;
3159 	int bit;
3160 } __aligned(8);
3161 
3162 /* On 64-bit hosts, unsigned long and u64 have the same size, so passing
3163  * a u64 pointer and an unsigned long pointer to find_next_bit() will
3164  * return the same result, as both point to the same 8-byte area.
3165  *
3166  * For 32-bit little-endian hosts, using a u64 pointer or unsigned long
3167  * pointer also makes no difference. This is because the first iterated
3168  * unsigned long is composed of bits 0-31 of the u64 and the second unsigned
3169  * long is composed of bits 32-63 of the u64.
3170  *
3171  * However, for 32-bit big-endian hosts, this is not the case. The first
3172  * iterated unsigned long will be bits 32-63 of the u64, so swap these two
3173  * ulong values within the u64.
3174  */
swap_ulong_in_u64(u64 * bits,unsigned int nr)3175 static void swap_ulong_in_u64(u64 *bits, unsigned int nr)
3176 {
3177 #if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
3178 	unsigned int i;
3179 
3180 	for (i = 0; i < nr; i++)
3181 		bits[i] = (bits[i] >> 32) | ((u64)(u32)bits[i] << 32);
3182 #endif
3183 }
3184 
3185 /**
3186  * bpf_iter_bits_new() - Initialize a new bits iterator for a given memory area
3187  * @it: The new bpf_iter_bits to be created
3188  * @unsafe_ptr__ign: A pointer pointing to a memory area to be iterated over
3189  * @nr_words: The size of the specified memory area, measured in 8-byte units.
3190  * The maximum value of @nr_words is @BITS_ITER_NR_WORDS_MAX. This limit may be
3191  * further reduced by the BPF memory allocator implementation.
3192  *
3193  * This function initializes a new bpf_iter_bits structure for iterating over
3194  * a memory area which is specified by the @unsafe_ptr__ign and @nr_words. It
3195  * copies the data of the memory area to the newly created bpf_iter_bits @it for
3196  * subsequent iteration operations.
3197  *
3198  * On success, 0 is returned. On failure, ERR is returned.
3199  */
3200 __bpf_kfunc int
bpf_iter_bits_new(struct bpf_iter_bits * it,const u64 * unsafe_ptr__ign,u32 nr_words)3201 bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words)
3202 {
3203 	struct bpf_iter_bits_kern *kit = (void *)it;
3204 	u32 nr_bytes = nr_words * sizeof(u64);
3205 	u32 nr_bits = BYTES_TO_BITS(nr_bytes);
3206 	int err;
3207 
3208 	BUILD_BUG_ON(sizeof(struct bpf_iter_bits_kern) != sizeof(struct bpf_iter_bits));
3209 	BUILD_BUG_ON(__alignof__(struct bpf_iter_bits_kern) !=
3210 		     __alignof__(struct bpf_iter_bits));
3211 
3212 	kit->nr_bits = 0;
3213 	kit->bits_copy = 0;
3214 	kit->bit = -1;
3215 
3216 	if (!unsafe_ptr__ign || !nr_words)
3217 		return -EINVAL;
3218 	if (nr_words > BITS_ITER_NR_WORDS_MAX)
3219 		return -E2BIG;
3220 
3221 	/* Optimization for u64 mask */
3222 	if (nr_bits == 64) {
3223 		err = bpf_probe_read_kernel_common(&kit->bits_copy, nr_bytes, unsafe_ptr__ign);
3224 		if (err)
3225 			return -EFAULT;
3226 
3227 		swap_ulong_in_u64(&kit->bits_copy, nr_words);
3228 
3229 		kit->nr_bits = nr_bits;
3230 		return 0;
3231 	}
3232 
3233 	if (bpf_mem_alloc_check_size(false, nr_bytes))
3234 		return -E2BIG;
3235 
3236 	/* Fallback to memalloc */
3237 	kit->bits = bpf_mem_alloc(&bpf_global_ma, nr_bytes);
3238 	if (!kit->bits)
3239 		return -ENOMEM;
3240 
3241 	err = bpf_probe_read_kernel_common(kit->bits, nr_bytes, unsafe_ptr__ign);
3242 	if (err) {
3243 		bpf_mem_free(&bpf_global_ma, kit->bits);
3244 		return err;
3245 	}
3246 
3247 	swap_ulong_in_u64(kit->bits, nr_words);
3248 
3249 	kit->nr_bits = nr_bits;
3250 	return 0;
3251 }
3252 
3253 /**
3254  * bpf_iter_bits_next() - Get the next bit in a bpf_iter_bits
3255  * @it: The bpf_iter_bits to be checked
3256  *
3257  * This function returns a pointer to a number representing the value of the
3258  * next bit in the bits.
3259  *
3260  * If there are no further bits available, it returns NULL.
3261  */
bpf_iter_bits_next(struct bpf_iter_bits * it)3262 __bpf_kfunc int *bpf_iter_bits_next(struct bpf_iter_bits *it)
3263 {
3264 	struct bpf_iter_bits_kern *kit = (void *)it;
3265 	int bit = kit->bit, nr_bits = kit->nr_bits;
3266 	const void *bits;
3267 
3268 	if (!nr_bits || bit >= nr_bits)
3269 		return NULL;
3270 
3271 	bits = nr_bits == 64 ? &kit->bits_copy : kit->bits;
3272 	bit = find_next_bit(bits, nr_bits, bit + 1);
3273 	if (bit >= nr_bits) {
3274 		kit->bit = bit;
3275 		return NULL;
3276 	}
3277 
3278 	kit->bit = bit;
3279 	return &kit->bit;
3280 }
3281 
3282 /**
3283  * bpf_iter_bits_destroy() - Destroy a bpf_iter_bits
3284  * @it: The bpf_iter_bits to be destroyed
3285  *
3286  * Destroy the resource associated with the bpf_iter_bits.
3287  */
bpf_iter_bits_destroy(struct bpf_iter_bits * it)3288 __bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it)
3289 {
3290 	struct bpf_iter_bits_kern *kit = (void *)it;
3291 
3292 	if (kit->nr_bits <= 64)
3293 		return;
3294 	bpf_mem_free(&bpf_global_ma, kit->bits);
3295 }
3296 
3297 /**
3298  * bpf_copy_from_user_str() - Copy a string from an unsafe user address
3299  * @dst:             Destination address, in kernel space.  This buffer must be
3300  *                   at least @dst__sz bytes long.
3301  * @dst__sz:         Maximum number of bytes to copy, includes the trailing NUL.
3302  * @unsafe_ptr__ign: Source address, in user space.
3303  * @flags:           The only supported flag is BPF_F_PAD_ZEROS
3304  *
3305  * Copies a NUL-terminated string from userspace to BPF space. If user string is
3306  * too long this will still ensure zero termination in the dst buffer unless
3307  * buffer size is 0.
3308  *
3309  * If BPF_F_PAD_ZEROS flag is set, memset the tail of @dst to 0 on success and
3310  * memset all of @dst on failure.
3311  */
bpf_copy_from_user_str(void * dst,u32 dst__sz,const void __user * unsafe_ptr__ign,u64 flags)3312 __bpf_kfunc int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void __user *unsafe_ptr__ign, u64 flags)
3313 {
3314 	int ret;
3315 
3316 	if (unlikely(flags & ~BPF_F_PAD_ZEROS))
3317 		return -EINVAL;
3318 
3319 	if (unlikely(!dst__sz))
3320 		return 0;
3321 
3322 	ret = strncpy_from_user(dst, unsafe_ptr__ign, dst__sz - 1);
3323 	if (ret < 0) {
3324 		if (flags & BPF_F_PAD_ZEROS)
3325 			memset((char *)dst, 0, dst__sz);
3326 
3327 		return ret;
3328 	}
3329 
3330 	if (flags & BPF_F_PAD_ZEROS)
3331 		memset((char *)dst + ret, 0, dst__sz - ret);
3332 	else
3333 		((char *)dst)[ret] = '\0';
3334 
3335 	return ret + 1;
3336 }
3337 
3338 /**
3339  * bpf_copy_from_user_task_str() - Copy a string from an task's address space
3340  * @dst:             Destination address, in kernel space.  This buffer must be
3341  *                   at least @dst__sz bytes long.
3342  * @dst__sz:         Maximum number of bytes to copy, includes the trailing NUL.
3343  * @unsafe_ptr__ign: Source address in the task's address space.
3344  * @tsk:             The task whose address space will be used
3345  * @flags:           The only supported flag is BPF_F_PAD_ZEROS
3346  *
3347  * Copies a NUL terminated string from a task's address space to @dst__sz
3348  * buffer. If user string is too long this will still ensure zero termination
3349  * in the @dst__sz buffer unless buffer size is 0.
3350  *
3351  * If BPF_F_PAD_ZEROS flag is set, memset the tail of @dst__sz to 0 on success
3352  * and memset all of @dst__sz on failure.
3353  *
3354  * Return: The number of copied bytes on success including the NUL terminator.
3355  * A negative error code on failure.
3356  */
bpf_copy_from_user_task_str(void * dst,u32 dst__sz,const void __user * unsafe_ptr__ign,struct task_struct * tsk,u64 flags)3357 __bpf_kfunc int bpf_copy_from_user_task_str(void *dst, u32 dst__sz,
3358 					    const void __user *unsafe_ptr__ign,
3359 					    struct task_struct *tsk, u64 flags)
3360 {
3361 	int ret;
3362 
3363 	if (unlikely(flags & ~BPF_F_PAD_ZEROS))
3364 		return -EINVAL;
3365 
3366 	if (unlikely(dst__sz == 0))
3367 		return 0;
3368 
3369 	ret = copy_remote_vm_str(tsk, (unsigned long)unsafe_ptr__ign, dst, dst__sz, 0);
3370 	if (ret < 0) {
3371 		if (flags & BPF_F_PAD_ZEROS)
3372 			memset(dst, 0, dst__sz);
3373 		return ret;
3374 	}
3375 
3376 	if (flags & BPF_F_PAD_ZEROS)
3377 		memset(dst + ret, 0, dst__sz - ret);
3378 
3379 	return ret + 1;
3380 }
3381 
3382 /* Keep unsinged long in prototype so that kfunc is usable when emitted to
3383  * vmlinux.h in BPF programs directly, but note that while in BPF prog, the
3384  * unsigned long always points to 8-byte region on stack, the kernel may only
3385  * read and write the 4-bytes on 32-bit.
3386  */
bpf_local_irq_save(unsigned long * flags__irq_flag)3387 __bpf_kfunc void bpf_local_irq_save(unsigned long *flags__irq_flag)
3388 {
3389 	local_irq_save(*flags__irq_flag);
3390 }
3391 
bpf_local_irq_restore(unsigned long * flags__irq_flag)3392 __bpf_kfunc void bpf_local_irq_restore(unsigned long *flags__irq_flag)
3393 {
3394 	local_irq_restore(*flags__irq_flag);
3395 }
3396 
__bpf_trap(void)3397 __bpf_kfunc void __bpf_trap(void)
3398 {
3399 }
3400 
3401 /*
3402  * Kfuncs for string operations.
3403  *
3404  * Since strings are not necessarily %NUL-terminated, we cannot directly call
3405  * in-kernel implementations. Instead, we open-code the implementations using
3406  * __get_kernel_nofault instead of plain dereference to make them safe.
3407  */
3408 
__bpf_strcasecmp(const char * s1,const char * s2,bool ignore_case)3409 static int __bpf_strcasecmp(const char *s1, const char *s2, bool ignore_case)
3410 {
3411 	char c1, c2;
3412 	int i;
3413 
3414 	if (!copy_from_kernel_nofault_allowed(s1, 1) ||
3415 	    !copy_from_kernel_nofault_allowed(s2, 1)) {
3416 		return -ERANGE;
3417 	}
3418 
3419 	guard(pagefault)();
3420 	for (i = 0; i < XATTR_SIZE_MAX; i++) {
3421 		__get_kernel_nofault(&c1, s1, char, err_out);
3422 		__get_kernel_nofault(&c2, s2, char, err_out);
3423 		if (ignore_case) {
3424 			c1 = tolower(c1);
3425 			c2 = tolower(c2);
3426 		}
3427 		if (c1 != c2)
3428 			return c1 < c2 ? -1 : 1;
3429 		if (c1 == '\0')
3430 			return 0;
3431 		s1++;
3432 		s2++;
3433 	}
3434 	return -E2BIG;
3435 err_out:
3436 	return -EFAULT;
3437 }
3438 
3439 /**
3440  * bpf_strcmp - Compare two strings
3441  * @s1__ign: One string
3442  * @s2__ign: Another string
3443  *
3444  * Return:
3445  * * %0       - Strings are equal
3446  * * %-1      - @s1__ign is smaller
3447  * * %1       - @s2__ign is smaller
3448  * * %-EFAULT - Cannot read one of the strings
3449  * * %-E2BIG  - One of strings is too large
3450  * * %-ERANGE - One of strings is outside of kernel address space
3451  */
bpf_strcmp(const char * s1__ign,const char * s2__ign)3452 __bpf_kfunc int bpf_strcmp(const char *s1__ign, const char *s2__ign)
3453 {
3454 	return __bpf_strcasecmp(s1__ign, s2__ign, false);
3455 }
3456 
3457 /**
3458  * bpf_strcasecmp - Compare two strings, ignoring the case of the characters
3459  * @s1__ign: One string
3460  * @s2__ign: Another string
3461  *
3462  * Return:
3463  * * %0       - Strings are equal
3464  * * %-1      - @s1__ign is smaller
3465  * * %1       - @s2__ign is smaller
3466  * * %-EFAULT - Cannot read one of the strings
3467  * * %-E2BIG  - One of strings is too large
3468  * * %-ERANGE - One of strings is outside of kernel address space
3469  */
bpf_strcasecmp(const char * s1__ign,const char * s2__ign)3470 __bpf_kfunc int bpf_strcasecmp(const char *s1__ign, const char *s2__ign)
3471 {
3472 	return __bpf_strcasecmp(s1__ign, s2__ign, true);
3473 }
3474 
3475 /**
3476  * bpf_strnchr - Find a character in a length limited string
3477  * @s__ign: The string to be searched
3478  * @count: The number of characters to be searched
3479  * @c: The character to search for
3480  *
3481  * Note that the %NUL-terminator is considered part of the string, and can
3482  * be searched for.
3483  *
3484  * Return:
3485  * * >=0      - Index of the first occurrence of @c within @s__ign
3486  * * %-ENOENT - @c not found in the first @count characters of @s__ign
3487  * * %-EFAULT - Cannot read @s__ign
3488  * * %-E2BIG  - @s__ign is too large
3489  * * %-ERANGE - @s__ign is outside of kernel address space
3490  */
bpf_strnchr(const char * s__ign,size_t count,char c)3491 __bpf_kfunc int bpf_strnchr(const char *s__ign, size_t count, char c)
3492 {
3493 	char sc;
3494 	int i;
3495 
3496 	if (!copy_from_kernel_nofault_allowed(s__ign, 1))
3497 		return -ERANGE;
3498 
3499 	guard(pagefault)();
3500 	for (i = 0; i < count && i < XATTR_SIZE_MAX; i++) {
3501 		__get_kernel_nofault(&sc, s__ign, char, err_out);
3502 		if (sc == c)
3503 			return i;
3504 		if (sc == '\0')
3505 			return -ENOENT;
3506 		s__ign++;
3507 	}
3508 	return i == XATTR_SIZE_MAX ? -E2BIG : -ENOENT;
3509 err_out:
3510 	return -EFAULT;
3511 }
3512 
3513 /**
3514  * bpf_strchr - Find the first occurrence of a character in a string
3515  * @s__ign: The string to be searched
3516  * @c: The character to search for
3517  *
3518  * Note that the %NUL-terminator is considered part of the string, and can
3519  * be searched for.
3520  *
3521  * Return:
3522  * * >=0      - The index of the first occurrence of @c within @s__ign
3523  * * %-ENOENT - @c not found in @s__ign
3524  * * %-EFAULT - Cannot read @s__ign
3525  * * %-E2BIG  - @s__ign is too large
3526  * * %-ERANGE - @s__ign is outside of kernel address space
3527  */
bpf_strchr(const char * s__ign,char c)3528 __bpf_kfunc int bpf_strchr(const char *s__ign, char c)
3529 {
3530 	return bpf_strnchr(s__ign, XATTR_SIZE_MAX, c);
3531 }
3532 
3533 /**
3534  * bpf_strchrnul - Find and return a character in a string, or end of string
3535  * @s__ign: The string to be searched
3536  * @c: The character to search for
3537  *
3538  * Return:
3539  * * >=0      - Index of the first occurrence of @c within @s__ign or index of
3540  *              the null byte at the end of @s__ign when @c is not found
3541  * * %-EFAULT - Cannot read @s__ign
3542  * * %-E2BIG  - @s__ign is too large
3543  * * %-ERANGE - @s__ign is outside of kernel address space
3544  */
bpf_strchrnul(const char * s__ign,char c)3545 __bpf_kfunc int bpf_strchrnul(const char *s__ign, char c)
3546 {
3547 	char sc;
3548 	int i;
3549 
3550 	if (!copy_from_kernel_nofault_allowed(s__ign, 1))
3551 		return -ERANGE;
3552 
3553 	guard(pagefault)();
3554 	for (i = 0; i < XATTR_SIZE_MAX; i++) {
3555 		__get_kernel_nofault(&sc, s__ign, char, err_out);
3556 		if (sc == '\0' || sc == c)
3557 			return i;
3558 		s__ign++;
3559 	}
3560 	return -E2BIG;
3561 err_out:
3562 	return -EFAULT;
3563 }
3564 
3565 /**
3566  * bpf_strrchr - Find the last occurrence of a character in a string
3567  * @s__ign: The string to be searched
3568  * @c: The character to search for
3569  *
3570  * Return:
3571  * * >=0      - Index of the last occurrence of @c within @s__ign
3572  * * %-ENOENT - @c not found in @s__ign
3573  * * %-EFAULT - Cannot read @s__ign
3574  * * %-E2BIG  - @s__ign is too large
3575  * * %-ERANGE - @s__ign is outside of kernel address space
3576  */
bpf_strrchr(const char * s__ign,int c)3577 __bpf_kfunc int bpf_strrchr(const char *s__ign, int c)
3578 {
3579 	char sc;
3580 	int i, last = -ENOENT;
3581 
3582 	if (!copy_from_kernel_nofault_allowed(s__ign, 1))
3583 		return -ERANGE;
3584 
3585 	guard(pagefault)();
3586 	for (i = 0; i < XATTR_SIZE_MAX; i++) {
3587 		__get_kernel_nofault(&sc, s__ign, char, err_out);
3588 		if (sc == c)
3589 			last = i;
3590 		if (sc == '\0')
3591 			return last;
3592 		s__ign++;
3593 	}
3594 	return -E2BIG;
3595 err_out:
3596 	return -EFAULT;
3597 }
3598 
3599 /**
3600  * bpf_strnlen - Calculate the length of a length-limited string
3601  * @s__ign: The string
3602  * @count: The maximum number of characters to count
3603  *
3604  * Return:
3605  * * >=0      - The length of @s__ign
3606  * * %-EFAULT - Cannot read @s__ign
3607  * * %-E2BIG  - @s__ign is too large
3608  * * %-ERANGE - @s__ign is outside of kernel address space
3609  */
bpf_strnlen(const char * s__ign,size_t count)3610 __bpf_kfunc int bpf_strnlen(const char *s__ign, size_t count)
3611 {
3612 	char c;
3613 	int i;
3614 
3615 	if (!copy_from_kernel_nofault_allowed(s__ign, 1))
3616 		return -ERANGE;
3617 
3618 	guard(pagefault)();
3619 	for (i = 0; i < count && i < XATTR_SIZE_MAX; i++) {
3620 		__get_kernel_nofault(&c, s__ign, char, err_out);
3621 		if (c == '\0')
3622 			return i;
3623 		s__ign++;
3624 	}
3625 	return i == XATTR_SIZE_MAX ? -E2BIG : i;
3626 err_out:
3627 	return -EFAULT;
3628 }
3629 
3630 /**
3631  * bpf_strlen - Calculate the length of a string
3632  * @s__ign: The string
3633  *
3634  * Return:
3635  * * >=0      - The length of @s__ign
3636  * * %-EFAULT - Cannot read @s__ign
3637  * * %-E2BIG  - @s__ign is too large
3638  * * %-ERANGE - @s__ign is outside of kernel address space
3639  */
bpf_strlen(const char * s__ign)3640 __bpf_kfunc int bpf_strlen(const char *s__ign)
3641 {
3642 	return bpf_strnlen(s__ign, XATTR_SIZE_MAX);
3643 }
3644 
3645 /**
3646  * bpf_strspn - Calculate the length of the initial substring of @s__ign which
3647  *              only contains letters in @accept__ign
3648  * @s__ign: The string to be searched
3649  * @accept__ign: The string to search for
3650  *
3651  * Return:
3652  * * >=0      - The length of the initial substring of @s__ign which only
3653  *              contains letters from @accept__ign
3654  * * %-EFAULT - Cannot read one of the strings
3655  * * %-E2BIG  - One of the strings is too large
3656  * * %-ERANGE - One of the strings is outside of kernel address space
3657  */
bpf_strspn(const char * s__ign,const char * accept__ign)3658 __bpf_kfunc int bpf_strspn(const char *s__ign, const char *accept__ign)
3659 {
3660 	char cs, ca;
3661 	int i, j;
3662 
3663 	if (!copy_from_kernel_nofault_allowed(s__ign, 1) ||
3664 	    !copy_from_kernel_nofault_allowed(accept__ign, 1)) {
3665 		return -ERANGE;
3666 	}
3667 
3668 	guard(pagefault)();
3669 	for (i = 0; i < XATTR_SIZE_MAX; i++) {
3670 		__get_kernel_nofault(&cs, s__ign, char, err_out);
3671 		if (cs == '\0')
3672 			return i;
3673 		for (j = 0; j < XATTR_SIZE_MAX; j++) {
3674 			__get_kernel_nofault(&ca, accept__ign + j, char, err_out);
3675 			if (cs == ca || ca == '\0')
3676 				break;
3677 		}
3678 		if (j == XATTR_SIZE_MAX)
3679 			return -E2BIG;
3680 		if (ca == '\0')
3681 			return i;
3682 		s__ign++;
3683 	}
3684 	return -E2BIG;
3685 err_out:
3686 	return -EFAULT;
3687 }
3688 
3689 /**
3690  * bpf_strcspn - Calculate the length of the initial substring of @s__ign which
3691  *               does not contain letters in @reject__ign
3692  * @s__ign: The string to be searched
3693  * @reject__ign: The string to search for
3694  *
3695  * Return:
3696  * * >=0      - The length of the initial substring of @s__ign which does not
3697  *              contain letters from @reject__ign
3698  * * %-EFAULT - Cannot read one of the strings
3699  * * %-E2BIG  - One of the strings is too large
3700  * * %-ERANGE - One of the strings is outside of kernel address space
3701  */
bpf_strcspn(const char * s__ign,const char * reject__ign)3702 __bpf_kfunc int bpf_strcspn(const char *s__ign, const char *reject__ign)
3703 {
3704 	char cs, cr;
3705 	int i, j;
3706 
3707 	if (!copy_from_kernel_nofault_allowed(s__ign, 1) ||
3708 	    !copy_from_kernel_nofault_allowed(reject__ign, 1)) {
3709 		return -ERANGE;
3710 	}
3711 
3712 	guard(pagefault)();
3713 	for (i = 0; i < XATTR_SIZE_MAX; i++) {
3714 		__get_kernel_nofault(&cs, s__ign, char, err_out);
3715 		if (cs == '\0')
3716 			return i;
3717 		for (j = 0; j < XATTR_SIZE_MAX; j++) {
3718 			__get_kernel_nofault(&cr, reject__ign + j, char, err_out);
3719 			if (cs == cr || cr == '\0')
3720 				break;
3721 		}
3722 		if (j == XATTR_SIZE_MAX)
3723 			return -E2BIG;
3724 		if (cr != '\0')
3725 			return i;
3726 		s__ign++;
3727 	}
3728 	return -E2BIG;
3729 err_out:
3730 	return -EFAULT;
3731 }
3732 
__bpf_strnstr(const char * s1,const char * s2,size_t len,bool ignore_case)3733 static int __bpf_strnstr(const char *s1, const char *s2, size_t len,
3734 			 bool ignore_case)
3735 {
3736 	char c1, c2;
3737 	int i, j;
3738 
3739 	if (!copy_from_kernel_nofault_allowed(s1, 1) ||
3740 	    !copy_from_kernel_nofault_allowed(s2, 1)) {
3741 		return -ERANGE;
3742 	}
3743 
3744 	guard(pagefault)();
3745 	for (i = 0; i < XATTR_SIZE_MAX; i++) {
3746 		for (j = 0; i + j <= len && j < XATTR_SIZE_MAX; j++) {
3747 			__get_kernel_nofault(&c2, s2 + j, char, err_out);
3748 			if (c2 == '\0')
3749 				return i;
3750 			/*
3751 			 * We allow reading an extra byte from s2 (note the
3752 			 * `i + j <= len` above) to cover the case when s2 is
3753 			 * a suffix of the first len chars of s1.
3754 			 */
3755 			if (i + j == len)
3756 				break;
3757 			__get_kernel_nofault(&c1, s1 + j, char, err_out);
3758 
3759 			if (ignore_case) {
3760 				c1 = tolower(c1);
3761 				c2 = tolower(c2);
3762 			}
3763 
3764 			if (c1 == '\0')
3765 				return -ENOENT;
3766 			if (c1 != c2)
3767 				break;
3768 		}
3769 		if (j == XATTR_SIZE_MAX)
3770 			return -E2BIG;
3771 		if (i + j == len)
3772 			return -ENOENT;
3773 		s1++;
3774 	}
3775 	return -E2BIG;
3776 err_out:
3777 	return -EFAULT;
3778 }
3779 
3780 /**
3781  * bpf_strstr - Find the first substring in a string
3782  * @s1__ign: The string to be searched
3783  * @s2__ign: The string to search for
3784  *
3785  * Return:
3786  * * >=0      - Index of the first character of the first occurrence of @s2__ign
3787  *              within @s1__ign
3788  * * %-ENOENT - @s2__ign is not a substring of @s1__ign
3789  * * %-EFAULT - Cannot read one of the strings
3790  * * %-E2BIG  - One of the strings is too large
3791  * * %-ERANGE - One of the strings is outside of kernel address space
3792  */
bpf_strstr(const char * s1__ign,const char * s2__ign)3793 __bpf_kfunc int bpf_strstr(const char *s1__ign, const char *s2__ign)
3794 {
3795 	return __bpf_strnstr(s1__ign, s2__ign, XATTR_SIZE_MAX, false);
3796 }
3797 
3798 /**
3799  * bpf_strcasestr - Find the first substring in a string, ignoring the case of
3800  *                  the characters
3801  * @s1__ign: The string to be searched
3802  * @s2__ign: The string to search for
3803  *
3804  * Return:
3805  * * >=0      - Index of the first character of the first occurrence of @s2__ign
3806  *              within @s1__ign
3807  * * %-ENOENT - @s2__ign is not a substring of @s1__ign
3808  * * %-EFAULT - Cannot read one of the strings
3809  * * %-E2BIG  - One of the strings is too large
3810  * * %-ERANGE - One of the strings is outside of kernel address space
3811  */
bpf_strcasestr(const char * s1__ign,const char * s2__ign)3812 __bpf_kfunc int bpf_strcasestr(const char *s1__ign, const char *s2__ign)
3813 {
3814 	return __bpf_strnstr(s1__ign, s2__ign, XATTR_SIZE_MAX, true);
3815 }
3816 
3817 /**
3818  * bpf_strnstr - Find the first substring in a length-limited string
3819  * @s1__ign: The string to be searched
3820  * @s2__ign: The string to search for
3821  * @len: the maximum number of characters to search
3822  *
3823  * Return:
3824  * * >=0      - Index of the first character of the first occurrence of @s2__ign
3825  *              within the first @len characters of @s1__ign
3826  * * %-ENOENT - @s2__ign not found in the first @len characters of @s1__ign
3827  * * %-EFAULT - Cannot read one of the strings
3828  * * %-E2BIG  - One of the strings is too large
3829  * * %-ERANGE - One of the strings is outside of kernel address space
3830  */
bpf_strnstr(const char * s1__ign,const char * s2__ign,size_t len)3831 __bpf_kfunc int bpf_strnstr(const char *s1__ign, const char *s2__ign,
3832 			    size_t len)
3833 {
3834 	return __bpf_strnstr(s1__ign, s2__ign, len, false);
3835 }
3836 
3837 /**
3838  * bpf_strncasestr - Find the first substring in a length-limited string,
3839  *                   ignoring the case of the characters
3840  * @s1__ign: The string to be searched
3841  * @s2__ign: The string to search for
3842  * @len: the maximum number of characters to search
3843  *
3844  * Return:
3845  * * >=0      - Index of the first character of the first occurrence of @s2__ign
3846  *              within the first @len characters of @s1__ign
3847  * * %-ENOENT - @s2__ign not found in the first @len characters of @s1__ign
3848  * * %-EFAULT - Cannot read one of the strings
3849  * * %-E2BIG  - One of the strings is too large
3850  * * %-ERANGE - One of the strings is outside of kernel address space
3851  */
bpf_strncasestr(const char * s1__ign,const char * s2__ign,size_t len)3852 __bpf_kfunc int bpf_strncasestr(const char *s1__ign, const char *s2__ign,
3853 				size_t len)
3854 {
3855 	return __bpf_strnstr(s1__ign, s2__ign, len, true);
3856 }
3857 
3858 #ifdef CONFIG_KEYS
3859 /**
3860  * bpf_lookup_user_key - lookup a key by its serial
3861  * @serial: key handle serial number
3862  * @flags: lookup-specific flags
3863  *
3864  * Search a key with a given *serial* and the provided *flags*.
3865  * If found, increment the reference count of the key by one, and
3866  * return it in the bpf_key structure.
3867  *
3868  * The bpf_key structure must be passed to bpf_key_put() when done
3869  * with it, so that the key reference count is decremented and the
3870  * bpf_key structure is freed.
3871  *
3872  * Permission checks are deferred to the time the key is used by
3873  * one of the available key-specific kfuncs.
3874  *
3875  * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
3876  * special keyring (e.g. session keyring), if it doesn't yet exist.
3877  * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
3878  * for the key construction, and to retrieve uninstantiated keys (keys
3879  * without data attached to them).
3880  *
3881  * Return: a bpf_key pointer with a valid key pointer if the key is found, a
3882  *         NULL pointer otherwise.
3883  */
bpf_lookup_user_key(s32 serial,u64 flags)3884 __bpf_kfunc struct bpf_key *bpf_lookup_user_key(s32 serial, u64 flags)
3885 {
3886 	key_ref_t key_ref;
3887 	struct bpf_key *bkey;
3888 
3889 	if (flags & ~KEY_LOOKUP_ALL)
3890 		return NULL;
3891 
3892 	/*
3893 	 * Permission check is deferred until the key is used, as the
3894 	 * intent of the caller is unknown here.
3895 	 */
3896 	key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
3897 	if (IS_ERR(key_ref))
3898 		return NULL;
3899 
3900 	bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
3901 	if (!bkey) {
3902 		key_put(key_ref_to_ptr(key_ref));
3903 		return NULL;
3904 	}
3905 
3906 	bkey->key = key_ref_to_ptr(key_ref);
3907 	bkey->has_ref = true;
3908 
3909 	return bkey;
3910 }
3911 
3912 /**
3913  * bpf_lookup_system_key - lookup a key by a system-defined ID
3914  * @id: key ID
3915  *
3916  * Obtain a bpf_key structure with a key pointer set to the passed key ID.
3917  * The key pointer is marked as invalid, to prevent bpf_key_put() from
3918  * attempting to decrement the key reference count on that pointer. The key
3919  * pointer set in such way is currently understood only by
3920  * verify_pkcs7_signature().
3921  *
3922  * Set *id* to one of the values defined in include/linux/verification.h:
3923  * 0 for the primary keyring (immutable keyring of system keys);
3924  * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
3925  * (where keys can be added only if they are vouched for by existing keys
3926  * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
3927  * keyring (primarily used by the integrity subsystem to verify a kexec'ed
3928  * kerned image and, possibly, the initramfs signature).
3929  *
3930  * Return: a bpf_key pointer with an invalid key pointer set from the
3931  *         pre-determined ID on success, a NULL pointer otherwise
3932  */
bpf_lookup_system_key(u64 id)3933 __bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
3934 {
3935 	struct bpf_key *bkey;
3936 
3937 	if (system_keyring_id_check(id) < 0)
3938 		return NULL;
3939 
3940 	bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
3941 	if (!bkey)
3942 		return NULL;
3943 
3944 	bkey->key = (struct key *)(unsigned long)id;
3945 	bkey->has_ref = false;
3946 
3947 	return bkey;
3948 }
3949 
3950 /**
3951  * bpf_key_put - decrement key reference count if key is valid and free bpf_key
3952  * @bkey: bpf_key structure
3953  *
3954  * Decrement the reference count of the key inside *bkey*, if the pointer
3955  * is valid, and free *bkey*.
3956  */
bpf_key_put(struct bpf_key * bkey)3957 __bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
3958 {
3959 	if (bkey->has_ref)
3960 		key_put(bkey->key);
3961 
3962 	kfree(bkey);
3963 }
3964 
3965 /**
3966  * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
3967  * @data_p: data to verify
3968  * @sig_p: signature of the data
3969  * @trusted_keyring: keyring with keys trusted for signature verification
3970  *
3971  * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr*
3972  * with keys in a keyring referenced by *trusted_keyring*.
3973  *
3974  * Return: 0 on success, a negative value on error.
3975  */
bpf_verify_pkcs7_signature(struct bpf_dynptr * data_p,struct bpf_dynptr * sig_p,struct bpf_key * trusted_keyring)3976 __bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
3977 			       struct bpf_dynptr *sig_p,
3978 			       struct bpf_key *trusted_keyring)
3979 {
3980 #ifdef CONFIG_SYSTEM_DATA_VERIFICATION
3981 	struct bpf_dynptr_kern *data_ptr = (struct bpf_dynptr_kern *)data_p;
3982 	struct bpf_dynptr_kern *sig_ptr = (struct bpf_dynptr_kern *)sig_p;
3983 	const void *data, *sig;
3984 	u32 data_len, sig_len;
3985 	int ret;
3986 
3987 	if (trusted_keyring->has_ref) {
3988 		/*
3989 		 * Do the permission check deferred in bpf_lookup_user_key().
3990 		 * See bpf_lookup_user_key() for more details.
3991 		 *
3992 		 * A call to key_task_permission() here would be redundant, as
3993 		 * it is already done by keyring_search() called by
3994 		 * find_asymmetric_key().
3995 		 */
3996 		ret = key_validate(trusted_keyring->key);
3997 		if (ret < 0)
3998 			return ret;
3999 	}
4000 
4001 	data_len = __bpf_dynptr_size(data_ptr);
4002 	data = __bpf_dynptr_data(data_ptr, data_len);
4003 	sig_len = __bpf_dynptr_size(sig_ptr);
4004 	sig = __bpf_dynptr_data(sig_ptr, sig_len);
4005 
4006 	return verify_pkcs7_signature(data, data_len, sig, sig_len,
4007 				      trusted_keyring->key,
4008 				      VERIFYING_BPF_SIGNATURE, NULL,
4009 				      NULL);
4010 #else
4011 	return -EOPNOTSUPP;
4012 #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
4013 }
4014 #endif /* CONFIG_KEYS */
4015 
4016 typedef int (*bpf_task_work_callback_t)(struct bpf_map *map, void *key, void *value);
4017 
4018 enum bpf_task_work_state {
4019 	/* bpf_task_work is ready to be used */
4020 	BPF_TW_STANDBY = 0,
4021 	/* irq work scheduling in progress */
4022 	BPF_TW_PENDING,
4023 	/* task work scheduling in progress */
4024 	BPF_TW_SCHEDULING,
4025 	/* task work is scheduled successfully */
4026 	BPF_TW_SCHEDULED,
4027 	/* callback is running */
4028 	BPF_TW_RUNNING,
4029 	/* associated BPF map value is deleted */
4030 	BPF_TW_FREED,
4031 };
4032 
4033 struct bpf_task_work_ctx {
4034 	enum bpf_task_work_state state;
4035 	refcount_t refcnt;
4036 	struct callback_head work;
4037 	struct irq_work irq_work;
4038 	/* bpf_prog that schedules task work */
4039 	struct bpf_prog *prog;
4040 	/* task for which callback is scheduled */
4041 	struct task_struct *task;
4042 	/* the map and map value associated with this context */
4043 	struct bpf_map *map;
4044 	void *map_val;
4045 	enum task_work_notify_mode mode;
4046 	bpf_task_work_callback_t callback_fn;
4047 	struct rcu_head rcu;
4048 } __aligned(8);
4049 
4050 /* Actual type for struct bpf_task_work */
4051 struct bpf_task_work_kern {
4052 	struct bpf_task_work_ctx *ctx;
4053 };
4054 
bpf_task_work_ctx_reset(struct bpf_task_work_ctx * ctx)4055 static void bpf_task_work_ctx_reset(struct bpf_task_work_ctx *ctx)
4056 {
4057 	if (ctx->prog) {
4058 		bpf_prog_put(ctx->prog);
4059 		ctx->prog = NULL;
4060 	}
4061 	if (ctx->task) {
4062 		bpf_task_release(ctx->task);
4063 		ctx->task = NULL;
4064 	}
4065 }
4066 
bpf_task_work_ctx_tryget(struct bpf_task_work_ctx * ctx)4067 static bool bpf_task_work_ctx_tryget(struct bpf_task_work_ctx *ctx)
4068 {
4069 	return refcount_inc_not_zero(&ctx->refcnt);
4070 }
4071 
bpf_task_work_ctx_put(struct bpf_task_work_ctx * ctx)4072 static void bpf_task_work_ctx_put(struct bpf_task_work_ctx *ctx)
4073 {
4074 	if (!refcount_dec_and_test(&ctx->refcnt))
4075 		return;
4076 
4077 	bpf_task_work_ctx_reset(ctx);
4078 
4079 	/* bpf_mem_free expects migration to be disabled */
4080 	migrate_disable();
4081 	bpf_mem_free(&bpf_global_ma, ctx);
4082 	migrate_enable();
4083 }
4084 
bpf_task_work_cancel(struct bpf_task_work_ctx * ctx)4085 static void bpf_task_work_cancel(struct bpf_task_work_ctx *ctx)
4086 {
4087 	/*
4088 	 * Scheduled task_work callback holds ctx ref, so if we successfully
4089 	 * cancelled, we put that ref on callback's behalf. If we couldn't
4090 	 * cancel, callback will inevitably run or has already completed
4091 	 * running, and it would have taken care of its ctx ref itself.
4092 	 */
4093 	if (task_work_cancel(ctx->task, &ctx->work))
4094 		bpf_task_work_ctx_put(ctx);
4095 }
4096 
bpf_task_work_callback(struct callback_head * cb)4097 static void bpf_task_work_callback(struct callback_head *cb)
4098 {
4099 	struct bpf_task_work_ctx *ctx = container_of(cb, struct bpf_task_work_ctx, work);
4100 	enum bpf_task_work_state state;
4101 	u32 idx;
4102 	void *key;
4103 
4104 	/* Read lock is needed to protect ctx and map key/value access */
4105 	guard(rcu_tasks_trace)();
4106 	/*
4107 	 * This callback may start running before bpf_task_work_irq() switched to
4108 	 * SCHEDULED state, so handle both transition variants SCHEDULING|SCHEDULED -> RUNNING.
4109 	 */
4110 	state = cmpxchg(&ctx->state, BPF_TW_SCHEDULING, BPF_TW_RUNNING);
4111 	if (state == BPF_TW_SCHEDULED)
4112 		state = cmpxchg(&ctx->state, BPF_TW_SCHEDULED, BPF_TW_RUNNING);
4113 	if (state == BPF_TW_FREED) {
4114 		bpf_task_work_ctx_put(ctx);
4115 		return;
4116 	}
4117 
4118 	key = (void *)map_key_from_value(ctx->map, ctx->map_val, &idx);
4119 
4120 	migrate_disable();
4121 	ctx->callback_fn(ctx->map, key, ctx->map_val);
4122 	migrate_enable();
4123 
4124 	bpf_task_work_ctx_reset(ctx);
4125 	(void)cmpxchg(&ctx->state, BPF_TW_RUNNING, BPF_TW_STANDBY);
4126 
4127 	bpf_task_work_ctx_put(ctx);
4128 }
4129 
bpf_task_work_irq(struct irq_work * irq_work)4130 static void bpf_task_work_irq(struct irq_work *irq_work)
4131 {
4132 	struct bpf_task_work_ctx *ctx = container_of(irq_work, struct bpf_task_work_ctx, irq_work);
4133 	enum bpf_task_work_state state;
4134 	int err;
4135 
4136 	guard(rcu_tasks_trace)();
4137 
4138 	if (cmpxchg(&ctx->state, BPF_TW_PENDING, BPF_TW_SCHEDULING) != BPF_TW_PENDING) {
4139 		bpf_task_work_ctx_put(ctx);
4140 		return;
4141 	}
4142 
4143 	err = task_work_add(ctx->task, &ctx->work, ctx->mode);
4144 	if (err) {
4145 		bpf_task_work_ctx_reset(ctx);
4146 		/*
4147 		 * try to switch back to STANDBY for another task_work reuse, but we might have
4148 		 * gone to FREED already, which is fine as we already cleaned up after ourselves
4149 		 */
4150 		(void)cmpxchg(&ctx->state, BPF_TW_SCHEDULING, BPF_TW_STANDBY);
4151 		bpf_task_work_ctx_put(ctx);
4152 		return;
4153 	}
4154 
4155 	/*
4156 	 * It's technically possible for just scheduled task_work callback to
4157 	 * complete running by now, going SCHEDULING -> RUNNING and then
4158 	 * dropping its ctx refcount. Instead of capturing extra ref just to
4159 	 * protected below ctx->state access, we rely on RCU protection to
4160 	 * perform below SCHEDULING -> SCHEDULED attempt.
4161 	 */
4162 	state = cmpxchg(&ctx->state, BPF_TW_SCHEDULING, BPF_TW_SCHEDULED);
4163 	if (state == BPF_TW_FREED)
4164 		bpf_task_work_cancel(ctx); /* clean up if we switched into FREED state */
4165 }
4166 
bpf_task_work_fetch_ctx(struct bpf_task_work * tw,struct bpf_map * map)4167 static struct bpf_task_work_ctx *bpf_task_work_fetch_ctx(struct bpf_task_work *tw,
4168 							 struct bpf_map *map)
4169 {
4170 	struct bpf_task_work_kern *twk = (void *)tw;
4171 	struct bpf_task_work_ctx *ctx, *old_ctx;
4172 
4173 	ctx = READ_ONCE(twk->ctx);
4174 	if (ctx)
4175 		return ctx;
4176 
4177 	ctx = bpf_mem_alloc(&bpf_global_ma, sizeof(struct bpf_task_work_ctx));
4178 	if (!ctx)
4179 		return ERR_PTR(-ENOMEM);
4180 
4181 	memset(ctx, 0, sizeof(*ctx));
4182 	refcount_set(&ctx->refcnt, 1); /* map's own ref */
4183 	ctx->state = BPF_TW_STANDBY;
4184 
4185 	old_ctx = cmpxchg(&twk->ctx, NULL, ctx);
4186 	if (old_ctx) {
4187 		/*
4188 		 * tw->ctx is set by concurrent BPF program, release allocated
4189 		 * memory and try to reuse already set context.
4190 		 */
4191 		bpf_mem_free(&bpf_global_ma, ctx);
4192 		return old_ctx;
4193 	}
4194 
4195 	return ctx; /* Success */
4196 }
4197 
bpf_task_work_acquire_ctx(struct bpf_task_work * tw,struct bpf_map * map)4198 static struct bpf_task_work_ctx *bpf_task_work_acquire_ctx(struct bpf_task_work *tw,
4199 							   struct bpf_map *map)
4200 {
4201 	struct bpf_task_work_ctx *ctx;
4202 
4203 	ctx = bpf_task_work_fetch_ctx(tw, map);
4204 	if (IS_ERR(ctx))
4205 		return ctx;
4206 
4207 	/* try to get ref for task_work callback to hold */
4208 	if (!bpf_task_work_ctx_tryget(ctx))
4209 		return ERR_PTR(-EBUSY);
4210 
4211 	if (cmpxchg(&ctx->state, BPF_TW_STANDBY, BPF_TW_PENDING) != BPF_TW_STANDBY) {
4212 		/* lost acquiring race or map_release_uref() stole it from us, put ref and bail */
4213 		bpf_task_work_ctx_put(ctx);
4214 		return ERR_PTR(-EBUSY);
4215 	}
4216 
4217 	/*
4218 	 * If no process or bpffs is holding a reference to the map, no new callbacks should be
4219 	 * scheduled. This does not address any race or correctness issue, but rather is a policy
4220 	 * choice: dropping user references should stop everything.
4221 	 */
4222 	if (!atomic64_read(&map->usercnt)) {
4223 		/* drop ref we just got for task_work callback itself */
4224 		bpf_task_work_ctx_put(ctx);
4225 		/* transfer map's ref into cancel_and_free() */
4226 		bpf_task_work_cancel_and_free(tw);
4227 		return ERR_PTR(-EBUSY);
4228 	}
4229 
4230 	return ctx;
4231 }
4232 
bpf_task_work_schedule(struct task_struct * task,struct bpf_task_work * tw,struct bpf_map * map,bpf_task_work_callback_t callback_fn,struct bpf_prog_aux * aux,enum task_work_notify_mode mode)4233 static int bpf_task_work_schedule(struct task_struct *task, struct bpf_task_work *tw,
4234 				  struct bpf_map *map, bpf_task_work_callback_t callback_fn,
4235 				  struct bpf_prog_aux *aux, enum task_work_notify_mode mode)
4236 {
4237 	struct bpf_prog *prog;
4238 	struct bpf_task_work_ctx *ctx;
4239 	int err;
4240 
4241 	BTF_TYPE_EMIT(struct bpf_task_work);
4242 
4243 	prog = bpf_prog_inc_not_zero(aux->prog);
4244 	if (IS_ERR(prog))
4245 		return -EBADF;
4246 	task = bpf_task_acquire(task);
4247 	if (!task) {
4248 		err = -EBADF;
4249 		goto release_prog;
4250 	}
4251 
4252 	ctx = bpf_task_work_acquire_ctx(tw, map);
4253 	if (IS_ERR(ctx)) {
4254 		err = PTR_ERR(ctx);
4255 		goto release_all;
4256 	}
4257 
4258 	ctx->task = task;
4259 	ctx->callback_fn = callback_fn;
4260 	ctx->prog = prog;
4261 	ctx->mode = mode;
4262 	ctx->map = map;
4263 	ctx->map_val = (void *)tw - map->record->task_work_off;
4264 	init_task_work(&ctx->work, bpf_task_work_callback);
4265 	init_irq_work(&ctx->irq_work, bpf_task_work_irq);
4266 
4267 	irq_work_queue(&ctx->irq_work);
4268 	return 0;
4269 
4270 release_all:
4271 	bpf_task_release(task);
4272 release_prog:
4273 	bpf_prog_put(prog);
4274 	return err;
4275 }
4276 
4277 /**
4278  * bpf_task_work_schedule_signal_impl - Schedule BPF callback using task_work_add with TWA_SIGNAL
4279  * mode
4280  * @task: Task struct for which callback should be scheduled
4281  * @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping
4282  * @map__map: bpf_map that embeds struct bpf_task_work in the values
4283  * @callback: pointer to BPF subprogram to call
4284  * @aux__prog: user should pass NULL
4285  *
4286  * Return: 0 if task work has been scheduled successfully, negative error code otherwise
4287  */
bpf_task_work_schedule_signal_impl(struct task_struct * task,struct bpf_task_work * tw,void * map__map,bpf_task_work_callback_t callback,void * aux__prog)4288 __bpf_kfunc int bpf_task_work_schedule_signal_impl(struct task_struct *task,
4289 						   struct bpf_task_work *tw, void *map__map,
4290 						   bpf_task_work_callback_t callback,
4291 						   void *aux__prog)
4292 {
4293 	return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_SIGNAL);
4294 }
4295 
4296 /**
4297  * bpf_task_work_schedule_resume_impl - Schedule BPF callback using task_work_add with TWA_RESUME
4298  * mode
4299  * @task: Task struct for which callback should be scheduled
4300  * @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping
4301  * @map__map: bpf_map that embeds struct bpf_task_work in the values
4302  * @callback: pointer to BPF subprogram to call
4303  * @aux__prog: user should pass NULL
4304  *
4305  * Return: 0 if task work has been scheduled successfully, negative error code otherwise
4306  */
bpf_task_work_schedule_resume_impl(struct task_struct * task,struct bpf_task_work * tw,void * map__map,bpf_task_work_callback_t callback,void * aux__prog)4307 __bpf_kfunc int bpf_task_work_schedule_resume_impl(struct task_struct *task,
4308 						   struct bpf_task_work *tw, void *map__map,
4309 						   bpf_task_work_callback_t callback,
4310 						   void *aux__prog)
4311 {
4312 	return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_RESUME);
4313 }
4314 
make_file_dynptr(struct file * file,u32 flags,bool may_sleep,struct bpf_dynptr_kern * ptr)4315 static int make_file_dynptr(struct file *file, u32 flags, bool may_sleep,
4316 			    struct bpf_dynptr_kern *ptr)
4317 {
4318 	struct bpf_dynptr_file_impl *state;
4319 
4320 	/* flags is currently unsupported */
4321 	if (flags) {
4322 		bpf_dynptr_set_null(ptr);
4323 		return -EINVAL;
4324 	}
4325 
4326 	state = bpf_mem_alloc(&bpf_global_ma, sizeof(struct bpf_dynptr_file_impl));
4327 	if (!state) {
4328 		bpf_dynptr_set_null(ptr);
4329 		return -ENOMEM;
4330 	}
4331 	state->offset = 0;
4332 	state->size = U64_MAX; /* Don't restrict size, as file may change anyways */
4333 	freader_init_from_file(&state->freader, NULL, 0, file, may_sleep);
4334 	bpf_dynptr_init(ptr, state, BPF_DYNPTR_TYPE_FILE, 0, 0);
4335 	bpf_dynptr_set_rdonly(ptr);
4336 	return 0;
4337 }
4338 
bpf_dynptr_from_file(struct file * file,u32 flags,struct bpf_dynptr * ptr__uninit)4339 __bpf_kfunc int bpf_dynptr_from_file(struct file *file, u32 flags, struct bpf_dynptr *ptr__uninit)
4340 {
4341 	return make_file_dynptr(file, flags, false, (struct bpf_dynptr_kern *)ptr__uninit);
4342 }
4343 
bpf_dynptr_from_file_sleepable(struct file * file,u32 flags,struct bpf_dynptr * ptr__uninit)4344 int bpf_dynptr_from_file_sleepable(struct file *file, u32 flags, struct bpf_dynptr *ptr__uninit)
4345 {
4346 	return make_file_dynptr(file, flags, true, (struct bpf_dynptr_kern *)ptr__uninit);
4347 }
4348 
bpf_dynptr_file_discard(struct bpf_dynptr * dynptr)4349 __bpf_kfunc int bpf_dynptr_file_discard(struct bpf_dynptr *dynptr)
4350 {
4351 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)dynptr;
4352 	struct bpf_dynptr_file_impl *df = ptr->data;
4353 
4354 	if (!df)
4355 		return 0;
4356 
4357 	freader_cleanup(&df->freader);
4358 	bpf_mem_free(&bpf_global_ma, df);
4359 	bpf_dynptr_set_null(ptr);
4360 	return 0;
4361 }
4362 
4363 __bpf_kfunc_end_defs();
4364 
bpf_task_work_cancel_scheduled(struct irq_work * irq_work)4365 static void bpf_task_work_cancel_scheduled(struct irq_work *irq_work)
4366 {
4367 	struct bpf_task_work_ctx *ctx = container_of(irq_work, struct bpf_task_work_ctx, irq_work);
4368 
4369 	bpf_task_work_cancel(ctx); /* this might put task_work callback's ref */
4370 	bpf_task_work_ctx_put(ctx); /* and here we put map's own ref that was transferred to us */
4371 }
4372 
bpf_task_work_cancel_and_free(void * val)4373 void bpf_task_work_cancel_and_free(void *val)
4374 {
4375 	struct bpf_task_work_kern *twk = val;
4376 	struct bpf_task_work_ctx *ctx;
4377 	enum bpf_task_work_state state;
4378 
4379 	ctx = xchg(&twk->ctx, NULL);
4380 	if (!ctx)
4381 		return;
4382 
4383 	state = xchg(&ctx->state, BPF_TW_FREED);
4384 	if (state == BPF_TW_SCHEDULED) {
4385 		/* run in irq_work to avoid locks in NMI */
4386 		init_irq_work(&ctx->irq_work, bpf_task_work_cancel_scheduled);
4387 		irq_work_queue(&ctx->irq_work);
4388 		return;
4389 	}
4390 
4391 	bpf_task_work_ctx_put(ctx); /* put bpf map's ref */
4392 }
4393 
4394 BTF_KFUNCS_START(generic_btf_ids)
4395 #ifdef CONFIG_CRASH_DUMP
4396 BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
4397 #endif
4398 BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
4399 BTF_ID_FLAGS(func, bpf_percpu_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
4400 BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE)
4401 BTF_ID_FLAGS(func, bpf_percpu_obj_drop_impl, KF_RELEASE)
4402 BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL | KF_RCU)
4403 BTF_ID_FLAGS(func, bpf_list_push_front_impl)
4404 BTF_ID_FLAGS(func, bpf_list_push_back_impl)
4405 BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL)
4406 BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
4407 BTF_ID_FLAGS(func, bpf_list_front, KF_RET_NULL)
4408 BTF_ID_FLAGS(func, bpf_list_back, KF_RET_NULL)
4409 BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
4410 BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
4411 BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL)
4412 BTF_ID_FLAGS(func, bpf_rbtree_add_impl)
4413 BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL)
4414 BTF_ID_FLAGS(func, bpf_rbtree_root, KF_RET_NULL)
4415 BTF_ID_FLAGS(func, bpf_rbtree_left, KF_RET_NULL)
4416 BTF_ID_FLAGS(func, bpf_rbtree_right, KF_RET_NULL)
4417 
4418 #ifdef CONFIG_CGROUPS
4419 BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
4420 BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE)
4421 BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
4422 BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL)
4423 BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU)
4424 BTF_ID_FLAGS(func, bpf_task_get_cgroup1, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
4425 #endif
4426 BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
4427 BTF_ID_FLAGS(func, bpf_task_from_vpid, KF_ACQUIRE | KF_RET_NULL)
4428 BTF_ID_FLAGS(func, bpf_throw)
4429 #ifdef CONFIG_BPF_EVENTS
4430 BTF_ID_FLAGS(func, bpf_send_signal_task, KF_TRUSTED_ARGS)
4431 #endif
4432 #ifdef CONFIG_KEYS
4433 BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
4434 BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
4435 BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
4436 #ifdef CONFIG_SYSTEM_DATA_VERIFICATION
4437 BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
4438 #endif
4439 #endif
4440 BTF_KFUNCS_END(generic_btf_ids)
4441 
4442 static const struct btf_kfunc_id_set generic_kfunc_set = {
4443 	.owner = THIS_MODULE,
4444 	.set   = &generic_btf_ids,
4445 };
4446 
4447 
4448 BTF_ID_LIST(generic_dtor_ids)
4449 BTF_ID(struct, task_struct)
4450 BTF_ID(func, bpf_task_release_dtor)
4451 #ifdef CONFIG_CGROUPS
4452 BTF_ID(struct, cgroup)
4453 BTF_ID(func, bpf_cgroup_release_dtor)
4454 #endif
4455 
4456 BTF_KFUNCS_START(common_btf_ids)
4457 BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx, KF_FASTCALL)
4458 BTF_ID_FLAGS(func, bpf_rdonly_cast, KF_FASTCALL)
4459 BTF_ID_FLAGS(func, bpf_rcu_read_lock)
4460 BTF_ID_FLAGS(func, bpf_rcu_read_unlock)
4461 BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL)
4462 BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL)
4463 BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW)
4464 BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL)
4465 BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY)
4466 BTF_ID_FLAGS(func, bpf_iter_task_vma_new, KF_ITER_NEW | KF_RCU)
4467 BTF_ID_FLAGS(func, bpf_iter_task_vma_next, KF_ITER_NEXT | KF_RET_NULL)
4468 BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY)
4469 #ifdef CONFIG_CGROUPS
4470 BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS)
4471 BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL)
4472 BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY)
4473 BTF_ID_FLAGS(func, bpf_iter_css_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
4474 BTF_ID_FLAGS(func, bpf_iter_css_next, KF_ITER_NEXT | KF_RET_NULL)
4475 BTF_ID_FLAGS(func, bpf_iter_css_destroy, KF_ITER_DESTROY)
4476 #endif
4477 BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
4478 BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL)
4479 BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY)
4480 BTF_ID_FLAGS(func, bpf_dynptr_adjust)
4481 BTF_ID_FLAGS(func, bpf_dynptr_is_null)
4482 BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
4483 BTF_ID_FLAGS(func, bpf_dynptr_size)
4484 BTF_ID_FLAGS(func, bpf_dynptr_clone)
4485 BTF_ID_FLAGS(func, bpf_dynptr_copy)
4486 BTF_ID_FLAGS(func, bpf_dynptr_memset)
4487 #ifdef CONFIG_NET
4488 BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
4489 #endif
4490 BTF_ID_FLAGS(func, bpf_wq_init)
4491 BTF_ID_FLAGS(func, bpf_wq_set_callback_impl)
4492 BTF_ID_FLAGS(func, bpf_wq_start)
4493 BTF_ID_FLAGS(func, bpf_preempt_disable)
4494 BTF_ID_FLAGS(func, bpf_preempt_enable)
4495 BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW)
4496 BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL)
4497 BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY)
4498 BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE)
4499 BTF_ID_FLAGS(func, bpf_copy_from_user_task_str, KF_SLEEPABLE)
4500 BTF_ID_FLAGS(func, bpf_get_kmem_cache)
4501 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_new, KF_ITER_NEW | KF_SLEEPABLE)
4502 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
4503 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)
4504 BTF_ID_FLAGS(func, bpf_local_irq_save)
4505 BTF_ID_FLAGS(func, bpf_local_irq_restore)
4506 #ifdef CONFIG_BPF_EVENTS
4507 BTF_ID_FLAGS(func, bpf_probe_read_user_dynptr)
4508 BTF_ID_FLAGS(func, bpf_probe_read_kernel_dynptr)
4509 BTF_ID_FLAGS(func, bpf_probe_read_user_str_dynptr)
4510 BTF_ID_FLAGS(func, bpf_probe_read_kernel_str_dynptr)
4511 BTF_ID_FLAGS(func, bpf_copy_from_user_dynptr, KF_SLEEPABLE)
4512 BTF_ID_FLAGS(func, bpf_copy_from_user_str_dynptr, KF_SLEEPABLE)
4513 BTF_ID_FLAGS(func, bpf_copy_from_user_task_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
4514 BTF_ID_FLAGS(func, bpf_copy_from_user_task_str_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
4515 #endif
4516 #ifdef CONFIG_DMA_SHARED_BUFFER
4517 BTF_ID_FLAGS(func, bpf_iter_dmabuf_new, KF_ITER_NEW | KF_SLEEPABLE)
4518 BTF_ID_FLAGS(func, bpf_iter_dmabuf_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
4519 BTF_ID_FLAGS(func, bpf_iter_dmabuf_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)
4520 #endif
4521 BTF_ID_FLAGS(func, __bpf_trap)
4522 BTF_ID_FLAGS(func, bpf_strcmp);
4523 BTF_ID_FLAGS(func, bpf_strcasecmp);
4524 BTF_ID_FLAGS(func, bpf_strchr);
4525 BTF_ID_FLAGS(func, bpf_strchrnul);
4526 BTF_ID_FLAGS(func, bpf_strnchr);
4527 BTF_ID_FLAGS(func, bpf_strrchr);
4528 BTF_ID_FLAGS(func, bpf_strlen);
4529 BTF_ID_FLAGS(func, bpf_strnlen);
4530 BTF_ID_FLAGS(func, bpf_strspn);
4531 BTF_ID_FLAGS(func, bpf_strcspn);
4532 BTF_ID_FLAGS(func, bpf_strstr);
4533 BTF_ID_FLAGS(func, bpf_strcasestr);
4534 BTF_ID_FLAGS(func, bpf_strnstr);
4535 BTF_ID_FLAGS(func, bpf_strncasestr);
4536 #if defined(CONFIG_BPF_LSM) && defined(CONFIG_CGROUPS)
4537 BTF_ID_FLAGS(func, bpf_cgroup_read_xattr, KF_RCU)
4538 #endif
4539 BTF_ID_FLAGS(func, bpf_stream_vprintk_impl, KF_TRUSTED_ARGS)
4540 BTF_ID_FLAGS(func, bpf_task_work_schedule_signal_impl, KF_TRUSTED_ARGS)
4541 BTF_ID_FLAGS(func, bpf_task_work_schedule_resume_impl, KF_TRUSTED_ARGS)
4542 BTF_ID_FLAGS(func, bpf_dynptr_from_file, KF_TRUSTED_ARGS)
4543 BTF_ID_FLAGS(func, bpf_dynptr_file_discard)
4544 BTF_KFUNCS_END(common_btf_ids)
4545 
4546 static const struct btf_kfunc_id_set common_kfunc_set = {
4547 	.owner = THIS_MODULE,
4548 	.set   = &common_btf_ids,
4549 };
4550 
kfunc_init(void)4551 static int __init kfunc_init(void)
4552 {
4553 	int ret;
4554 	const struct btf_id_dtor_kfunc generic_dtors[] = {
4555 		{
4556 			.btf_id       = generic_dtor_ids[0],
4557 			.kfunc_btf_id = generic_dtor_ids[1]
4558 		},
4559 #ifdef CONFIG_CGROUPS
4560 		{
4561 			.btf_id       = generic_dtor_ids[2],
4562 			.kfunc_btf_id = generic_dtor_ids[3]
4563 		},
4564 #endif
4565 	};
4566 
4567 	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set);
4568 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set);
4569 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &generic_kfunc_set);
4570 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set);
4571 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &generic_kfunc_set);
4572 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SKB, &generic_kfunc_set);
4573 	ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors,
4574 						  ARRAY_SIZE(generic_dtors),
4575 						  THIS_MODULE);
4576 	return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set);
4577 }
4578 
4579 late_initcall(kfunc_init);
4580 
4581 /* Get a pointer to dynptr data up to len bytes for read only access. If
4582  * the dynptr doesn't have continuous data up to len bytes, return NULL.
4583  */
__bpf_dynptr_data(const struct bpf_dynptr_kern * ptr,u64 len)4584 const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u64 len)
4585 {
4586 	const struct bpf_dynptr *p = (struct bpf_dynptr *)ptr;
4587 
4588 	return bpf_dynptr_slice(p, 0, NULL, len);
4589 }
4590 
4591 /* Get a pointer to dynptr data up to len bytes for read write access. If
4592  * the dynptr doesn't have continuous data up to len bytes, or the dynptr
4593  * is read only, return NULL.
4594  */
__bpf_dynptr_data_rw(const struct bpf_dynptr_kern * ptr,u64 len)4595 void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u64 len)
4596 {
4597 	if (__bpf_dynptr_is_rdonly(ptr))
4598 		return NULL;
4599 	return (void *)__bpf_dynptr_data(ptr, len);
4600 }
4601 
bpf_map_free_internal_structs(struct bpf_map * map,void * val)4602 void bpf_map_free_internal_structs(struct bpf_map *map, void *val)
4603 {
4604 	if (btf_record_has_field(map->record, BPF_TIMER))
4605 		bpf_obj_free_timer(map->record, val);
4606 	if (btf_record_has_field(map->record, BPF_WORKQUEUE))
4607 		bpf_obj_free_workqueue(map->record, val);
4608 	if (btf_record_has_field(map->record, BPF_TASK_WORK))
4609 		bpf_obj_free_task_work(map->record, val);
4610 }
4611