// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/poison.h>
#include <linux/proc_ns.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <linux/btf_ids.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/kasan.h>
#include <linux/bpf_verifier.h>
#include <linux/uaccess.h>
#include <linux/verification.h>
#include <linux/task_work.h>
#include <linux/irq_work.h>
#include <linux/buildid.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function, it should
 * return bpf_map_lookup_elem_proto from its own verifier_ops->get_func_proto()
 * callback, so that the verifier can properly check the arguments.
 *
 * Different map implementations rely on RCU in the map methods
 * lookup/update/delete, therefore eBPF programs must run under an RCU lock
 * if the program is allowed to access maps, so check rcu_read_lock_held() or
 * rcu_read_lock_trace_held() in all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
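
/* Example BPF-program-side usage (an illustrative sketch, not part of this
 * file; the map definition and key/value layout are assumptions):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1024);
 *		__type(key, u32);
 *		__type(value, u64);
 *	} latency SEC(".maps");
 *
 *	u32 key = bpf_get_current_pid_tgid();
 *	u64 *val = bpf_map_lookup_elem(&latency, &key);
 *
 *	if (val)	// NULL when the key is absent
 *		__sync_fetch_and_add(val, 1);
 *
 * The RET_PTR_TO_MAP_VALUE_OR_NULL return type is what forces the verifier
 * to reject programs that dereference the result without the NULL check.
 */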

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func		= bpf_map_push_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func		= bpf_map_pop_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func		= bpf_map_peek_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
};
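
/* Example usage of the queue/stack helpers above (a sketch; the map
 * definition is an illustrative assumption):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_QUEUE);
 *		__uint(max_entries, 64);
 *		__type(value, u64);
 *	} q SEC(".maps");
 *
 *	u64 in = 42, out;
 *
 *	bpf_map_push_elem(&q, &in, BPF_ANY);
 *	if (!bpf_map_peek_elem(&q, &out))	// copies the head, keeps it
 *		...
 *	if (!bpf_map_pop_elem(&q, &out))	// copies and removes the head
 *		...
 *
 * pop/peek write into the caller's buffer, hence the MEM_UNINIT | MEM_WRITE
 * annotation on arg2 in the two protos above.
 */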

BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
}

const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = {
	.func		= bpf_map_lookup_percpu_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_ANYTHING,
};

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.allow_fastcall	= true,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_boot_ns)
{
	/* NMI safe access to clock boottime */
	return ktime_get_boot_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
	.func		= bpf_ktime_get_boot_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_coarse_ns)
{
	return ktime_get_coarse_ns();
}

const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
	.func		= bpf_ktime_get_coarse_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_tai_ns)
{
	/* NMI safe access to clock tai */
	return ktime_get_tai_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_tai_ns_proto = {
	.func		= bpf_ktime_get_tai_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
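
/* The returned u64 packs both ids; a BPF program typically splits it like
 * this (sketch):
 *
 *	u64 id = bpf_get_current_pid_tgid();
 *	u32 tgid = id >> 32;	// userspace "process" id
 *	u32 pid = (u32)id;	// userspace "thread" id
 *
 * bpf_get_current_uid_gid() below uses the same packing, with the gid in
 * the upper and the uid in the lower 32 bits.
 */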

BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	/* Verifier guarantees that size > 0 */
	strscpy_pad(buf, task->comm, size);
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	preempt_disable();
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
	preempt_enable();
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
}

NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_lock_irqsave(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func		= bpf_spin_lock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
	.arg1_btf_id	= BPF_PTR_POISON,
};

static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
}

NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_unlock_irqrestore(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func		= bpf_spin_unlock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
	.arg1_btf_id	= BPF_PTR_POISON,
};

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->record->spin_lock_off;
	else
		lock = dst + map->record->spin_lock_off;
	preempt_disable();
	__bpf_spin_lock_irqsave(lock);
	copy_map_value(map, dst, src);
	__bpf_spin_unlock_irqrestore(lock);
	preempt_enable();
}

BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func		= bpf_jiffies64,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	cgrp_id = cgroup_id(cgrp);
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func		= bpf_get_current_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
	struct cgroup *cgrp;
	struct cgroup *ancestor;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ancestor = cgroup_ancestor(cgrp, ancestor_level);
	cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
	.func		= bpf_get_current_ancestor_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};
#endif /* CONFIG_CGROUPS */

#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   s64 *, res)
{
	long long _res;
	int err;

	*res = 0;
	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func		= bpf_strtol,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
	.arg4_size	= sizeof(s64),
};
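
/* Example (sketch; the buffer contents are an assumption): parsing a
 * decimal value, e.g. one read from a sysctl or a map:
 *
 *	char buf[] = " -42";
 *	s64 val;
 *	int n = bpf_strtol(buf, sizeof(buf) - 1, 0, &val);
 *
 * On success n is the number of consumed characters (4 here) and
 * val == -42. Base 0 auto-detects "0x"/"0" prefixes like kstrtol() does.
 */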

BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   u64 *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	*res = 0;
	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func		= bpf_strtoul,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
	.arg4_size	= sizeof(u64),
};

BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
{
	return strncmp(s1, s2, s1_sz);
}

static const struct bpf_func_proto bpf_strncmp_proto = {
	.func		= bpf_strncmp,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_CONST_STR,
};
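
/* Example (sketch): s2 must be a string constant known at verification
 * time (ARG_PTR_TO_CONST_STR), e.g. comparing the current comm:
 *
 *	char comm[16];
 *
 *	bpf_get_current_comm(comm, sizeof(comm));
 *	if (!bpf_strncmp(comm, 4, "sshd"))
 *		...comm starts with "sshd"...
 */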

BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
	   struct bpf_pidns_info *, nsdata, u32, size)
{
	struct task_struct *task = current;
	struct pid_namespace *pidns;
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_pidns_info)))
		goto clear;

	if (unlikely((u64)(dev_t)dev != dev))
		goto clear;

	if (unlikely(!task))
		goto clear;

	pidns = task_active_pid_ns(task);
	if (unlikely(!pidns)) {
		err = -ENOENT;
		goto clear;
	}

	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
		goto clear;

	nsdata->pid = task_pid_nr_ns(task, pidns);
	nsdata->tgid = task_tgid_nr_ns(task, pidns);
	return 0;
clear:
	memset((void *)nsdata, 0, (size_t) size);
	return err;
}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
	.func		= bpf_get_ns_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= bpf_get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}

const struct bpf_func_proto bpf_event_output_data_proto = {
	.func		= bpf_event_output_data,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
	   const void __user *, user_ptr)
{
	int ret = copy_from_user(dst, user_ptr, size);

	if (unlikely(ret)) {
		memset(dst, 0, size);
		ret = -EFAULT;
	}

	return ret;
}

const struct bpf_func_proto bpf_copy_from_user_proto = {
	.func		= bpf_copy_from_user,
	.gpl_only	= false,
	.might_sleep	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size,
	   const void __user *, user_ptr, struct task_struct *, tsk, u64, flags)
{
	int ret;

	/* flags is not used yet */
	if (unlikely(flags))
		return -EINVAL;

	if (unlikely(!size))
		return 0;

	ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0);
	if (ret == size)
		return 0;

	memset(dst, 0, size);
	/* Return -EFAULT for partial read */
	return ret < 0 ? ret : -EFAULT;
}

const struct bpf_func_proto bpf_copy_from_user_task_proto = {
	.func		= bpf_copy_from_user_task,
	.gpl_only	= true,
	.might_sleep	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_BTF_ID,
	.arg4_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg5_type	= ARG_ANYTHING
};

BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{
	if (cpu >= nr_cpu_ids)
		return (unsigned long)NULL;

	return (unsigned long)per_cpu_ptr((const void __percpu *)(const uintptr_t)ptr, cpu);
}

const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
	.func		= bpf_per_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
	return (unsigned long)this_cpu_ptr((const void __percpu *)(const uintptr_t)percpu_ptr);
}

const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
	.func		= bpf_this_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
};
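
/* Example (sketch) for the two per-cpu helpers above, using a kernel
 * per-cpu variable referenced as a ksym from a BPF program:
 *
 *	extern const struct rq runqueues __ksym;	// per-cpu kernel variable
 *
 *	struct rq *this_rq = bpf_this_cpu_ptr(&runqueues);	// never NULL
 *	struct rq *rq0 = bpf_per_cpu_ptr(&runqueues, 0);	// may be NULL
 *
 * bpf_per_cpu_ptr() returns NULL when cpu >= nr_cpu_ids, so its result
 * must be NULL-checked (PTR_MAYBE_NULL); bpf_this_cpu_ptr() cannot fail.
 */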

static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
				 size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE)
			return strncpy_from_user_nofault(buf, user_ptr, bufsz);
		fallthrough;
#endif
	case 'k':
		return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
	case 'u':
		return strncpy_from_user_nofault(buf, user_ptr, bufsz);
	}

	return -EINVAL;
}

/* Support executing three nested bprintf helper calls on a given CPU */
#define MAX_BPRINTF_NEST_LEVEL	3

static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs);
static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);

int bpf_try_get_buffers(struct bpf_bprintf_buffers **bufs)
{
	int nest_level;

	preempt_disable();
	nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
	if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		preempt_enable();
		return -EBUSY;
	}
	*bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]);

	return 0;
}

void bpf_put_buffers(void)
{
	if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0))
		return;
	this_cpu_dec(bpf_bprintf_nest_level);
	preempt_enable();
}

void bpf_bprintf_cleanup(struct bpf_bprintf_data *data)
{
	if (!data->bin_args && !data->buf)
		return;
	bpf_put_buffers();
}

/*
 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
 *
 * Returns a negative value if fmt is an invalid format string or 0 otherwise.
 *
 * This can be used in two ways:
 * - Format string verification only: when data->get_bin_args is false
 * - Arguments preparation: in addition to the above verification, it writes in
 *   data->bin_args a binary representation of arguments usable by bstr_printf
 *   where pointers from BPF have been sanitized.
 *
 * In argument preparation mode, if 0 is returned, safe temporary buffers are
 * allocated and bpf_bprintf_cleanup should be called to free them after use.
 */
int bpf_bprintf_prepare(const char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 num_args, struct bpf_bprintf_data *data)
{
	bool get_buffers = (data->get_bin_args && num_args) || data->get_buf;
	char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
	struct bpf_bprintf_buffers *buffers = NULL;
	size_t sizeof_cur_arg, sizeof_cur_ip;
	int err, i, num_spec = 0;
	u64 cur_arg;
	char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";

	fmt_end = strnchr(fmt, fmt_size, 0);
	if (!fmt_end)
		return -EINVAL;
	fmt_size = fmt_end - fmt;

	if (get_buffers && bpf_try_get_buffers(&buffers))
		return -EBUSY;

	if (data->get_bin_args) {
		if (num_args)
			tmp_buf = buffers->bin_args;
		tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS;
		data->bin_args = (u32 *)tmp_buf;
	}

	if (data->get_buf)
		data->buf = buffers->buf;

	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (num_spec >= num_args) {
			err = -EINVAL;
			goto out;
		}

		/* The string is zero-terminated so if fmt[i] != 0, we can
		 * always access fmt[i + 1], in the worst case it will be a 0
		 */
		i++;

		/* skip optional "[0 +-][num]" width formatting field */
		while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 'p') {
			sizeof_cur_arg = sizeof(long);

			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
			    ispunct(fmt[i + 1])) {
				if (tmp_buf)
					cur_arg = raw_args[num_spec];
				goto nocopy_fmt;
			}

			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
			    fmt[i + 1] == 'S') {
				if (tmp_buf)
					cur_arg = raw_args[num_spec];
				i++;
				goto nocopy_fmt;
			}

			if (fmt[i + 1] == 'B') {
				if (tmp_buf) {
					err = snprintf(tmp_buf,
						       (tmp_buf_end - tmp_buf),
						       "%pB",
						       (void *)(long)raw_args[num_spec]);
					tmp_buf += (err + 1);
				}

				i++;
				num_spec++;
				continue;
			}

			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
			if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
			    (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
				err = -EINVAL;
				goto out;
			}

			i += 2;
			if (!tmp_buf)
				goto nocopy_fmt;

			sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
			if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
						       sizeof_cur_ip);
			if (err < 0)
				memset(cur_ip, 0, sizeof_cur_ip);

			/* hack: bstr_printf expects IP addresses to be
			 * pre-formatted as strings, ironically, the easiest way
			 * to do that is to call snprintf.
			 */
			ip_spec[2] = fmt[i - 1];
			ip_spec[3] = fmt[i];
			err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
				       ip_spec, &cur_ip);

			tmp_buf += err + 1;
			num_spec++;

			continue;
		} else if (fmt[i] == 's') {
			fmt_ptype = fmt[i];
fmt_str:
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1])) {
				err = -EINVAL;
				goto out;
			}

			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
						    fmt_ptype,
						    tmp_buf_end - tmp_buf);
			if (err < 0) {
				tmp_buf[0] = '\0';
				err = 1;
			}

			tmp_buf += err;
			num_spec++;

			continue;
		} else if (fmt[i] == 'c') {
			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			*tmp_buf = raw_args[num_spec];
			tmp_buf++;
			num_spec++;

			continue;
		}

		sizeof_cur_arg = sizeof(int);

		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long);
			i++;
		}
		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long long);
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
		    fmt[i] != 'x' && fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		if (tmp_buf)
			cur_arg = raw_args[num_spec];
nocopy_fmt:
		if (tmp_buf) {
			tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
			if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
				err = -ENOSPC;
				goto out;
			}

			if (sizeof_cur_arg == 8) {
				*(u32 *)tmp_buf = *(u32 *)&cur_arg;
				*(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
			} else {
				*(u32 *)tmp_buf = (u32)(long)cur_arg;
			}
			tmp_buf += sizeof_cur_arg;
		}
		num_spec++;
	}

	err = 0;
out:
	if (err)
		bpf_bprintf_cleanup(data);
	return err;
}

BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
	   const void *, args, u32, data_len)
{
	struct bpf_bprintf_data data = {
		.get_bin_args	= true,
	};
	int err, num_args;

	if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !args))
		return -EINVAL;
	num_args = data_len / 8;

	/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
	 * can safely give an unbounded size.
	 */
	err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data);
	if (err < 0)
		return err;

	err = bstr_printf(str, str_size, fmt, data.bin_args);

	bpf_bprintf_cleanup(&data);

	return err + 1;
}

const struct bpf_func_proto bpf_snprintf_proto = {
	.func		= bpf_snprintf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL | MEM_WRITE,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_CONST_STR,
	.arg4_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
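
/* Example (sketch): the verifier passes the varargs as an array of u64,
 * which is what the data_len % 8 check above enforces:
 *
 *	char out[64];
 *	u64 args[] = { 1234, (u64)(long)"foo" };
 *
 *	bpf_snprintf(out, sizeof(out), "pid=%d comm=%s", args, sizeof(args));
 *
 * The "+ 1" on the return value mirrors snprintf() semantics: the result
 * counts the bytes that would have been written, including the final NUL.
 */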

static void *map_key_from_value(struct bpf_map *map, void *value, u32 *arr_idx)
{
	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
		struct bpf_array *array = container_of(map, struct bpf_array, map);

		*arr_idx = ((char *)value - array->value) / array->elem_size;
		return arr_idx;
	}
	return (void *)value - round_up(map->key_size, 8);
}
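
/* Element layout assumed by map_key_from_value() for non-array maps
 * (e.g. hash maps): the key is stored directly in front of the value,
 * padded to 8 bytes:
 *
 *	| key, round_up(key_size, 8) bytes | value ... |
 *	^ returned key pointer             ^ value
 *
 * Arrays store no key, so the index is recomputed from the value's
 * offset within the array and returned via *arr_idx instead.
 */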

enum bpf_async_type {
	BPF_ASYNC_TYPE_TIMER = 0,
	BPF_ASYNC_TYPE_WQ,
};

enum bpf_async_op {
	BPF_ASYNC_START,
	BPF_ASYNC_CANCEL
};

struct bpf_async_cmd {
	struct llist_node node;
	u64 nsec;
	u32 mode;
	enum bpf_async_op op;
};

struct bpf_async_cb {
	struct bpf_map *map;
	struct bpf_prog *prog;
	void __rcu *callback_fn;
	void *value;
	struct rcu_head rcu;
	u64 flags;
	struct irq_work worker;
	refcount_t refcnt;
	enum bpf_async_type type;
	struct llist_head async_cmds;
};

/* BPF map elements can contain 'struct bpf_timer'.
 * Such a map owns all of its BPF timers.
 * 'struct bpf_timer' is allocated as part of map element allocation
 * and it's zero initialized.
 * That space is used to keep 'struct bpf_async_kern'.
 * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and
 * remembers the 'struct bpf_map *' pointer it's part of.
 * bpf_timer_set_callback() increments prog refcnt and assigns bpf callback_fn.
 * bpf_timer_start() arms the timer.
 * If the user space reference to a map goes to zero at this point,
 * the ops->map_release_uref callback is responsible for cancelling the timers,
 * freeing their memory, and decrementing the progs' refcnts.
 * bpf_timer_cancel() cancels the timer and decrements prog's refcnt.
 * Inner maps can contain bpf timers as well. ops->map_release_uref is
 * freeing the timers when the inner map is replaced or deleted by user space.
 */
struct bpf_hrtimer {
	struct bpf_async_cb cb;
	struct hrtimer timer;
	atomic_t cancelling;
};

struct bpf_work {
	struct bpf_async_cb cb;
	struct work_struct work;
};

/* the actual struct hidden inside uapi struct bpf_timer and bpf_wq */
struct bpf_async_kern {
	union {
		struct bpf_async_cb *cb;
		struct bpf_hrtimer *timer;
		struct bpf_work *work;
	};
} __attribute__((aligned(8)));

static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);

static void bpf_async_refcount_put(struct bpf_async_cb *cb);

static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
{
	struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
	struct bpf_map *map = t->cb.map;
	void *value = t->cb.value;
	bpf_callback_t callback_fn;
	void *key;
	u32 idx;

	BTF_TYPE_EMIT(struct bpf_timer);
	callback_fn = rcu_dereference_check(t->cb.callback_fn, rcu_read_lock_bh_held());
	if (!callback_fn)
		goto out;

	/* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
	 * cannot be preempted by another bpf_timer_cb() on the same cpu.
	 * Remember the timer this callback is servicing to prevent
	 * deadlock if callback_fn() calls bpf_timer_cancel() or
	 * bpf_map_delete_elem() on the same timer.
	 */
	this_cpu_write(hrtimer_running, t);

	key = map_key_from_value(map, value, &idx);

	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
	/* The verifier checked that return value is zero. */

	this_cpu_write(hrtimer_running, NULL);
out:
	return HRTIMER_NORESTART;
}

static void bpf_wq_work(struct work_struct *work)
{
	struct bpf_work *w = container_of(work, struct bpf_work, work);
	struct bpf_async_cb *cb = &w->cb;
	struct bpf_map *map = cb->map;
	bpf_callback_t callback_fn;
	void *value = cb->value;
	void *key;
	u32 idx;

	BTF_TYPE_EMIT(struct bpf_wq);

	callback_fn = READ_ONCE(cb->callback_fn);
	if (!callback_fn)
		return;

	key = map_key_from_value(map, value, &idx);

	rcu_read_lock_trace();
	migrate_disable();

	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);

	migrate_enable();
	rcu_read_unlock_trace();
}

static void bpf_async_cb_rcu_free(struct rcu_head *rcu)
{
	struct bpf_async_cb *cb = container_of(rcu, struct bpf_async_cb, rcu);

	/*
	 * Drop the last reference to prog only after RCU GP, as set_callback()
	 * may race with cancel_and_free()
	 */
	if (cb->prog)
		bpf_prog_put(cb->prog);

	kfree_nolock(cb);
}

/* Callback from call_rcu_tasks_trace, chains to call_rcu for final free */
static void bpf_async_cb_rcu_tasks_trace_free(struct rcu_head *rcu)
{
	struct bpf_async_cb *cb = container_of(rcu, struct bpf_async_cb, rcu);
	struct bpf_hrtimer *t = container_of(cb, struct bpf_hrtimer, cb);
	struct bpf_work *w = container_of(cb, struct bpf_work, cb);
	bool retry = false;

	/*
	 * bpf_async_cancel_and_free() tried to cancel timer/wq, but it
	 * could have raced with timer/wq_start. Now refcnt is zero and
	 * srcu/rcu GP completed. Cancel timer/wq again.
	 */
	switch (cb->type) {
	case BPF_ASYNC_TYPE_TIMER:
		if (hrtimer_try_to_cancel(&t->timer) < 0)
			retry = true;
		break;
	case BPF_ASYNC_TYPE_WQ:
		if (!cancel_work(&w->work) && work_busy(&w->work))
			retry = true;
		break;
	}
	if (retry) {
		/*
		 * hrtimer or wq callback may still be running. It must be
		 * in rcu_tasks_trace or rcu CS, so wait for GP again.
		 * It won't retry forever, since refcnt zero prevents all
		 * operations on timer/wq.
		 */
		call_rcu_tasks_trace(&cb->rcu, bpf_async_cb_rcu_tasks_trace_free);
		return;
	}

	/* rcu_trace_implies_rcu_gp() is true and will remain so */
	bpf_async_cb_rcu_free(rcu);
}

static void worker_for_call_rcu(struct irq_work *work)
{
	struct bpf_async_cb *cb = container_of(work, struct bpf_async_cb, worker);

	call_rcu_tasks_trace(&cb->rcu, bpf_async_cb_rcu_tasks_trace_free);
}

static void bpf_async_refcount_put(struct bpf_async_cb *cb)
{
	if (!refcount_dec_and_test(&cb->refcnt))
		return;

	if (irqs_disabled()) {
		cb->worker = IRQ_WORK_INIT(worker_for_call_rcu);
		irq_work_queue(&cb->worker);
	} else {
		call_rcu_tasks_trace(&cb->rcu, bpf_async_cb_rcu_tasks_trace_free);
	}
}

static void bpf_async_cancel_and_free(struct bpf_async_kern *async);
static void bpf_async_irq_worker(struct irq_work *work);

static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags,
			    enum bpf_async_type type)
{
	struct bpf_async_cb *cb, *old_cb;
	struct bpf_hrtimer *t;
	struct bpf_work *w;
	clockid_t clockid;
	size_t size;

	switch (type) {
	case BPF_ASYNC_TYPE_TIMER:
		size = sizeof(struct bpf_hrtimer);
		break;
	case BPF_ASYNC_TYPE_WQ:
		size = sizeof(struct bpf_work);
		break;
	default:
		return -EINVAL;
	}

	old_cb = READ_ONCE(async->cb);
	if (old_cb)
		return -EBUSY;

	cb = bpf_map_kmalloc_nolock(map, size, 0, map->numa_node);
	if (!cb)
		return -ENOMEM;

	switch (type) {
	case BPF_ASYNC_TYPE_TIMER:
		clockid = flags & (MAX_CLOCKS - 1);
		t = (struct bpf_hrtimer *)cb;

		atomic_set(&t->cancelling, 0);
		hrtimer_setup(&t->timer, bpf_timer_cb, clockid, HRTIMER_MODE_REL_SOFT);
		cb->value = (void *)async - map->record->timer_off;
		break;
	case BPF_ASYNC_TYPE_WQ:
		w = (struct bpf_work *)cb;

		INIT_WORK(&w->work, bpf_wq_work);
		cb->value = (void *)async - map->record->wq_off;
		break;
	}
	cb->map = map;
	cb->prog = NULL;
	cb->flags = flags;
	cb->worker = IRQ_WORK_INIT(bpf_async_irq_worker);
	init_llist_head(&cb->async_cmds);
	refcount_set(&cb->refcnt, 1); /* map's reference */
	cb->type = type;
	rcu_assign_pointer(cb->callback_fn, NULL);

	old_cb = cmpxchg(&async->cb, NULL, cb);
	if (old_cb) {
		/* Lost the race to initialize this bpf_async_kern, drop the allocated object */
		kfree_nolock(cb);
		return -EBUSY;
	}
	/* Guarantee the ordering between async->cb and map->usercnt. So
	 * when there are concurrent uref release and bpf timer init, either
	 * bpf_timer_cancel_and_free() called by uref release reads a non-NULL
	 * timer or atomic64_read() below returns a zero usercnt.
	 */
	smp_mb();
	if (!atomic64_read(&map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs.
		 */
		bpf_async_cancel_and_free(async);
		return -EPERM;
	}

	return 0;
}

BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map,
	   u64, flags)
{
	clock_t clockid = flags & (MAX_CLOCKS - 1);

	BUILD_BUG_ON(MAX_CLOCKS != 16);
	BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_timer));
	BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_timer));

	if (flags >= MAX_CLOCKS ||
	    /* similar to timerfd except _ALARM variants are not supported */
	    (clockid != CLOCK_MONOTONIC &&
	     clockid != CLOCK_REALTIME &&
	     clockid != CLOCK_BOOTTIME))
		return -EINVAL;

	return __bpf_async_init(timer, map, flags, BPF_ASYNC_TYPE_TIMER);
}

static const struct bpf_func_proto bpf_timer_init_proto = {
	.func		= bpf_timer_init,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
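
/* Typical BPF-program-side lifecycle for the timer helpers (a sketch; the
 * callback and map element are illustrative assumptions):
 *
 *	static int timer_cb(void *map, int *key, struct bpf_timer *timer)
 *	{
 *		return 0;	// the verifier requires a zero return
 *	}
 *
 *	struct bpf_timer *timer = ...pointer into a map element...;
 *
 *	bpf_timer_init(timer, &map, CLOCK_MONOTONIC);
 *	bpf_timer_set_callback(timer, timer_cb);
 *	bpf_timer_start(timer, 1000000, 0);	// nsecs: fires in 1ms
 */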

static int bpf_async_update_prog_callback(struct bpf_async_cb *cb,
					  struct bpf_prog *prog,
					  void *callback_fn)
{
	struct bpf_prog *prev;

	/* Acquire a guard reference on prog to prevent it from being freed during the loop */
	if (prog) {
		prog = bpf_prog_inc_not_zero(prog);
		if (IS_ERR(prog))
			return PTR_ERR(prog);
	}

	do {
		if (prog)
			prog = bpf_prog_inc_not_zero(prog);
		prev = xchg(&cb->prog, prog);
		rcu_assign_pointer(cb->callback_fn, callback_fn);

		/*
		 * Release the previous prog and make sure that if another CPU
		 * is contending to set bpf_prog, references are not leaked, as
		 * each iteration acquires and releases one reference.
		 */
		if (prev)
			bpf_prog_put(prev);

	} while (READ_ONCE(cb->prog) != prog ||
		 (void __force *)READ_ONCE(cb->callback_fn) != callback_fn);

	if (prog)
		bpf_prog_put(prog);

	return 0;
}

static DEFINE_PER_CPU(struct bpf_async_cb *, async_cb_running);

static int bpf_async_schedule_op(struct bpf_async_cb *cb, enum bpf_async_op op,
				 u64 nsec, u32 timer_mode)
{
	/*
	 * Do not schedule another operation on this cpu if it's in irq_work
	 * callback that is processing async_cmds queue. Otherwise the following
	 * loop is possible:
	 * bpf_timer_start() -> bpf_async_schedule_op() -> irq_work_queue().
	 * irqrestore -> bpf_async_irq_worker() -> tracepoint -> bpf_timer_start().
	 */
	if (this_cpu_read(async_cb_running) == cb) {
		bpf_async_refcount_put(cb);
		return -EDEADLK;
	}

	struct bpf_async_cmd *cmd = kmalloc_nolock(sizeof(*cmd), 0, NUMA_NO_NODE);

	if (!cmd) {
		bpf_async_refcount_put(cb);
		return -ENOMEM;
	}
	init_llist_node(&cmd->node);
	cmd->nsec = nsec;
	cmd->mode = timer_mode;
	cmd->op = op;
	if (llist_add(&cmd->node, &cb->async_cmds))
		irq_work_queue(&cb->worker);
	return 0;
}

static int __bpf_async_set_callback(struct bpf_async_kern *async, void *callback_fn,
				    struct bpf_prog *prog)
{
	struct bpf_async_cb *cb;

	cb = READ_ONCE(async->cb);
	if (!cb)
		return -EINVAL;

	return bpf_async_update_prog_callback(cb, prog, callback_fn);
}

BPF_CALL_3(bpf_timer_set_callback, struct bpf_async_kern *, timer, void *, callback_fn,
	   struct bpf_prog_aux *, aux)
{
	return __bpf_async_set_callback(timer, callback_fn, aux->prog);
}

static const struct bpf_func_proto bpf_timer_set_callback_proto = {
	.func		= bpf_timer_set_callback,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_PTR_TO_FUNC,
};

static bool defer_timer_wq_op(void)
{
	return in_hardirq() || irqs_disabled();
}

BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, async, u64, nsecs, u64, flags)
{
	struct bpf_hrtimer *t;
	u32 mode;

	if (flags & ~(BPF_F_TIMER_ABS | BPF_F_TIMER_CPU_PIN))
		return -EINVAL;

	t = READ_ONCE(async->timer);
	if (!t || !READ_ONCE(t->cb.prog))
		return -EINVAL;

	if (flags & BPF_F_TIMER_ABS)
		mode = HRTIMER_MODE_ABS_SOFT;
	else
		mode = HRTIMER_MODE_REL_SOFT;

	if (flags & BPF_F_TIMER_CPU_PIN)
		mode |= HRTIMER_MODE_PINNED;

	/*
	 * bpf_async_cancel_and_free() could have dropped refcnt to zero. In
	 * such case BPF progs are not allowed to arm the timer to prevent UAF.
	 */
	if (!refcount_inc_not_zero(&t->cb.refcnt))
		return -ENOENT;

	if (!defer_timer_wq_op()) {
		hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
		bpf_async_refcount_put(&t->cb);
		return 0;
	} else {
		return bpf_async_schedule_op(&t->cb, BPF_ASYNC_START, nsecs, mode);
	}
}

static const struct bpf_func_proto bpf_timer_start_proto = {
	.func		= bpf_timer_start,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, async)
{
	struct bpf_hrtimer *t, *cur_t;
	bool inc = false;
	int ret = 0;

	if (defer_timer_wq_op())
		return -EOPNOTSUPP;

	t = READ_ONCE(async->timer);
	if (!t)
		return -EINVAL;

	cur_t = this_cpu_read(hrtimer_running);
	if (cur_t == t) {
		/* If bpf callback_fn is trying to bpf_timer_cancel()
		 * its own timer the hrtimer_cancel() will deadlock
		 * since it waits for callback_fn to finish.
		 */
		return -EDEADLK;
	}

	/* Only account in-flight cancellations when invoked from a timer
	 * callback, since we want to avoid waiting only if other _callbacks_
	 * are waiting on us, to avoid introducing lockups. Non-callback paths
	 * are ok, since nobody would synchronously wait for their completion.
	 */
	if (!cur_t)
		goto drop;
	atomic_inc(&t->cancelling);
	/* Need full barrier after relaxed atomic_inc */
	smp_mb__after_atomic();
	inc = true;
	if (atomic_read(&cur_t->cancelling)) {
		/* We're cancelling timer t, while some other timer callback is
		 * attempting to cancel us. In such a case, it might be possible
		 * that timer t belongs to the other callback, or some other
		 * callback waiting upon it (creating transitive dependencies
		 * upon us), and we will enter a deadlock if we continue
		 * cancelling and waiting for it synchronously, since it might
		 * do the same. Bail!
		 */
		atomic_dec(&t->cancelling);
		return -EDEADLK;
	}
drop:
	bpf_async_update_prog_callback(&t->cb, NULL, NULL);
	/* Cancel the timer and wait for associated callback to finish
	 * if it was running.
	 */
	ret = hrtimer_cancel(&t->timer);
	if (inc)
		atomic_dec(&t->cancelling);
	return ret;
}

static const struct bpf_func_proto bpf_timer_cancel_proto = {
	.func		= bpf_timer_cancel,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
};

static void bpf_async_process_op(struct bpf_async_cb *cb, u32 op,
				 u64 timer_nsec, u32 timer_mode)
{
	switch (cb->type) {
	case BPF_ASYNC_TYPE_TIMER: {
		struct bpf_hrtimer *t = container_of(cb, struct bpf_hrtimer, cb);

		switch (op) {
		case BPF_ASYNC_START:
			hrtimer_start(&t->timer, ns_to_ktime(timer_nsec), timer_mode);
			break;
		case BPF_ASYNC_CANCEL:
			hrtimer_try_to_cancel(&t->timer);
			break;
		}
		break;
	}
	case BPF_ASYNC_TYPE_WQ: {
		struct bpf_work *w = container_of(cb, struct bpf_work, cb);

		switch (op) {
		case BPF_ASYNC_START:
			schedule_work(&w->work);
			break;
		case BPF_ASYNC_CANCEL:
			cancel_work(&w->work);
			break;
		}
		break;
	}
	}
	bpf_async_refcount_put(cb);
}

static void bpf_async_irq_worker(struct irq_work *work)
{
	struct bpf_async_cb *cb = container_of(work, struct bpf_async_cb, worker);
	struct llist_node *pos, *n, *list;

	list = llist_del_all(&cb->async_cmds);
	if (!list)
		return;

	list = llist_reverse_order(list);
	this_cpu_write(async_cb_running, cb);
	llist_for_each_safe(pos, n, list) {
		struct bpf_async_cmd *cmd;

		cmd = container_of(pos, struct bpf_async_cmd, node);
		bpf_async_process_op(cb, cmd->op, cmd->nsec, cmd->mode);
		kfree_nolock(cmd);
	}
	this_cpu_write(async_cb_running, NULL);
}

static void bpf_async_cancel_and_free(struct bpf_async_kern *async)
{
	struct bpf_async_cb *cb;

	if (!READ_ONCE(async->cb))
		return;

	cb = xchg(&async->cb, NULL);
	if (!cb)
		return;

	bpf_async_update_prog_callback(cb, NULL, NULL);
	/*
	 * No refcount_inc_not_zero(&cb->refcnt) here. Dropping the last
	 * refcnt. Either synchronously or asynchronously in irq_work.
	 */

	if (!defer_timer_wq_op()) {
		bpf_async_process_op(cb, BPF_ASYNC_CANCEL, 0, 0);
	} else {
		(void)bpf_async_schedule_op(cb, BPF_ASYNC_CANCEL, 0, 0);
		/*
		 * bpf_async_schedule_op() either enqueues the allocated cmd into
		 * the llist or fails with ENOMEM and drops the last refcnt.
		 * This is unlikely, but safe, since the bpf_async_cb_rcu_tasks_trace_free()
		 * callback will do an additional timer/wq cancel due to races anyway.
		 */
	}
}

/*
 * This function is called by map_delete/update_elem for individual element and
 * by ops->map_release_uref when the user space reference to a map reaches zero.
 */
void bpf_timer_cancel_and_free(void *val)
{
	bpf_async_cancel_and_free(val);
}

/*
 * This function is called by map_delete/update_elem for individual element and
 * by ops->map_release_uref when the user space reference to a map reaches zero.
 */
void bpf_wq_cancel_and_free(void *val)
{
	bpf_async_cancel_and_free(val);
}

BPF_CALL_2(bpf_kptr_xchg, void *, dst, void *, ptr)
{
	unsigned long *kptr = dst;

	/* This helper may be inlined by verifier. */
	return xchg(kptr, (unsigned long)ptr);
}

/* Unlike other PTR_TO_BTF_ID helpers the btf_id in bpf_kptr_xchg()
 * helper is determined dynamically by the verifier. Use BPF_PTR_POISON to
 * denote type that verifier will determine.
 */
static const struct bpf_func_proto bpf_kptr_xchg_proto = {
	.func		= bpf_kptr_xchg,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_BTF_ID_OR_NULL,
	.ret_btf_id	= BPF_PTR_POISON,
	.arg1_type	= ARG_KPTR_XCHG_DEST,
	.arg2_type	= ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE,
	.arg2_btf_id	= BPF_PTR_POISON,
};

struct bpf_dynptr_file_impl {
	struct freader freader;
	/* 64 bit offset and size overriding 32 bit ones in bpf_dynptr_kern */
	u64 offset;
	u64 size;
};

/* Since the upper 8 bits of dynptr->size are reserved, the
 * maximum supported size is 2^24 - 1.
 */
#define DYNPTR_MAX_SIZE	((1UL << 24) - 1)
#define DYNPTR_TYPE_SHIFT	28
#define DYNPTR_SIZE_MASK	0xFFFFFF
#define DYNPTR_RDONLY_BIT	BIT(31)
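
/* Resulting layout of the 32-bit bpf_dynptr_kern "size" word (an
 * illustration of the masks above):
 *
 *	bit 31  | bits 30-28 | bits 27-24 | bits 23-0
 *	rdonly  | type       | unused     | size
 */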

bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
{
	return ptr->size & DYNPTR_RDONLY_BIT;
}

void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
{
	ptr->size |= DYNPTR_RDONLY_BIT;
}

static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type)
{
	ptr->size |= type << DYNPTR_TYPE_SHIFT;
}

static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
{
	return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT;
}

u64 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
{
	if (bpf_dynptr_get_type(ptr) == BPF_DYNPTR_TYPE_FILE) {
		struct bpf_dynptr_file_impl *df = ptr->data;

		return df->size;
	}

	return ptr->size & DYNPTR_SIZE_MASK;
}

static void bpf_dynptr_advance_offset(struct bpf_dynptr_kern *ptr, u64 off)
{
	if (bpf_dynptr_get_type(ptr) == BPF_DYNPTR_TYPE_FILE) {
		struct bpf_dynptr_file_impl *df = ptr->data;

		df->offset += off;
		return;
	}
	ptr->offset += off;
}

static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u64 new_size)
{
	u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK;

	if (bpf_dynptr_get_type(ptr) == BPF_DYNPTR_TYPE_FILE) {
		struct bpf_dynptr_file_impl *df = ptr->data;

		df->size = new_size;
		return;
	}
	ptr->size = (u32)new_size | metadata;
}

int bpf_dynptr_check_size(u64 size)
{
	return size > DYNPTR_MAX_SIZE ? -E2BIG : 0;
}

static int bpf_file_fetch_bytes(struct bpf_dynptr_file_impl *df, u64 offset, void *buf, u64 len)
{
	const void *ptr;

	if (!buf)
		return -EINVAL;

	df->freader.buf = buf;
	df->freader.buf_sz = len;
	ptr = freader_fetch(&df->freader, offset + df->offset, len);
	if (!ptr)
		return df->freader.err;

	if (ptr != buf) /* Force copying into the buffer */
		memcpy(buf, ptr, len);

	return 0;
}

void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
		     enum bpf_dynptr_type type, u32 offset, u32 size)
{
	ptr->data = data;
	ptr->offset = offset;
	ptr->size = size;
	bpf_dynptr_set_type(ptr, type);
}

void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
{
	memset(ptr, 0, sizeof(*ptr));
}

BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u64, size, u64, flags, struct bpf_dynptr_kern *, ptr)
{
	int err;

	BTF_TYPE_EMIT(struct bpf_dynptr);

	err = bpf_dynptr_check_size(size);
	if (err)
		goto error;

	/* flags is currently unsupported */
	if (flags) {
		err = -EINVAL;
		goto error;
	}

	bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size);

	return 0;

error:
	bpf_dynptr_set_null(ptr);
	return err;
}

static const struct bpf_func_proto bpf_dynptr_from_mem_proto = {
	.func		= bpf_dynptr_from_mem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT | MEM_WRITE,
};
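
/* Example (sketch): creating a dynptr over plain BPF-program memory and
 * reading through the generic dynptr API:
 *
 *	char buf[16];
 *	struct bpf_dynptr dptr;
 *	char dst[8];
 *
 *	if (!bpf_dynptr_from_mem(buf, sizeof(buf), 0, &dptr))
 *		bpf_dynptr_read(dst, sizeof(dst), &dptr, 8, 0);
 *
 * On failure the dynptr is zeroed via bpf_dynptr_set_null(), so a later
 * read/write sees !src->data and fails cleanly with -EINVAL.
 */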
1880
__bpf_dynptr_read(void * dst,u64 len,const struct bpf_dynptr_kern * src,u64 offset,u64 flags)1881 static int __bpf_dynptr_read(void *dst, u64 len, const struct bpf_dynptr_kern *src,
1882 u64 offset, u64 flags)
1883 {
1884 enum bpf_dynptr_type type;
1885 int err;
1886
1887 if (!src->data || flags)
1888 return -EINVAL;
1889
1890 err = bpf_dynptr_check_off_len(src, offset, len);
1891 if (err)
1892 return err;
1893
1894 type = bpf_dynptr_get_type(src);
1895
1896 switch (type) {
1897 case BPF_DYNPTR_TYPE_LOCAL:
1898 case BPF_DYNPTR_TYPE_RINGBUF:
1899 /* Source and destination may possibly overlap, hence use memmove to
1900 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptrs
1901 * pointing to overlapping PTR_TO_MAP_VALUE regions.
1902 */
1903 memmove(dst, src->data + src->offset + offset, len);
1904 return 0;
1905 case BPF_DYNPTR_TYPE_SKB:
1906 return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len);
1907 case BPF_DYNPTR_TYPE_XDP:
1908 return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len);
1909 case BPF_DYNPTR_TYPE_SKB_META:
1910 memmove(dst, bpf_skb_meta_pointer(src->data, src->offset + offset), len);
1911 return 0;
1912 case BPF_DYNPTR_TYPE_FILE:
1913 return bpf_file_fetch_bytes(src->data, offset, dst, len);
1914 default:
1915 WARN_ONCE(true, "bpf_dynptr_read: unknown dynptr type %d\n", type);
1916 return -EFAULT;
1917 }
1918 }
1919
1920 BPF_CALL_5(bpf_dynptr_read, void *, dst, u64, len, const struct bpf_dynptr_kern *, src,
1921 u64, offset, u64, flags)
1922 {
1923 return __bpf_dynptr_read(dst, len, src, offset, flags);
1924 }
1925
1926 static const struct bpf_func_proto bpf_dynptr_read_proto = {
1927 .func = bpf_dynptr_read,
1928 .gpl_only = false,
1929 .ret_type = RET_INTEGER,
1930 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
1931 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
1932 .arg3_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1933 .arg4_type = ARG_ANYTHING,
1934 .arg5_type = ARG_ANYTHING,
1935 };
1936
1937 int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u64 offset, void *src,
1938 u64 len, u64 flags)
1939 {
1940 enum bpf_dynptr_type type;
1941 int err;
1942
1943 if (!dst->data || __bpf_dynptr_is_rdonly(dst))
1944 return -EINVAL;
1945
1946 err = bpf_dynptr_check_off_len(dst, offset, len);
1947 if (err)
1948 return err;
1949
1950 type = bpf_dynptr_get_type(dst);
1951
1952 switch (type) {
1953 case BPF_DYNPTR_TYPE_LOCAL:
1954 case BPF_DYNPTR_TYPE_RINGBUF:
1955 if (flags)
1956 return -EINVAL;
1957 /* Source and destination may possibly overlap, hence use memmove to
1958 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptrs
1959 * pointing to overlapping PTR_TO_MAP_VALUE regions.
1960 */
1961 memmove(dst->data + dst->offset + offset, src, len);
1962 return 0;
1963 case BPF_DYNPTR_TYPE_SKB:
1964 return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len,
1965 flags);
1966 case BPF_DYNPTR_TYPE_XDP:
1967 if (flags)
1968 return -EINVAL;
1969 return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len);
1970 case BPF_DYNPTR_TYPE_SKB_META:
1971 return __bpf_skb_meta_store_bytes(dst->data, dst->offset + offset, src,
1972 len, flags);
1973 default:
1974 WARN_ONCE(true, "bpf_dynptr_write: unknown dynptr type %d\n", type);
1975 return -EFAULT;
1976 }
1977 }
1978
1979 BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u64, offset, void *, src,
1980 u64, len, u64, flags)
1981 {
1982 return __bpf_dynptr_write(dst, offset, src, len, flags);
1983 }
1984
1985 static const struct bpf_func_proto bpf_dynptr_write_proto = {
1986 .func = bpf_dynptr_write,
1987 .gpl_only = false,
1988 .ret_type = RET_INTEGER,
1989 .arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1990 .arg2_type = ARG_ANYTHING,
1991 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
1992 .arg4_type = ARG_CONST_SIZE_OR_ZERO,
1993 .arg5_type = ARG_ANYTHING,
1994 };
1995
1996 BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u64, offset, u64, len)
1997 {
1998 enum bpf_dynptr_type type;
1999 int err;
2000
2001 if (!ptr->data)
2002 return 0;
2003
2004 err = bpf_dynptr_check_off_len(ptr, offset, len);
2005 if (err)
2006 return 0;
2007
2008 if (__bpf_dynptr_is_rdonly(ptr))
2009 return 0;
2010
2011 type = bpf_dynptr_get_type(ptr);
2012
2013 switch (type) {
2014 case BPF_DYNPTR_TYPE_LOCAL:
2015 case BPF_DYNPTR_TYPE_RINGBUF:
2016 return (unsigned long)(ptr->data + ptr->offset + offset);
2017 case BPF_DYNPTR_TYPE_SKB:
2018 case BPF_DYNPTR_TYPE_XDP:
2019 case BPF_DYNPTR_TYPE_SKB_META:
2020 /* skb, xdp and skb_meta dynptrs should use bpf_dynptr_slice / bpf_dynptr_slice_rdwr */
2021 return 0;
2022 default:
2023 WARN_ONCE(true, "bpf_dynptr_data: unknown dynptr type %d\n", type);
2024 return 0;
2025 }
2026 }
2027
2028 static const struct bpf_func_proto bpf_dynptr_data_proto = {
2029 .func = bpf_dynptr_data,
2030 .gpl_only = false,
2031 .ret_type = RET_PTR_TO_DYNPTR_MEM_OR_NULL,
2032 .arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY,
2033 .arg2_type = ARG_ANYTHING,
2034 .arg3_type = ARG_CONST_ALLOC_SIZE_OR_ZERO,
2035 };
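
/* Illustrative sketch (not part of this file): how a BPF program might chain
 * the four dynptr helpers above. The global buffer, section name and program
 * name are invented for the example; error handling is abbreviated.
 *
 *	char buf[64];			// global, i.e. backed by a map value
 *
 *	SEC("fentry/do_sys_openat2")
 *	int dynptr_example(void *ctx)
 *	{
 *		struct bpf_dynptr ptr;
 *		__u32 val = 42, out;
 *		void *data;
 *
 *		if (bpf_dynptr_from_mem(buf, sizeof(buf), 0, &ptr))
 *			return 0;
 *		bpf_dynptr_write(&ptr, 0, &val, sizeof(val), 0);	// bounds-checked
 *		bpf_dynptr_read(&out, sizeof(out), &ptr, 0, 0);
 *		data = bpf_dynptr_data(&ptr, 0, 8);	// direct pointer, or NULL
 *		return data ? 0 : 1;
 *	}
 */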
2036
2037 const struct bpf_func_proto bpf_get_current_task_proto __weak;
2038 const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
2039 const struct bpf_func_proto bpf_probe_read_user_proto __weak;
2040 const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
2041 const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
2042 const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
2043 const struct bpf_func_proto bpf_task_pt_regs_proto __weak;
2044 const struct bpf_func_proto bpf_perf_event_read_proto __weak;
2045 const struct bpf_func_proto bpf_send_signal_proto __weak;
2046 const struct bpf_func_proto bpf_send_signal_thread_proto __weak;
2047 const struct bpf_func_proto bpf_get_task_stack_sleepable_proto __weak;
2048 const struct bpf_func_proto bpf_get_task_stack_proto __weak;
2049 const struct bpf_func_proto bpf_get_branch_snapshot_proto __weak;
2050
2051 const struct bpf_func_proto *
2052 bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2053 {
2054 switch (func_id) {
2055 case BPF_FUNC_map_lookup_elem:
2056 return &bpf_map_lookup_elem_proto;
2057 case BPF_FUNC_map_update_elem:
2058 return &bpf_map_update_elem_proto;
2059 case BPF_FUNC_map_delete_elem:
2060 return &bpf_map_delete_elem_proto;
2061 case BPF_FUNC_map_push_elem:
2062 return &bpf_map_push_elem_proto;
2063 case BPF_FUNC_map_pop_elem:
2064 return &bpf_map_pop_elem_proto;
2065 case BPF_FUNC_map_peek_elem:
2066 return &bpf_map_peek_elem_proto;
2067 case BPF_FUNC_map_lookup_percpu_elem:
2068 return &bpf_map_lookup_percpu_elem_proto;
2069 case BPF_FUNC_get_prandom_u32:
2070 return &bpf_get_prandom_u32_proto;
2071 case BPF_FUNC_get_smp_processor_id:
2072 return &bpf_get_raw_smp_processor_id_proto;
2073 case BPF_FUNC_get_numa_node_id:
2074 return &bpf_get_numa_node_id_proto;
2075 case BPF_FUNC_tail_call:
2076 return &bpf_tail_call_proto;
2077 case BPF_FUNC_ktime_get_ns:
2078 return &bpf_ktime_get_ns_proto;
2079 case BPF_FUNC_ktime_get_boot_ns:
2080 return &bpf_ktime_get_boot_ns_proto;
2081 case BPF_FUNC_ktime_get_tai_ns:
2082 return &bpf_ktime_get_tai_ns_proto;
2083 case BPF_FUNC_ringbuf_output:
2084 return &bpf_ringbuf_output_proto;
2085 case BPF_FUNC_ringbuf_reserve:
2086 return &bpf_ringbuf_reserve_proto;
2087 case BPF_FUNC_ringbuf_submit:
2088 return &bpf_ringbuf_submit_proto;
2089 case BPF_FUNC_ringbuf_discard:
2090 return &bpf_ringbuf_discard_proto;
2091 case BPF_FUNC_ringbuf_query:
2092 return &bpf_ringbuf_query_proto;
2093 case BPF_FUNC_strncmp:
2094 return &bpf_strncmp_proto;
2095 case BPF_FUNC_strtol:
2096 return &bpf_strtol_proto;
2097 case BPF_FUNC_strtoul:
2098 return &bpf_strtoul_proto;
2099 case BPF_FUNC_get_current_pid_tgid:
2100 return &bpf_get_current_pid_tgid_proto;
2101 case BPF_FUNC_get_ns_current_pid_tgid:
2102 return &bpf_get_ns_current_pid_tgid_proto;
2103 case BPF_FUNC_get_current_uid_gid:
2104 return &bpf_get_current_uid_gid_proto;
2105 default:
2106 break;
2107 }
2108
2109 if (!bpf_token_capable(prog->aux->token, CAP_BPF))
2110 return NULL;
2111
2112 switch (func_id) {
2113 case BPF_FUNC_spin_lock:
2114 return &bpf_spin_lock_proto;
2115 case BPF_FUNC_spin_unlock:
2116 return &bpf_spin_unlock_proto;
2117 case BPF_FUNC_jiffies64:
2118 return &bpf_jiffies64_proto;
2119 case BPF_FUNC_per_cpu_ptr:
2120 return &bpf_per_cpu_ptr_proto;
2121 case BPF_FUNC_this_cpu_ptr:
2122 return &bpf_this_cpu_ptr_proto;
2123 case BPF_FUNC_timer_init:
2124 return &bpf_timer_init_proto;
2125 case BPF_FUNC_timer_set_callback:
2126 return &bpf_timer_set_callback_proto;
2127 case BPF_FUNC_timer_start:
2128 return &bpf_timer_start_proto;
2129 case BPF_FUNC_timer_cancel:
2130 return &bpf_timer_cancel_proto;
2131 case BPF_FUNC_kptr_xchg:
2132 return &bpf_kptr_xchg_proto;
2133 case BPF_FUNC_for_each_map_elem:
2134 return &bpf_for_each_map_elem_proto;
2135 case BPF_FUNC_loop:
2136 return &bpf_loop_proto;
2137 case BPF_FUNC_user_ringbuf_drain:
2138 return &bpf_user_ringbuf_drain_proto;
2139 case BPF_FUNC_ringbuf_reserve_dynptr:
2140 return &bpf_ringbuf_reserve_dynptr_proto;
2141 case BPF_FUNC_ringbuf_submit_dynptr:
2142 return &bpf_ringbuf_submit_dynptr_proto;
2143 case BPF_FUNC_ringbuf_discard_dynptr:
2144 return &bpf_ringbuf_discard_dynptr_proto;
2145 case BPF_FUNC_dynptr_from_mem:
2146 return &bpf_dynptr_from_mem_proto;
2147 case BPF_FUNC_dynptr_read:
2148 return &bpf_dynptr_read_proto;
2149 case BPF_FUNC_dynptr_write:
2150 return &bpf_dynptr_write_proto;
2151 case BPF_FUNC_dynptr_data:
2152 return &bpf_dynptr_data_proto;
2153 #ifdef CONFIG_CGROUPS
2154 case BPF_FUNC_cgrp_storage_get:
2155 return &bpf_cgrp_storage_get_proto;
2156 case BPF_FUNC_cgrp_storage_delete:
2157 return &bpf_cgrp_storage_delete_proto;
2158 case BPF_FUNC_get_current_cgroup_id:
2159 return &bpf_get_current_cgroup_id_proto;
2160 case BPF_FUNC_get_current_ancestor_cgroup_id:
2161 return &bpf_get_current_ancestor_cgroup_id_proto;
2162 case BPF_FUNC_current_task_under_cgroup:
2163 return &bpf_current_task_under_cgroup_proto;
2164 #endif
2165 #ifdef CONFIG_CGROUP_NET_CLASSID
2166 case BPF_FUNC_get_cgroup_classid:
2167 return &bpf_get_cgroup_classid_curr_proto;
2168 #endif
2169 case BPF_FUNC_task_storage_get:
2170 return &bpf_task_storage_get_proto;
2171 case BPF_FUNC_task_storage_delete:
2172 return &bpf_task_storage_delete_proto;
2173 default:
2174 break;
2175 }
2176
2177 if (!bpf_token_capable(prog->aux->token, CAP_PERFMON))
2178 return NULL;
2179
2180 switch (func_id) {
2181 case BPF_FUNC_trace_printk:
2182 return bpf_get_trace_printk_proto();
2183 case BPF_FUNC_get_current_task:
2184 return &bpf_get_current_task_proto;
2185 case BPF_FUNC_get_current_task_btf:
2186 return &bpf_get_current_task_btf_proto;
2187 case BPF_FUNC_get_current_comm:
2188 return &bpf_get_current_comm_proto;
2189 case BPF_FUNC_probe_read_user:
2190 return &bpf_probe_read_user_proto;
2191 case BPF_FUNC_probe_read_kernel:
2192 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
2193 NULL : &bpf_probe_read_kernel_proto;
2194 case BPF_FUNC_probe_read_user_str:
2195 return &bpf_probe_read_user_str_proto;
2196 case BPF_FUNC_probe_read_kernel_str:
2197 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
2198 NULL : &bpf_probe_read_kernel_str_proto;
2199 case BPF_FUNC_copy_from_user:
2200 return &bpf_copy_from_user_proto;
2201 case BPF_FUNC_copy_from_user_task:
2202 return &bpf_copy_from_user_task_proto;
2203 case BPF_FUNC_snprintf_btf:
2204 return &bpf_snprintf_btf_proto;
2205 case BPF_FUNC_snprintf:
2206 return &bpf_snprintf_proto;
2207 case BPF_FUNC_task_pt_regs:
2208 return &bpf_task_pt_regs_proto;
2209 case BPF_FUNC_trace_vprintk:
2210 return bpf_get_trace_vprintk_proto();
2211 case BPF_FUNC_perf_event_read_value:
2212 return bpf_get_perf_event_read_value_proto();
2213 case BPF_FUNC_perf_event_read:
2214 return &bpf_perf_event_read_proto;
2215 case BPF_FUNC_send_signal:
2216 return &bpf_send_signal_proto;
2217 case BPF_FUNC_send_signal_thread:
2218 return &bpf_send_signal_thread_proto;
2219 case BPF_FUNC_get_task_stack:
2220 return prog->sleepable ? &bpf_get_task_stack_sleepable_proto
2221 : &bpf_get_task_stack_proto;
2222 case BPF_FUNC_get_branch_snapshot:
2223 return &bpf_get_branch_snapshot_proto;
2224 case BPF_FUNC_find_vma:
2225 return &bpf_find_vma_proto;
2226 default:
2227 return NULL;
2228 }
2229 }
2230 EXPORT_SYMBOL_GPL(bpf_base_func_proto);
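
/* Illustrative sketch (not part of this file): a subsystem's get_func_proto()
 * callback typically handles its own helpers first and then falls back to
 * bpf_base_func_proto() for the generic helpers above. The "foo" names are
 * invented.
 *
 *	static const struct bpf_func_proto *
 *	foo_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_get_smp_processor_id:
 *			return &foo_specialized_proto;	// subsystem override
 *		default:
 *			return bpf_base_func_proto(func_id, prog);
 *		}
 *	}
 */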
2231
2232 void bpf_list_head_free(const struct btf_field *field, void *list_head,
2233 struct bpf_spin_lock *spin_lock)
2234 {
2235 struct list_head *head = list_head, *orig_head = list_head;
2236
2237 BUILD_BUG_ON(sizeof(struct list_head) > sizeof(struct bpf_list_head));
2238 BUILD_BUG_ON(__alignof__(struct list_head) > __alignof__(struct bpf_list_head));
2239
2240 /* Do the actual list draining outside the lock to not hold the lock for
2241 * too long, and also prevent deadlocks if tracing programs end up
2242 * executing on entry/exit of functions called inside the critical
2243 * section, and end up doing map ops that call bpf_list_head_free for
2244 * the same map value again.
2245 */
2246 __bpf_spin_lock_irqsave(spin_lock);
2247 if (!head->next || list_empty(head))
2248 goto unlock;
2249 head = head->next;
2250 unlock:
2251 INIT_LIST_HEAD(orig_head);
2252 __bpf_spin_unlock_irqrestore(spin_lock);
2253
2254 while (head != orig_head) {
2255 void *obj = head;
2256
2257 obj -= field->graph_root.node_offset;
2258 head = head->next;
2259 /* The contained type can also have resources, including a
2260 * bpf_list_head which needs to be freed.
2261 */
2262 __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
2263 }
2264 }
2265
2266 /* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are
2267 * 'rb_node *', so field name of rb_node within containing struct is not
2268 * needed.
2269 *
2270 * Since bpf_rb_tree's node type has a corresponding struct btf_field with
2271 * graph_root.node_offset, it's not necessary to know field name
2272 * or type of node struct
2273 */
2274 #define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \
2275 for (pos = rb_first_postorder(root); \
2276 pos && ({ n = rb_next_postorder(pos); 1; }); \
2277 pos = n)
2278
2279 void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
2280 struct bpf_spin_lock *spin_lock)
2281 {
2282 struct rb_root_cached orig_root, *root = rb_root;
2283 struct rb_node *pos, *n;
2284 void *obj;
2285
2286 BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root));
2287 BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root));
2288
2289 __bpf_spin_lock_irqsave(spin_lock);
2290 orig_root = *root;
2291 *root = RB_ROOT_CACHED;
2292 __bpf_spin_unlock_irqrestore(spin_lock);
2293
2294 bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) {
2295 obj = pos;
2296 obj -= field->graph_root.node_offset;
2297
2299 __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
2300 }
2301 }
2302
2303 __bpf_kfunc_start_defs();
2304
2305 __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
2306 {
2307 struct btf_struct_meta *meta = meta__ign;
2308 u64 size = local_type_id__k;
2309 void *p;
2310
2311 p = bpf_mem_alloc(&bpf_global_ma, size);
2312 if (!p)
2313 return NULL;
2314 if (meta)
2315 bpf_obj_init(meta->record, p);
2316 return p;
2317 }
2318
2319 __bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign)
2320 {
2321 u64 size = local_type_id__k;
2322
2323 /* The verifier has ensured that meta__ign must be NULL */
2324 return bpf_mem_alloc(&bpf_global_percpu_ma, size);
2325 }
2326
2327 /* Must be called under migrate_disable(), as required by bpf_mem_free */
2328 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu)
2329 {
2330 struct bpf_mem_alloc *ma;
2331
2332 if (rec && rec->refcount_off >= 0 &&
2333 !refcount_dec_and_test((refcount_t *)(p + rec->refcount_off))) {
2334 /* Object is refcounted and refcount_dec didn't result in 0
2335 * refcount. Return without freeing the object
2336 */
2337 return;
2338 }
2339
2340 if (rec)
2341 bpf_obj_free_fields(rec, p);
2342
2343 if (percpu)
2344 ma = &bpf_global_percpu_ma;
2345 else
2346 ma = &bpf_global_ma;
2347 bpf_mem_free_rcu(ma, p);
2348 }
2349
2350 __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
2351 {
2352 struct btf_struct_meta *meta = meta__ign;
2353 void *p = p__alloc;
2354
2355 __bpf_obj_drop_impl(p, meta ? meta->record : NULL, false);
2356 }
2357
2358 __bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign)
2359 {
2360 /* The verifier has ensured that meta__ign must be NULL */
2361 bpf_mem_free_rcu(&bpf_global_percpu_ma, p__alloc);
2362 }
2363
2364 __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign)
2365 {
2366 struct btf_struct_meta *meta = meta__ign;
2367 struct bpf_refcount *ref;
2368
2369 /* Could just cast directly to refcount_t *, but need some code using
2370 * bpf_refcount type so that it is emitted in vmlinux BTF
2371 */
2372 ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off);
2373 if (!refcount_inc_not_zero((refcount_t *)ref))
2374 return NULL;
2375
2376 /* Verifier strips KF_RET_NULL if input is owned ref, see is_kfunc_ret_null
2377 * in verifier.c
2378 */
2379 return (void *)p__refcounted_kptr;
2380 }
2381
2382 static int __bpf_list_add(struct bpf_list_node_kern *node,
2383 struct bpf_list_head *head,
2384 bool tail, struct btf_record *rec, u64 off)
2385 {
2386 struct list_head *n = &node->list_head, *h = (void *)head;
2387
2388 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
2389 * called on its fields, so init here
2390 */
2391 if (unlikely(!h->next))
2392 INIT_LIST_HEAD(h);
2393
2394 /* node->owner != NULL implies !list_empty(n), no need to separately
2395 * check the latter
2396 */
2397 if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
2398 /* Only called from BPF prog, no need to migrate_disable */
2399 __bpf_obj_drop_impl((void *)n - off, rec, false);
2400 return -EINVAL;
2401 }
2402
2403 tail ? list_add_tail(n, h) : list_add(n, h);
2404 WRITE_ONCE(node->owner, head);
2405
2406 return 0;
2407 }
2408
2409 __bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head,
2410 struct bpf_list_node *node,
2411 void *meta__ign, u64 off)
2412 {
2413 struct bpf_list_node_kern *n = (void *)node;
2414 struct btf_struct_meta *meta = meta__ign;
2415
2416 return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off);
2417 }
2418
2419 __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
2420 struct bpf_list_node *node,
2421 void *meta__ign, u64 off)
2422 {
2423 struct bpf_list_node_kern *n = (void *)node;
2424 struct btf_struct_meta *meta = meta__ign;
2425
2426 return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off);
2427 }
2428
2429 static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail)
2430 {
2431 struct list_head *n, *h = (void *)head;
2432 struct bpf_list_node_kern *node;
2433
2434 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
2435 * called on its fields, so init here
2436 */
2437 if (unlikely(!h->next))
2438 INIT_LIST_HEAD(h);
2439 if (list_empty(h))
2440 return NULL;
2441
2442 n = tail ? h->prev : h->next;
2443 node = container_of(n, struct bpf_list_node_kern, list_head);
2444 if (WARN_ON_ONCE(READ_ONCE(node->owner) != head))
2445 return NULL;
2446
2447 list_del_init(n);
2448 WRITE_ONCE(node->owner, NULL);
2449 return (struct bpf_list_node *)n;
2450 }
2451
2452 __bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
2453 {
2454 return __bpf_list_del(head, false);
2455 }
2456
2457 __bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
2458 {
2459 return __bpf_list_del(head, true);
2460 }
2461
2462 __bpf_kfunc struct bpf_list_node *bpf_list_front(struct bpf_list_head *head)
2463 {
2464 struct list_head *h = (struct list_head *)head;
2465
2466 if (list_empty(h) || unlikely(!h->next))
2467 return NULL;
2468
2469 return (struct bpf_list_node *)h->next;
2470 }
2471
2472 __bpf_kfunc struct bpf_list_node *bpf_list_back(struct bpf_list_head *head)
2473 {
2474 struct list_head *h = (struct list_head *)head;
2475
2476 if (list_empty(h) || unlikely(!h->next))
2477 return NULL;
2478
2479 return (struct bpf_list_node *)h->prev;
2480 }
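
/* Illustrative sketch (not part of this file): BPF-side usage of the list
 * kfuncs above via the bpf_experimental.h wrappers, following the selftests'
 * private()/__contains() conventions. All names are invented; error handling
 * is abbreviated.
 *
 *	struct elem {
 *		long data;
 *		struct bpf_list_node node;
 *	};
 *
 *	private(A) struct bpf_spin_lock glock;
 *	private(A) struct bpf_list_head ghead __contains(elem, node);
 *
 *	struct elem *e = bpf_obj_new(typeof(*e));
 *	struct bpf_list_node *n;
 *
 *	if (!e)
 *		return 0;
 *	bpf_spin_lock(&glock);
 *	bpf_list_push_back(&ghead, &e->node);	// list now owns e
 *	n = bpf_list_pop_front(&ghead);		// take ownership back
 *	bpf_spin_unlock(&glock);
 *	if (n)
 *		bpf_obj_drop(container_of(n, struct elem, node));
 */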
2481
2482 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
2483 struct bpf_rb_node *node)
2484 {
2485 struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
2486 struct rb_root_cached *r = (struct rb_root_cached *)root;
2487 struct rb_node *n = &node_internal->rb_node;
2488
2489 /* node_internal->owner != root implies either RB_EMPTY_NODE(n) or
2490 * n is owned by some other tree. No need to check RB_EMPTY_NODE(n)
2491 */
2492 if (READ_ONCE(node_internal->owner) != root)
2493 return NULL;
2494
2495 rb_erase_cached(n, r);
2496 RB_CLEAR_NODE(n);
2497 WRITE_ONCE(node_internal->owner, NULL);
2498 return (struct bpf_rb_node *)n;
2499 }
2500
2501 /* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF
2502 * program
2503 */
2504 static int __bpf_rbtree_add(struct bpf_rb_root *root,
2505 struct bpf_rb_node_kern *node,
2506 void *less, struct btf_record *rec, u64 off)
2507 {
2508 struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node;
2509 struct rb_node *parent = NULL, *n = &node->rb_node;
2510 bpf_callback_t cb = (bpf_callback_t)less;
2511 bool leftmost = true;
2512
2513 /* node->owner != NULL implies !RB_EMPTY_NODE(n), no need to separately
2514 * check the latter
2515 */
2516 if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
2517 /* Only called from BPF prog, no need to migrate_disable */
2518 __bpf_obj_drop_impl((void *)n - off, rec, false);
2519 return -EINVAL;
2520 }
2521
2522 while (*link) {
2523 parent = *link;
2524 if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) {
2525 link = &parent->rb_left;
2526 } else {
2527 link = &parent->rb_right;
2528 leftmost = false;
2529 }
2530 }
2531
2532 rb_link_node(n, parent, link);
2533 rb_insert_color_cached(n, (struct rb_root_cached *)root, leftmost);
2534 WRITE_ONCE(node->owner, root);
2535 return 0;
2536 }
2537
2538 __bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
2539 bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
2540 void *meta__ign, u64 off)
2541 {
2542 struct btf_struct_meta *meta = meta__ign;
2543 struct bpf_rb_node_kern *n = (void *)node;
2544
2545 return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off);
2546 }
2547
2548 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
2549 {
2550 struct rb_root_cached *r = (struct rb_root_cached *)root;
2551
2552 return (struct bpf_rb_node *)rb_first_cached(r);
2553 }
2554
2555 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_root(struct bpf_rb_root *root)
2556 {
2557 struct rb_root_cached *r = (struct rb_root_cached *)root;
2558
2559 return (struct bpf_rb_node *)r->rb_root.rb_node;
2560 }
2561
2562 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_left(struct bpf_rb_root *root, struct bpf_rb_node *node)
2563 {
2564 struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
2565
2566 if (READ_ONCE(node_internal->owner) != root)
2567 return NULL;
2568
2569 return (struct bpf_rb_node *)node_internal->rb_node.rb_left;
2570 }
2571
2572 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_right(struct bpf_rb_root *root, struct bpf_rb_node *node)
2573 {
2574 struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
2575
2576 if (READ_ONCE(node_internal->owner) != root)
2577 return NULL;
2578
2579 return (struct bpf_rb_node *)node_internal->rb_node.rb_right;
2580 }
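
/* Illustrative sketch (not part of this file): BPF-side rbtree usage via the
 * bpf_experimental.h wrappers around the kfuncs above. All names are
 * invented; locking follows the same pattern as the list example.
 *
 *	struct node_data {
 *		long key;
 *		struct bpf_rb_node node;
 *	};
 *
 *	static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *	{
 *		struct node_data *na = container_of(a, struct node_data, node);
 *		struct node_data *nb = container_of(b, struct node_data, node);
 *
 *		return na->key < nb->key;
 *	}
 *	...
 *	bpf_spin_lock(&glock);
 *	bpf_rbtree_add(&groot, &n->node, less);		// tree now owns n
 *	res = bpf_rbtree_first(&groot);			// leftmost node or NULL
 *	bpf_spin_unlock(&glock);
 */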
2581
2582 /**
2583 * bpf_task_acquire - Acquire a reference to a task. A task acquired by this
2584 * kfunc that is not stored in a map as a kptr must be released by calling
2585 * bpf_task_release().
2586 * @p: The task on which a reference is being acquired.
2587 */
2588 __bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p)
2589 {
2590 if (refcount_inc_not_zero(&p->rcu_users))
2591 return p;
2592 return NULL;
2593 }
2594
2595 /**
2596 * bpf_task_release - Release the reference acquired on a task.
2597 * @p: The task on which a reference is being released.
2598 */
2599 __bpf_kfunc void bpf_task_release(struct task_struct *p)
2600 {
2601 put_task_struct_rcu_user(p);
2602 }
2603
2604 __bpf_kfunc void bpf_task_release_dtor(void *p)
2605 {
2606 put_task_struct_rcu_user(p);
2607 }
2608 CFI_NOSEAL(bpf_task_release_dtor);
2609
2610 #ifdef CONFIG_CGROUPS
2611 /**
2612 * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by
2613 * this kfunc that is not stored in a map as a kptr must be released by
2614 * calling bpf_cgroup_release().
2615 * @cgrp: The cgroup on which a reference is being acquired.
2616 */
2617 __bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
2618 {
2619 return cgroup_tryget(cgrp) ? cgrp : NULL;
2620 }
2621
2622 /**
2623 * bpf_cgroup_release - Release the reference acquired on a cgroup.
2624 * If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to
2625 * not be freed until the current grace period has ended, even if its refcount
2626 * drops to 0.
2627 * @cgrp: The cgroup on which a reference is being released.
2628 */
2629 __bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
2630 {
2631 cgroup_put(cgrp);
2632 }
2633
2634 __bpf_kfunc void bpf_cgroup_release_dtor(void *cgrp)
2635 {
2636 cgroup_put(cgrp);
2637 }
2638 CFI_NOSEAL(bpf_cgroup_release_dtor);
2639
2640 /**
2641 * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor
2642 * array. A cgroup returned by this kfunc that is not subsequently stored in a
2643 * map must be released by calling bpf_cgroup_release().
2644 * @cgrp: The cgroup for which we're performing a lookup.
2645 * @level: The level of ancestor to look up.
2646 */
2647 __bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
2648 {
2649 struct cgroup *ancestor;
2650
2651 if (level > cgrp->level || level < 0)
2652 return NULL;
2653
2654 /* cgrp's refcnt could be 0 here, but ancestors can still be accessed */
2655 ancestor = cgrp->ancestors[level];
2656 if (!cgroup_tryget(ancestor))
2657 return NULL;
2658 return ancestor;
2659 }
2660
2661 /**
2662 * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this
2663 * kfunc that is not subsequently stored in a map must be released by calling
2664 * bpf_cgroup_release().
2665 * @cgid: cgroup id.
2666 */
2667 __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid)
2668 {
2669 struct cgroup *cgrp;
2670
2671 cgrp = __cgroup_get_from_id(cgid);
2672 if (IS_ERR(cgrp))
2673 return NULL;
2674 return cgrp;
2675 }
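
/* Illustrative sketch: pairing the lookup above with the release kfunc; the
 * cgid value would come from the program's own context.
 *
 *	struct cgroup *cgrp = bpf_cgroup_from_id(cgid);
 *
 *	if (cgrp) {
 *		// ... inspect cgrp->level, cgrp->kn, etc. ...
 *		bpf_cgroup_release(cgrp);	// must pair with the acquire
 *	}
 */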
2676
2677 /**
2678 * bpf_task_under_cgroup - wrap task_under_cgroup_hierarchy() as a kfunc to test
2679 * a task's membership in a cgroup's ancestry.
2680 * @task: the task to be tested
2681 * @ancestor: possible ancestor of @task's cgroup
2682 *
2683 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
2684 * It follows all the same rules as cgroup_is_descendant, and only applies
2685 * to the default hierarchy.
2686 */
2687 __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
2688 struct cgroup *ancestor)
2689 {
2690 long ret;
2691
2692 rcu_read_lock();
2693 ret = task_under_cgroup_hierarchy(task, ancestor);
2694 rcu_read_unlock();
2695 return ret;
2696 }
2697
2698 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
2699 {
2700 struct bpf_array *array = container_of(map, struct bpf_array, map);
2701 struct cgroup *cgrp;
2702
2703 if (unlikely(idx >= array->map.max_entries))
2704 return -E2BIG;
2705
2706 cgrp = READ_ONCE(array->ptrs[idx]);
2707 if (unlikely(!cgrp))
2708 return -EAGAIN;
2709
2710 return task_under_cgroup_hierarchy(current, cgrp);
2711 }
2712
2713 const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
2714 .func = bpf_current_task_under_cgroup,
2715 .gpl_only = false,
2716 .ret_type = RET_INTEGER,
2717 .arg1_type = ARG_CONST_MAP_PTR,
2718 .arg2_type = ARG_ANYTHING,
2719 };
2720
2721 /**
2722 * bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a
2723 * specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its
2724 * hierarchy ID.
2725 * @task: The target task
2726 * @hierarchy_id: The ID of a cgroup1 hierarchy
2727 *
2728 * On success, the cgroup is returned. On failure, NULL is returned.
2729 */
2730 __bpf_kfunc struct cgroup *
2731 bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id)
2732 {
2733 struct cgroup *cgrp = task_get_cgroup1(task, hierarchy_id);
2734
2735 if (IS_ERR(cgrp))
2736 return NULL;
2737 return cgrp;
2738 }
2739 #endif /* CONFIG_CGROUPS */
2740
2741 /**
2742 * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up
2743 * in the root pid namespace idr. If a task is returned, it must either be
2744 * stored in a map, or released with bpf_task_release().
2745 * @pid: The pid of the task being looked up.
2746 */
2747 __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
2748 {
2749 struct task_struct *p;
2750
2751 rcu_read_lock();
2752 p = find_task_by_pid_ns(pid, &init_pid_ns);
2753 if (p)
2754 p = bpf_task_acquire(p);
2755 rcu_read_unlock();
2756
2757 return p;
2758 }
2759
2760 /**
2761 * bpf_task_from_vpid - Find a struct task_struct from its vpid by looking it up
2762 * in the pid namespace of the current task. If a task is returned, it must
2763 * either be stored in a map, or released with bpf_task_release().
2764 * @vpid: The vpid of the task being looked up.
2765 */
2766 __bpf_kfunc struct task_struct *bpf_task_from_vpid(s32 vpid)
2767 {
2768 struct task_struct *p;
2769
2770 rcu_read_lock();
2771 p = find_task_by_vpid(vpid);
2772 if (p)
2773 p = bpf_task_acquire(p);
2774 rcu_read_unlock();
2775
2776 return p;
2777 }
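
/* Illustrative sketch (not part of this file): looking a task up by pid and
 * releasing the acquired reference. The pid value is invented.
 *
 *	struct task_struct *t = bpf_task_from_pid(1);
 *
 *	if (t) {
 *		bpf_printk("comm: %s", t->comm);
 *		bpf_task_release(t);	// must pair with the lookup above
 *	}
 */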
2778
2779 /**
2780 * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data.
2781 * @p: The dynptr whose data slice to retrieve
2782 * @offset: Offset into the dynptr
2783 * @buffer__nullable: User-provided buffer to copy contents into. May be NULL
2784 * @buffer__szk: Size (in bytes) of the buffer if present. This is the
2785 * length of the requested slice. This must be a constant.
2786 *
2787 * For non-skb and non-xdp type dynptrs, there is no difference between
2788 * bpf_dynptr_slice and bpf_dynptr_data.
2789 *
2790 * If @buffer__nullable is NULL, the call will fail if the buffer was needed.
2791 *
2792 * If the intention is to write to the data slice, please use
2793 * bpf_dynptr_slice_rdwr.
2794 *
2795 * The user must check that the returned pointer is not null before using it.
2796 *
2797 * Please note that in the case of skb and xdp dynptrs, bpf_dynptr_slice
2798 * does not change the underlying packet data pointers, so a call to
2799 * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in
2800 * the bpf program.
2801 *
2802 * Return: NULL if the call failed (eg invalid dynptr), pointer to a read-only
2803 * data slice (can be either direct pointer to the data or a pointer to the user
2804 * provided buffer, with its contents containing the data, if unable to obtain
2805 * direct pointer)
2806 */
2807 __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr *p, u64 offset,
2808 void *buffer__nullable, u64 buffer__szk)
2809 {
2810 const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2811 enum bpf_dynptr_type type;
2812 u64 len = buffer__szk;
2813 int err;
2814
2815 if (!ptr->data)
2816 return NULL;
2817
2818 err = bpf_dynptr_check_off_len(ptr, offset, len);
2819 if (err)
2820 return NULL;
2821
2822 type = bpf_dynptr_get_type(ptr);
2823
2824 switch (type) {
2825 case BPF_DYNPTR_TYPE_LOCAL:
2826 case BPF_DYNPTR_TYPE_RINGBUF:
2827 return ptr->data + ptr->offset + offset;
2828 case BPF_DYNPTR_TYPE_SKB:
2829 if (buffer__nullable)
2830 return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer__nullable);
2831 else
2832 return skb_pointer_if_linear(ptr->data, ptr->offset + offset, len);
2833 case BPF_DYNPTR_TYPE_XDP:
2834 {
2835 void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len);
2836 if (!IS_ERR_OR_NULL(xdp_ptr))
2837 return xdp_ptr;
2838
2839 if (!buffer__nullable)
2840 return NULL;
2841 bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer__nullable, len, false);
2842 return buffer__nullable;
2843 }
2844 case BPF_DYNPTR_TYPE_SKB_META:
2845 return bpf_skb_meta_pointer(ptr->data, ptr->offset + offset);
2846 case BPF_DYNPTR_TYPE_FILE:
2847 err = bpf_file_fetch_bytes(ptr->data, offset, buffer__nullable, buffer__szk);
2848 return err ? NULL : buffer__nullable;
2849 default:
2850 WARN_ONCE(true, "unknown dynptr type %d\n", type);
2851 return NULL;
2852 }
2853 }
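
/* Illustrative sketch (not part of this file): reading a packet header
 * through bpf_dynptr_slice() with a stack bounce buffer, mirroring the
 * bpf_dynptr_slice_rdwr() example below. bpf_dynptr_from_skb() is the skb
 * dynptr constructor kfunc.
 *
 *	struct ethhdr ethbuf, *eth;
 *	struct bpf_dynptr ptr;
 *
 *	if (bpf_dynptr_from_skb(skb, 0, &ptr))
 *		return TC_ACT_SHOT;
 *	eth = bpf_dynptr_slice(&ptr, 0, &ethbuf, sizeof(ethbuf));
 *	if (!eth)
 *		return TC_ACT_SHOT;
 *	// eth points either into the linear skb data or at ethbuf;
 *	// it is read-only in both cases.
 */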
2854
2855 /**
2856 * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data.
2857 * @p: The dynptr whose data slice to retrieve
2858 * @offset: Offset into the dynptr
2859 * @buffer__nullable: User-provided buffer to copy contents into. May be NULL
2860 * @buffer__szk: Size (in bytes) of the buffer if present. This is the
2861 * length of the requested slice. This must be a constant.
2862 *
2863 * For non-skb and non-xdp type dynptrs, there is no difference between
2864 * bpf_dynptr_slice and bpf_dynptr_data.
2865 *
2866 * If @buffer__nullable is NULL, the call will fail if the buffer was needed.
2867 *
2868 * The returned pointer is writable and may point to either directly the dynptr
2869 * data at the requested offset or to the buffer if unable to obtain a direct
2870 * data pointer to (example: the requested slice is to the paged area of an skb
2871 * packet). In the case where the returned pointer is to the buffer, the user
2872 * is responsible for persisting writes through calling bpf_dynptr_write(). This
2873 * usually looks something like this pattern:
2874 *
2875 * struct eth_hdr *eth = bpf_dynptr_slice_rdwr(&dynptr, 0, buffer, sizeof(buffer));
2876 * if (!eth)
2877 * return TC_ACT_SHOT;
2878 *
2879 * // mutate eth header //
2880 *
2881 * if (eth == buffer)
2882 * bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0);
2883 *
2884 * Please note that, as in the example above, the user must check that the
2885 * returned pointer is not null before using it.
2886 *
2887 * Please also note that in the case of skb and xdp dynptrs, bpf_dynptr_slice_rdwr
2888 * does not change the underlying packet data pointers, so a call to
2889 * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in
2890 * the bpf program.
2891 *
2892 * Return: NULL if the call failed (eg invalid dynptr), pointer to a
2893 * data slice (can be either direct pointer to the data or a pointer to the user
2894 * provided buffer, with its contents containing the data, if unable to obtain
2895 * direct pointer)
2896 */
2897 __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u64 offset,
2898 void *buffer__nullable, u64 buffer__szk)
2899 {
2900 const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2901
2902 if (!ptr->data || __bpf_dynptr_is_rdonly(ptr))
2903 return NULL;
2904
2905 /* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice.
2906 *
2907 * For skb-type dynptrs, it is safe to write into the returned pointer
2908 * if the bpf program allows skb data writes. There are two possibilities
2909 * that may occur when calling bpf_dynptr_slice_rdwr:
2910 *
2911 * 1) The requested slice is in the head of the skb. In this case, the
2912 * returned pointer is directly to skb data, and if the skb is cloned, the
2913 * verifier will have uncloned it (see bpf_unclone_prologue()) already.
2914 * The pointer can be directly written into.
2915 *
2916 * 2) Some portion of the requested slice is in the paged buffer area.
2917 * In this case, the requested data will be copied out into the buffer
2918 * and the returned pointer will be a pointer to the buffer. The skb
2919 * will not be pulled. To persist the write, the user will need to call
2920 * bpf_dynptr_write(), which will pull the skb and commit the write.
2921 *
2922 * Similarly for xdp programs, if the requested slice is not across xdp
2923 * fragments, then a direct pointer will be returned, otherwise the data
2924 * will be copied out into the buffer and the user will need to call
2925 * bpf_dynptr_write() to commit changes.
2926 */
2927 return bpf_dynptr_slice(p, offset, buffer__nullable, buffer__szk);
2928 }
2929
2930 __bpf_kfunc int bpf_dynptr_adjust(const struct bpf_dynptr *p, u64 start, u64 end)
2931 {
2932 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2933 u64 size;
2934
2935 if (!ptr->data || start > end)
2936 return -EINVAL;
2937
2938 size = __bpf_dynptr_size(ptr);
2939
2940 if (start > size || end > size)
2941 return -ERANGE;
2942
2943 bpf_dynptr_advance_offset(ptr, start);
2944 bpf_dynptr_set_size(ptr, end - start);
2945
2946 return 0;
2947 }
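
/* Illustrative sketch: narrowing a dynptr view to bytes [4, 12) of the
 * underlying data.
 *
 *	if (!bpf_dynptr_adjust(&ptr, 4, 12))
 *		sz = bpf_dynptr_size(&ptr);	// now 8, offset advanced by 4
 */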
2948
2949 __bpf_kfunc bool bpf_dynptr_is_null(const struct bpf_dynptr *p)
2950 {
2951 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2952
2953 return !ptr->data;
2954 }
2955
2956 __bpf_kfunc bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *p)
2957 {
2958 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2959
2960 if (!ptr->data)
2961 return false;
2962
2963 return __bpf_dynptr_is_rdonly(ptr);
2964 }
2965
2966 __bpf_kfunc u64 bpf_dynptr_size(const struct bpf_dynptr *p)
2967 {
2968 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2969
2970 if (!ptr->data)
2971 return -EINVAL;
2972
2973 return __bpf_dynptr_size(ptr);
2974 }
2975
2976 __bpf_kfunc int bpf_dynptr_clone(const struct bpf_dynptr *p,
2977 struct bpf_dynptr *clone__uninit)
2978 {
2979 struct bpf_dynptr_kern *clone = (struct bpf_dynptr_kern *)clone__uninit;
2980 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2981
2982 if (!ptr->data) {
2983 bpf_dynptr_set_null(clone);
2984 return -EINVAL;
2985 }
2986
2987 *clone = *ptr;
2988
2989 return 0;
2990 }
2991
2992 /**
2993 * bpf_dynptr_copy() - Copy data from one dynptr to another.
2994 * @dst_ptr: Destination dynptr - where data should be copied to
2995 * @dst_off: Offset into the destination dynptr
2996 * @src_ptr: Source dynptr - where data should be copied from
2997 * @src_off: Offset into the source dynptr
2998 * @size: Length of the data to copy from source to destination
2999 *
3000 * Copies data from source dynptr to destination dynptr.
3001 * Returns 0 on success; negative error, otherwise.
3002 */
3003 __bpf_kfunc int bpf_dynptr_copy(struct bpf_dynptr *dst_ptr, u64 dst_off,
3004 struct bpf_dynptr *src_ptr, u64 src_off, u64 size)
3005 {
3006 struct bpf_dynptr_kern *dst = (struct bpf_dynptr_kern *)dst_ptr;
3007 struct bpf_dynptr_kern *src = (struct bpf_dynptr_kern *)src_ptr;
3008 void *src_slice, *dst_slice;
3009 char buf[256];
3010 u64 off;
3011
3012 src_slice = bpf_dynptr_slice(src_ptr, src_off, NULL, size);
3013 dst_slice = bpf_dynptr_slice_rdwr(dst_ptr, dst_off, NULL, size);
3014
3015 if (src_slice && dst_slice) {
3016 memmove(dst_slice, src_slice, size);
3017 return 0;
3018 }
3019
3020 if (src_slice)
3021 return __bpf_dynptr_write(dst, dst_off, src_slice, size, 0);
3022
3023 if (dst_slice)
3024 return __bpf_dynptr_read(dst_slice, size, src, src_off, 0);
3025
3026 if (bpf_dynptr_check_off_len(dst, dst_off, size) ||
3027 bpf_dynptr_check_off_len(src, src_off, size))
3028 return -E2BIG;
3029
3030 off = 0;
3031 while (off < size) {
3032 u64 chunk_sz = min_t(u64, sizeof(buf), size - off);
3033 int err;
3034
3035 err = __bpf_dynptr_read(buf, chunk_sz, src, src_off + off, 0);
3036 if (err)
3037 return err;
3038 err = __bpf_dynptr_write(dst, dst_off + off, buf, chunk_sz, 0);
3039 if (err)
3040 return err;
3041
3042 off += chunk_sz;
3043 }
3044 return 0;
3045 }
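
/* Illustrative sketch: copying 16 bytes from offset 0 of one dynptr to
 * offset 8 of another; this works across dynptr types via the chunked
 * fallback above.
 *
 *	err = bpf_dynptr_copy(&dst, 8, &src, 0, 16);
 */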
3046
3047 /**
3048 * bpf_dynptr_memset() - Fill dynptr memory with a constant byte.
3049 * @p: Destination dynptr - where data will be filled
3050 * @offset: Offset into the dynptr to start filling from
3051 * @size: Number of bytes to fill
3052 * @val: Constant byte to fill the memory with
3053 *
3054 * Fills the @size bytes of the memory area pointed to by @p
3055 * at @offset with the constant byte @val.
3056 * Returns 0 on success; negative error, otherwise.
3057 */
3058 __bpf_kfunc int bpf_dynptr_memset(struct bpf_dynptr *p, u64 offset, u64 size, u8 val)
3059 {
3060 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
3061 u64 chunk_sz, write_off;
3062 char buf[256];
3063 void *slice;
3064 int err;
3065
3066 slice = bpf_dynptr_slice_rdwr(p, offset, NULL, size);
3067 if (likely(slice)) {
3068 memset(slice, val, size);
3069 return 0;
3070 }
3071
3072 if (__bpf_dynptr_is_rdonly(ptr))
3073 return -EINVAL;
3074
3075 err = bpf_dynptr_check_off_len(ptr, offset, size);
3076 if (err)
3077 return err;
3078
3079 /* Non-linear data under the dynptr, write from a local buffer */
3080 chunk_sz = min_t(u64, sizeof(buf), size);
3081 memset(buf, val, chunk_sz);
3082
3083 for (write_off = 0; write_off < size; write_off += chunk_sz) {
3084 chunk_sz = min_t(u64, sizeof(buf), size - write_off);
3085 err = __bpf_dynptr_write(ptr, offset + write_off, buf, chunk_sz, 0);
3086 if (err)
3087 return err;
3088 }
3089
3090 return 0;
3091 }
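
/* Illustrative sketch: zeroing the first 32 bytes of a dynptr view.
 *
 *	err = bpf_dynptr_memset(&ptr, 0, 32, 0);
 */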
3092
3093 __bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
3094 {
3095 return obj;
3096 }
3097
3098 __bpf_kfunc void *bpf_rdonly_cast(const void *obj__ign, u32 btf_id__k)
3099 {
3100 return (void *)obj__ign;
3101 }
3102
3103 __bpf_kfunc void bpf_rcu_read_lock(void)
3104 {
3105 rcu_read_lock();
3106 }
3107
3108 __bpf_kfunc void bpf_rcu_read_unlock(void)
3109 {
3110 rcu_read_unlock();
3111 }
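
/* Illustrative sketch: bracketing access to an RCU-protected task field with
 * the kfuncs above, which is useful in sleepable programs where RCU is not
 * held by default.
 *
 *	bpf_rcu_read_lock();
 *	parent = task->real_parent;	// RCU-tagged pointer, safe to read here
 *	bpf_printk("ppid: %d", parent->pid);
 *	bpf_rcu_read_unlock();
 */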
3112
3113 struct bpf_throw_ctx {
3114 struct bpf_prog_aux *aux;
3115 u64 sp;
3116 u64 bp;
3117 int cnt;
3118 };
3119
3120 static bool bpf_stack_walker(void *cookie, u64 ip, u64 sp, u64 bp)
3121 {
3122 struct bpf_throw_ctx *ctx = cookie;
3123 struct bpf_prog *prog;
3124
3125 /*
3126 * The RCU read lock is held to safely traverse the latch tree, but we
3127 * don't need its protection when accessing the prog, since it has an
3128 * active stack frame on the current stack trace, and won't disappear.
3129 */
3130 rcu_read_lock();
3131 prog = bpf_prog_ksym_find(ip);
3132 rcu_read_unlock();
3133 if (!prog)
3134 return !ctx->cnt;
3135 ctx->cnt++;
3136 if (bpf_is_subprog(prog))
3137 return true;
3138 ctx->aux = prog->aux;
3139 ctx->sp = sp;
3140 ctx->bp = bp;
3141 return false;
3142 }
3143
3144 __bpf_kfunc void bpf_throw(u64 cookie)
3145 {
3146 struct bpf_throw_ctx ctx = {};
3147
3148 arch_bpf_stack_walk(bpf_stack_walker, &ctx);
3149 WARN_ON_ONCE(!ctx.aux);
3150 if (ctx.aux)
3151 WARN_ON_ONCE(!ctx.aux->exception_boundary);
3152 WARN_ON_ONCE(!ctx.bp);
3153 WARN_ON_ONCE(!ctx.cnt);
3154 /* Prevent KASAN false positives for CONFIG_KASAN_STACK by unpoisoning
3155 * deeper stack depths than ctx.sp as we do not return from bpf_throw,
3156 * which skips compiler generated instrumentation to do the same.
3157 */
3158 kasan_unpoison_task_stack_below((void *)(long)ctx.sp);
3159 ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp, 0, 0);
3160 WARN(1, "A call to BPF exception callback should never return\n");
3161 }
3162
3163 __bpf_kfunc int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags)
3164 {
3165 struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
3166 struct bpf_map *map = p__map;
3167
3168 BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_wq));
3169 BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_wq));
3170
3171 if (flags)
3172 return -EINVAL;
3173
3174 return __bpf_async_init(async, map, flags, BPF_ASYNC_TYPE_WQ);
3175 }
3176
3177 __bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags)
3178 {
3179 struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
3180 struct bpf_work *w;
3181
3182 if (flags)
3183 return -EINVAL;
3184
3185 w = READ_ONCE(async->work);
3186 if (!w || !READ_ONCE(w->cb.prog))
3187 return -EINVAL;
3188
3189 if (!refcount_inc_not_zero(&w->cb.refcnt))
3190 return -ENOENT;
3191
3192 if (!defer_timer_wq_op()) {
3193 schedule_work(&w->work);
3194 bpf_async_refcount_put(&w->cb);
3195 return 0;
3196 } else {
3197 return bpf_async_schedule_op(&w->cb, BPF_ASYNC_START, 0, 0);
3198 }
3199 }
3200
3201 __bpf_kfunc int bpf_wq_set_callback(struct bpf_wq *wq,
3202 int (callback_fn)(void *map, int *key, void *value),
3203 unsigned int flags,
3204 struct bpf_prog_aux *aux)
3205 {
3206 struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
3207
3208 if (flags)
3209 return -EINVAL;
3210
3211 return __bpf_async_set_callback(async, callback_fn, aux->prog);
3212 }
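
/* Illustrative sketch (not part of this file): BPF-side workqueue usage. The
 * bpf_wq must live in a map value; the BPF program passes three arguments to
 * the set-callback wrapper and the verifier supplies the prog aux argument.
 * Map and callback names are invented.
 *
 *	struct elem { struct bpf_wq wq; };
 *
 *	static int wq_cb(void *map, int *key, void *value)
 *	{
 *		return 0;	// runs later from workqueue context
 *	}
 *	...
 *	if (bpf_wq_init(&val->wq, &array_map, 0))
 *		return 0;
 *	if (bpf_wq_set_callback(&val->wq, wq_cb, 0))
 *		return 0;
 *	bpf_wq_start(&val->wq, 0);
 */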
3213
3214 __bpf_kfunc void bpf_preempt_disable(void)
3215 {
3216 preempt_disable();
3217 }
3218
3219 __bpf_kfunc void bpf_preempt_enable(void)
3220 {
3221 preempt_enable();
3222 }
3223
3224 struct bpf_iter_bits {
3225 __u64 __opaque[2];
3226 } __aligned(8);
3227
3228 #define BITS_ITER_NR_WORDS_MAX 511
3229
3230 struct bpf_iter_bits_kern {
3231 union {
3232 __u64 *bits;
3233 __u64 bits_copy;
3234 };
3235 int nr_bits;
3236 int bit;
3237 } __aligned(8);
3238
3239 /* On 64-bit hosts, unsigned long and u64 have the same size, so passing
3240 * a u64 pointer and an unsigned long pointer to find_next_bit() will
3241 * return the same result, as both point to the same 8-byte area.
3242 *
3243 * For 32-bit little-endian hosts, using a u64 pointer or unsigned long
3244 * pointer also makes no difference. This is because the first iterated
3245 * unsigned long is composed of bits 0-31 of the u64 and the second unsigned
3246 * long is composed of bits 32-63 of the u64.
3247 *
3248 * However, for 32-bit big-endian hosts, this is not the case. The first
3249 * iterated unsigned long will be bits 32-63 of the u64, so swap these two
3250 * ulong values within the u64.
3251 */
3252 static void swap_ulong_in_u64(u64 *bits, unsigned int nr)
3253 {
3254 #if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
3255 unsigned int i;
3256
3257 for (i = 0; i < nr; i++)
3258 bits[i] = (bits[i] >> 32) | ((u64)(u32)bits[i] << 32);
3259 #endif
3260 }
3261
3262 /**
3263 * bpf_iter_bits_new() - Initialize a new bits iterator for a given memory area
3264 * @it: The new bpf_iter_bits to be created
3265 * @unsafe_ptr__ign: A pointer pointing to a memory area to be iterated over
3266 * @nr_words: The size of the specified memory area, measured in 8-byte units.
3267 * The maximum value of @nr_words is @BITS_ITER_NR_WORDS_MAX. This limit may be
3268 * further reduced by the BPF memory allocator implementation.
3269 *
3270 * This function initializes a new bpf_iter_bits structure for iterating over
3271 * a memory area which is specified by the @unsafe_ptr__ign and @nr_words. It
3272 * copies the data of the memory area to the newly created bpf_iter_bits @it for
3273 * subsequent iteration operations.
3274 *
3275 * On success, 0 is returned. On failure, a negative error code is returned.
3276 */
3277 __bpf_kfunc int
3278 bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words)
3279 {
3280 struct bpf_iter_bits_kern *kit = (void *)it;
3281 u32 nr_bytes = nr_words * sizeof(u64);
3282 u32 nr_bits = BYTES_TO_BITS(nr_bytes);
3283 int err;
3284
3285 BUILD_BUG_ON(sizeof(struct bpf_iter_bits_kern) != sizeof(struct bpf_iter_bits));
3286 BUILD_BUG_ON(__alignof__(struct bpf_iter_bits_kern) !=
3287 __alignof__(struct bpf_iter_bits));
3288
3289 kit->nr_bits = 0;
3290 kit->bits_copy = 0;
3291 kit->bit = -1;
3292
3293 if (!unsafe_ptr__ign || !nr_words)
3294 return -EINVAL;
3295 if (nr_words > BITS_ITER_NR_WORDS_MAX)
3296 return -E2BIG;
3297
3298 /* Optimization for u64 mask */
3299 if (nr_bits == 64) {
3300 err = bpf_probe_read_kernel_common(&kit->bits_copy, nr_bytes, unsafe_ptr__ign);
3301 if (err)
3302 return -EFAULT;
3303
3304 swap_ulong_in_u64(&kit->bits_copy, nr_words);
3305
3306 kit->nr_bits = nr_bits;
3307 return 0;
3308 }
3309
3310 if (bpf_mem_alloc_check_size(false, nr_bytes))
3311 return -E2BIG;
3312
3313 /* Fallback to memalloc */
3314 kit->bits = bpf_mem_alloc(&bpf_global_ma, nr_bytes);
3315 if (!kit->bits)
3316 return -ENOMEM;
3317
3318 err = bpf_probe_read_kernel_common(kit->bits, nr_bytes, unsafe_ptr__ign);
3319 if (err) {
3320 bpf_mem_free(&bpf_global_ma, kit->bits);
3321 return err;
3322 }
3323
3324 swap_ulong_in_u64(kit->bits, nr_words);
3325
3326 kit->nr_bits = nr_bits;
3327 return 0;
3328 }
3329
3330 /**
3331 * bpf_iter_bits_next() - Get the next bit in a bpf_iter_bits
3332 * @it: The bpf_iter_bits to be checked
3333 *
3334 * This function returns a pointer to a number representing the index of the
3335 * next set bit in the bit area.
3336 *
3337 * If there are no further set bits, it returns NULL.
3338 */
3339 __bpf_kfunc int *bpf_iter_bits_next(struct bpf_iter_bits *it)
3340 {
3341 struct bpf_iter_bits_kern *kit = (void *)it;
3342 int bit = kit->bit, nr_bits = kit->nr_bits;
3343 const void *bits;
3344
3345 if (!nr_bits || bit >= nr_bits)
3346 return NULL;
3347
3348 bits = nr_bits == 64 ? &kit->bits_copy : kit->bits;
3349 bit = find_next_bit(bits, nr_bits, bit + 1);
3350 if (bit >= nr_bits) {
3351 kit->bit = bit;
3352 return NULL;
3353 }
3354
3355 kit->bit = bit;
3356 return &kit->bit;
3357 }
3358
3359 /**
3360 * bpf_iter_bits_destroy() - Destroy a bpf_iter_bits
3361 * @it: The bpf_iter_bits to be destroyed
3362 *
3363 * Destroy the resource associated with the bpf_iter_bits.
3364 */
3365 __bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it)
3366 {
3367 struct bpf_iter_bits_kern *kit = (void *)it;
3368
3369 if (kit->nr_bits <= 64)
3370 return;
3371 bpf_mem_free(&bpf_global_ma, kit->bits);
3372 }
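
/* Illustrative sketch (not part of this file): walking a 64-bit mask with the
 * bits iterator, using the bpf_for_each() convenience macro from the BPF
 * selftests headers.
 *
 *	u64 mask = 0b1010;
 *	int *bit, sum = 0;
 *
 *	bpf_for_each(bits, bit, &mask, 1)	// 1 = one 8-byte word
 *		sum += *bit;			// visits set-bit indexes 1, 3
 */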
3373
3374 /**
3375 * bpf_copy_from_user_str() - Copy a string from an unsafe user address
3376 * @dst: Destination address, in kernel space. This buffer must be
3377 * at least @dst__sz bytes long.
3378 * @dst__sz: Maximum number of bytes to copy, includes the trailing NUL.
3379 * @unsafe_ptr__ign: Source address, in user space.
3380 * @flags: The only supported flag is BPF_F_PAD_ZEROS
3381 *
3382 * Copies a NUL-terminated string from userspace to BPF space. If the user
3383 * string is too long, this will still ensure NUL termination in the @dst
3384 * buffer unless the buffer size is 0.
3385 *
3386 * If BPF_F_PAD_ZEROS flag is set, memset the tail of @dst to 0 on success and
3387 * memset all of @dst on failure.
3388 *
 * Return: The number of copied bytes on success, including the trailing NUL.
 * A negative error code on failure.
 */
3389 __bpf_kfunc int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void __user *unsafe_ptr__ign, u64 flags)
3390 {
3391 int ret;
3392
3393 if (unlikely(flags & ~BPF_F_PAD_ZEROS))
3394 return -EINVAL;
3395
3396 if (unlikely(!dst__sz))
3397 return 0;
3398
3399 ret = strncpy_from_user(dst, unsafe_ptr__ign, dst__sz - 1);
3400 if (ret < 0) {
3401 if (flags & BPF_F_PAD_ZEROS)
3402 memset((char *)dst, 0, dst__sz);
3403
3404 return ret;
3405 }
3406
3407 if (flags & BPF_F_PAD_ZEROS)
3408 memset((char *)dst + ret, 0, dst__sz - ret);
3409 else
3410 ((char *)dst)[ret] = '\0';
3411
3412 return ret + 1;
3413 }
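
/* Illustrative sketch: copying a user-supplied string into a fixed buffer,
 * zero-padding the tail. unsafe_user_ptr is an invented name.
 *
 *	char name[32];
 *	int n;
 *
 *	n = bpf_copy_from_user_str(name, sizeof(name), unsafe_user_ptr,
 *				   BPF_F_PAD_ZEROS);
 *	if (n < 0)
 *		return 0;	// n counts the copied bytes incl. the NUL
 */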
3414
3415 /**
3416 * bpf_copy_from_user_task_str() - Copy a string from a task's address space
3417 * @dst: Destination address, in kernel space. This buffer must be
3418 * at least @dst__sz bytes long.
3419 * @dst__sz: Maximum number of bytes to copy, includes the trailing NUL.
3420 * @unsafe_ptr__ign: Source address in the task's address space.
3421 * @tsk: The task whose address space will be used
3422 * @flags: The only supported flag is BPF_F_PAD_ZEROS
3423 *
3424 * Copies a NUL-terminated string from a task's address space to the @dst
3425 * buffer. If the user string is too long, this will still ensure NUL
3426 * termination in the @dst buffer unless the buffer size is 0.
3427 *
3428 * If BPF_F_PAD_ZEROS flag is set, memset the tail of @dst to 0 on success
3429 * and memset all of @dst on failure.
3430 *
3431 * Return: The number of copied bytes on success including the NUL terminator.
3432 * A negative error code on failure.
3433 */
3434 __bpf_kfunc int bpf_copy_from_user_task_str(void *dst, u32 dst__sz,
3435 const void __user *unsafe_ptr__ign,
3436 struct task_struct *tsk, u64 flags)
3437 {
3438 int ret;
3439
3440 if (unlikely(flags & ~BPF_F_PAD_ZEROS))
3441 return -EINVAL;
3442
3443 if (unlikely(dst__sz == 0))
3444 return 0;
3445
3446 ret = copy_remote_vm_str(tsk, (unsigned long)unsafe_ptr__ign, dst, dst__sz, 0);
3447 if (ret < 0) {
3448 if (flags & BPF_F_PAD_ZEROS)
3449 memset(dst, 0, dst__sz);
3450 return ret;
3451 }
3452
3453 if (flags & BPF_F_PAD_ZEROS)
3454 memset(dst + ret, 0, dst__sz - ret);
3455
3456 return ret + 1;
3457 }
3458
3459 /* Keep unsigned long in the prototype so that the kfunc is usable when emitted
3460 * to vmlinux.h in BPF programs directly, but note that while in a BPF prog, the
3461 * unsigned long always points to an 8-byte region on the stack; the kernel may
3462 * only read and write the 4 bytes on 32-bit.
3463 */
3464 __bpf_kfunc void bpf_local_irq_save(unsigned long *flags__irq_flag)
3465 {
3466 local_irq_save(*flags__irq_flag);
3467 }
3468
3469 __bpf_kfunc void bpf_local_irq_restore(unsigned long *flags__irq_flag)
3470 {
3471 local_irq_restore(*flags__irq_flag);
3472 }
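
/*
 * Example (editor's sketch, not kernel code): per the comment above, a BPF
 * program passes a stack variable of type unsigned long:
 *
 *	unsigned long flags;
 *
 *	bpf_local_irq_save(&flags);
 *	... short, non-sleepable critical section ...
 *	bpf_local_irq_restore(&flags);
 */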
3473
3474 __bpf_kfunc void __bpf_trap(void)
3475 {
3476 }
3477
3478 /*
3479 * Kfuncs for string operations.
3480 *
3481 * Since strings are not necessarily %NUL-terminated, we cannot directly call
3482 * the in-kernel implementations. Instead, we open-code them, using
3483 * __get_kernel_nofault() rather than plain dereferences to make them safe.
3484 */
3485
3486 static int __bpf_strncasecmp(const char *s1, const char *s2, bool ignore_case, size_t len)
3487 {
3488 char c1, c2;
3489 int i;
3490
3491 if (!copy_from_kernel_nofault_allowed(s1, 1) ||
3492 !copy_from_kernel_nofault_allowed(s2, 1)) {
3493 return -ERANGE;
3494 }
3495
3496 guard(pagefault)();
3497 for (i = 0; i < len && i < XATTR_SIZE_MAX; i++) {
3498 __get_kernel_nofault(&c1, s1, char, err_out);
3499 __get_kernel_nofault(&c2, s2, char, err_out);
3500 if (ignore_case) {
3501 c1 = tolower(c1);
3502 c2 = tolower(c2);
3503 }
3504 if (c1 != c2)
3505 return c1 < c2 ? -1 : 1;
3506 if (c1 == '\0')
3507 return 0;
3508 s1++;
3509 s2++;
3510 }
3511 return i == XATTR_SIZE_MAX ? -E2BIG : 0;
3512 err_out:
3513 return -EFAULT;
3514 }
3515
3516 /**
3517 * bpf_strcmp - Compare two strings
3518 * @s1__ign: One string
3519 * @s2__ign: Another string
3520 *
3521 * Return:
3522 * * %0 - Strings are equal
3523 * * %-1 - @s1__ign is smaller
3524 * * %1 - @s2__ign is smaller
3525 * * %-EFAULT - Cannot read one of the strings
3526 * * %-E2BIG - One of the strings is too large
3527 * * %-ERANGE - One of the strings is outside of kernel address space
3528 */
3529 __bpf_kfunc int bpf_strcmp(const char *s1__ign, const char *s2__ign)
3530 {
3531 return __bpf_strncasecmp(s1__ign, s2__ign, false, XATTR_SIZE_MAX);
3532 }
3533
3534 /**
3535 * bpf_strcasecmp - Compare two strings, ignoring the case of the characters
3536 * @s1__ign: One string
3537 * @s2__ign: Another string
3538 *
3539 * Return:
3540 * * %0 - Strings are equal
3541 * * %-1 - @s1__ign is smaller
3542 * * %1 - @s2__ign is smaller
3543 * * %-EFAULT - Cannot read one of the strings
3544 * * %-E2BIG - One of the strings is too large
3545 * * %-ERANGE - One of the strings is outside of kernel address space
3546 */
3547 __bpf_kfunc int bpf_strcasecmp(const char *s1__ign, const char *s2__ign)
3548 {
3549 return __bpf_strncasecmp(s1__ign, s2__ign, true, XATTR_SIZE_MAX);
3550 }
3551
3552 /**
3553 * bpf_strncasecmp - Compare two length-limited strings, ignoring case
3554 * @s1__ign: One string
3555 * @s2__ign: Another string
3556 * @len: The maximum number of characters to compare
3557 *
3558 * Return:
3559 * * %0 - Strings are equal
3560 * * %-1 - @s1__ign is smaller
3561 * * %1 - @s2__ign is smaller
3562 * * %-EFAULT - Cannot read one of the strings
3563 * * %-E2BIG - One of the strings is too large
3564 * * %-ERANGE - One of the strings is outside of kernel address space
3565 */
3566 __bpf_kfunc int bpf_strncasecmp(const char *s1__ign, const char *s2__ign, size_t len)
3567 {
3568 return __bpf_strncasecmp(s1__ign, s2__ign, true, len);
3569 }
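
/*
 * Example (editor's sketch, not kernel code): matching a kernel string
 * against a literal prefix, where "name" is an assumed string pointer the
 * program can pass to these kfuncs:
 *
 *	if (bpf_strncasecmp(name, "kworker", 7) == 0)
 *		... "name" starts with "kworker", ignoring case ...
 *
 * Callers that need the -1/0/1 ordering should first rule out the negative
 * errno returns (-EFAULT, -E2BIG, -ERANGE).
 */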
3570
3571 /**
3572 * bpf_strnchr - Find a character in a length-limited string
3573 * @s__ign: The string to be searched
3574 * @count: The number of characters to be searched
3575 * @c: The character to search for
3576 *
3577 * Note that the %NUL-terminator is considered part of the string, and can
3578 * be searched for.
3579 *
3580 * Return:
3581 * * >=0 - Index of the first occurrence of @c within @s__ign
3582 * * %-ENOENT - @c not found in the first @count characters of @s__ign
3583 * * %-EFAULT - Cannot read @s__ign
3584 * * %-E2BIG - @s__ign is too large
3585 * * %-ERANGE - @s__ign is outside of kernel address space
3586 */
3587 __bpf_kfunc int bpf_strnchr(const char *s__ign, size_t count, char c)
3588 {
3589 char sc;
3590 int i;
3591
3592 if (!copy_from_kernel_nofault_allowed(s__ign, 1))
3593 return -ERANGE;
3594
3595 guard(pagefault)();
3596 for (i = 0; i < count && i < XATTR_SIZE_MAX; i++) {
3597 __get_kernel_nofault(&sc, s__ign, char, err_out);
3598 if (sc == c)
3599 return i;
3600 if (sc == '\0')
3601 return -ENOENT;
3602 s__ign++;
3603 }
3604 return i == XATTR_SIZE_MAX ? -E2BIG : -ENOENT;
3605 err_out:
3606 return -EFAULT;
3607 }
3608
3609 /**
3610 * bpf_strchr - Find the first occurrence of a character in a string
3611 * @s__ign: The string to be searched
3612 * @c: The character to search for
3613 *
3614 * Note that the %NUL-terminator is considered part of the string, and can
3615 * be searched for.
3616 *
3617 * Return:
3618 * * >=0 - The index of the first occurrence of @c within @s__ign
3619 * * %-ENOENT - @c not found in @s__ign
3620 * * %-EFAULT - Cannot read @s__ign
3621 * * %-E2BIG - @s__ign is too large
3622 * * %-ERANGE - @s__ign is outside of kernel address space
3623 */
3624 __bpf_kfunc int bpf_strchr(const char *s__ign, char c)
3625 {
3626 return bpf_strnchr(s__ign, XATTR_SIZE_MAX, c);
3627 }
3628
3629 /**
3630 * bpf_strchrnul - Find and return a character in a string, or end of string
3631 * @s__ign: The string to be searched
3632 * @c: The character to search for
3633 *
3634 * Return:
3635 * * >=0 - Index of the first occurrence of @c within @s__ign or index of
3636 * the null byte at the end of @s__ign when @c is not found
3637 * * %-EFAULT - Cannot read @s__ign
3638 * * %-E2BIG - @s__ign is too large
3639 * * %-ERANGE - @s__ign is outside of kernel address space
3640 */
3641 __bpf_kfunc int bpf_strchrnul(const char *s__ign, char c)
3642 {
3643 char sc;
3644 int i;
3645
3646 if (!copy_from_kernel_nofault_allowed(s__ign, 1))
3647 return -ERANGE;
3648
3649 guard(pagefault)();
3650 for (i = 0; i < XATTR_SIZE_MAX; i++) {
3651 __get_kernel_nofault(&sc, s__ign, char, err_out);
3652 if (sc == '\0' || sc == c)
3653 return i;
3654 s__ign++;
3655 }
3656 return -E2BIG;
3657 err_out:
3658 return -EFAULT;
3659 }
3660
3661 /**
3662 * bpf_strrchr - Find the last occurrence of a character in a string
3663 * @s__ign: The string to be searched
3664 * @c: The character to search for
3665 *
3666 * Return:
3667 * * >=0 - Index of the last occurrence of @c within @s__ign
3668 * * %-ENOENT - @c not found in @s__ign
3669 * * %-EFAULT - Cannot read @s__ign
3670 * * %-E2BIG - @s__ign is too large
3671 * * %-ERANGE - @s__ign is outside of kernel address space
3672 */
3673 __bpf_kfunc int bpf_strrchr(const char *s__ign, int c)
3674 {
3675 char sc;
3676 int i, last = -ENOENT;
3677
3678 if (!copy_from_kernel_nofault_allowed(s__ign, 1))
3679 return -ERANGE;
3680
3681 guard(pagefault)();
3682 for (i = 0; i < XATTR_SIZE_MAX; i++) {
3683 __get_kernel_nofault(&sc, s__ign, char, err_out);
3684 if (sc == c)
3685 last = i;
3686 if (sc == '\0')
3687 return last;
3688 s__ign++;
3689 }
3690 return -E2BIG;
3691 err_out:
3692 return -EFAULT;
3693 }
3694
3695 /**
3696 * bpf_strnlen - Calculate the length of a length-limited string
3697 * @s__ign: The string
3698 * @count: The maximum number of characters to count
3699 *
3700 * Return:
3701 * * >=0 - The length of @s__ign, at most @count
3702 * * %-EFAULT - Cannot read @s__ign
3703 * * %-E2BIG - @s__ign is too large
3704 * * %-ERANGE - @s__ign is outside of kernel address space
3705 */
3706 __bpf_kfunc int bpf_strnlen(const char *s__ign, size_t count)
3707 {
3708 char c;
3709 int i;
3710
3711 if (!copy_from_kernel_nofault_allowed(s__ign, 1))
3712 return -ERANGE;
3713
3714 guard(pagefault)();
3715 for (i = 0; i < count && i < XATTR_SIZE_MAX; i++) {
3716 __get_kernel_nofault(&c, s__ign, char, err_out);
3717 if (c == '\0')
3718 return i;
3719 s__ign++;
3720 }
3721 return i == XATTR_SIZE_MAX ? -E2BIG : i;
3722 err_out:
3723 return -EFAULT;
3724 }
3725
3726 /**
3727 * bpf_strlen - Calculate the length of a string
3728 * @s__ign: The string
3729 *
3730 * Return:
3731 * * >=0 - The length of @s__ign
3732 * * %-EFAULT - Cannot read @s__ign
3733 * * %-E2BIG - @s__ign is too large
3734 * * %-ERANGE - @s__ign is outside of kernel address space
3735 */
3736 __bpf_kfunc int bpf_strlen(const char *s__ign)
3737 {
3738 return bpf_strnlen(s__ign, XATTR_SIZE_MAX);
3739 }
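
/*
 * Example (editor's sketch, not kernel code): locating the basename of a
 * path-like kernel string "path":
 *
 *	int i = bpf_strrchr(path, '/');
 *
 *	if (i >= 0)
 *		... basename starts at index i + 1 ...
 *	else if (i == -ENOENT)
 *		... no '/', path is already the basename ...
 */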
3740
3741 /**
3742 * bpf_strspn - Calculate the length of the initial substring of @s__ign which
3743 * only contains letters in @accept__ign
3744 * @s__ign: The string to be searched
3745 * @accept__ign: The string of accepted characters
3746 *
3747 * Return:
3748 * * >=0 - The length of the initial substring of @s__ign which only
3749 * contains letters from @accept__ign
3750 * * %-EFAULT - Cannot read one of the strings
3751 * * %-E2BIG - One of the strings is too large
3752 * * %-ERANGE - One of the strings is outside of kernel address space
3753 */
3754 __bpf_kfunc int bpf_strspn(const char *s__ign, const char *accept__ign)
3755 {
3756 char cs, ca;
3757 int i, j;
3758
3759 if (!copy_from_kernel_nofault_allowed(s__ign, 1) ||
3760 !copy_from_kernel_nofault_allowed(accept__ign, 1)) {
3761 return -ERANGE;
3762 }
3763
3764 guard(pagefault)();
3765 for (i = 0; i < XATTR_SIZE_MAX; i++) {
3766 __get_kernel_nofault(&cs, s__ign, char, err_out);
3767 if (cs == '\0')
3768 return i;
3769 for (j = 0; j < XATTR_SIZE_MAX; j++) {
3770 __get_kernel_nofault(&ca, accept__ign + j, char, err_out);
3771 if (cs == ca || ca == '\0')
3772 break;
3773 }
3774 if (j == XATTR_SIZE_MAX)
3775 return -E2BIG;
3776 if (ca == '\0')
3777 return i;
3778 s__ign++;
3779 }
3780 return -E2BIG;
3781 err_out:
3782 return -EFAULT;
3783 }
3784
3785 /**
3786 * bpf_strcspn - Calculate the length of the initial substring of @s__ign which
3787 * does not contain letters in @reject__ign
3788 * @s__ign: The string to be searched
3789 * @reject__ign: The string of rejected characters
3790 *
3791 * Return:
3792 * * >=0 - The length of the initial substring of @s__ign which does not
3793 * contain letters from @reject__ign
3794 * * %-EFAULT - Cannot read one of the strings
3795 * * %-E2BIG - One of the strings is too large
3796 * * %-ERANGE - One of the strings is outside of kernel address space
3797 */
3798 __bpf_kfunc int bpf_strcspn(const char *s__ign, const char *reject__ign)
3799 {
3800 char cs, cr;
3801 int i, j;
3802
3803 if (!copy_from_kernel_nofault_allowed(s__ign, 1) ||
3804 !copy_from_kernel_nofault_allowed(reject__ign, 1)) {
3805 return -ERANGE;
3806 }
3807
3808 guard(pagefault)();
3809 for (i = 0; i < XATTR_SIZE_MAX; i++) {
3810 __get_kernel_nofault(&cs, s__ign, char, err_out);
3811 if (cs == '\0')
3812 return i;
3813 for (j = 0; j < XATTR_SIZE_MAX; j++) {
3814 __get_kernel_nofault(&cr, reject__ign + j, char, err_out);
3815 if (cs == cr || cr == '\0')
3816 break;
3817 }
3818 if (j == XATTR_SIZE_MAX)
3819 return -E2BIG;
3820 if (cr != '\0')
3821 return i;
3822 s__ign++;
3823 }
3824 return -E2BIG;
3825 err_out:
3826 return -EFAULT;
3827 }
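
/*
 * Example (editor's sketch, not kernel code): skipping leading whitespace in
 * "line" and measuring the first token:
 *
 *	int skip = bpf_strspn(line, " \t");
 *	int tok_len = skip < 0 ? skip : bpf_strcspn(line + skip, " \t");
 */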
3828
3829 static int __bpf_strnstr(const char *s1, const char *s2, size_t len,
3830 bool ignore_case)
3831 {
3832 char c1, c2;
3833 int i, j;
3834
3835 if (!copy_from_kernel_nofault_allowed(s1, 1) ||
3836 !copy_from_kernel_nofault_allowed(s2, 1)) {
3837 return -ERANGE;
3838 }
3839
3840 guard(pagefault)();
3841 for (i = 0; i < XATTR_SIZE_MAX; i++) {
3842 for (j = 0; i + j <= len && j < XATTR_SIZE_MAX; j++) {
3843 __get_kernel_nofault(&c2, s2 + j, char, err_out);
3844 if (c2 == '\0')
3845 return i;
3846 /*
3847 * We allow reading an extra byte from s2 (note the
3848 * `i + j <= len` above) to cover the case when s2 is
3849 * a suffix of the first len chars of s1.
3850 */
3851 if (i + j == len)
3852 break;
3853 __get_kernel_nofault(&c1, s1 + j, char, err_out);
3854
3855 if (ignore_case) {
3856 c1 = tolower(c1);
3857 c2 = tolower(c2);
3858 }
3859
3860 if (c1 == '\0')
3861 return -ENOENT;
3862 if (c1 != c2)
3863 break;
3864 }
3865 if (j == XATTR_SIZE_MAX)
3866 return -E2BIG;
3867 if (i + j == len)
3868 return -ENOENT;
3869 s1++;
3870 }
3871 return -E2BIG;
3872 err_out:
3873 return -EFAULT;
3874 }
3875
3876 /**
3877 * bpf_strstr - Find the first substring in a string
3878 * @s1__ign: The string to be searched
3879 * @s2__ign: The string to search for
3880 *
3881 * Return:
3882 * * >=0 - Index of the first character of the first occurrence of @s2__ign
3883 * within @s1__ign
3884 * * %-ENOENT - @s2__ign is not a substring of @s1__ign
3885 * * %-EFAULT - Cannot read one of the strings
3886 * * %-E2BIG - One of the strings is too large
3887 * * %-ERANGE - One of the strings is outside of kernel address space
3888 */
3889 __bpf_kfunc int bpf_strstr(const char *s1__ign, const char *s2__ign)
3890 {
3891 return __bpf_strnstr(s1__ign, s2__ign, XATTR_SIZE_MAX, false);
3892 }
3893
3894 /**
3895 * bpf_strcasestr - Find the first substring in a string, ignoring the case of
3896 * the characters
3897 * @s1__ign: The string to be searched
3898 * @s2__ign: The string to search for
3899 *
3900 * Return:
3901 * * >=0 - Index of the first character of the first occurrence of @s2__ign
3902 * within @s1__ign
3903 * * %-ENOENT - @s2__ign is not a substring of @s1__ign
3904 * * %-EFAULT - Cannot read one of the strings
3905 * * %-E2BIG - One of the strings is too large
3906 * * %-ERANGE - One of the strings is outside of kernel address space
3907 */
3908 __bpf_kfunc int bpf_strcasestr(const char *s1__ign, const char *s2__ign)
3909 {
3910 return __bpf_strnstr(s1__ign, s2__ign, XATTR_SIZE_MAX, true);
3911 }
3912
3913 /**
3914 * bpf_strnstr - Find the first substring in a length-limited string
3915 * @s1__ign: The string to be searched
3916 * @s2__ign: The string to search for
3917 * @len: The maximum number of characters to search
3918 *
3919 * Return:
3920 * * >=0 - Index of the first character of the first occurrence of @s2__ign
3921 * within the first @len characters of @s1__ign
3922 * * %-ENOENT - @s2__ign not found in the first @len characters of @s1__ign
3923 * * %-EFAULT - Cannot read one of the strings
3924 * * %-E2BIG - One of the strings is too large
3925 * * %-ERANGE - One of the strings is outside of kernel address space
3926 */
3927 __bpf_kfunc int bpf_strnstr(const char *s1__ign, const char *s2__ign,
3928 size_t len)
3929 {
3930 return __bpf_strnstr(s1__ign, s2__ign, len, false);
3931 }
3932
3933 /**
3934 * bpf_strncasestr - Find the first substring in a length-limited string,
3935 * ignoring the case of the characters
3936 * @s1__ign: The string to be searched
3937 * @s2__ign: The string to search for
3938 * @len: The maximum number of characters to search
3939 *
3940 * Return:
3941 * * >=0 - Index of the first character of the first occurrence of @s2__ign
3942 * within the first @len characters of @s1__ign
3943 * * %-ENOENT - @s2__ign not found in the first @len characters of @s1__ign
3944 * * %-EFAULT - Cannot read one of the strings
3945 * * %-E2BIG - One of the strings is too large
3946 * * %-ERANGE - One of the strings is outside of kernel address space
3947 */
3948 __bpf_kfunc int bpf_strncasestr(const char *s1__ign, const char *s2__ign,
3949 size_t len)
3950 {
3951 return __bpf_strnstr(s1__ign, s2__ign, len, true);
3952 }
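
/*
 * Example (editor's sketch, not kernel code): case-insensitive substring
 * search limited to the first 32 characters of "haystack":
 *
 *	int pos = bpf_strncasestr(haystack, "needle", 32);
 *
 *	if (pos >= 0)
 *		... first occurrence starts at index pos ...
 */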
3953
3954 #ifdef CONFIG_KEYS
3955 /**
3956 * bpf_lookup_user_key - lookup a key by its serial
3957 * @serial: key handle serial number
3958 * @flags: lookup-specific flags
3959 *
3960 * Search a key with a given *serial* and the provided *flags*.
3961 * If found, increment the reference count of the key by one, and
3962 * return it in the bpf_key structure.
3963 *
3964 * The bpf_key structure must be passed to bpf_key_put() when done
3965 * with it, so that the key reference count is decremented and the
3966 * bpf_key structure is freed.
3967 *
3968 * Permission checks are deferred to the time the key is used by
3969 * one of the available key-specific kfuncs.
3970 *
3971 * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
3972 * special keyring (e.g. session keyring), if it doesn't yet exist.
3973 * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
3974 * for the key construction, and to retrieve uninstantiated keys (keys
3975 * without data attached to them).
3976 *
3977 * Return: a bpf_key pointer with a valid key pointer if the key is found, a
3978 * NULL pointer otherwise.
3979 */
3980 __bpf_kfunc struct bpf_key *bpf_lookup_user_key(s32 serial, u64 flags)
3981 {
3982 key_ref_t key_ref;
3983 struct bpf_key *bkey;
3984
3985 if (flags & ~KEY_LOOKUP_ALL)
3986 return NULL;
3987
3988 /*
3989 * Permission check is deferred until the key is used, as the
3990 * intent of the caller is unknown here.
3991 */
3992 key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
3993 if (IS_ERR(key_ref))
3994 return NULL;
3995
3996 bkey = kmalloc_obj(*bkey);
3997 if (!bkey) {
3998 key_put(key_ref_to_ptr(key_ref));
3999 return NULL;
4000 }
4001
4002 bkey->key = key_ref_to_ptr(key_ref);
4003 bkey->has_ref = true;
4004
4005 return bkey;
4006 }
4007
4008 /**
4009 * bpf_lookup_system_key - lookup a key by a system-defined ID
4010 * @id: key ID
4011 *
4012 * Obtain a bpf_key structure with a key pointer set to the passed key ID.
4013 * The key pointer is marked as invalid, to prevent bpf_key_put() from
4014 * attempting to decrement the key reference count on that pointer. The key
4015 * pointer set in such a way is currently understood only by
4016 * verify_pkcs7_signature().
4017 *
4018 * Set *id* to one of the values defined in include/linux/verification.h:
4019 * 0 for the primary keyring (immutable keyring of system keys);
4020 * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
4021 * (where keys can be added only if they are vouched for by existing keys
4022 * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
4023 * keyring (primarily used by the integrity subsystem to verify a kexec'ed
4024 * kernel image and, possibly, the initramfs signature).
4025 *
4026 * Return: a bpf_key pointer with an invalid key pointer set from the
4027 * pre-determined ID on success, a NULL pointer otherwise
4028 */
4029 __bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
4030 {
4031 struct bpf_key *bkey;
4032
4033 if (system_keyring_id_check(id) < 0)
4034 return NULL;
4035
4036 bkey = kmalloc_obj(*bkey, GFP_ATOMIC);
4037 if (!bkey)
4038 return NULL;
4039
4040 bkey->key = (struct key *)(unsigned long)id;
4041 bkey->has_ref = false;
4042
4043 return bkey;
4044 }
4045
4046 /**
4047 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
4048 * @bkey: bpf_key structure
4049 *
4050 * Decrement the reference count of the key inside *bkey*, if the pointer
4051 * is valid, and free *bkey*.
4052 */
4053 __bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
4054 {
4055 if (bkey->has_ref)
4056 key_put(bkey->key);
4057
4058 kfree(bkey);
4059 }
4060
4061 /**
4062 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
4063 * @data_p: data to verify
4064 * @sig_p: signature of the data
4065 * @trusted_keyring: keyring with keys trusted for signature verification
4066 *
4067 * Verify the PKCS#7 signature *sig_p* against the supplied *data_p*
4068 * with keys in a keyring referenced by *trusted_keyring*.
4069 *
4070 * Return: 0 on success, a negative value on error.
4071 */
4072 __bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
4073 struct bpf_dynptr *sig_p,
4074 struct bpf_key *trusted_keyring)
4075 {
4076 #ifdef CONFIG_SYSTEM_DATA_VERIFICATION
4077 struct bpf_dynptr_kern *data_ptr = (struct bpf_dynptr_kern *)data_p;
4078 struct bpf_dynptr_kern *sig_ptr = (struct bpf_dynptr_kern *)sig_p;
4079 const void *data, *sig;
4080 u32 data_len, sig_len;
4081 int ret;
4082
4083 if (trusted_keyring->has_ref) {
4084 /*
4085 * Do the permission check deferred in bpf_lookup_user_key().
4086 * See bpf_lookup_user_key() for more details.
4087 *
4088 * A call to key_task_permission() here would be redundant, as
4089 * it is already done by keyring_search() called by
4090 * find_asymmetric_key().
4091 */
4092 ret = key_validate(trusted_keyring->key);
4093 if (ret < 0)
4094 return ret;
4095 }
4096
4097 data_len = __bpf_dynptr_size(data_ptr);
4098 data = __bpf_dynptr_data(data_ptr, data_len);
4099 sig_len = __bpf_dynptr_size(sig_ptr);
4100 sig = __bpf_dynptr_data(sig_ptr, sig_len);
4101
4102 return verify_pkcs7_signature(data, data_len, sig, sig_len,
4103 trusted_keyring->key,
4104 VERIFYING_BPF_SIGNATURE, NULL,
4105 NULL);
4106 #else
4107 return -EOPNOTSUPP;
4108 #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
4109 }
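
/*
 * Example (editor's sketch, not kernel code): a sleepable BPF LSM program
 * could verify a signature against the secondary keyring; "data_ptr" and
 * "sig_ptr" are assumed, already-initialized dynptrs:
 *
 *	struct bpf_key *kr = bpf_lookup_system_key(VERIFY_USE_SECONDARY_KEYRING);
 *	int err = -ENOKEY;
 *
 *	if (kr) {
 *		err = bpf_verify_pkcs7_signature(&data_ptr, &sig_ptr, kr);
 *		bpf_key_put(kr);
 *	}
 */
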
4110 #endif /* CONFIG_KEYS */
4111
4112 typedef int (*bpf_task_work_callback_t)(struct bpf_map *map, void *key, void *value);
4113
4114 enum bpf_task_work_state {
4115 /* bpf_task_work is ready to be used */
4116 BPF_TW_STANDBY = 0,
4117 /* irq work scheduling in progress */
4118 BPF_TW_PENDING,
4119 /* task work scheduling in progress */
4120 BPF_TW_SCHEDULING,
4121 /* task work is scheduled successfully */
4122 BPF_TW_SCHEDULED,
4123 /* callback is running */
4124 BPF_TW_RUNNING,
4125 /* associated BPF map value is deleted */
4126 BPF_TW_FREED,
4127 };
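
/*
 * State transitions, as implemented below (editor's summary):
 *
 *	STANDBY -> PENDING               bpf_task_work_acquire_ctx()
 *	PENDING -> SCHEDULING            bpf_task_work_irq()
 *	SCHEDULING -> SCHEDULED          bpf_task_work_irq(), task work queued
 *	SCHEDULING -> STANDBY            bpf_task_work_irq(), task_work_add() failed
 *	SCHEDULING|SCHEDULED -> RUNNING  bpf_task_work_callback()
 *	RUNNING -> STANDBY               bpf_task_work_callback() finished
 *	any state -> FREED               bpf_task_work_cancel_and_free()
 */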
4128
4129 struct bpf_task_work_ctx {
4130 enum bpf_task_work_state state;
4131 refcount_t refcnt;
4132 struct callback_head work;
4133 struct irq_work irq_work;
4134 /* bpf_prog that schedules task work */
4135 struct bpf_prog *prog;
4136 /* task for which callback is scheduled */
4137 struct task_struct *task;
4138 /* the map and map value associated with this context */
4139 struct bpf_map *map;
4140 void *map_val;
4141 enum task_work_notify_mode mode;
4142 bpf_task_work_callback_t callback_fn;
4143 struct rcu_head rcu;
4144 } __aligned(8);
4145
4146 /* Actual type for struct bpf_task_work */
4147 struct bpf_task_work_kern {
4148 struct bpf_task_work_ctx *ctx;
4149 };
4150
4151 static void bpf_task_work_ctx_reset(struct bpf_task_work_ctx *ctx)
4152 {
4153 if (ctx->prog) {
4154 bpf_prog_put(ctx->prog);
4155 ctx->prog = NULL;
4156 }
4157 if (ctx->task) {
4158 bpf_task_release(ctx->task);
4159 ctx->task = NULL;
4160 }
4161 }
4162
4163 static bool bpf_task_work_ctx_tryget(struct bpf_task_work_ctx *ctx)
4164 {
4165 return refcount_inc_not_zero(&ctx->refcnt);
4166 }
4167
4168 static void bpf_task_work_ctx_put(struct bpf_task_work_ctx *ctx)
4169 {
4170 if (!refcount_dec_and_test(&ctx->refcnt))
4171 return;
4172
4173 bpf_task_work_ctx_reset(ctx);
4174
4175 /* bpf_mem_free expects migration to be disabled */
4176 migrate_disable();
4177 bpf_mem_free(&bpf_global_ma, ctx);
4178 migrate_enable();
4179 }
4180
4181 static void bpf_task_work_cancel(struct bpf_task_work_ctx *ctx)
4182 {
4183 /*
4184 * The scheduled task_work callback holds a ctx ref, so if we successfully
4185 * cancelled, we put that ref on the callback's behalf. If we couldn't
4186 * cancel, the callback will inevitably run or has already completed
4187 * running, and it will have taken care of its ctx ref itself.
4188 */
4189 if (task_work_cancel(ctx->task, &ctx->work))
4190 bpf_task_work_ctx_put(ctx);
4191 }
4192
4193 static void bpf_task_work_callback(struct callback_head *cb)
4194 {
4195 struct bpf_task_work_ctx *ctx = container_of(cb, struct bpf_task_work_ctx, work);
4196 enum bpf_task_work_state state;
4197 u32 idx;
4198 void *key;
4199
4200 /* Read lock is needed to protect ctx and map key/value access */
4201 guard(rcu_tasks_trace)();
4202 /*
4203 * This callback may start running before bpf_task_work_irq() has switched to
4204 * the SCHEDULED state, so handle both transition variants, SCHEDULING|SCHEDULED -> RUNNING.
4205 */
4206 state = cmpxchg(&ctx->state, BPF_TW_SCHEDULING, BPF_TW_RUNNING);
4207 if (state == BPF_TW_SCHEDULED)
4208 state = cmpxchg(&ctx->state, BPF_TW_SCHEDULED, BPF_TW_RUNNING);
4209 if (state == BPF_TW_FREED) {
4210 bpf_task_work_ctx_put(ctx);
4211 return;
4212 }
4213
4214 key = (void *)map_key_from_value(ctx->map, ctx->map_val, &idx);
4215
4216 migrate_disable();
4217 ctx->callback_fn(ctx->map, key, ctx->map_val);
4218 migrate_enable();
4219
4220 bpf_task_work_ctx_reset(ctx);
4221 (void)cmpxchg(&ctx->state, BPF_TW_RUNNING, BPF_TW_STANDBY);
4222
4223 bpf_task_work_ctx_put(ctx);
4224 }
4225
4226 static void bpf_task_work_irq(struct irq_work *irq_work)
4227 {
4228 struct bpf_task_work_ctx *ctx = container_of(irq_work, struct bpf_task_work_ctx, irq_work);
4229 enum bpf_task_work_state state;
4230 int err;
4231
4232 guard(rcu_tasks_trace)();
4233
4234 if (cmpxchg(&ctx->state, BPF_TW_PENDING, BPF_TW_SCHEDULING) != BPF_TW_PENDING) {
4235 bpf_task_work_ctx_put(ctx);
4236 return;
4237 }
4238
4239 err = task_work_add(ctx->task, &ctx->work, ctx->mode);
4240 if (err) {
4241 bpf_task_work_ctx_reset(ctx);
4242 /*
4243 * try to switch back to STANDBY for another task_work reuse, but we might have
4244 * gone to FREED already, which is fine as we already cleaned up after ourselves
4245 */
4246 (void)cmpxchg(&ctx->state, BPF_TW_SCHEDULING, BPF_TW_STANDBY);
4247 bpf_task_work_ctx_put(ctx);
4248 return;
4249 }
4250
4251 /*
4252 * It's technically possible for the just scheduled task_work callback to
4253 * have completed running by now, going SCHEDULING -> RUNNING and then
4254 * dropping its ctx refcount. Instead of capturing an extra ref just to
4255 * protect the ctx->state access below, we rely on RCU protection to
4256 * perform the SCHEDULING -> SCHEDULED attempt below.
4257 */
4258 state = cmpxchg(&ctx->state, BPF_TW_SCHEDULING, BPF_TW_SCHEDULED);
4259 if (state == BPF_TW_FREED)
4260 bpf_task_work_cancel(ctx); /* clean up if we switched into FREED state */
4261 }
4262
4263 static struct bpf_task_work_ctx *bpf_task_work_fetch_ctx(struct bpf_task_work *tw,
4264 struct bpf_map *map)
4265 {
4266 struct bpf_task_work_kern *twk = (void *)tw;
4267 struct bpf_task_work_ctx *ctx, *old_ctx;
4268
4269 ctx = READ_ONCE(twk->ctx);
4270 if (ctx)
4271 return ctx;
4272
4273 ctx = bpf_mem_alloc(&bpf_global_ma, sizeof(struct bpf_task_work_ctx));
4274 if (!ctx)
4275 return ERR_PTR(-ENOMEM);
4276
4277 memset(ctx, 0, sizeof(*ctx));
4278 refcount_set(&ctx->refcnt, 1); /* map's own ref */
4279 ctx->state = BPF_TW_STANDBY;
4280
4281 old_ctx = cmpxchg(&twk->ctx, NULL, ctx);
4282 if (old_ctx) {
4283 /*
4284 * tw->ctx was set by a concurrent BPF program; release the allocated
4285 * memory and reuse the already-set context.
4286 */
4287 bpf_mem_free(&bpf_global_ma, ctx);
4288 return old_ctx;
4289 }
4290
4291 return ctx; /* Success */
4292 }
4293
4294 static struct bpf_task_work_ctx *bpf_task_work_acquire_ctx(struct bpf_task_work *tw,
4295 struct bpf_map *map)
4296 {
4297 struct bpf_task_work_ctx *ctx;
4298
4299 ctx = bpf_task_work_fetch_ctx(tw, map);
4300 if (IS_ERR(ctx))
4301 return ctx;
4302
4303 /* try to get ref for task_work callback to hold */
4304 if (!bpf_task_work_ctx_tryget(ctx))
4305 return ERR_PTR(-EBUSY);
4306
4307 if (cmpxchg(&ctx->state, BPF_TW_STANDBY, BPF_TW_PENDING) != BPF_TW_STANDBY) {
4308 /* lost acquiring race or map_release_uref() stole it from us, put ref and bail */
4309 bpf_task_work_ctx_put(ctx);
4310 return ERR_PTR(-EBUSY);
4311 }
4312
4313 /*
4314 * If no process or bpffs is holding a reference to the map, no new callbacks should be
4315 * scheduled. This does not address any race or correctness issue, but rather is a policy
4316 * choice: dropping user references should stop everything.
4317 */
4318 if (!atomic64_read(&map->usercnt)) {
4319 /* drop ref we just got for task_work callback itself */
4320 bpf_task_work_ctx_put(ctx);
4321 /* transfer map's ref into cancel_and_free() */
4322 bpf_task_work_cancel_and_free(tw);
4323 return ERR_PTR(-EBUSY);
4324 }
4325
4326 return ctx;
4327 }
4328
4329 static int bpf_task_work_schedule(struct task_struct *task, struct bpf_task_work *tw,
4330 struct bpf_map *map, bpf_task_work_callback_t callback_fn,
4331 struct bpf_prog_aux *aux, enum task_work_notify_mode mode)
4332 {
4333 struct bpf_prog *prog;
4334 struct bpf_task_work_ctx *ctx;
4335 int err;
4336
4337 BTF_TYPE_EMIT(struct bpf_task_work);
4338
4339 prog = bpf_prog_inc_not_zero(aux->prog);
4340 if (IS_ERR(prog))
4341 return -EBADF;
4342 task = bpf_task_acquire(task);
4343 if (!task) {
4344 err = -EBADF;
4345 goto release_prog;
4346 }
4347
4348 ctx = bpf_task_work_acquire_ctx(tw, map);
4349 if (IS_ERR(ctx)) {
4350 err = PTR_ERR(ctx);
4351 goto release_all;
4352 }
4353
4354 ctx->task = task;
4355 ctx->callback_fn = callback_fn;
4356 ctx->prog = prog;
4357 ctx->mode = mode;
4358 ctx->map = map;
4359 ctx->map_val = (void *)tw - map->record->task_work_off;
4360 init_task_work(&ctx->work, bpf_task_work_callback);
4361 init_irq_work(&ctx->irq_work, bpf_task_work_irq);
4362
4363 irq_work_queue(&ctx->irq_work);
4364 return 0;
4365
4366 release_all:
4367 bpf_task_release(task);
4368 release_prog:
4369 bpf_prog_put(prog);
4370 return err;
4371 }
4372
4373 /**
4374 * bpf_task_work_schedule_signal - Schedule BPF callback using task_work_add with TWA_SIGNAL
4375 * mode
4376 * @task: Task struct for which callback should be scheduled
4377 * @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping
4378 * @map__map: bpf_map that embeds struct bpf_task_work in the values
4379 * @callback: pointer to BPF subprogram to call
4380 * @aux: pointer to bpf_prog_aux of the caller BPF program, implicitly set by the verifier
4381 *
4382 * Return: 0 if task work has been scheduled successfully, negative error code otherwise
4383 */
4384 __bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task, struct bpf_task_work *tw,
4385 void *map__map, bpf_task_work_callback_t callback,
4386 struct bpf_prog_aux *aux)
4387 {
4388 return bpf_task_work_schedule(task, tw, map__map, callback, aux, TWA_SIGNAL);
4389 }
4390
4391 /**
4392 * bpf_task_work_schedule_resume - Schedule BPF callback using task_work_add with TWA_RESUME
4393 * mode
4394 * @task: Task struct for which callback should be scheduled
4395 * @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping
4396 * @map__map: bpf_map that embeds struct bpf_task_work in the values
4397 * @callback: pointer to BPF subprogram to call
4398 * @aux: pointer to bpf_prog_aux of the caller BPF program, implicitly set by the verifier
4399 *
4400 * Return: 0 if task work has been scheduled successfully, negative error code otherwise
4401 */
4402 __bpf_kfunc int bpf_task_work_schedule_resume(struct task_struct *task, struct bpf_task_work *tw,
4403 void *map__map, bpf_task_work_callback_t callback,
4404 struct bpf_prog_aux *aux)
4405 {
4406 return bpf_task_work_schedule(task, tw, map__map, callback, aux, TWA_RESUME);
4407 }
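
/*
 * Example (editor's sketch, not kernel code): a BPF program embeds struct
 * bpf_task_work in its map value and schedules a callback for "task"; "cb"
 * is an assumed subprogram matching bpf_task_work_callback_t, and the aux
 * argument is supplied implicitly by the verifier:
 *
 *	struct val {
 *		struct bpf_task_work tw;
 *	} *v = bpf_map_lookup_elem(&my_map, &key);
 *
 *	if (v)
 *		bpf_task_work_schedule_signal(task, &v->tw, &my_map, cb);
 */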
4408
4409 static int make_file_dynptr(struct file *file, u32 flags, bool may_sleep,
4410 struct bpf_dynptr_kern *ptr)
4411 {
4412 struct bpf_dynptr_file_impl *state;
4413
4414 /* flags is currently unsupported */
4415 if (flags) {
4416 bpf_dynptr_set_null(ptr);
4417 return -EINVAL;
4418 }
4419
4420 state = bpf_mem_alloc(&bpf_global_ma, sizeof(struct bpf_dynptr_file_impl));
4421 if (!state) {
4422 bpf_dynptr_set_null(ptr);
4423 return -ENOMEM;
4424 }
4425 state->offset = 0;
4426 state->size = U64_MAX; /* Don't restrict size, as the file may change anyway */
4427 freader_init_from_file(&state->freader, NULL, 0, file, may_sleep);
4428 bpf_dynptr_init(ptr, state, BPF_DYNPTR_TYPE_FILE, 0, 0);
4429 bpf_dynptr_set_rdonly(ptr);
4430 return 0;
4431 }
4432
4433 __bpf_kfunc int bpf_dynptr_from_file(struct file *file, u32 flags, struct bpf_dynptr *ptr__uninit)
4434 {
4435 return make_file_dynptr(file, flags, false, (struct bpf_dynptr_kern *)ptr__uninit);
4436 }
4437
4438 int bpf_dynptr_from_file_sleepable(struct file *file, u32 flags, struct bpf_dynptr *ptr__uninit)
4439 {
4440 return make_file_dynptr(file, flags, true, (struct bpf_dynptr_kern *)ptr__uninit);
4441 }
4442
4443 __bpf_kfunc int bpf_dynptr_file_discard(struct bpf_dynptr *dynptr)
4444 {
4445 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)dynptr;
4446 struct bpf_dynptr_file_impl *df = ptr->data;
4447
4448 if (!df)
4449 return 0;
4450
4451 freader_cleanup(&df->freader);
4452 bpf_mem_free(&bpf_global_ma, df);
4453 bpf_dynptr_set_null(ptr);
4454 return 0;
4455 }
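
/*
 * Example (editor's sketch, not kernel code; assumes dynptr reads are
 * supported for file dynptrs): peeking at the first bytes of a trusted
 * "file":
 *
 *	struct bpf_dynptr dp;
 *	char hdr[16];
 *
 *	if (!bpf_dynptr_from_file(file, 0, &dp)) {
 *		bpf_dynptr_read(hdr, sizeof(hdr), &dp, 0, 0);
 *		bpf_dynptr_file_discard(&dp);
 *	}
 */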
4456
4457 /**
4458 * bpf_timer_cancel_async - try to deactivate a timer
4459 * @timer: bpf_timer to stop
4460 *
4461 * Returns:
4462 *
4463 * * 0 when the timer was not active
4464 * * 1 when the timer was active
4465 * * -1 when the timer is currently executing the callback function and
4466 * cannot be stopped
4467 * * -ECANCELED when the timer will be cancelled asynchronously
4468 * * -ENOMEM when out of memory
4469 * * -EINVAL when the timer was not initialized
4470 * * -ENOENT when this kfunc is racing with timer deletion
4471 */
4472 __bpf_kfunc int bpf_timer_cancel_async(struct bpf_timer *timer)
4473 {
4474 struct bpf_async_kern *async = (void *)timer;
4475 struct bpf_async_cb *cb;
4476 int ret;
4477
4478 cb = READ_ONCE(async->cb);
4479 if (!cb)
4480 return -EINVAL;
4481
4482 /*
4483 * Unlike hrtimer_start(), it's ok to synchronously call
4484 * hrtimer_try_to_cancel() when the refcnt has reached zero, but deferring
4485 * to irq_work is not, since the irq callback may execute after an RCU
4486 * grace period, by which time cb could already have been freed. Check for
4487 * a zero refcnt for consistency.
4488 */
4489 if (!refcount_inc_not_zero(&cb->refcnt))
4490 return -ENOENT;
4491
4492 if (!defer_timer_wq_op()) {
4493 struct bpf_hrtimer *t = container_of(cb, struct bpf_hrtimer, cb);
4494
4495 ret = hrtimer_try_to_cancel(&t->timer);
4496 bpf_async_refcount_put(cb);
4497 return ret;
4498 } else {
4499 ret = bpf_async_schedule_op(cb, BPF_ASYNC_CANCEL, 0, 0);
4500 return ret ? ret : -ECANCELED;
4501 }
4502 }
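
/*
 * Example (editor's sketch, not kernel code): callers must handle the
 * asynchronous-cancellation case; "v" is an assumed map value embedding a
 * struct bpf_timer:
 *
 *	int ret = bpf_timer_cancel_async(&v->timer);
 *
 *	if (ret == -ECANCELED)
 *		... cancellation deferred, will complete asynchronously ...
 *	else if (ret == 1)
 *		... timer was active and is now cancelled ...
 */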
4503
4504 __bpf_kfunc_end_defs();
4505
4506 static void bpf_task_work_cancel_scheduled(struct irq_work *irq_work)
4507 {
4508 struct bpf_task_work_ctx *ctx = container_of(irq_work, struct bpf_task_work_ctx, irq_work);
4509
4510 bpf_task_work_cancel(ctx); /* this might put task_work callback's ref */
4511 bpf_task_work_ctx_put(ctx); /* and here we put map's own ref that was transferred to us */
4512 }
4513
4514 void bpf_task_work_cancel_and_free(void *val)
4515 {
4516 struct bpf_task_work_kern *twk = val;
4517 struct bpf_task_work_ctx *ctx;
4518 enum bpf_task_work_state state;
4519
4520 ctx = xchg(&twk->ctx, NULL);
4521 if (!ctx)
4522 return;
4523
4524 state = xchg(&ctx->state, BPF_TW_FREED);
4525 if (state == BPF_TW_SCHEDULED) {
4526 /* run in irq_work to avoid locks in NMI */
4527 init_irq_work(&ctx->irq_work, bpf_task_work_cancel_scheduled);
4528 irq_work_queue(&ctx->irq_work);
4529 return;
4530 }
4531
4532 bpf_task_work_ctx_put(ctx); /* put bpf map's ref */
4533 }
4534
4535 BTF_KFUNCS_START(generic_btf_ids)
4536 #ifdef CONFIG_CRASH_DUMP
4537 BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
4538 #endif
4539 BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
4540 BTF_ID_FLAGS(func, bpf_percpu_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
4541 BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE)
4542 BTF_ID_FLAGS(func, bpf_percpu_obj_drop_impl, KF_RELEASE)
4543 BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL | KF_RCU)
4544 BTF_ID_FLAGS(func, bpf_list_push_front_impl)
4545 BTF_ID_FLAGS(func, bpf_list_push_back_impl)
4546 BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL)
4547 BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
4548 BTF_ID_FLAGS(func, bpf_list_front, KF_RET_NULL)
4549 BTF_ID_FLAGS(func, bpf_list_back, KF_RET_NULL)
4550 BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
4551 BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
4552 BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL)
4553 BTF_ID_FLAGS(func, bpf_rbtree_add_impl)
4554 BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL)
4555 BTF_ID_FLAGS(func, bpf_rbtree_root, KF_RET_NULL)
4556 BTF_ID_FLAGS(func, bpf_rbtree_left, KF_RET_NULL)
4557 BTF_ID_FLAGS(func, bpf_rbtree_right, KF_RET_NULL)
4558
4559 #ifdef CONFIG_CGROUPS
4560 BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
4561 BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE)
4562 BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
4563 BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL)
4564 BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU)
4565 BTF_ID_FLAGS(func, bpf_task_get_cgroup1, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
4566 #endif
4567 BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
4568 BTF_ID_FLAGS(func, bpf_task_from_vpid, KF_ACQUIRE | KF_RET_NULL)
4569 BTF_ID_FLAGS(func, bpf_throw)
4570 #ifdef CONFIG_BPF_EVENTS
4571 BTF_ID_FLAGS(func, bpf_send_signal_task)
4572 #endif
4573 #ifdef CONFIG_KEYS
4574 BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
4575 BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
4576 BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
4577 #ifdef CONFIG_SYSTEM_DATA_VERIFICATION
4578 BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
4579 #endif
4580 #endif
4581 BTF_KFUNCS_END(generic_btf_ids)
4582
4583 static const struct btf_kfunc_id_set generic_kfunc_set = {
4584 .owner = THIS_MODULE,
4585 .set = &generic_btf_ids,
4586 };
4587
4589 BTF_ID_LIST(generic_dtor_ids)
4590 BTF_ID(struct, task_struct)
4591 BTF_ID(func, bpf_task_release_dtor)
4592 #ifdef CONFIG_CGROUPS
4593 BTF_ID(struct, cgroup)
4594 BTF_ID(func, bpf_cgroup_release_dtor)
4595 #endif
4596
4597 BTF_KFUNCS_START(common_btf_ids)
4598 BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx, KF_FASTCALL)
4599 BTF_ID_FLAGS(func, bpf_rdonly_cast, KF_FASTCALL)
4600 BTF_ID_FLAGS(func, bpf_rcu_read_lock)
4601 BTF_ID_FLAGS(func, bpf_rcu_read_unlock)
4602 BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL)
4603 BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL)
4604 BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW)
4605 BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL)
4606 BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY)
4607 BTF_ID_FLAGS(func, bpf_iter_task_vma_new, KF_ITER_NEW | KF_RCU)
4608 BTF_ID_FLAGS(func, bpf_iter_task_vma_next, KF_ITER_NEXT | KF_RET_NULL)
4609 BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY)
4610 #ifdef CONFIG_CGROUPS
4611 BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW)
4612 BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL)
4613 BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY)
4614 BTF_ID_FLAGS(func, bpf_iter_css_new, KF_ITER_NEW | KF_RCU_PROTECTED)
4615 BTF_ID_FLAGS(func, bpf_iter_css_next, KF_ITER_NEXT | KF_RET_NULL)
4616 BTF_ID_FLAGS(func, bpf_iter_css_destroy, KF_ITER_DESTROY)
4617 #endif
4618 BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_RCU_PROTECTED)
4619 BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL)
4620 BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY)
4621 BTF_ID_FLAGS(func, bpf_dynptr_adjust)
4622 BTF_ID_FLAGS(func, bpf_dynptr_is_null)
4623 BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
4624 BTF_ID_FLAGS(func, bpf_dynptr_size)
4625 BTF_ID_FLAGS(func, bpf_dynptr_clone)
4626 BTF_ID_FLAGS(func, bpf_dynptr_copy)
4627 BTF_ID_FLAGS(func, bpf_dynptr_memset)
4628 #ifdef CONFIG_NET
4629 BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
4630 #endif
4631 BTF_ID_FLAGS(func, bpf_wq_init)
4632 BTF_ID_FLAGS(func, bpf_wq_set_callback, KF_IMPLICIT_ARGS)
4633 BTF_ID_FLAGS(func, bpf_wq_start)
4634 BTF_ID_FLAGS(func, bpf_preempt_disable)
4635 BTF_ID_FLAGS(func, bpf_preempt_enable)
4636 BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW)
4637 BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL)
4638 BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY)
4639 BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE)
4640 BTF_ID_FLAGS(func, bpf_copy_from_user_task_str, KF_SLEEPABLE)
4641 BTF_ID_FLAGS(func, bpf_get_kmem_cache)
4642 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_new, KF_ITER_NEW | KF_SLEEPABLE)
4643 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
4644 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)
4645 BTF_ID_FLAGS(func, bpf_local_irq_save)
4646 BTF_ID_FLAGS(func, bpf_local_irq_restore)
4647 #ifdef CONFIG_BPF_EVENTS
4648 BTF_ID_FLAGS(func, bpf_probe_read_user_dynptr)
4649 BTF_ID_FLAGS(func, bpf_probe_read_kernel_dynptr)
4650 BTF_ID_FLAGS(func, bpf_probe_read_user_str_dynptr)
4651 BTF_ID_FLAGS(func, bpf_probe_read_kernel_str_dynptr)
4652 BTF_ID_FLAGS(func, bpf_copy_from_user_dynptr, KF_SLEEPABLE)
4653 BTF_ID_FLAGS(func, bpf_copy_from_user_str_dynptr, KF_SLEEPABLE)
4654 BTF_ID_FLAGS(func, bpf_copy_from_user_task_dynptr, KF_SLEEPABLE)
4655 BTF_ID_FLAGS(func, bpf_copy_from_user_task_str_dynptr, KF_SLEEPABLE)
4656 #endif
4657 #ifdef CONFIG_DMA_SHARED_BUFFER
4658 BTF_ID_FLAGS(func, bpf_iter_dmabuf_new, KF_ITER_NEW | KF_SLEEPABLE)
4659 BTF_ID_FLAGS(func, bpf_iter_dmabuf_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
4660 BTF_ID_FLAGS(func, bpf_iter_dmabuf_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)
4661 #endif
4662 BTF_ID_FLAGS(func, __bpf_trap)
4663 BTF_ID_FLAGS(func, bpf_strcmp)
4664 BTF_ID_FLAGS(func, bpf_strcasecmp)
4665 BTF_ID_FLAGS(func, bpf_strncasecmp)
4666 BTF_ID_FLAGS(func, bpf_strchr)
4667 BTF_ID_FLAGS(func, bpf_strchrnul)
4668 BTF_ID_FLAGS(func, bpf_strnchr)
4669 BTF_ID_FLAGS(func, bpf_strrchr)
4670 BTF_ID_FLAGS(func, bpf_strlen)
4671 BTF_ID_FLAGS(func, bpf_strnlen)
4672 BTF_ID_FLAGS(func, bpf_strspn)
4673 BTF_ID_FLAGS(func, bpf_strcspn)
4674 BTF_ID_FLAGS(func, bpf_strstr)
4675 BTF_ID_FLAGS(func, bpf_strcasestr)
4676 BTF_ID_FLAGS(func, bpf_strnstr)
4677 BTF_ID_FLAGS(func, bpf_strncasestr)
4678 #if defined(CONFIG_BPF_LSM) && defined(CONFIG_CGROUPS)
4679 BTF_ID_FLAGS(func, bpf_cgroup_read_xattr, KF_RCU)
4680 #endif
4681 BTF_ID_FLAGS(func, bpf_stream_vprintk, KF_IMPLICIT_ARGS)
4682 BTF_ID_FLAGS(func, bpf_stream_print_stack, KF_IMPLICIT_ARGS)
4683 BTF_ID_FLAGS(func, bpf_task_work_schedule_signal, KF_IMPLICIT_ARGS)
4684 BTF_ID_FLAGS(func, bpf_task_work_schedule_resume, KF_IMPLICIT_ARGS)
4685 BTF_ID_FLAGS(func, bpf_dynptr_from_file)
4686 BTF_ID_FLAGS(func, bpf_dynptr_file_discard)
4687 BTF_ID_FLAGS(func, bpf_timer_cancel_async)
4688 BTF_KFUNCS_END(common_btf_ids)
4689
4690 static const struct btf_kfunc_id_set common_kfunc_set = {
4691 .owner = THIS_MODULE,
4692 .set = &common_btf_ids,
4693 };
4694
4695 static int __init kfunc_init(void)
4696 {
4697 int ret;
4698 const struct btf_id_dtor_kfunc generic_dtors[] = {
4699 {
4700 .btf_id = generic_dtor_ids[0],
4701 .kfunc_btf_id = generic_dtor_ids[1]
4702 },
4703 #ifdef CONFIG_CGROUPS
4704 {
4705 .btf_id = generic_dtor_ids[2],
4706 .kfunc_btf_id = generic_dtor_ids[3]
4707 },
4708 #endif
4709 };
4710
4711 ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set);
4712 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set);
4713 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &generic_kfunc_set);
4714 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set);
4715 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &generic_kfunc_set);
4716 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SKB, &generic_kfunc_set);
4717 ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors,
4718 ARRAY_SIZE(generic_dtors),
4719 THIS_MODULE);
4720 return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set);
4721 }
4722
4723 late_initcall(kfunc_init);
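
/*
 * Example (editor's sketch, not kernel code): another subsystem registers
 * its own kfuncs the same way; "my_kfunc" and "my_btf_ids" are hypothetical:
 *
 *	BTF_KFUNCS_START(my_btf_ids)
 *	BTF_ID_FLAGS(func, my_kfunc, KF_RET_NULL)
 *	BTF_KFUNCS_END(my_btf_ids)
 *
 *	static const struct btf_kfunc_id_set my_kfunc_set = {
 *		.owner = THIS_MODULE,
 *		.set   = &my_btf_ids,
 *	};
 *
 *	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &my_kfunc_set);
 */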
4724
4725 /* Get a pointer to dynptr data up to len bytes for read only access. If
4726 * the dynptr doesn't have continuous data up to len bytes, return NULL.
4727 */
4728 const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u64 len)
4729 {
4730 const struct bpf_dynptr *p = (struct bpf_dynptr *)ptr;
4731
4732 return bpf_dynptr_slice(p, 0, NULL, len);
4733 }
4734
4735 /* Get a pointer to dynptr data up to len bytes for read write access. If
4736 * the dynptr doesn't have continuous data up to len bytes, or the dynptr
4737 * is read only, return NULL.
4738 */
4739 void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u64 len)
4740 {
4741 if (__bpf_dynptr_is_rdonly(ptr))
4742 return NULL;
4743 return (void *)__bpf_dynptr_data(ptr, len);
4744 }
4745
4746 void bpf_map_free_internal_structs(struct bpf_map *map, void *val)
4747 {
4748 if (btf_record_has_field(map->record, BPF_TIMER))
4749 bpf_obj_free_timer(map->record, val);
4750 if (btf_record_has_field(map->record, BPF_WORKQUEUE))
4751 bpf_obj_free_workqueue(map->record, val);
4752 if (btf_record_has_field(map->record, BPF_TASK_WORK))
4753 bpf_obj_free_task_work(map->record, val);
4754 }
4755