xref: /linux/kernel/trace/bpf_trace.c (revision 51a8f9d7f587290944d6fc733d1f897091c63159)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  */
5 #include <linux/kernel.h>
6 #include <linux/types.h>
7 #include <linux/slab.h>
8 #include <linux/bpf.h>
9 #include <linux/bpf_verifier.h>
10 #include <linux/bpf_perf_event.h>
11 #include <linux/btf.h>
12 #include <linux/filter.h>
13 #include <linux/uaccess.h>
14 #include <linux/ctype.h>
15 #include <linux/kprobes.h>
16 #include <linux/spinlock.h>
17 #include <linux/syscalls.h>
18 #include <linux/error-injection.h>
19 #include <linux/btf_ids.h>
20 #include <linux/bpf_lsm.h>
21 #include <linux/fprobe.h>
22 #include <linux/bsearch.h>
23 #include <linux/sort.h>
24 #include <linux/key.h>
25 #include <linux/verification.h>
26 
27 #include <net/bpf_sk_storage.h>
28 
29 #include <uapi/linux/bpf.h>
30 #include <uapi/linux/btf.h>
31 
32 #include <asm/tlb.h>
33 
34 #include "trace_probe.h"
35 #include "trace.h"
36 
37 #define CREATE_TRACE_POINTS
38 #include "bpf_trace.h"
39 
40 #define bpf_event_rcu_dereference(p)					\
41 	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
42 
43 #ifdef CONFIG_MODULES
44 struct bpf_trace_module {
45 	struct module *module;
46 	struct list_head list;
47 };
48 
49 static LIST_HEAD(bpf_trace_modules);
50 static DEFINE_MUTEX(bpf_module_mutex);
51 
52 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
53 {
54 	struct bpf_raw_event_map *btp, *ret = NULL;
55 	struct bpf_trace_module *btm;
56 	unsigned int i;
57 
58 	mutex_lock(&bpf_module_mutex);
59 	list_for_each_entry(btm, &bpf_trace_modules, list) {
60 		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
61 			btp = &btm->module->bpf_raw_events[i];
62 			if (!strcmp(btp->tp->name, name)) {
63 				if (try_module_get(btm->module))
64 					ret = btp;
65 				goto out;
66 			}
67 		}
68 	}
69 out:
70 	mutex_unlock(&bpf_module_mutex);
71 	return ret;
72 }
73 #else
74 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
75 {
76 	return NULL;
77 }
78 #endif /* CONFIG_MODULES */
79 
80 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
81 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
82 
83 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
84 				  u64 flags, const struct btf **btf,
85 				  s32 *btf_id);
86 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
87 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
88 
89 /**
90  * trace_call_bpf - invoke BPF program
91  * @call: tracepoint event
92  * @ctx: opaque context pointer
93  *
94  * kprobe handlers execute BPF programs via this helper.
95  * Can be used from static tracepoints in the future.
96  *
97  * Return: BPF programs always return an integer which is interpreted by
98  * kprobe handler as:
99  * 0 - return from kprobe (event is filtered out)
100  * 1 - store kprobe event into ring buffer
101  * Other values are reserved and currently alias to 1
102  */
103 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
104 {
105 	unsigned int ret;
106 
107 	cant_sleep();
108 
109 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
110 		/*
111 		 * since some bpf program is already running on this cpu,
112 		 * don't call into another bpf program (same or different)
113 		 * and don't send kprobe event into ring-buffer,
114 		 * so return zero here
115 		 */
116 		ret = 0;
117 		goto out;
118 	}
119 
120 	/*
121 	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
122 	 * to all call sites, we did a bpf_prog_array_valid() there to check
123 	 * whether call->prog_array is empty or not, which is
124 	 * a heuristic to speed up execution.
125 	 *
126 	 * If bpf_prog_array_valid() fetched prog_array was
127 	 * non-NULL, we go into trace_call_bpf() and do the actual
128 	 * proper rcu_dereference() under RCU lock.
129 	 * If it turns out that prog_array is NULL then, we bail out.
130 	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
131 	 * was NULL, you'll skip the prog_array with the risk of missing
132 	 * out of events when it was updated in between this and the
133 	 * rcu_dereference() which is accepted risk.
134 	 */
135 	rcu_read_lock();
136 	ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
137 				 ctx, bpf_prog_run);
138 	rcu_read_unlock();
139 
140  out:
141 	__this_cpu_dec(bpf_prog_active);
142 
143 	return ret;
144 }
145 
146 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
147 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
148 {
149 	regs_set_return_value(regs, rc);
150 	override_function_with_return(regs);
151 	return 0;
152 }
153 
154 static const struct bpf_func_proto bpf_override_return_proto = {
155 	.func		= bpf_override_return,
156 	.gpl_only	= true,
157 	.ret_type	= RET_INTEGER,
158 	.arg1_type	= ARG_PTR_TO_CTX,
159 	.arg2_type	= ARG_ANYTHING,
160 };
161 #endif
162 
163 static __always_inline int
164 bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
165 {
166 	int ret;
167 
168 	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
169 	if (unlikely(ret < 0))
170 		memset(dst, 0, size);
171 	return ret;
172 }
173 
174 BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
175 	   const void __user *, unsafe_ptr)
176 {
177 	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
178 }
179 
180 const struct bpf_func_proto bpf_probe_read_user_proto = {
181 	.func		= bpf_probe_read_user,
182 	.gpl_only	= true,
183 	.ret_type	= RET_INTEGER,
184 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
185 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
186 	.arg3_type	= ARG_ANYTHING,
187 };
188 
189 static __always_inline int
190 bpf_probe_read_user_str_common(void *dst, u32 size,
191 			       const void __user *unsafe_ptr)
192 {
193 	int ret;
194 
195 	/*
196 	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
197 	 * terminator into `dst`.
198 	 *
199 	 * strncpy_from_user() does long-sized strides in the fast path. If the
200 	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
201 	 * then there could be junk after the NUL in `dst`. If user takes `dst`
202 	 * and keys a hash map with it, then semantically identical strings can
203 	 * occupy multiple entries in the map.
204 	 */
205 	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
206 	if (unlikely(ret < 0))
207 		memset(dst, 0, size);
208 	return ret;
209 }
210 
211 BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
212 	   const void __user *, unsafe_ptr)
213 {
214 	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
215 }
216 
217 const struct bpf_func_proto bpf_probe_read_user_str_proto = {
218 	.func		= bpf_probe_read_user_str,
219 	.gpl_only	= true,
220 	.ret_type	= RET_INTEGER,
221 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
222 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
223 	.arg3_type	= ARG_ANYTHING,
224 };
225 
226 static __always_inline int
227 bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
228 {
229 	int ret;
230 
231 	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
232 	if (unlikely(ret < 0))
233 		memset(dst, 0, size);
234 	return ret;
235 }
236 
237 BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
238 	   const void *, unsafe_ptr)
239 {
240 	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
241 }
242 
243 const struct bpf_func_proto bpf_probe_read_kernel_proto = {
244 	.func		= bpf_probe_read_kernel,
245 	.gpl_only	= true,
246 	.ret_type	= RET_INTEGER,
247 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
248 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
249 	.arg3_type	= ARG_ANYTHING,
250 };
251 
252 static __always_inline int
253 bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
254 {
255 	int ret;
256 
257 	/*
258 	 * The strncpy_from_kernel_nofault() call will likely not fill the
259 	 * entire buffer, but that's okay in this circumstance as we're probing
260 	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
261 	 * as well probe the stack. Thus, memory is explicitly cleared
262 	 * only in error case, so that improper users ignoring return
263 	 * code altogether don't copy garbage; otherwise length of string
264 	 * is returned that can be used for bpf_perf_event_output() et al.
265 	 */
266 	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
267 	if (unlikely(ret < 0))
268 		memset(dst, 0, size);
269 	return ret;
270 }
271 
272 BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
273 	   const void *, unsafe_ptr)
274 {
275 	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
276 }
277 
278 const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
279 	.func		= bpf_probe_read_kernel_str,
280 	.gpl_only	= true,
281 	.ret_type	= RET_INTEGER,
282 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
283 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
284 	.arg3_type	= ARG_ANYTHING,
285 };
286 
287 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
288 BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
289 	   const void *, unsafe_ptr)
290 {
291 	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
292 		return bpf_probe_read_user_common(dst, size,
293 				(__force void __user *)unsafe_ptr);
294 	}
295 	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
296 }
297 
298 static const struct bpf_func_proto bpf_probe_read_compat_proto = {
299 	.func		= bpf_probe_read_compat,
300 	.gpl_only	= true,
301 	.ret_type	= RET_INTEGER,
302 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
303 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
304 	.arg3_type	= ARG_ANYTHING,
305 };
306 
307 BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
308 	   const void *, unsafe_ptr)
309 {
310 	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
311 		return bpf_probe_read_user_str_common(dst, size,
312 				(__force void __user *)unsafe_ptr);
313 	}
314 	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
315 }
316 
317 static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
318 	.func		= bpf_probe_read_compat_str,
319 	.gpl_only	= true,
320 	.ret_type	= RET_INTEGER,
321 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
322 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
323 	.arg3_type	= ARG_ANYTHING,
324 };
325 #endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
326 
327 BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
328 	   u32, size)
329 {
330 	/*
331 	 * Ensure we're in user context which is safe for the helper to
332 	 * run. This helper has no business in a kthread.
333 	 *
334 	 * access_ok() should prevent writing to non-user memory, but in
335 	 * some situations (nommu, temporary switch, etc) access_ok() does
336 	 * not provide enough validation, hence the check on KERNEL_DS.
337 	 *
338 	 * nmi_uaccess_okay() ensures the probe is not run in an interim
339 	 * state, when the task or mm are switched. This is specifically
340 	 * required to prevent the use of temporary mm.
341 	 */
342 
343 	if (unlikely(in_interrupt() ||
344 		     current->flags & (PF_KTHREAD | PF_EXITING)))
345 		return -EPERM;
346 	if (unlikely(!nmi_uaccess_okay()))
347 		return -EPERM;
348 
349 	return copy_to_user_nofault(unsafe_ptr, src, size);
350 }
351 
352 static const struct bpf_func_proto bpf_probe_write_user_proto = {
353 	.func		= bpf_probe_write_user,
354 	.gpl_only	= true,
355 	.ret_type	= RET_INTEGER,
356 	.arg1_type	= ARG_ANYTHING,
357 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
358 	.arg3_type	= ARG_CONST_SIZE,
359 };
360 
361 static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
362 {
363 	if (!capable(CAP_SYS_ADMIN))
364 		return NULL;
365 
366 	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
367 			    current->comm, task_pid_nr(current));
368 
369 	return &bpf_probe_write_user_proto;
370 }
371 
372 static DEFINE_RAW_SPINLOCK(trace_printk_lock);
373 
374 #define MAX_TRACE_PRINTK_VARARGS	3
375 #define BPF_TRACE_PRINTK_SIZE		1024
376 
377 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
378 	   u64, arg2, u64, arg3)
379 {
380 	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
381 	u32 *bin_args;
382 	static char buf[BPF_TRACE_PRINTK_SIZE];
383 	unsigned long flags;
384 	int ret;
385 
386 	ret = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args,
387 				  MAX_TRACE_PRINTK_VARARGS);
388 	if (ret < 0)
389 		return ret;
390 
391 	raw_spin_lock_irqsave(&trace_printk_lock, flags);
392 	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);
393 
394 	trace_bpf_trace_printk(buf);
395 	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
396 
397 	bpf_bprintf_cleanup();
398 
399 	return ret;
400 }
401 
402 static const struct bpf_func_proto bpf_trace_printk_proto = {
403 	.func		= bpf_trace_printk,
404 	.gpl_only	= true,
405 	.ret_type	= RET_INTEGER,
406 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
407 	.arg2_type	= ARG_CONST_SIZE,
408 };
409 
410 static void __set_printk_clr_event(void)
411 {
412 	/*
413 	 * This program might be calling bpf_trace_printk,
414 	 * so enable the associated bpf_trace/bpf_trace_printk event.
415 	 * Repeat this each time as it is possible a user has
416 	 * disabled bpf_trace_printk events.  By loading a program
417 	 * calling bpf_trace_printk() however the user has expressed
418 	 * the intent to see such events.
419 	 */
420 	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
421 		pr_warn_ratelimited("could not enable bpf_trace_printk events");
422 }
423 
424 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
425 {
426 	__set_printk_clr_event();
427 	return &bpf_trace_printk_proto;
428 }
429 
430 BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, data,
431 	   u32, data_len)
432 {
433 	static char buf[BPF_TRACE_PRINTK_SIZE];
434 	unsigned long flags;
435 	int ret, num_args;
436 	u32 *bin_args;
437 
438 	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
439 	    (data_len && !data))
440 		return -EINVAL;
441 	num_args = data_len / 8;
442 
443 	ret = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
444 	if (ret < 0)
445 		return ret;
446 
447 	raw_spin_lock_irqsave(&trace_printk_lock, flags);
448 	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);
449 
450 	trace_bpf_trace_printk(buf);
451 	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
452 
453 	bpf_bprintf_cleanup();
454 
455 	return ret;
456 }
457 
458 static const struct bpf_func_proto bpf_trace_vprintk_proto = {
459 	.func		= bpf_trace_vprintk,
460 	.gpl_only	= true,
461 	.ret_type	= RET_INTEGER,
462 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
463 	.arg2_type	= ARG_CONST_SIZE,
464 	.arg3_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
465 	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
466 };
467 
468 const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
469 {
470 	__set_printk_clr_event();
471 	return &bpf_trace_vprintk_proto;
472 }
473 
474 BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
475 	   const void *, data, u32, data_len)
476 {
477 	int err, num_args;
478 	u32 *bin_args;
479 
480 	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
481 	    (data_len && !data))
482 		return -EINVAL;
483 	num_args = data_len / 8;
484 
485 	err = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
486 	if (err < 0)
487 		return err;
488 
489 	seq_bprintf(m, fmt, bin_args);
490 
491 	bpf_bprintf_cleanup();
492 
493 	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
494 }
495 
496 BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
497 
498 static const struct bpf_func_proto bpf_seq_printf_proto = {
499 	.func		= bpf_seq_printf,
500 	.gpl_only	= true,
501 	.ret_type	= RET_INTEGER,
502 	.arg1_type	= ARG_PTR_TO_BTF_ID,
503 	.arg1_btf_id	= &btf_seq_file_ids[0],
504 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
505 	.arg3_type	= ARG_CONST_SIZE,
506 	.arg4_type      = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
507 	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
508 };
509 
510 BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
511 {
512 	return seq_write(m, data, len) ? -EOVERFLOW : 0;
513 }
514 
515 static const struct bpf_func_proto bpf_seq_write_proto = {
516 	.func		= bpf_seq_write,
517 	.gpl_only	= true,
518 	.ret_type	= RET_INTEGER,
519 	.arg1_type	= ARG_PTR_TO_BTF_ID,
520 	.arg1_btf_id	= &btf_seq_file_ids[0],
521 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
522 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
523 };
524 
525 BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
526 	   u32, btf_ptr_size, u64, flags)
527 {
528 	const struct btf *btf;
529 	s32 btf_id;
530 	int ret;
531 
532 	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
533 	if (ret)
534 		return ret;
535 
536 	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
537 }
538 
539 static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
540 	.func		= bpf_seq_printf_btf,
541 	.gpl_only	= true,
542 	.ret_type	= RET_INTEGER,
543 	.arg1_type	= ARG_PTR_TO_BTF_ID,
544 	.arg1_btf_id	= &btf_seq_file_ids[0],
545 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
546 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
547 	.arg4_type	= ARG_ANYTHING,
548 };
549 
550 static __always_inline int
551 get_map_perf_counter(struct bpf_map *map, u64 flags,
552 		     u64 *value, u64 *enabled, u64 *running)
553 {
554 	struct bpf_array *array = container_of(map, struct bpf_array, map);
555 	unsigned int cpu = smp_processor_id();
556 	u64 index = flags & BPF_F_INDEX_MASK;
557 	struct bpf_event_entry *ee;
558 
559 	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
560 		return -EINVAL;
561 	if (index == BPF_F_CURRENT_CPU)
562 		index = cpu;
563 	if (unlikely(index >= array->map.max_entries))
564 		return -E2BIG;
565 
566 	ee = READ_ONCE(array->ptrs[index]);
567 	if (!ee)
568 		return -ENOENT;
569 
570 	return perf_event_read_local(ee->event, value, enabled, running);
571 }
572 
573 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
574 {
575 	u64 value = 0;
576 	int err;
577 
578 	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
579 	/*
580 	 * this api is ugly since we miss [-22..-2] range of valid
581 	 * counter values, but that's uapi
582 	 */
583 	if (err)
584 		return err;
585 	return value;
586 }
587 
588 static const struct bpf_func_proto bpf_perf_event_read_proto = {
589 	.func		= bpf_perf_event_read,
590 	.gpl_only	= true,
591 	.ret_type	= RET_INTEGER,
592 	.arg1_type	= ARG_CONST_MAP_PTR,
593 	.arg2_type	= ARG_ANYTHING,
594 };
595 
596 BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
597 	   struct bpf_perf_event_value *, buf, u32, size)
598 {
599 	int err = -EINVAL;
600 
601 	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
602 		goto clear;
603 	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
604 				   &buf->running);
605 	if (unlikely(err))
606 		goto clear;
607 	return 0;
608 clear:
609 	memset(buf, 0, size);
610 	return err;
611 }
612 
613 static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
614 	.func		= bpf_perf_event_read_value,
615 	.gpl_only	= true,
616 	.ret_type	= RET_INTEGER,
617 	.arg1_type	= ARG_CONST_MAP_PTR,
618 	.arg2_type	= ARG_ANYTHING,
619 	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
620 	.arg4_type	= ARG_CONST_SIZE,
621 };
622 
623 static __always_inline u64
624 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
625 			u64 flags, struct perf_sample_data *sd)
626 {
627 	struct bpf_array *array = container_of(map, struct bpf_array, map);
628 	unsigned int cpu = smp_processor_id();
629 	u64 index = flags & BPF_F_INDEX_MASK;
630 	struct bpf_event_entry *ee;
631 	struct perf_event *event;
632 
633 	if (index == BPF_F_CURRENT_CPU)
634 		index = cpu;
635 	if (unlikely(index >= array->map.max_entries))
636 		return -E2BIG;
637 
638 	ee = READ_ONCE(array->ptrs[index]);
639 	if (!ee)
640 		return -ENOENT;
641 
642 	event = ee->event;
643 	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
644 		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
645 		return -EINVAL;
646 
647 	if (unlikely(event->oncpu != cpu))
648 		return -EOPNOTSUPP;
649 
650 	return perf_event_output(event, sd, regs);
651 }
652 
653 /*
654  * Support executing tracepoints in normal, irq, and nmi context that each call
655  * bpf_perf_event_output
656  */
657 struct bpf_trace_sample_data {
658 	struct perf_sample_data sds[3];
659 };
660 
661 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
662 static DEFINE_PER_CPU(int, bpf_trace_nest_level);
663 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
664 	   u64, flags, void *, data, u64, size)
665 {
666 	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
667 	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
668 	struct perf_raw_record raw = {
669 		.frag = {
670 			.size = size,
671 			.data = data,
672 		},
673 	};
674 	struct perf_sample_data *sd;
675 	int err;
676 
677 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
678 		err = -EBUSY;
679 		goto out;
680 	}
681 
682 	sd = &sds->sds[nest_level - 1];
683 
684 	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
685 		err = -EINVAL;
686 		goto out;
687 	}
688 
689 	perf_sample_data_init(sd, 0, 0);
690 	sd->raw = &raw;
691 	sd->sample_flags |= PERF_SAMPLE_RAW;
692 
693 	err = __bpf_perf_event_output(regs, map, flags, sd);
694 
695 out:
696 	this_cpu_dec(bpf_trace_nest_level);
697 	return err;
698 }
699 
700 static const struct bpf_func_proto bpf_perf_event_output_proto = {
701 	.func		= bpf_perf_event_output,
702 	.gpl_only	= true,
703 	.ret_type	= RET_INTEGER,
704 	.arg1_type	= ARG_PTR_TO_CTX,
705 	.arg2_type	= ARG_CONST_MAP_PTR,
706 	.arg3_type	= ARG_ANYTHING,
707 	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
708 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
709 };
710 
711 static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
712 struct bpf_nested_pt_regs {
713 	struct pt_regs regs[3];
714 };
715 static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
716 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
717 
718 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
719 		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
720 {
721 	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
722 	struct perf_raw_frag frag = {
723 		.copy		= ctx_copy,
724 		.size		= ctx_size,
725 		.data		= ctx,
726 	};
727 	struct perf_raw_record raw = {
728 		.frag = {
729 			{
730 				.next	= ctx_size ? &frag : NULL,
731 			},
732 			.size	= meta_size,
733 			.data	= meta,
734 		},
735 	};
736 	struct perf_sample_data *sd;
737 	struct pt_regs *regs;
738 	u64 ret;
739 
740 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
741 		ret = -EBUSY;
742 		goto out;
743 	}
744 	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
745 	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
746 
747 	perf_fetch_caller_regs(regs);
748 	perf_sample_data_init(sd, 0, 0);
749 	sd->raw = &raw;
750 	sd->sample_flags |= PERF_SAMPLE_RAW;
751 
752 	ret = __bpf_perf_event_output(regs, map, flags, sd);
753 out:
754 	this_cpu_dec(bpf_event_output_nest_level);
755 	return ret;
756 }
757 
758 BPF_CALL_0(bpf_get_current_task)
759 {
760 	return (long) current;
761 }
762 
763 const struct bpf_func_proto bpf_get_current_task_proto = {
764 	.func		= bpf_get_current_task,
765 	.gpl_only	= true,
766 	.ret_type	= RET_INTEGER,
767 };
768 
769 BPF_CALL_0(bpf_get_current_task_btf)
770 {
771 	return (unsigned long) current;
772 }
773 
774 const struct bpf_func_proto bpf_get_current_task_btf_proto = {
775 	.func		= bpf_get_current_task_btf,
776 	.gpl_only	= true,
777 	.ret_type	= RET_PTR_TO_BTF_ID_TRUSTED,
778 	.ret_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
779 };
780 
781 BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
782 {
783 	return (unsigned long) task_pt_regs(task);
784 }
785 
786 BTF_ID_LIST(bpf_task_pt_regs_ids)
787 BTF_ID(struct, pt_regs)
788 
789 const struct bpf_func_proto bpf_task_pt_regs_proto = {
790 	.func		= bpf_task_pt_regs,
791 	.gpl_only	= true,
792 	.arg1_type	= ARG_PTR_TO_BTF_ID,
793 	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
794 	.ret_type	= RET_PTR_TO_BTF_ID,
795 	.ret_btf_id	= &bpf_task_pt_regs_ids[0],
796 };
797 
798 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
799 {
800 	struct bpf_array *array = container_of(map, struct bpf_array, map);
801 	struct cgroup *cgrp;
802 
803 	if (unlikely(idx >= array->map.max_entries))
804 		return -E2BIG;
805 
806 	cgrp = READ_ONCE(array->ptrs[idx]);
807 	if (unlikely(!cgrp))
808 		return -EAGAIN;
809 
810 	return task_under_cgroup_hierarchy(current, cgrp);
811 }
812 
813 static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
814 	.func           = bpf_current_task_under_cgroup,
815 	.gpl_only       = false,
816 	.ret_type       = RET_INTEGER,
817 	.arg1_type      = ARG_CONST_MAP_PTR,
818 	.arg2_type      = ARG_ANYTHING,
819 };
820 
821 struct send_signal_irq_work {
822 	struct irq_work irq_work;
823 	struct task_struct *task;
824 	u32 sig;
825 	enum pid_type type;
826 };
827 
828 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
829 
830 static void do_bpf_send_signal(struct irq_work *entry)
831 {
832 	struct send_signal_irq_work *work;
833 
834 	work = container_of(entry, struct send_signal_irq_work, irq_work);
835 	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
836 	put_task_struct(work->task);
837 }
838 
839 static int bpf_send_signal_common(u32 sig, enum pid_type type)
840 {
841 	struct send_signal_irq_work *work = NULL;
842 
843 	/* Similar to bpf_probe_write_user, task needs to be
844 	 * in a sound condition and kernel memory access be
845 	 * permitted in order to send signal to the current
846 	 * task.
847 	 */
848 	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
849 		return -EPERM;
850 	if (unlikely(!nmi_uaccess_okay()))
851 		return -EPERM;
852 	/* Task should not be pid=1 to avoid kernel panic. */
853 	if (unlikely(is_global_init(current)))
854 		return -EPERM;
855 
856 	if (irqs_disabled()) {
857 		/* Do an early check on signal validity. Otherwise,
858 		 * the error is lost in deferred irq_work.
859 		 */
860 		if (unlikely(!valid_signal(sig)))
861 			return -EINVAL;
862 
863 		work = this_cpu_ptr(&send_signal_work);
864 		if (irq_work_is_busy(&work->irq_work))
865 			return -EBUSY;
866 
867 		/* Add the current task, which is the target of sending signal,
868 		 * to the irq_work. The current task may change when queued
869 		 * irq works get executed.
870 		 */
871 		work->task = get_task_struct(current);
872 		work->sig = sig;
873 		work->type = type;
874 		irq_work_queue(&work->irq_work);
875 		return 0;
876 	}
877 
878 	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
879 }
880 
881 BPF_CALL_1(bpf_send_signal, u32, sig)
882 {
883 	return bpf_send_signal_common(sig, PIDTYPE_TGID);
884 }
885 
886 static const struct bpf_func_proto bpf_send_signal_proto = {
887 	.func		= bpf_send_signal,
888 	.gpl_only	= false,
889 	.ret_type	= RET_INTEGER,
890 	.arg1_type	= ARG_ANYTHING,
891 };
892 
893 BPF_CALL_1(bpf_send_signal_thread, u32, sig)
894 {
895 	return bpf_send_signal_common(sig, PIDTYPE_PID);
896 }
897 
898 static const struct bpf_func_proto bpf_send_signal_thread_proto = {
899 	.func		= bpf_send_signal_thread,
900 	.gpl_only	= false,
901 	.ret_type	= RET_INTEGER,
902 	.arg1_type	= ARG_ANYTHING,
903 };
904 
905 BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
906 {
907 	long len;
908 	char *p;
909 
910 	if (!sz)
911 		return 0;
912 
913 	p = d_path(path, buf, sz);
914 	if (IS_ERR(p)) {
915 		len = PTR_ERR(p);
916 	} else {
917 		len = buf + sz - p;
918 		memmove(buf, p, len);
919 	}
920 
921 	return len;
922 }
923 
924 BTF_SET_START(btf_allowlist_d_path)
925 #ifdef CONFIG_SECURITY
926 BTF_ID(func, security_file_permission)
927 BTF_ID(func, security_inode_getattr)
928 BTF_ID(func, security_file_open)
929 #endif
930 #ifdef CONFIG_SECURITY_PATH
931 BTF_ID(func, security_path_truncate)
932 #endif
933 BTF_ID(func, vfs_truncate)
934 BTF_ID(func, vfs_fallocate)
935 BTF_ID(func, dentry_open)
936 BTF_ID(func, vfs_getattr)
937 BTF_ID(func, filp_close)
938 BTF_SET_END(btf_allowlist_d_path)
939 
940 static bool bpf_d_path_allowed(const struct bpf_prog *prog)
941 {
942 	if (prog->type == BPF_PROG_TYPE_TRACING &&
943 	    prog->expected_attach_type == BPF_TRACE_ITER)
944 		return true;
945 
946 	if (prog->type == BPF_PROG_TYPE_LSM)
947 		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
948 
949 	return btf_id_set_contains(&btf_allowlist_d_path,
950 				   prog->aux->attach_btf_id);
951 }
952 
953 BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
954 
955 static const struct bpf_func_proto bpf_d_path_proto = {
956 	.func		= bpf_d_path,
957 	.gpl_only	= false,
958 	.ret_type	= RET_INTEGER,
959 	.arg1_type	= ARG_PTR_TO_BTF_ID,
960 	.arg1_btf_id	= &bpf_d_path_btf_ids[0],
961 	.arg2_type	= ARG_PTR_TO_MEM,
962 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
963 	.allowed	= bpf_d_path_allowed,
964 };
965 
966 #define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
967 			 BTF_F_PTR_RAW | BTF_F_ZERO)
968 
969 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
970 				  u64 flags, const struct btf **btf,
971 				  s32 *btf_id)
972 {
973 	const struct btf_type *t;
974 
975 	if (unlikely(flags & ~(BTF_F_ALL)))
976 		return -EINVAL;
977 
978 	if (btf_ptr_size != sizeof(struct btf_ptr))
979 		return -EINVAL;
980 
981 	*btf = bpf_get_btf_vmlinux();
982 
983 	if (IS_ERR_OR_NULL(*btf))
984 		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
985 
986 	if (ptr->type_id > 0)
987 		*btf_id = ptr->type_id;
988 	else
989 		return -EINVAL;
990 
991 	if (*btf_id > 0)
992 		t = btf_type_by_id(*btf, *btf_id);
993 	if (*btf_id <= 0 || !t)
994 		return -ENOENT;
995 
996 	return 0;
997 }
998 
999 BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
1000 	   u32, btf_ptr_size, u64, flags)
1001 {
1002 	const struct btf *btf;
1003 	s32 btf_id;
1004 	int ret;
1005 
1006 	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
1007 	if (ret)
1008 		return ret;
1009 
1010 	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
1011 				      flags);
1012 }
1013 
1014 const struct bpf_func_proto bpf_snprintf_btf_proto = {
1015 	.func		= bpf_snprintf_btf,
1016 	.gpl_only	= false,
1017 	.ret_type	= RET_INTEGER,
1018 	.arg1_type	= ARG_PTR_TO_MEM,
1019 	.arg2_type	= ARG_CONST_SIZE,
1020 	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1021 	.arg4_type	= ARG_CONST_SIZE,
1022 	.arg5_type	= ARG_ANYTHING,
1023 };
1024 
1025 BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
1026 {
1027 	/* This helper call is inlined by verifier. */
1028 	return ((u64 *)ctx)[-2];
1029 }
1030 
1031 static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
1032 	.func		= bpf_get_func_ip_tracing,
1033 	.gpl_only	= true,
1034 	.ret_type	= RET_INTEGER,
1035 	.arg1_type	= ARG_PTR_TO_CTX,
1036 };
1037 
1038 #ifdef CONFIG_X86_KERNEL_IBT
1039 static unsigned long get_entry_ip(unsigned long fentry_ip)
1040 {
1041 	u32 instr;
1042 
1043 	/* Being extra safe in here in case entry ip is on the page-edge. */
1044 	if (get_kernel_nofault(instr, (u32 *) fentry_ip - 1))
1045 		return fentry_ip;
1046 	if (is_endbr(instr))
1047 		fentry_ip -= ENDBR_INSN_SIZE;
1048 	return fentry_ip;
1049 }
1050 #else
1051 #define get_entry_ip(fentry_ip) fentry_ip
1052 #endif
1053 
1054 BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
1055 {
1056 	struct kprobe *kp = kprobe_running();
1057 
1058 	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
1059 		return 0;
1060 
1061 	return get_entry_ip((uintptr_t)kp->addr);
1062 }
1063 
1064 static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
1065 	.func		= bpf_get_func_ip_kprobe,
1066 	.gpl_only	= true,
1067 	.ret_type	= RET_INTEGER,
1068 	.arg1_type	= ARG_PTR_TO_CTX,
1069 };
1070 
1071 BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
1072 {
1073 	return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
1074 }
1075 
1076 static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
1077 	.func		= bpf_get_func_ip_kprobe_multi,
1078 	.gpl_only	= false,
1079 	.ret_type	= RET_INTEGER,
1080 	.arg1_type	= ARG_PTR_TO_CTX,
1081 };
1082 
1083 BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
1084 {
1085 	return bpf_kprobe_multi_cookie(current->bpf_ctx);
1086 }
1087 
1088 static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
1089 	.func		= bpf_get_attach_cookie_kprobe_multi,
1090 	.gpl_only	= false,
1091 	.ret_type	= RET_INTEGER,
1092 	.arg1_type	= ARG_PTR_TO_CTX,
1093 };
1094 
1095 BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
1096 {
1097 	struct bpf_trace_run_ctx *run_ctx;
1098 
1099 	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1100 	return run_ctx->bpf_cookie;
1101 }
1102 
1103 static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
1104 	.func		= bpf_get_attach_cookie_trace,
1105 	.gpl_only	= false,
1106 	.ret_type	= RET_INTEGER,
1107 	.arg1_type	= ARG_PTR_TO_CTX,
1108 };
1109 
1110 BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
1111 {
1112 	return ctx->event->bpf_cookie;
1113 }
1114 
1115 static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
1116 	.func		= bpf_get_attach_cookie_pe,
1117 	.gpl_only	= false,
1118 	.ret_type	= RET_INTEGER,
1119 	.arg1_type	= ARG_PTR_TO_CTX,
1120 };
1121 
1122 BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
1123 {
1124 	struct bpf_trace_run_ctx *run_ctx;
1125 
1126 	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1127 	return run_ctx->bpf_cookie;
1128 }
1129 
1130 static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
1131 	.func		= bpf_get_attach_cookie_tracing,
1132 	.gpl_only	= false,
1133 	.ret_type	= RET_INTEGER,
1134 	.arg1_type	= ARG_PTR_TO_CTX,
1135 };
1136 
1137 BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
1138 {
1139 #ifndef CONFIG_X86
1140 	return -ENOENT;
1141 #else
1142 	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1143 	u32 entry_cnt = size / br_entry_size;
1144 
1145 	entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);
1146 
1147 	if (unlikely(flags))
1148 		return -EINVAL;
1149 
1150 	if (!entry_cnt)
1151 		return -ENOENT;
1152 
1153 	return entry_cnt * br_entry_size;
1154 #endif
1155 }
1156 
1157 static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
1158 	.func		= bpf_get_branch_snapshot,
1159 	.gpl_only	= true,
1160 	.ret_type	= RET_INTEGER,
1161 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
1162 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1163 };
1164 
1165 BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
1166 {
1167 	/* This helper call is inlined by verifier. */
1168 	u64 nr_args = ((u64 *)ctx)[-1];
1169 
1170 	if ((u64) n >= nr_args)
1171 		return -EINVAL;
1172 	*value = ((u64 *)ctx)[n];
1173 	return 0;
1174 }
1175 
1176 static const struct bpf_func_proto bpf_get_func_arg_proto = {
1177 	.func		= get_func_arg,
1178 	.ret_type	= RET_INTEGER,
1179 	.arg1_type	= ARG_PTR_TO_CTX,
1180 	.arg2_type	= ARG_ANYTHING,
1181 	.arg3_type	= ARG_PTR_TO_LONG,
1182 };
1183 
1184 BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
1185 {
1186 	/* This helper call is inlined by verifier. */
1187 	u64 nr_args = ((u64 *)ctx)[-1];
1188 
1189 	*value = ((u64 *)ctx)[nr_args];
1190 	return 0;
1191 }
1192 
1193 static const struct bpf_func_proto bpf_get_func_ret_proto = {
1194 	.func		= get_func_ret,
1195 	.ret_type	= RET_INTEGER,
1196 	.arg1_type	= ARG_PTR_TO_CTX,
1197 	.arg2_type	= ARG_PTR_TO_LONG,
1198 };
1199 
1200 BPF_CALL_1(get_func_arg_cnt, void *, ctx)
1201 {
1202 	/* This helper call is inlined by verifier. */
1203 	return ((u64 *)ctx)[-1];
1204 }
1205 
1206 static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
1207 	.func		= get_func_arg_cnt,
1208 	.ret_type	= RET_INTEGER,
1209 	.arg1_type	= ARG_PTR_TO_CTX,
1210 };
1211 
1212 #ifdef CONFIG_KEYS
1213 __diag_push();
1214 __diag_ignore_all("-Wmissing-prototypes",
1215 		  "kfuncs which will be used in BPF programs");
1216 
1217 /**
1218  * bpf_lookup_user_key - lookup a key by its serial
1219  * @serial: key handle serial number
1220  * @flags: lookup-specific flags
1221  *
1222  * Search a key with a given *serial* and the provided *flags*.
1223  * If found, increment the reference count of the key by one, and
1224  * return it in the bpf_key structure.
1225  *
1226  * The bpf_key structure must be passed to bpf_key_put() when done
1227  * with it, so that the key reference count is decremented and the
1228  * bpf_key structure is freed.
1229  *
1230  * Permission checks are deferred to the time the key is used by
1231  * one of the available key-specific kfuncs.
1232  *
1233  * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
1234  * special keyring (e.g. session keyring), if it doesn't yet exist.
1235  * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
1236  * for the key construction, and to retrieve uninstantiated keys (keys
1237  * without data attached to them).
1238  *
1239  * Return: a bpf_key pointer with a valid key pointer if the key is found, a
1240  *         NULL pointer otherwise.
1241  */
1242 struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
1243 {
1244 	key_ref_t key_ref;
1245 	struct bpf_key *bkey;
1246 
1247 	if (flags & ~KEY_LOOKUP_ALL)
1248 		return NULL;
1249 
1250 	/*
1251 	 * Permission check is deferred until the key is used, as the
1252 	 * intent of the caller is unknown here.
1253 	 */
1254 	key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
1255 	if (IS_ERR(key_ref))
1256 		return NULL;
1257 
1258 	bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
1259 	if (!bkey) {
1260 		key_put(key_ref_to_ptr(key_ref));
1261 		return NULL;
1262 	}
1263 
1264 	bkey->key = key_ref_to_ptr(key_ref);
1265 	bkey->has_ref = true;
1266 
1267 	return bkey;
1268 }
1269 
1270 /**
1271  * bpf_lookup_system_key - lookup a key by a system-defined ID
1272  * @id: key ID
1273  *
1274  * Obtain a bpf_key structure with a key pointer set to the passed key ID.
1275  * The key pointer is marked as invalid, to prevent bpf_key_put() from
1276  * attempting to decrement the key reference count on that pointer. The key
1277  * pointer set in such way is currently understood only by
1278  * verify_pkcs7_signature().
1279  *
1280  * Set *id* to one of the values defined in include/linux/verification.h:
1281  * 0 for the primary keyring (immutable keyring of system keys);
1282  * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
1283  * (where keys can be added only if they are vouched for by existing keys
1284  * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
1285  * keyring (primarily used by the integrity subsystem to verify a kexec'ed
1286  * kerned image and, possibly, the initramfs signature).
1287  *
1288  * Return: a bpf_key pointer with an invalid key pointer set from the
1289  *         pre-determined ID on success, a NULL pointer otherwise
1290  */
1291 struct bpf_key *bpf_lookup_system_key(u64 id)
1292 {
1293 	struct bpf_key *bkey;
1294 
1295 	if (system_keyring_id_check(id) < 0)
1296 		return NULL;
1297 
1298 	bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
1299 	if (!bkey)
1300 		return NULL;
1301 
1302 	bkey->key = (struct key *)(unsigned long)id;
1303 	bkey->has_ref = false;
1304 
1305 	return bkey;
1306 }
1307 
1308 /**
1309  * bpf_key_put - decrement key reference count if key is valid and free bpf_key
1310  * @bkey: bpf_key structure
1311  *
1312  * Decrement the reference count of the key inside *bkey*, if the pointer
1313  * is valid, and free *bkey*.
1314  */
1315 void bpf_key_put(struct bpf_key *bkey)
1316 {
1317 	if (bkey->has_ref)
1318 		key_put(bkey->key);
1319 
1320 	kfree(bkey);
1321 }
1322 
1323 #ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1324 /**
1325  * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
1326  * @data_ptr: data to verify
1327  * @sig_ptr: signature of the data
1328  * @trusted_keyring: keyring with keys trusted for signature verification
1329  *
1330  * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr*
1331  * with keys in a keyring referenced by *trusted_keyring*.
1332  *
1333  * Return: 0 on success, a negative value on error.
1334  */
1335 int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
1336 			       struct bpf_dynptr_kern *sig_ptr,
1337 			       struct bpf_key *trusted_keyring)
1338 {
1339 	int ret;
1340 
1341 	if (trusted_keyring->has_ref) {
1342 		/*
1343 		 * Do the permission check deferred in bpf_lookup_user_key().
1344 		 * See bpf_lookup_user_key() for more details.
1345 		 *
1346 		 * A call to key_task_permission() here would be redundant, as
1347 		 * it is already done by keyring_search() called by
1348 		 * find_asymmetric_key().
1349 		 */
1350 		ret = key_validate(trusted_keyring->key);
1351 		if (ret < 0)
1352 			return ret;
1353 	}
1354 
1355 	return verify_pkcs7_signature(data_ptr->data,
1356 				      bpf_dynptr_get_size(data_ptr),
1357 				      sig_ptr->data,
1358 				      bpf_dynptr_get_size(sig_ptr),
1359 				      trusted_keyring->key,
1360 				      VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
1361 				      NULL);
1362 }
1363 #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
1364 
1365 __diag_pop();
1366 
1367 BTF_SET8_START(key_sig_kfunc_set)
1368 BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
1369 BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
1370 BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
1371 #ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1372 BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
1373 #endif
1374 BTF_SET8_END(key_sig_kfunc_set)
1375 
1376 static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
1377 	.owner = THIS_MODULE,
1378 	.set = &key_sig_kfunc_set,
1379 };
1380 
1381 static int __init bpf_key_sig_kfuncs_init(void)
1382 {
1383 	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
1384 					 &bpf_key_sig_kfunc_set);
1385 }
1386 
1387 late_initcall(bpf_key_sig_kfuncs_init);
1388 #endif /* CONFIG_KEYS */
1389 
1390 static const struct bpf_func_proto *
1391 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1392 {
1393 	switch (func_id) {
1394 	case BPF_FUNC_map_lookup_elem:
1395 		return &bpf_map_lookup_elem_proto;
1396 	case BPF_FUNC_map_update_elem:
1397 		return &bpf_map_update_elem_proto;
1398 	case BPF_FUNC_map_delete_elem:
1399 		return &bpf_map_delete_elem_proto;
1400 	case BPF_FUNC_map_push_elem:
1401 		return &bpf_map_push_elem_proto;
1402 	case BPF_FUNC_map_pop_elem:
1403 		return &bpf_map_pop_elem_proto;
1404 	case BPF_FUNC_map_peek_elem:
1405 		return &bpf_map_peek_elem_proto;
1406 	case BPF_FUNC_map_lookup_percpu_elem:
1407 		return &bpf_map_lookup_percpu_elem_proto;
1408 	case BPF_FUNC_ktime_get_ns:
1409 		return &bpf_ktime_get_ns_proto;
1410 	case BPF_FUNC_ktime_get_boot_ns:
1411 		return &bpf_ktime_get_boot_ns_proto;
1412 	case BPF_FUNC_tail_call:
1413 		return &bpf_tail_call_proto;
1414 	case BPF_FUNC_get_current_pid_tgid:
1415 		return &bpf_get_current_pid_tgid_proto;
1416 	case BPF_FUNC_get_current_task:
1417 		return &bpf_get_current_task_proto;
1418 	case BPF_FUNC_get_current_task_btf:
1419 		return &bpf_get_current_task_btf_proto;
1420 	case BPF_FUNC_task_pt_regs:
1421 		return &bpf_task_pt_regs_proto;
1422 	case BPF_FUNC_get_current_uid_gid:
1423 		return &bpf_get_current_uid_gid_proto;
1424 	case BPF_FUNC_get_current_comm:
1425 		return &bpf_get_current_comm_proto;
1426 	case BPF_FUNC_trace_printk:
1427 		return bpf_get_trace_printk_proto();
1428 	case BPF_FUNC_get_smp_processor_id:
1429 		return &bpf_get_smp_processor_id_proto;
1430 	case BPF_FUNC_get_numa_node_id:
1431 		return &bpf_get_numa_node_id_proto;
1432 	case BPF_FUNC_perf_event_read:
1433 		return &bpf_perf_event_read_proto;
1434 	case BPF_FUNC_current_task_under_cgroup:
1435 		return &bpf_current_task_under_cgroup_proto;
1436 	case BPF_FUNC_get_prandom_u32:
1437 		return &bpf_get_prandom_u32_proto;
1438 	case BPF_FUNC_probe_write_user:
1439 		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
1440 		       NULL : bpf_get_probe_write_proto();
1441 	case BPF_FUNC_probe_read_user:
1442 		return &bpf_probe_read_user_proto;
1443 	case BPF_FUNC_probe_read_kernel:
1444 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1445 		       NULL : &bpf_probe_read_kernel_proto;
1446 	case BPF_FUNC_probe_read_user_str:
1447 		return &bpf_probe_read_user_str_proto;
1448 	case BPF_FUNC_probe_read_kernel_str:
1449 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1450 		       NULL : &bpf_probe_read_kernel_str_proto;
1451 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1452 	case BPF_FUNC_probe_read:
1453 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1454 		       NULL : &bpf_probe_read_compat_proto;
1455 	case BPF_FUNC_probe_read_str:
1456 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1457 		       NULL : &bpf_probe_read_compat_str_proto;
1458 #endif
1459 #ifdef CONFIG_CGROUPS
1460 	case BPF_FUNC_get_current_cgroup_id:
1461 		return &bpf_get_current_cgroup_id_proto;
1462 	case BPF_FUNC_get_current_ancestor_cgroup_id:
1463 		return &bpf_get_current_ancestor_cgroup_id_proto;
1464 	case BPF_FUNC_cgrp_storage_get:
1465 		return &bpf_cgrp_storage_get_proto;
1466 	case BPF_FUNC_cgrp_storage_delete:
1467 		return &bpf_cgrp_storage_delete_proto;
1468 #endif
1469 	case BPF_FUNC_send_signal:
1470 		return &bpf_send_signal_proto;
1471 	case BPF_FUNC_send_signal_thread:
1472 		return &bpf_send_signal_thread_proto;
1473 	case BPF_FUNC_perf_event_read_value:
1474 		return &bpf_perf_event_read_value_proto;
1475 	case BPF_FUNC_get_ns_current_pid_tgid:
1476 		return &bpf_get_ns_current_pid_tgid_proto;
1477 	case BPF_FUNC_ringbuf_output:
1478 		return &bpf_ringbuf_output_proto;
1479 	case BPF_FUNC_ringbuf_reserve:
1480 		return &bpf_ringbuf_reserve_proto;
1481 	case BPF_FUNC_ringbuf_submit:
1482 		return &bpf_ringbuf_submit_proto;
1483 	case BPF_FUNC_ringbuf_discard:
1484 		return &bpf_ringbuf_discard_proto;
1485 	case BPF_FUNC_ringbuf_query:
1486 		return &bpf_ringbuf_query_proto;
1487 	case BPF_FUNC_jiffies64:
1488 		return &bpf_jiffies64_proto;
1489 	case BPF_FUNC_get_task_stack:
1490 		return &bpf_get_task_stack_proto;
1491 	case BPF_FUNC_copy_from_user:
1492 		return &bpf_copy_from_user_proto;
1493 	case BPF_FUNC_copy_from_user_task:
1494 		return &bpf_copy_from_user_task_proto;
1495 	case BPF_FUNC_snprintf_btf:
1496 		return &bpf_snprintf_btf_proto;
1497 	case BPF_FUNC_per_cpu_ptr:
1498 		return &bpf_per_cpu_ptr_proto;
1499 	case BPF_FUNC_this_cpu_ptr:
1500 		return &bpf_this_cpu_ptr_proto;
1501 	case BPF_FUNC_task_storage_get:
1502 		if (bpf_prog_check_recur(prog))
1503 			return &bpf_task_storage_get_recur_proto;
1504 		return &bpf_task_storage_get_proto;
1505 	case BPF_FUNC_task_storage_delete:
1506 		if (bpf_prog_check_recur(prog))
1507 			return &bpf_task_storage_delete_recur_proto;
1508 		return &bpf_task_storage_delete_proto;
1509 	case BPF_FUNC_for_each_map_elem:
1510 		return &bpf_for_each_map_elem_proto;
1511 	case BPF_FUNC_snprintf:
1512 		return &bpf_snprintf_proto;
1513 	case BPF_FUNC_get_func_ip:
1514 		return &bpf_get_func_ip_proto_tracing;
1515 	case BPF_FUNC_get_branch_snapshot:
1516 		return &bpf_get_branch_snapshot_proto;
1517 	case BPF_FUNC_find_vma:
1518 		return &bpf_find_vma_proto;
1519 	case BPF_FUNC_trace_vprintk:
1520 		return bpf_get_trace_vprintk_proto();
1521 	default:
1522 		return bpf_base_func_proto(func_id);
1523 	}
1524 }
1525 
1526 static const struct bpf_func_proto *
1527 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1528 {
1529 	switch (func_id) {
1530 	case BPF_FUNC_perf_event_output:
1531 		return &bpf_perf_event_output_proto;
1532 	case BPF_FUNC_get_stackid:
1533 		return &bpf_get_stackid_proto;
1534 	case BPF_FUNC_get_stack:
1535 		return &bpf_get_stack_proto;
1536 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
1537 	case BPF_FUNC_override_return:
1538 		return &bpf_override_return_proto;
1539 #endif
1540 	case BPF_FUNC_get_func_ip:
1541 		return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ?
1542 			&bpf_get_func_ip_proto_kprobe_multi :
1543 			&bpf_get_func_ip_proto_kprobe;
1544 	case BPF_FUNC_get_attach_cookie:
1545 		return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ?
1546 			&bpf_get_attach_cookie_proto_kmulti :
1547 			&bpf_get_attach_cookie_proto_trace;
1548 	default:
1549 		return bpf_tracing_func_proto(func_id, prog);
1550 	}
1551 }
1552 
1553 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
1554 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1555 					const struct bpf_prog *prog,
1556 					struct bpf_insn_access_aux *info)
1557 {
1558 	if (off < 0 || off >= sizeof(struct pt_regs))
1559 		return false;
1560 	if (type != BPF_READ)
1561 		return false;
1562 	if (off % size != 0)
1563 		return false;
1564 	/*
1565 	 * Assertion for 32 bit to make sure last 8 byte access
1566 	 * (BPF_DW) to the last 4 byte member is disallowed.
1567 	 */
1568 	if (off + size > sizeof(struct pt_regs))
1569 		return false;
1570 
1571 	return true;
1572 }
1573 
1574 const struct bpf_verifier_ops kprobe_verifier_ops = {
1575 	.get_func_proto  = kprobe_prog_func_proto,
1576 	.is_valid_access = kprobe_prog_is_valid_access,
1577 };
1578 
1579 const struct bpf_prog_ops kprobe_prog_ops = {
1580 };
1581 
1582 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1583 	   u64, flags, void *, data, u64, size)
1584 {
1585 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1586 
1587 	/*
1588 	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
1589 	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
1590 	 * from there and call the same bpf_perf_event_output() helper inline.
1591 	 */
1592 	return ____bpf_perf_event_output(regs, map, flags, data, size);
1593 }
1594 
1595 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1596 	.func		= bpf_perf_event_output_tp,
1597 	.gpl_only	= true,
1598 	.ret_type	= RET_INTEGER,
1599 	.arg1_type	= ARG_PTR_TO_CTX,
1600 	.arg2_type	= ARG_CONST_MAP_PTR,
1601 	.arg3_type	= ARG_ANYTHING,
1602 	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1603 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1604 };
1605 
1606 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1607 	   u64, flags)
1608 {
1609 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1610 
1611 	/*
1612 	 * Same comment as in bpf_perf_event_output_tp(), only that this time
1613 	 * the other helper's function body cannot be inlined due to being
1614 	 * external, thus we need to call raw helper function.
1615 	 */
1616 	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1617 			       flags, 0, 0);
1618 }
1619 
1620 static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1621 	.func		= bpf_get_stackid_tp,
1622 	.gpl_only	= true,
1623 	.ret_type	= RET_INTEGER,
1624 	.arg1_type	= ARG_PTR_TO_CTX,
1625 	.arg2_type	= ARG_CONST_MAP_PTR,
1626 	.arg3_type	= ARG_ANYTHING,
1627 };
1628 
1629 BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1630 	   u64, flags)
1631 {
1632 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1633 
1634 	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1635 			     (unsigned long) size, flags, 0);
1636 }
1637 
1638 static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1639 	.func		= bpf_get_stack_tp,
1640 	.gpl_only	= true,
1641 	.ret_type	= RET_INTEGER,
1642 	.arg1_type	= ARG_PTR_TO_CTX,
1643 	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
1644 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1645 	.arg4_type	= ARG_ANYTHING,
1646 };
1647 
1648 static const struct bpf_func_proto *
1649 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1650 {
1651 	switch (func_id) {
1652 	case BPF_FUNC_perf_event_output:
1653 		return &bpf_perf_event_output_proto_tp;
1654 	case BPF_FUNC_get_stackid:
1655 		return &bpf_get_stackid_proto_tp;
1656 	case BPF_FUNC_get_stack:
1657 		return &bpf_get_stack_proto_tp;
1658 	case BPF_FUNC_get_attach_cookie:
1659 		return &bpf_get_attach_cookie_proto_trace;
1660 	default:
1661 		return bpf_tracing_func_proto(func_id, prog);
1662 	}
1663 }
1664 
1665 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1666 				    const struct bpf_prog *prog,
1667 				    struct bpf_insn_access_aux *info)
1668 {
1669 	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1670 		return false;
1671 	if (type != BPF_READ)
1672 		return false;
1673 	if (off % size != 0)
1674 		return false;
1675 
1676 	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1677 	return true;
1678 }
1679 
1680 const struct bpf_verifier_ops tracepoint_verifier_ops = {
1681 	.get_func_proto  = tp_prog_func_proto,
1682 	.is_valid_access = tp_prog_is_valid_access,
1683 };
1684 
1685 const struct bpf_prog_ops tracepoint_prog_ops = {
1686 };
1687 
1688 BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1689 	   struct bpf_perf_event_value *, buf, u32, size)
1690 {
1691 	int err = -EINVAL;
1692 
1693 	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1694 		goto clear;
1695 	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1696 				    &buf->running);
1697 	if (unlikely(err))
1698 		goto clear;
1699 	return 0;
1700 clear:
1701 	memset(buf, 0, size);
1702 	return err;
1703 }
1704 
1705 static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1706          .func           = bpf_perf_prog_read_value,
1707          .gpl_only       = true,
1708          .ret_type       = RET_INTEGER,
1709          .arg1_type      = ARG_PTR_TO_CTX,
1710          .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
1711          .arg3_type      = ARG_CONST_SIZE,
1712 };
1713 
1714 BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1715 	   void *, buf, u32, size, u64, flags)
1716 {
1717 	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1718 	struct perf_branch_stack *br_stack = ctx->data->br_stack;
1719 	u32 to_copy;
1720 
1721 	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1722 		return -EINVAL;
1723 
1724 	if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK)))
1725 		return -ENOENT;
1726 
1727 	if (unlikely(!br_stack))
1728 		return -ENOENT;
1729 
1730 	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1731 		return br_stack->nr * br_entry_size;
1732 
1733 	if (!buf || (size % br_entry_size != 0))
1734 		return -EINVAL;
1735 
1736 	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1737 	memcpy(buf, br_stack->entries, to_copy);
1738 
1739 	return to_copy;
1740 }
1741 
1742 static const struct bpf_func_proto bpf_read_branch_records_proto = {
1743 	.func           = bpf_read_branch_records,
1744 	.gpl_only       = true,
1745 	.ret_type       = RET_INTEGER,
1746 	.arg1_type      = ARG_PTR_TO_CTX,
1747 	.arg2_type      = ARG_PTR_TO_MEM_OR_NULL,
1748 	.arg3_type      = ARG_CONST_SIZE_OR_ZERO,
1749 	.arg4_type      = ARG_ANYTHING,
1750 };
1751 
1752 static const struct bpf_func_proto *
1753 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1754 {
1755 	switch (func_id) {
1756 	case BPF_FUNC_perf_event_output:
1757 		return &bpf_perf_event_output_proto_tp;
1758 	case BPF_FUNC_get_stackid:
1759 		return &bpf_get_stackid_proto_pe;
1760 	case BPF_FUNC_get_stack:
1761 		return &bpf_get_stack_proto_pe;
1762 	case BPF_FUNC_perf_prog_read_value:
1763 		return &bpf_perf_prog_read_value_proto;
1764 	case BPF_FUNC_read_branch_records:
1765 		return &bpf_read_branch_records_proto;
1766 	case BPF_FUNC_get_attach_cookie:
1767 		return &bpf_get_attach_cookie_proto_pe;
1768 	default:
1769 		return bpf_tracing_func_proto(func_id, prog);
1770 	}
1771 }
1772 
1773 /*
1774  * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1775  * to avoid potential recursive reuse issue when/if tracepoints are added
1776  * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1777  *
1778  * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1779  * in normal, irq, and nmi context.
1780  */
1781 struct bpf_raw_tp_regs {
1782 	struct pt_regs regs[3];
1783 };
1784 static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1785 static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1786 static struct pt_regs *get_bpf_raw_tp_regs(void)
1787 {
1788 	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1789 	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1790 
1791 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1792 		this_cpu_dec(bpf_raw_tp_nest_level);
1793 		return ERR_PTR(-EBUSY);
1794 	}
1795 
1796 	return &tp_regs->regs[nest_level - 1];
1797 }
1798 
1799 static void put_bpf_raw_tp_regs(void)
1800 {
1801 	this_cpu_dec(bpf_raw_tp_nest_level);
1802 }
1803 
1804 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1805 	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
1806 {
1807 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1808 	int ret;
1809 
1810 	if (IS_ERR(regs))
1811 		return PTR_ERR(regs);
1812 
1813 	perf_fetch_caller_regs(regs);
1814 	ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1815 
1816 	put_bpf_raw_tp_regs();
1817 	return ret;
1818 }
1819 
1820 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1821 	.func		= bpf_perf_event_output_raw_tp,
1822 	.gpl_only	= true,
1823 	.ret_type	= RET_INTEGER,
1824 	.arg1_type	= ARG_PTR_TO_CTX,
1825 	.arg2_type	= ARG_CONST_MAP_PTR,
1826 	.arg3_type	= ARG_ANYTHING,
1827 	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1828 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1829 };
1830 
1831 extern const struct bpf_func_proto bpf_skb_output_proto;
1832 extern const struct bpf_func_proto bpf_xdp_output_proto;
1833 extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto;
1834 
1835 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1836 	   struct bpf_map *, map, u64, flags)
1837 {
1838 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1839 	int ret;
1840 
1841 	if (IS_ERR(regs))
1842 		return PTR_ERR(regs);
1843 
1844 	perf_fetch_caller_regs(regs);
1845 	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1846 	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1847 			      flags, 0, 0);
1848 	put_bpf_raw_tp_regs();
1849 	return ret;
1850 }
1851 
1852 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1853 	.func		= bpf_get_stackid_raw_tp,
1854 	.gpl_only	= true,
1855 	.ret_type	= RET_INTEGER,
1856 	.arg1_type	= ARG_PTR_TO_CTX,
1857 	.arg2_type	= ARG_CONST_MAP_PTR,
1858 	.arg3_type	= ARG_ANYTHING,
1859 };
1860 
1861 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1862 	   void *, buf, u32, size, u64, flags)
1863 {
1864 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1865 	int ret;
1866 
1867 	if (IS_ERR(regs))
1868 		return PTR_ERR(regs);
1869 
1870 	perf_fetch_caller_regs(regs);
1871 	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1872 			    (unsigned long) size, flags, 0);
1873 	put_bpf_raw_tp_regs();
1874 	return ret;
1875 }
1876 
1877 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1878 	.func		= bpf_get_stack_raw_tp,
1879 	.gpl_only	= true,
1880 	.ret_type	= RET_INTEGER,
1881 	.arg1_type	= ARG_PTR_TO_CTX,
1882 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1883 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1884 	.arg4_type	= ARG_ANYTHING,
1885 };
1886 
1887 static const struct bpf_func_proto *
1888 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1889 {
1890 	switch (func_id) {
1891 	case BPF_FUNC_perf_event_output:
1892 		return &bpf_perf_event_output_proto_raw_tp;
1893 	case BPF_FUNC_get_stackid:
1894 		return &bpf_get_stackid_proto_raw_tp;
1895 	case BPF_FUNC_get_stack:
1896 		return &bpf_get_stack_proto_raw_tp;
1897 	default:
1898 		return bpf_tracing_func_proto(func_id, prog);
1899 	}
1900 }
1901 
1902 const struct bpf_func_proto *
1903 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1904 {
1905 	const struct bpf_func_proto *fn;
1906 
1907 	switch (func_id) {
1908 #ifdef CONFIG_NET
1909 	case BPF_FUNC_skb_output:
1910 		return &bpf_skb_output_proto;
1911 	case BPF_FUNC_xdp_output:
1912 		return &bpf_xdp_output_proto;
1913 	case BPF_FUNC_skc_to_tcp6_sock:
1914 		return &bpf_skc_to_tcp6_sock_proto;
1915 	case BPF_FUNC_skc_to_tcp_sock:
1916 		return &bpf_skc_to_tcp_sock_proto;
1917 	case BPF_FUNC_skc_to_tcp_timewait_sock:
1918 		return &bpf_skc_to_tcp_timewait_sock_proto;
1919 	case BPF_FUNC_skc_to_tcp_request_sock:
1920 		return &bpf_skc_to_tcp_request_sock_proto;
1921 	case BPF_FUNC_skc_to_udp6_sock:
1922 		return &bpf_skc_to_udp6_sock_proto;
1923 	case BPF_FUNC_skc_to_unix_sock:
1924 		return &bpf_skc_to_unix_sock_proto;
1925 	case BPF_FUNC_skc_to_mptcp_sock:
1926 		return &bpf_skc_to_mptcp_sock_proto;
1927 	case BPF_FUNC_sk_storage_get:
1928 		return &bpf_sk_storage_get_tracing_proto;
1929 	case BPF_FUNC_sk_storage_delete:
1930 		return &bpf_sk_storage_delete_tracing_proto;
1931 	case BPF_FUNC_sock_from_file:
1932 		return &bpf_sock_from_file_proto;
1933 	case BPF_FUNC_get_socket_cookie:
1934 		return &bpf_get_socket_ptr_cookie_proto;
1935 	case BPF_FUNC_xdp_get_buff_len:
1936 		return &bpf_xdp_get_buff_len_trace_proto;
1937 #endif
1938 	case BPF_FUNC_seq_printf:
1939 		return prog->expected_attach_type == BPF_TRACE_ITER ?
1940 		       &bpf_seq_printf_proto :
1941 		       NULL;
1942 	case BPF_FUNC_seq_write:
1943 		return prog->expected_attach_type == BPF_TRACE_ITER ?
1944 		       &bpf_seq_write_proto :
1945 		       NULL;
1946 	case BPF_FUNC_seq_printf_btf:
1947 		return prog->expected_attach_type == BPF_TRACE_ITER ?
1948 		       &bpf_seq_printf_btf_proto :
1949 		       NULL;
1950 	case BPF_FUNC_d_path:
1951 		return &bpf_d_path_proto;
1952 	case BPF_FUNC_get_func_arg:
1953 		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
1954 	case BPF_FUNC_get_func_ret:
1955 		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
1956 	case BPF_FUNC_get_func_arg_cnt:
1957 		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
1958 	case BPF_FUNC_get_attach_cookie:
1959 		return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL;
1960 	default:
1961 		fn = raw_tp_prog_func_proto(func_id, prog);
1962 		if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
1963 			fn = bpf_iter_get_func_proto(func_id, prog);
1964 		return fn;
1965 	}
1966 }
1967 
1968 static bool raw_tp_prog_is_valid_access(int off, int size,
1969 					enum bpf_access_type type,
1970 					const struct bpf_prog *prog,
1971 					struct bpf_insn_access_aux *info)
1972 {
1973 	return bpf_tracing_ctx_access(off, size, type);
1974 }
1975 
1976 static bool tracing_prog_is_valid_access(int off, int size,
1977 					 enum bpf_access_type type,
1978 					 const struct bpf_prog *prog,
1979 					 struct bpf_insn_access_aux *info)
1980 {
1981 	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
1982 }
1983 
1984 int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
1985 				     const union bpf_attr *kattr,
1986 				     union bpf_attr __user *uattr)
1987 {
1988 	return -ENOTSUPP;
1989 }
1990 
1991 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
1992 	.get_func_proto  = raw_tp_prog_func_proto,
1993 	.is_valid_access = raw_tp_prog_is_valid_access,
1994 };
1995 
1996 const struct bpf_prog_ops raw_tracepoint_prog_ops = {
1997 #ifdef CONFIG_NET
1998 	.test_run = bpf_prog_test_run_raw_tp,
1999 #endif
2000 };
2001 
2002 const struct bpf_verifier_ops tracing_verifier_ops = {
2003 	.get_func_proto  = tracing_prog_func_proto,
2004 	.is_valid_access = tracing_prog_is_valid_access,
2005 };
2006 
2007 const struct bpf_prog_ops tracing_prog_ops = {
2008 	.test_run = bpf_prog_test_run_tracing,
2009 };
2010 
2011 static bool raw_tp_writable_prog_is_valid_access(int off, int size,
2012 						 enum bpf_access_type type,
2013 						 const struct bpf_prog *prog,
2014 						 struct bpf_insn_access_aux *info)
2015 {
2016 	if (off == 0) {
2017 		if (size != sizeof(u64) || type != BPF_READ)
2018 			return false;
2019 		info->reg_type = PTR_TO_TP_BUFFER;
2020 	}
2021 	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
2022 }
2023 
2024 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
2025 	.get_func_proto  = raw_tp_prog_func_proto,
2026 	.is_valid_access = raw_tp_writable_prog_is_valid_access,
2027 };
2028 
2029 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
2030 };
2031 
2032 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
2033 				    const struct bpf_prog *prog,
2034 				    struct bpf_insn_access_aux *info)
2035 {
2036 	const int size_u64 = sizeof(u64);
2037 
2038 	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
2039 		return false;
2040 	if (type != BPF_READ)
2041 		return false;
2042 	if (off % size != 0) {
2043 		if (sizeof(unsigned long) != 4)
2044 			return false;
2045 		if (size != 8)
2046 			return false;
2047 		if (off % size != 4)
2048 			return false;
2049 	}
2050 
2051 	switch (off) {
2052 	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
2053 		bpf_ctx_record_field_size(info, size_u64);
2054 		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2055 			return false;
2056 		break;
2057 	case bpf_ctx_range(struct bpf_perf_event_data, addr):
2058 		bpf_ctx_record_field_size(info, size_u64);
2059 		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2060 			return false;
2061 		break;
2062 	default:
2063 		if (size != sizeof(long))
2064 			return false;
2065 	}
2066 
2067 	return true;
2068 }
2069 
2070 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
2071 				      const struct bpf_insn *si,
2072 				      struct bpf_insn *insn_buf,
2073 				      struct bpf_prog *prog, u32 *target_size)
2074 {
2075 	struct bpf_insn *insn = insn_buf;
2076 
2077 	switch (si->off) {
2078 	case offsetof(struct bpf_perf_event_data, sample_period):
2079 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2080 						       data), si->dst_reg, si->src_reg,
2081 				      offsetof(struct bpf_perf_event_data_kern, data));
2082 		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2083 				      bpf_target_off(struct perf_sample_data, period, 8,
2084 						     target_size));
2085 		break;
2086 	case offsetof(struct bpf_perf_event_data, addr):
2087 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2088 						       data), si->dst_reg, si->src_reg,
2089 				      offsetof(struct bpf_perf_event_data_kern, data));
2090 		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2091 				      bpf_target_off(struct perf_sample_data, addr, 8,
2092 						     target_size));
2093 		break;
2094 	default:
2095 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2096 						       regs), si->dst_reg, si->src_reg,
2097 				      offsetof(struct bpf_perf_event_data_kern, regs));
2098 		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
2099 				      si->off);
2100 		break;
2101 	}
2102 
2103 	return insn - insn_buf;
2104 }
2105 
2106 const struct bpf_verifier_ops perf_event_verifier_ops = {
2107 	.get_func_proto		= pe_prog_func_proto,
2108 	.is_valid_access	= pe_prog_is_valid_access,
2109 	.convert_ctx_access	= pe_prog_convert_ctx_access,
2110 };
2111 
2112 const struct bpf_prog_ops perf_event_prog_ops = {
2113 };
2114 
2115 static DEFINE_MUTEX(bpf_event_mutex);
2116 
2117 #define BPF_TRACE_MAX_PROGS 64
2118 
2119 int perf_event_attach_bpf_prog(struct perf_event *event,
2120 			       struct bpf_prog *prog,
2121 			       u64 bpf_cookie)
2122 {
2123 	struct bpf_prog_array *old_array;
2124 	struct bpf_prog_array *new_array;
2125 	int ret = -EEXIST;
2126 
2127 	/*
2128 	 * Kprobe override only works if they are on the function entry,
2129 	 * and only if they are on the opt-in list.
2130 	 */
2131 	if (prog->kprobe_override &&
2132 	    (!trace_kprobe_on_func_entry(event->tp_event) ||
2133 	     !trace_kprobe_error_injectable(event->tp_event)))
2134 		return -EINVAL;
2135 
2136 	mutex_lock(&bpf_event_mutex);
2137 
2138 	if (event->prog)
2139 		goto unlock;
2140 
2141 	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2142 	if (old_array &&
2143 	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
2144 		ret = -E2BIG;
2145 		goto unlock;
2146 	}
2147 
2148 	ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
2149 	if (ret < 0)
2150 		goto unlock;
2151 
2152 	/* set the new array to event->tp_event and set event->prog */
2153 	event->prog = prog;
2154 	event->bpf_cookie = bpf_cookie;
2155 	rcu_assign_pointer(event->tp_event->prog_array, new_array);
2156 	bpf_prog_array_free_sleepable(old_array);
2157 
2158 unlock:
2159 	mutex_unlock(&bpf_event_mutex);
2160 	return ret;
2161 }
2162 
2163 void perf_event_detach_bpf_prog(struct perf_event *event)
2164 {
2165 	struct bpf_prog_array *old_array;
2166 	struct bpf_prog_array *new_array;
2167 	int ret;
2168 
2169 	mutex_lock(&bpf_event_mutex);
2170 
2171 	if (!event->prog)
2172 		goto unlock;
2173 
2174 	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2175 	ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
2176 	if (ret == -ENOENT)
2177 		goto unlock;
2178 	if (ret < 0) {
2179 		bpf_prog_array_delete_safe(old_array, event->prog);
2180 	} else {
2181 		rcu_assign_pointer(event->tp_event->prog_array, new_array);
2182 		bpf_prog_array_free_sleepable(old_array);
2183 	}
2184 
2185 	bpf_prog_put(event->prog);
2186 	event->prog = NULL;
2187 
2188 unlock:
2189 	mutex_unlock(&bpf_event_mutex);
2190 }
2191 
2192 int perf_event_query_prog_array(struct perf_event *event, void __user *info)
2193 {
2194 	struct perf_event_query_bpf __user *uquery = info;
2195 	struct perf_event_query_bpf query = {};
2196 	struct bpf_prog_array *progs;
2197 	u32 *ids, prog_cnt, ids_len;
2198 	int ret;
2199 
2200 	if (!perfmon_capable())
2201 		return -EPERM;
2202 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
2203 		return -EINVAL;
2204 	if (copy_from_user(&query, uquery, sizeof(query)))
2205 		return -EFAULT;
2206 
2207 	ids_len = query.ids_len;
2208 	if (ids_len > BPF_TRACE_MAX_PROGS)
2209 		return -E2BIG;
2210 	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
2211 	if (!ids)
2212 		return -ENOMEM;
2213 	/*
2214 	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
2215 	 * is required when user only wants to check for uquery->prog_cnt.
2216 	 * There is no need to check for it since the case is handled
2217 	 * gracefully in bpf_prog_array_copy_info.
2218 	 */
2219 
2220 	mutex_lock(&bpf_event_mutex);
2221 	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
2222 	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
2223 	mutex_unlock(&bpf_event_mutex);
2224 
2225 	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
2226 	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
2227 		ret = -EFAULT;
2228 
2229 	kfree(ids);
2230 	return ret;
2231 }
2232 
2233 extern struct bpf_raw_event_map __start__bpf_raw_tp[];
2234 extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
2235 
2236 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
2237 {
2238 	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
2239 
2240 	for (; btp < __stop__bpf_raw_tp; btp++) {
2241 		if (!strcmp(btp->tp->name, name))
2242 			return btp;
2243 	}
2244 
2245 	return bpf_get_raw_tracepoint_module(name);
2246 }
2247 
2248 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
2249 {
2250 	struct module *mod;
2251 
2252 	preempt_disable();
2253 	mod = __module_address((unsigned long)btp);
2254 	module_put(mod);
2255 	preempt_enable();
2256 }
2257 
2258 static __always_inline
2259 void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
2260 {
2261 	cant_sleep();
2262 	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
2263 		bpf_prog_inc_misses_counter(prog);
2264 		goto out;
2265 	}
2266 	rcu_read_lock();
2267 	(void) bpf_prog_run(prog, args);
2268 	rcu_read_unlock();
2269 out:
2270 	this_cpu_dec(*(prog->active));
2271 }
2272 
2273 #define UNPACK(...)			__VA_ARGS__
2274 #define REPEAT_1(FN, DL, X, ...)	FN(X)
2275 #define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
2276 #define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
2277 #define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
2278 #define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
2279 #define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
2280 #define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
2281 #define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
2282 #define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
2283 #define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
2284 #define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
2285 #define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
2286 #define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)
2287 
2288 #define SARG(X)		u64 arg##X
2289 #define COPY(X)		args[X] = arg##X
2290 
2291 #define __DL_COM	(,)
2292 #define __DL_SEM	(;)
2293 
2294 #define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
2295 
2296 #define BPF_TRACE_DEFN_x(x)						\
2297 	void bpf_trace_run##x(struct bpf_prog *prog,			\
2298 			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
2299 	{								\
2300 		u64 args[x];						\
2301 		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
2302 		__bpf_trace_run(prog, args);				\
2303 	}								\
2304 	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
2305 BPF_TRACE_DEFN_x(1);
2306 BPF_TRACE_DEFN_x(2);
2307 BPF_TRACE_DEFN_x(3);
2308 BPF_TRACE_DEFN_x(4);
2309 BPF_TRACE_DEFN_x(5);
2310 BPF_TRACE_DEFN_x(6);
2311 BPF_TRACE_DEFN_x(7);
2312 BPF_TRACE_DEFN_x(8);
2313 BPF_TRACE_DEFN_x(9);
2314 BPF_TRACE_DEFN_x(10);
2315 BPF_TRACE_DEFN_x(11);
2316 BPF_TRACE_DEFN_x(12);
2317 
2318 static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2319 {
2320 	struct tracepoint *tp = btp->tp;
2321 
2322 	/*
2323 	 * check that program doesn't access arguments beyond what's
2324 	 * available in this tracepoint
2325 	 */
2326 	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
2327 		return -EINVAL;
2328 
2329 	if (prog->aux->max_tp_access > btp->writable_size)
2330 		return -EINVAL;
2331 
2332 	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
2333 						   prog);
2334 }
2335 
2336 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2337 {
2338 	return __bpf_probe_register(btp, prog);
2339 }
2340 
2341 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2342 {
2343 	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
2344 }
2345 
2346 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
2347 			    u32 *fd_type, const char **buf,
2348 			    u64 *probe_offset, u64 *probe_addr)
2349 {
2350 	bool is_tracepoint, is_syscall_tp;
2351 	struct bpf_prog *prog;
2352 	int flags, err = 0;
2353 
2354 	prog = event->prog;
2355 	if (!prog)
2356 		return -ENOENT;
2357 
2358 	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
2359 	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
2360 		return -EOPNOTSUPP;
2361 
2362 	*prog_id = prog->aux->id;
2363 	flags = event->tp_event->flags;
2364 	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
2365 	is_syscall_tp = is_syscall_trace_event(event->tp_event);
2366 
2367 	if (is_tracepoint || is_syscall_tp) {
2368 		*buf = is_tracepoint ? event->tp_event->tp->name
2369 				     : event->tp_event->name;
2370 		*fd_type = BPF_FD_TYPE_TRACEPOINT;
2371 		*probe_offset = 0x0;
2372 		*probe_addr = 0x0;
2373 	} else {
2374 		/* kprobe/uprobe */
2375 		err = -EOPNOTSUPP;
2376 #ifdef CONFIG_KPROBE_EVENTS
2377 		if (flags & TRACE_EVENT_FL_KPROBE)
2378 			err = bpf_get_kprobe_info(event, fd_type, buf,
2379 						  probe_offset, probe_addr,
2380 						  event->attr.type == PERF_TYPE_TRACEPOINT);
2381 #endif
2382 #ifdef CONFIG_UPROBE_EVENTS
2383 		if (flags & TRACE_EVENT_FL_UPROBE)
2384 			err = bpf_get_uprobe_info(event, fd_type, buf,
2385 						  probe_offset,
2386 						  event->attr.type == PERF_TYPE_TRACEPOINT);
2387 #endif
2388 	}
2389 
2390 	return err;
2391 }
2392 
2393 static int __init send_signal_irq_work_init(void)
2394 {
2395 	int cpu;
2396 	struct send_signal_irq_work *work;
2397 
2398 	for_each_possible_cpu(cpu) {
2399 		work = per_cpu_ptr(&send_signal_work, cpu);
2400 		init_irq_work(&work->irq_work, do_bpf_send_signal);
2401 	}
2402 	return 0;
2403 }
2404 
2405 subsys_initcall(send_signal_irq_work_init);
2406 
2407 #ifdef CONFIG_MODULES
2408 static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
2409 			    void *module)
2410 {
2411 	struct bpf_trace_module *btm, *tmp;
2412 	struct module *mod = module;
2413 	int ret = 0;
2414 
2415 	if (mod->num_bpf_raw_events == 0 ||
2416 	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
2417 		goto out;
2418 
2419 	mutex_lock(&bpf_module_mutex);
2420 
2421 	switch (op) {
2422 	case MODULE_STATE_COMING:
2423 		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
2424 		if (btm) {
2425 			btm->module = module;
2426 			list_add(&btm->list, &bpf_trace_modules);
2427 		} else {
2428 			ret = -ENOMEM;
2429 		}
2430 		break;
2431 	case MODULE_STATE_GOING:
2432 		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
2433 			if (btm->module == module) {
2434 				list_del(&btm->list);
2435 				kfree(btm);
2436 				break;
2437 			}
2438 		}
2439 		break;
2440 	}
2441 
2442 	mutex_unlock(&bpf_module_mutex);
2443 
2444 out:
2445 	return notifier_from_errno(ret);
2446 }
2447 
2448 static struct notifier_block bpf_module_nb = {
2449 	.notifier_call = bpf_event_notify,
2450 };
2451 
2452 static int __init bpf_event_init(void)
2453 {
2454 	register_module_notifier(&bpf_module_nb);
2455 	return 0;
2456 }
2457 
2458 fs_initcall(bpf_event_init);
2459 #endif /* CONFIG_MODULES */
2460 
2461 #ifdef CONFIG_FPROBE
2462 struct bpf_kprobe_multi_link {
2463 	struct bpf_link link;
2464 	struct fprobe fp;
2465 	unsigned long *addrs;
2466 	u64 *cookies;
2467 	u32 cnt;
2468 	u32 mods_cnt;
2469 	struct module **mods;
2470 };
2471 
2472 struct bpf_kprobe_multi_run_ctx {
2473 	struct bpf_run_ctx run_ctx;
2474 	struct bpf_kprobe_multi_link *link;
2475 	unsigned long entry_ip;
2476 };
2477 
2478 struct user_syms {
2479 	const char **syms;
2480 	char *buf;
2481 };
2482 
2483 static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
2484 {
2485 	unsigned long __user usymbol;
2486 	const char **syms = NULL;
2487 	char *buf = NULL, *p;
2488 	int err = -ENOMEM;
2489 	unsigned int i;
2490 
2491 	syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
2492 	if (!syms)
2493 		goto error;
2494 
2495 	buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
2496 	if (!buf)
2497 		goto error;
2498 
2499 	for (p = buf, i = 0; i < cnt; i++) {
2500 		if (__get_user(usymbol, usyms + i)) {
2501 			err = -EFAULT;
2502 			goto error;
2503 		}
2504 		err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN);
2505 		if (err == KSYM_NAME_LEN)
2506 			err = -E2BIG;
2507 		if (err < 0)
2508 			goto error;
2509 		syms[i] = p;
2510 		p += err + 1;
2511 	}
2512 
2513 	us->syms = syms;
2514 	us->buf = buf;
2515 	return 0;
2516 
2517 error:
2518 	if (err) {
2519 		kvfree(syms);
2520 		kvfree(buf);
2521 	}
2522 	return err;
2523 }
2524 
2525 static void kprobe_multi_put_modules(struct module **mods, u32 cnt)
2526 {
2527 	u32 i;
2528 
2529 	for (i = 0; i < cnt; i++)
2530 		module_put(mods[i]);
2531 }
2532 
2533 static void free_user_syms(struct user_syms *us)
2534 {
2535 	kvfree(us->syms);
2536 	kvfree(us->buf);
2537 }
2538 
2539 static void bpf_kprobe_multi_link_release(struct bpf_link *link)
2540 {
2541 	struct bpf_kprobe_multi_link *kmulti_link;
2542 
2543 	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2544 	unregister_fprobe(&kmulti_link->fp);
2545 	kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt);
2546 }
2547 
2548 static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
2549 {
2550 	struct bpf_kprobe_multi_link *kmulti_link;
2551 
2552 	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2553 	kvfree(kmulti_link->addrs);
2554 	kvfree(kmulti_link->cookies);
2555 	kfree(kmulti_link->mods);
2556 	kfree(kmulti_link);
2557 }
2558 
2559 static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
2560 	.release = bpf_kprobe_multi_link_release,
2561 	.dealloc = bpf_kprobe_multi_link_dealloc,
2562 };
2563 
2564 static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv)
2565 {
2566 	const struct bpf_kprobe_multi_link *link = priv;
2567 	unsigned long *addr_a = a, *addr_b = b;
2568 	u64 *cookie_a, *cookie_b;
2569 
2570 	cookie_a = link->cookies + (addr_a - link->addrs);
2571 	cookie_b = link->cookies + (addr_b - link->addrs);
2572 
2573 	/* swap addr_a/addr_b and cookie_a/cookie_b values */
2574 	swap(*addr_a, *addr_b);
2575 	swap(*cookie_a, *cookie_b);
2576 }
2577 
2578 static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b)
2579 {
2580 	const unsigned long *addr_a = a, *addr_b = b;
2581 
2582 	if (*addr_a == *addr_b)
2583 		return 0;
2584 	return *addr_a < *addr_b ? -1 : 1;
2585 }
2586 
2587 static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv)
2588 {
2589 	return bpf_kprobe_multi_addrs_cmp(a, b);
2590 }
2591 
2592 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
2593 {
2594 	struct bpf_kprobe_multi_run_ctx *run_ctx;
2595 	struct bpf_kprobe_multi_link *link;
2596 	u64 *cookie, entry_ip;
2597 	unsigned long *addr;
2598 
2599 	if (WARN_ON_ONCE(!ctx))
2600 		return 0;
2601 	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
2602 	link = run_ctx->link;
2603 	if (!link->cookies)
2604 		return 0;
2605 	entry_ip = run_ctx->entry_ip;
2606 	addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
2607 		       bpf_kprobe_multi_addrs_cmp);
2608 	if (!addr)
2609 		return 0;
2610 	cookie = link->cookies + (addr - link->addrs);
2611 	return *cookie;
2612 }
2613 
2614 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
2615 {
2616 	struct bpf_kprobe_multi_run_ctx *run_ctx;
2617 
2618 	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
2619 	return run_ctx->entry_ip;
2620 }
2621 
2622 static int
2623 kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
2624 			   unsigned long entry_ip, struct pt_regs *regs)
2625 {
2626 	struct bpf_kprobe_multi_run_ctx run_ctx = {
2627 		.link = link,
2628 		.entry_ip = entry_ip,
2629 	};
2630 	struct bpf_run_ctx *old_run_ctx;
2631 	int err;
2632 
2633 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
2634 		err = 0;
2635 		goto out;
2636 	}
2637 
2638 	migrate_disable();
2639 	rcu_read_lock();
2640 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
2641 	err = bpf_prog_run(link->link.prog, regs);
2642 	bpf_reset_run_ctx(old_run_ctx);
2643 	rcu_read_unlock();
2644 	migrate_enable();
2645 
2646  out:
2647 	__this_cpu_dec(bpf_prog_active);
2648 	return err;
2649 }
2650 
2651 static void
2652 kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
2653 			  struct pt_regs *regs)
2654 {
2655 	struct bpf_kprobe_multi_link *link;
2656 
2657 	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2658 	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
2659 }
2660 
2661 static int symbols_cmp_r(const void *a, const void *b, const void *priv)
2662 {
2663 	const char **str_a = (const char **) a;
2664 	const char **str_b = (const char **) b;
2665 
2666 	return strcmp(*str_a, *str_b);
2667 }
2668 
2669 struct multi_symbols_sort {
2670 	const char **funcs;
2671 	u64 *cookies;
2672 };
2673 
2674 static void symbols_swap_r(void *a, void *b, int size, const void *priv)
2675 {
2676 	const struct multi_symbols_sort *data = priv;
2677 	const char **name_a = a, **name_b = b;
2678 
2679 	swap(*name_a, *name_b);
2680 
2681 	/* If defined, swap also related cookies. */
2682 	if (data->cookies) {
2683 		u64 *cookie_a, *cookie_b;
2684 
2685 		cookie_a = data->cookies + (name_a - data->funcs);
2686 		cookie_b = data->cookies + (name_b - data->funcs);
2687 		swap(*cookie_a, *cookie_b);
2688 	}
2689 }
2690 
2691 struct module_addr_args {
2692 	unsigned long *addrs;
2693 	u32 addrs_cnt;
2694 	struct module **mods;
2695 	int mods_cnt;
2696 	int mods_cap;
2697 };
2698 
2699 static int module_callback(void *data, const char *name,
2700 			   struct module *mod, unsigned long addr)
2701 {
2702 	struct module_addr_args *args = data;
2703 	struct module **mods;
2704 
2705 	/* We iterate all modules symbols and for each we:
2706 	 * - search for it in provided addresses array
2707 	 * - if found we check if we already have the module pointer stored
2708 	 *   (we iterate modules sequentially, so we can check just the last
2709 	 *   module pointer)
2710 	 * - take module reference and store it
2711 	 */
2712 	if (!bsearch(&addr, args->addrs, args->addrs_cnt, sizeof(addr),
2713 		       bpf_kprobe_multi_addrs_cmp))
2714 		return 0;
2715 
2716 	if (args->mods && args->mods[args->mods_cnt - 1] == mod)
2717 		return 0;
2718 
2719 	if (args->mods_cnt == args->mods_cap) {
2720 		args->mods_cap = max(16, args->mods_cap * 3 / 2);
2721 		mods = krealloc_array(args->mods, args->mods_cap, sizeof(*mods), GFP_KERNEL);
2722 		if (!mods)
2723 			return -ENOMEM;
2724 		args->mods = mods;
2725 	}
2726 
2727 	if (!try_module_get(mod))
2728 		return -EINVAL;
2729 
2730 	args->mods[args->mods_cnt] = mod;
2731 	args->mods_cnt++;
2732 	return 0;
2733 }
2734 
2735 static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
2736 {
2737 	struct module_addr_args args = {
2738 		.addrs     = addrs,
2739 		.addrs_cnt = addrs_cnt,
2740 	};
2741 	int err;
2742 
2743 	/* We return either err < 0 in case of error, ... */
2744 	err = module_kallsyms_on_each_symbol(module_callback, &args);
2745 	if (err) {
2746 		kprobe_multi_put_modules(args.mods, args.mods_cnt);
2747 		kfree(args.mods);
2748 		return err;
2749 	}
2750 
2751 	/* or number of modules found if everything is ok. */
2752 	*mods = args.mods;
2753 	return args.mods_cnt;
2754 }
2755 
2756 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
2757 {
2758 	struct bpf_kprobe_multi_link *link = NULL;
2759 	struct bpf_link_primer link_primer;
2760 	void __user *ucookies;
2761 	unsigned long *addrs;
2762 	u32 flags, cnt, size;
2763 	void __user *uaddrs;
2764 	u64 *cookies = NULL;
2765 	void __user *usyms;
2766 	int err;
2767 
2768 	/* no support for 32bit archs yet */
2769 	if (sizeof(u64) != sizeof(void *))
2770 		return -EOPNOTSUPP;
2771 
2772 	if (prog->expected_attach_type != BPF_TRACE_KPROBE_MULTI)
2773 		return -EINVAL;
2774 
2775 	flags = attr->link_create.kprobe_multi.flags;
2776 	if (flags & ~BPF_F_KPROBE_MULTI_RETURN)
2777 		return -EINVAL;
2778 
2779 	uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
2780 	usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
2781 	if (!!uaddrs == !!usyms)
2782 		return -EINVAL;
2783 
2784 	cnt = attr->link_create.kprobe_multi.cnt;
2785 	if (!cnt)
2786 		return -EINVAL;
2787 
2788 	size = cnt * sizeof(*addrs);
2789 	addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
2790 	if (!addrs)
2791 		return -ENOMEM;
2792 
2793 	ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
2794 	if (ucookies) {
2795 		cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
2796 		if (!cookies) {
2797 			err = -ENOMEM;
2798 			goto error;
2799 		}
2800 		if (copy_from_user(cookies, ucookies, size)) {
2801 			err = -EFAULT;
2802 			goto error;
2803 		}
2804 	}
2805 
2806 	if (uaddrs) {
2807 		if (copy_from_user(addrs, uaddrs, size)) {
2808 			err = -EFAULT;
2809 			goto error;
2810 		}
2811 	} else {
2812 		struct multi_symbols_sort data = {
2813 			.cookies = cookies,
2814 		};
2815 		struct user_syms us;
2816 
2817 		err = copy_user_syms(&us, usyms, cnt);
2818 		if (err)
2819 			goto error;
2820 
2821 		if (cookies)
2822 			data.funcs = us.syms;
2823 
2824 		sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
2825 		       symbols_swap_r, &data);
2826 
2827 		err = ftrace_lookup_symbols(us.syms, cnt, addrs);
2828 		free_user_syms(&us);
2829 		if (err)
2830 			goto error;
2831 	}
2832 
2833 	link = kzalloc(sizeof(*link), GFP_KERNEL);
2834 	if (!link) {
2835 		err = -ENOMEM;
2836 		goto error;
2837 	}
2838 
2839 	bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
2840 		      &bpf_kprobe_multi_link_lops, prog);
2841 
2842 	err = bpf_link_prime(&link->link, &link_primer);
2843 	if (err)
2844 		goto error;
2845 
2846 	if (flags & BPF_F_KPROBE_MULTI_RETURN)
2847 		link->fp.exit_handler = kprobe_multi_link_handler;
2848 	else
2849 		link->fp.entry_handler = kprobe_multi_link_handler;
2850 
2851 	link->addrs = addrs;
2852 	link->cookies = cookies;
2853 	link->cnt = cnt;
2854 
2855 	if (cookies) {
2856 		/*
2857 		 * Sorting addresses will trigger sorting cookies as well
2858 		 * (check bpf_kprobe_multi_cookie_swap). This way we can
2859 		 * find cookie based on the address in bpf_get_attach_cookie
2860 		 * helper.
2861 		 */
2862 		sort_r(addrs, cnt, sizeof(*addrs),
2863 		       bpf_kprobe_multi_cookie_cmp,
2864 		       bpf_kprobe_multi_cookie_swap,
2865 		       link);
2866 	} else {
2867 		/*
2868 		 * We need to sort addrs array even if there are no cookies
2869 		 * provided, to allow bsearch in get_modules_for_addrs.
2870 		 */
2871 		sort(addrs, cnt, sizeof(*addrs),
2872 		       bpf_kprobe_multi_addrs_cmp, NULL);
2873 	}
2874 
2875 	err = get_modules_for_addrs(&link->mods, addrs, cnt);
2876 	if (err < 0) {
2877 		bpf_link_cleanup(&link_primer);
2878 		return err;
2879 	}
2880 	link->mods_cnt = err;
2881 
2882 	err = register_fprobe_ips(&link->fp, addrs, cnt);
2883 	if (err) {
2884 		kprobe_multi_put_modules(link->mods, link->mods_cnt);
2885 		bpf_link_cleanup(&link_primer);
2886 		return err;
2887 	}
2888 
2889 	return bpf_link_settle(&link_primer);
2890 
2891 error:
2892 	kfree(link);
2893 	kvfree(addrs);
2894 	kvfree(cookies);
2895 	return err;
2896 }
2897 #else /* !CONFIG_FPROBE */
2898 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
2899 {
2900 	return -EOPNOTSUPP;
2901 }
2902 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
2903 {
2904 	return 0;
2905 }
2906 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
2907 {
2908 	return 0;
2909 }
2910 #endif
2911