xref: /linux/kernel/trace/bpf_trace.c (revision 4c30f5ce4f7af4f639af99e0bdeada8b268b7361)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  */
5 #include <linux/kernel.h>
6 #include <linux/types.h>
7 #include <linux/slab.h>
8 #include <linux/bpf.h>
9 #include <linux/bpf_verifier.h>
10 #include <linux/bpf_perf_event.h>
11 #include <linux/btf.h>
12 #include <linux/filter.h>
13 #include <linux/uaccess.h>
14 #include <linux/ctype.h>
15 #include <linux/kprobes.h>
16 #include <linux/spinlock.h>
17 #include <linux/syscalls.h>
18 #include <linux/error-injection.h>
19 #include <linux/btf_ids.h>
20 #include <linux/bpf_lsm.h>
21 #include <linux/fprobe.h>
22 #include <linux/bsearch.h>
23 #include <linux/sort.h>
24 #include <linux/key.h>
25 #include <linux/verification.h>
26 #include <linux/namei.h>
27 
28 #include <net/bpf_sk_storage.h>
29 
30 #include <uapi/linux/bpf.h>
31 #include <uapi/linux/btf.h>
32 
33 #include <asm/tlb.h>
34 
35 #include "trace_probe.h"
36 #include "trace.h"
37 
38 #define CREATE_TRACE_POINTS
39 #include "bpf_trace.h"
40 
41 #define bpf_event_rcu_dereference(p)					\
42 	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
43 
44 #define MAX_UPROBE_MULTI_CNT (1U << 20)
45 #define MAX_KPROBE_MULTI_CNT (1U << 20)
46 
47 #ifdef CONFIG_MODULES
48 struct bpf_trace_module {
49 	struct module *module;
50 	struct list_head list;
51 };
52 
53 static LIST_HEAD(bpf_trace_modules);
54 static DEFINE_MUTEX(bpf_module_mutex);
55 
56 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
57 {
58 	struct bpf_raw_event_map *btp, *ret = NULL;
59 	struct bpf_trace_module *btm;
60 	unsigned int i;
61 
62 	mutex_lock(&bpf_module_mutex);
63 	list_for_each_entry(btm, &bpf_trace_modules, list) {
64 		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
65 			btp = &btm->module->bpf_raw_events[i];
66 			if (!strcmp(btp->tp->name, name)) {
67 				if (try_module_get(btm->module))
68 					ret = btp;
69 				goto out;
70 			}
71 		}
72 	}
73 out:
74 	mutex_unlock(&bpf_module_mutex);
75 	return ret;
76 }
77 #else
78 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
79 {
80 	return NULL;
81 }
82 #endif /* CONFIG_MODULES */
83 
84 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
85 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
86 
87 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
88 				  u64 flags, const struct btf **btf,
89 				  s32 *btf_id);
90 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
91 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
92 
93 static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx);
94 static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
95 
96 /**
97  * trace_call_bpf - invoke BPF program
98  * @call: tracepoint event
99  * @ctx: opaque context pointer
100  *
101  * kprobe handlers execute BPF programs via this helper.
102  * Can be used from static tracepoints in the future.
103  *
104  * Return: BPF programs always return an integer which is interpreted by
105  * the kprobe handler as:
106  * 0 - return from kprobe (event is filtered out)
107  * 1 - store kprobe event into ring buffer
108  * Other values are reserved and currently alias to 1
109  */
110 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
111 {
112 	unsigned int ret;
113 
114 	cant_sleep();
115 
116 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
117 		/*
118 		 * since some bpf program is already running on this cpu,
119 		 * don't call into another bpf program (same or different)
120 		 * and don't send kprobe event into ring-buffer,
121 		 * so return zero here
122 		 */
123 		rcu_read_lock();
124 		bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array));
125 		rcu_read_unlock();
126 		ret = 0;
127 		goto out;
128 	}
129 
130 	/*
131 	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
132 	 * to all call sites, we do a bpf_prog_array_valid() check there,
133 	 * which tests whether call->prog_array is empty or not, as a
134 	 * heuristic to speed up execution.
135 	 *
136 	 * If the prog_array fetched by bpf_prog_array_valid() was
137 	 * non-NULL, we enter trace_call_bpf() and do the actual,
138 	 * proper rcu_dereference() under the RCU lock.
139 	 * If it turns out that prog_array is NULL, we bail out.
140 	 * Conversely, if the pointer fetched by bpf_prog_array_valid()
141 	 * was NULL, the prog_array is skipped, at the risk of missing
142 	 * events if it was updated between that check and the
143 	 * rcu_dereference(); that is an accepted risk.
144 	 */
145 	rcu_read_lock();
146 	ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
147 				 ctx, bpf_prog_run);
148 	rcu_read_unlock();
149 
150  out:
151 	__this_cpu_dec(bpf_prog_active);
152 
153 	return ret;
154 }
155 
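/*
 * Illustrative sketch (not part of this file): a minimal BPF-side kprobe
 * program showing how the return value documented above is consumed. The
 * attach point and the pid filter are hypothetical.
 *
 *	#include <vmlinux.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int filter_openat(struct pt_regs *ctx)
 *	{
 *		// Returning 0 filters the event out of the ring buffer,
 *		// returning 1 stores it (see trace_call_bpf() above).
 *		if ((bpf_get_current_pid_tgid() >> 32) != 1234)
 *			return 0;
 *		return 1;
 *	}
 *
 *	char LICENSE[] SEC("license") = "GPL";
 */
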
156 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
157 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
158 {
159 	regs_set_return_value(regs, rc);
160 	override_function_with_return(regs);
161 	return 0;
162 }
163 
164 static const struct bpf_func_proto bpf_override_return_proto = {
165 	.func		= bpf_override_return,
166 	.gpl_only	= true,
167 	.ret_type	= RET_INTEGER,
168 	.arg1_type	= ARG_PTR_TO_CTX,
169 	.arg2_type	= ARG_ANYTHING,
170 };
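
/*
 * Illustrative sketch (not part of this file): error injection from a kprobe
 * program on a function annotated with ALLOW_ERROR_INJECTION(). The probed
 * function returns -ENOMEM without running its body; the attach point below
 * is only an example.
 *
 *	SEC("kprobe/should_failslab")
 *	int inject_enomem(struct pt_regs *ctx)
 *	{
 *		bpf_override_return(ctx, -ENOMEM);
 *		return 0;
 *	}
 */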
171 #endif
172 
173 static __always_inline int
174 bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
175 {
176 	int ret;
177 
178 	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
179 	if (unlikely(ret < 0))
180 		memset(dst, 0, size);
181 	return ret;
182 }
183 
184 BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
185 	   const void __user *, unsafe_ptr)
186 {
187 	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
188 }
189 
190 const struct bpf_func_proto bpf_probe_read_user_proto = {
191 	.func		= bpf_probe_read_user,
192 	.gpl_only	= true,
193 	.ret_type	= RET_INTEGER,
194 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
195 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
196 	.arg3_type	= ARG_ANYTHING,
197 };
198 
199 static __always_inline int
200 bpf_probe_read_user_str_common(void *dst, u32 size,
201 			       const void __user *unsafe_ptr)
202 {
203 	int ret;
204 
205 	/*
206 	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
207 	 * terminator into `dst`.
208 	 *
209 	 * strncpy_from_user() does long-sized strides in the fast path. If the
210 	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
211 	 * then there could be junk after the NUL in `dst`. If the user takes `dst`
212 	 * and keys a hash map with it, then semantically identical strings can
213 	 * occupy multiple entries in the map.
214 	 */
215 	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
216 	if (unlikely(ret < 0))
217 		memset(dst, 0, size);
218 	return ret;
219 }
220 
221 BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
222 	   const void __user *, unsafe_ptr)
223 {
224 	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
225 }
226 
227 const struct bpf_func_proto bpf_probe_read_user_str_proto = {
228 	.func		= bpf_probe_read_user_str,
229 	.gpl_only	= true,
230 	.ret_type	= RET_INTEGER,
231 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
232 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
233 	.arg3_type	= ARG_ANYTHING,
234 };
235 
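/*
 * Illustrative sketch (not part of this file): reading a NUL-terminated
 * string from user memory in a BPF program. On success the returned length
 * includes the trailing NUL, which matters when the buffer is later used as
 * a map key (see the comment above). 'user_ptr' is a hypothetical pointer
 * taken from the probed context.
 *
 *	char name[64];
 *	long n = bpf_probe_read_user_str(name, sizeof(name), user_ptr);
 *
 *	if (n < 0)
 *		return 0;	// fault while probing user memory
 *	// name[0..n-1] is initialized and name[n-1] is the NUL terminator
 */
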
236 BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
237 	   const void *, unsafe_ptr)
238 {
239 	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
240 }
241 
242 const struct bpf_func_proto bpf_probe_read_kernel_proto = {
243 	.func		= bpf_probe_read_kernel,
244 	.gpl_only	= true,
245 	.ret_type	= RET_INTEGER,
246 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
247 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
248 	.arg3_type	= ARG_ANYTHING,
249 };
250 
251 static __always_inline int
252 bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
253 {
254 	int ret;
255 
256 	/*
257 	 * The strncpy_from_kernel_nofault() call will likely not fill the
258 	 * entire buffer, but that's okay in this circumstance as we're probing
259 	 * arbitrary memory anyway, similar to bpf_probe_read_*(), and might
260 	 * as well probe the stack. Thus, memory is explicitly cleared
261 	 * only in the error case, so that improper users ignoring the return
262 	 * code altogether don't copy garbage; otherwise the length of the
263 	 * string is returned and can be used for bpf_perf_event_output() et al.
264 	 */
265 	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
266 	if (unlikely(ret < 0))
267 		memset(dst, 0, size);
268 	return ret;
269 }
270 
271 BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
272 	   const void *, unsafe_ptr)
273 {
274 	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
275 }
276 
277 const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
278 	.func		= bpf_probe_read_kernel_str,
279 	.gpl_only	= true,
280 	.ret_type	= RET_INTEGER,
281 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
282 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
283 	.arg3_type	= ARG_ANYTHING,
284 };
285 
286 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
287 BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
288 	   const void *, unsafe_ptr)
289 {
290 	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
291 		return bpf_probe_read_user_common(dst, size,
292 				(__force void __user *)unsafe_ptr);
293 	}
294 	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
295 }
296 
297 static const struct bpf_func_proto bpf_probe_read_compat_proto = {
298 	.func		= bpf_probe_read_compat,
299 	.gpl_only	= true,
300 	.ret_type	= RET_INTEGER,
301 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
302 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
303 	.arg3_type	= ARG_ANYTHING,
304 };
305 
306 BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
307 	   const void *, unsafe_ptr)
308 {
309 	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
310 		return bpf_probe_read_user_str_common(dst, size,
311 				(__force void __user *)unsafe_ptr);
312 	}
313 	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
314 }
315 
316 static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
317 	.func		= bpf_probe_read_compat_str,
318 	.gpl_only	= true,
319 	.ret_type	= RET_INTEGER,
320 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
321 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
322 	.arg3_type	= ARG_ANYTHING,
323 };
324 #endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
325 
326 BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
327 	   u32, size)
328 {
329 	/*
330 	 * Ensure we're in user context which is safe for the helper to
331 	 * run. This helper has no business in a kthread.
332 	 *
333 	 * access_ok() should prevent writing to non-user memory, but in
334 	 * some situations (nommu, temporary switch, etc) access_ok() does
335 	 * not provide enough validation, hence the check on KERNEL_DS.
336 	 *
337 	 * nmi_uaccess_okay() ensures the probe is not run in an interim
338 	 * state, when the task or mm are switched. This is specifically
339 	 * required to prevent the use of temporary mm.
340 	 */
341 
342 	if (unlikely(in_interrupt() ||
343 		     current->flags & (PF_KTHREAD | PF_EXITING)))
344 		return -EPERM;
345 	if (unlikely(!nmi_uaccess_okay()))
346 		return -EPERM;
347 
348 	return copy_to_user_nofault(unsafe_ptr, src, size);
349 }
350 
351 static const struct bpf_func_proto bpf_probe_write_user_proto = {
352 	.func		= bpf_probe_write_user,
353 	.gpl_only	= true,
354 	.ret_type	= RET_INTEGER,
355 	.arg1_type	= ARG_ANYTHING,
356 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
357 	.arg3_type	= ARG_CONST_SIZE,
358 };
359 
360 static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
361 {
362 	if (!capable(CAP_SYS_ADMIN))
363 		return NULL;
364 
365 	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
366 			    current->comm, task_pid_nr(current));
367 
368 	return &bpf_probe_write_user_proto;
369 }
370 
371 #define MAX_TRACE_PRINTK_VARARGS	3
372 #define BPF_TRACE_PRINTK_SIZE		1024
373 
374 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
375 	   u64, arg2, u64, arg3)
376 {
377 	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
378 	struct bpf_bprintf_data data = {
379 		.get_bin_args	= true,
380 		.get_buf	= true,
381 	};
382 	int ret;
383 
384 	ret = bpf_bprintf_prepare(fmt, fmt_size, args,
385 				  MAX_TRACE_PRINTK_VARARGS, &data);
386 	if (ret < 0)
387 		return ret;
388 
389 	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
390 
391 	trace_bpf_trace_printk(data.buf);
392 
393 	bpf_bprintf_cleanup(&data);
394 
395 	return ret;
396 }
397 
398 static const struct bpf_func_proto bpf_trace_printk_proto = {
399 	.func		= bpf_trace_printk,
400 	.gpl_only	= true,
401 	.ret_type	= RET_INTEGER,
402 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
403 	.arg2_type	= ARG_CONST_SIZE,
404 };
405 
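/*
 * Illustrative sketch (not part of this file): the BPF-side call that ends up
 * here. At most MAX_TRACE_PRINTK_VARARGS (3) arguments are supported and the
 * formatted string is emitted through the bpf_trace_printk trace event that
 * __set_printk_clr_event() below enables. Variable names are hypothetical.
 *
 *	char fmt[] = "openat: pid=%d flags=%x\n";
 *
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, flags);
 */
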
406 static void __set_printk_clr_event(void)
407 {
408 	/*
409 	 * This program might be calling bpf_trace_printk,
410 	 * so enable the associated bpf_trace/bpf_trace_printk event.
411 	 * Repeat this each time, as it is possible a user has
412 	 * disabled bpf_trace_printk events.  By loading a program that
413 	 * calls bpf_trace_printk(), however, the user has expressed
414 	 * the intent to see such events.
415 	 */
416 	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
417 		pr_warn_ratelimited("could not enable bpf_trace_printk events");
418 }
419 
420 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
421 {
422 	__set_printk_clr_event();
423 	return &bpf_trace_printk_proto;
424 }
425 
426 BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args,
427 	   u32, data_len)
428 {
429 	struct bpf_bprintf_data data = {
430 		.get_bin_args	= true,
431 		.get_buf	= true,
432 	};
433 	int ret, num_args;
434 
435 	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
436 	    (data_len && !args))
437 		return -EINVAL;
438 	num_args = data_len / 8;
439 
440 	ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
441 	if (ret < 0)
442 		return ret;
443 
444 	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
445 
446 	trace_bpf_trace_printk(data.buf);
447 
448 	bpf_bprintf_cleanup(&data);
449 
450 	return ret;
451 }
452 
453 static const struct bpf_func_proto bpf_trace_vprintk_proto = {
454 	.func		= bpf_trace_vprintk,
455 	.gpl_only	= true,
456 	.ret_type	= RET_INTEGER,
457 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
458 	.arg2_type	= ARG_CONST_SIZE,
459 	.arg3_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
460 	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
461 };
462 
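/*
 * Illustrative sketch (not part of this file): bpf_trace_vprintk() takes its
 * arguments as an array of u64 values, so data_len must be a multiple of 8
 * (one u64 per argument), which is what the "data_len & 7" check above
 * enforces. Variable names are hypothetical.
 *
 *	char fmt[] = "pid=%d ret=%d cpu=%u\n";
 *	u64 args[] = { pid, ret, bpf_get_smp_processor_id() };
 *
 *	bpf_trace_vprintk(fmt, sizeof(fmt), args, sizeof(args));
 */
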
463 const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
464 {
465 	__set_printk_clr_event();
466 	return &bpf_trace_vprintk_proto;
467 }
468 
469 BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
470 	   const void *, args, u32, data_len)
471 {
472 	struct bpf_bprintf_data data = {
473 		.get_bin_args	= true,
474 	};
475 	int err, num_args;
476 
477 	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
478 	    (data_len && !args))
479 		return -EINVAL;
480 	num_args = data_len / 8;
481 
482 	err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
483 	if (err < 0)
484 		return err;
485 
486 	seq_bprintf(m, fmt, data.bin_args);
487 
488 	bpf_bprintf_cleanup(&data);
489 
490 	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
491 }
492 
493 BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
494 
495 static const struct bpf_func_proto bpf_seq_printf_proto = {
496 	.func		= bpf_seq_printf,
497 	.gpl_only	= true,
498 	.ret_type	= RET_INTEGER,
499 	.arg1_type	= ARG_PTR_TO_BTF_ID,
500 	.arg1_btf_id	= &btf_seq_file_ids[0],
501 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
502 	.arg3_type	= ARG_CONST_SIZE,
503 	.arg4_type      = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
504 	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
505 };
506 
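/*
 * Illustrative sketch (not part of this file): bpf_seq_printf() is meant for
 * BPF iterator programs, where the seq_file comes from the iterator context.
 * The task iterator below is just one example; BPF_SEQ_PRINTF() is libbpf's
 * convenience wrapper that builds the u64 argument array.
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *
 *		if (!task)
 *			return 0;
 *		BPF_SEQ_PRINTF(seq, "%8d %16s\n", task->pid, task->comm);
 *		return 0;
 *	}
 */
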
507 BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
508 {
509 	return seq_write(m, data, len) ? -EOVERFLOW : 0;
510 }
511 
512 static const struct bpf_func_proto bpf_seq_write_proto = {
513 	.func		= bpf_seq_write,
514 	.gpl_only	= true,
515 	.ret_type	= RET_INTEGER,
516 	.arg1_type	= ARG_PTR_TO_BTF_ID,
517 	.arg1_btf_id	= &btf_seq_file_ids[0],
518 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
519 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
520 };
521 
522 BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
523 	   u32, btf_ptr_size, u64, flags)
524 {
525 	const struct btf *btf;
526 	s32 btf_id;
527 	int ret;
528 
529 	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
530 	if (ret)
531 		return ret;
532 
533 	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
534 }
535 
536 static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
537 	.func		= bpf_seq_printf_btf,
538 	.gpl_only	= true,
539 	.ret_type	= RET_INTEGER,
540 	.arg1_type	= ARG_PTR_TO_BTF_ID,
541 	.arg1_btf_id	= &btf_seq_file_ids[0],
542 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
543 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
544 	.arg4_type	= ARG_ANYTHING,
545 };
546 
547 static __always_inline int
548 get_map_perf_counter(struct bpf_map *map, u64 flags,
549 		     u64 *value, u64 *enabled, u64 *running)
550 {
551 	struct bpf_array *array = container_of(map, struct bpf_array, map);
552 	unsigned int cpu = smp_processor_id();
553 	u64 index = flags & BPF_F_INDEX_MASK;
554 	struct bpf_event_entry *ee;
555 
556 	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
557 		return -EINVAL;
558 	if (index == BPF_F_CURRENT_CPU)
559 		index = cpu;
560 	if (unlikely(index >= array->map.max_entries))
561 		return -E2BIG;
562 
563 	ee = READ_ONCE(array->ptrs[index]);
564 	if (!ee)
565 		return -ENOENT;
566 
567 	return perf_event_read_local(ee->event, value, enabled, running);
568 }
569 
570 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
571 {
572 	u64 value = 0;
573 	int err;
574 
575 	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
576 	/*
577 	 * this API is ugly since valid counter values in the [-22..-2]
578 	 * range cannot be distinguished from errors, but that's UAPI
579 	 */
580 	if (err)
581 		return err;
582 	return value;
583 }
584 
585 static const struct bpf_func_proto bpf_perf_event_read_proto = {
586 	.func		= bpf_perf_event_read,
587 	.gpl_only	= true,
588 	.ret_type	= RET_INTEGER,
589 	.arg1_type	= ARG_CONST_MAP_PTR,
590 	.arg2_type	= ARG_ANYTHING,
591 };
592 
593 BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
594 	   struct bpf_perf_event_value *, buf, u32, size)
595 {
596 	int err = -EINVAL;
597 
598 	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
599 		goto clear;
600 	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
601 				   &buf->running);
602 	if (unlikely(err))
603 		goto clear;
604 	return 0;
605 clear:
606 	memset(buf, 0, size);
607 	return err;
608 }
609 
610 static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
611 	.func		= bpf_perf_event_read_value,
612 	.gpl_only	= true,
613 	.ret_type	= RET_INTEGER,
614 	.arg1_type	= ARG_CONST_MAP_PTR,
615 	.arg2_type	= ARG_ANYTHING,
616 	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
617 	.arg4_type	= ARG_CONST_SIZE,
618 };
619 
620 static __always_inline u64
621 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
622 			u64 flags, struct perf_sample_data *sd)
623 {
624 	struct bpf_array *array = container_of(map, struct bpf_array, map);
625 	unsigned int cpu = smp_processor_id();
626 	u64 index = flags & BPF_F_INDEX_MASK;
627 	struct bpf_event_entry *ee;
628 	struct perf_event *event;
629 
630 	if (index == BPF_F_CURRENT_CPU)
631 		index = cpu;
632 	if (unlikely(index >= array->map.max_entries))
633 		return -E2BIG;
634 
635 	ee = READ_ONCE(array->ptrs[index]);
636 	if (!ee)
637 		return -ENOENT;
638 
639 	event = ee->event;
640 	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
641 		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
642 		return -EINVAL;
643 
644 	if (unlikely(event->oncpu != cpu))
645 		return -EOPNOTSUPP;
646 
647 	return perf_event_output(event, sd, regs);
648 }
649 
650 /*
651  * Support executing tracepoints in normal, irq, and nmi context, each of
652  * which may call bpf_perf_event_output
653  */
654 struct bpf_trace_sample_data {
655 	struct perf_sample_data sds[3];
656 };
657 
658 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
659 static DEFINE_PER_CPU(int, bpf_trace_nest_level);
660 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
661 	   u64, flags, void *, data, u64, size)
662 {
663 	struct bpf_trace_sample_data *sds;
664 	struct perf_raw_record raw = {
665 		.frag = {
666 			.size = size,
667 			.data = data,
668 		},
669 	};
670 	struct perf_sample_data *sd;
671 	int nest_level, err;
672 
673 	preempt_disable();
674 	sds = this_cpu_ptr(&bpf_trace_sds);
675 	nest_level = this_cpu_inc_return(bpf_trace_nest_level);
676 
677 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
678 		err = -EBUSY;
679 		goto out;
680 	}
681 
682 	sd = &sds->sds[nest_level - 1];
683 
684 	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
685 		err = -EINVAL;
686 		goto out;
687 	}
688 
689 	perf_sample_data_init(sd, 0, 0);
690 	perf_sample_save_raw_data(sd, &raw);
691 
692 	err = __bpf_perf_event_output(regs, map, flags, sd);
693 out:
694 	this_cpu_dec(bpf_trace_nest_level);
695 	preempt_enable();
696 	return err;
697 }
698 
699 static const struct bpf_func_proto bpf_perf_event_output_proto = {
700 	.func		= bpf_perf_event_output,
701 	.gpl_only	= true,
702 	.ret_type	= RET_INTEGER,
703 	.arg1_type	= ARG_PTR_TO_CTX,
704 	.arg2_type	= ARG_CONST_MAP_PTR,
705 	.arg3_type	= ARG_ANYTHING,
706 	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
707 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
708 };
709 
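/*
 * Illustrative sketch (not part of this file): the BPF-side counterpart of
 * the helper above, pushing an event into a BPF_MAP_TYPE_PERF_EVENT_ARRAY on
 * the current CPU. Map, struct and attach point names are hypothetical.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(u32));
 *		__uint(value_size, sizeof(u32));
 *	} events SEC(".maps");
 *
 *	struct event { u32 pid; u32 cpu; };
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int emit_event(struct pt_regs *ctx)
 *	{
 *		struct event e = {
 *			.pid = bpf_get_current_pid_tgid() >> 32,
 *			.cpu = bpf_get_smp_processor_id(),
 *		};
 *
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      &e, sizeof(e));
 *		return 0;
 *	}
 */
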
710 static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
711 struct bpf_nested_pt_regs {
712 	struct pt_regs regs[3];
713 };
714 static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
715 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
716 
717 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
718 		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
719 {
720 	struct perf_raw_frag frag = {
721 		.copy		= ctx_copy,
722 		.size		= ctx_size,
723 		.data		= ctx,
724 	};
725 	struct perf_raw_record raw = {
726 		.frag = {
727 			{
728 				.next	= ctx_size ? &frag : NULL,
729 			},
730 			.size	= meta_size,
731 			.data	= meta,
732 		},
733 	};
734 	struct perf_sample_data *sd;
735 	struct pt_regs *regs;
736 	int nest_level;
737 	u64 ret;
738 
739 	preempt_disable();
740 	nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
741 
742 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
743 		ret = -EBUSY;
744 		goto out;
745 	}
746 	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
747 	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
748 
749 	perf_fetch_caller_regs(regs);
750 	perf_sample_data_init(sd, 0, 0);
751 	perf_sample_save_raw_data(sd, &raw);
752 
753 	ret = __bpf_perf_event_output(regs, map, flags, sd);
754 out:
755 	this_cpu_dec(bpf_event_output_nest_level);
756 	preempt_enable();
757 	return ret;
758 }
759 
760 BPF_CALL_0(bpf_get_current_task)
761 {
762 	return (long) current;
763 }
764 
765 const struct bpf_func_proto bpf_get_current_task_proto = {
766 	.func		= bpf_get_current_task,
767 	.gpl_only	= true,
768 	.ret_type	= RET_INTEGER,
769 };
770 
771 BPF_CALL_0(bpf_get_current_task_btf)
772 {
773 	return (unsigned long) current;
774 }
775 
776 const struct bpf_func_proto bpf_get_current_task_btf_proto = {
777 	.func		= bpf_get_current_task_btf,
778 	.gpl_only	= true,
779 	.ret_type	= RET_PTR_TO_BTF_ID_TRUSTED,
780 	.ret_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
781 };
782 
783 BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
784 {
785 	return (unsigned long) task_pt_regs(task);
786 }
787 
788 BTF_ID_LIST(bpf_task_pt_regs_ids)
789 BTF_ID(struct, pt_regs)
790 
791 const struct bpf_func_proto bpf_task_pt_regs_proto = {
792 	.func		= bpf_task_pt_regs,
793 	.gpl_only	= true,
794 	.arg1_type	= ARG_PTR_TO_BTF_ID,
795 	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
796 	.ret_type	= RET_PTR_TO_BTF_ID,
797 	.ret_btf_id	= &bpf_task_pt_regs_ids[0],
798 };
799 
800 struct send_signal_irq_work {
801 	struct irq_work irq_work;
802 	struct task_struct *task;
803 	u32 sig;
804 	enum pid_type type;
805 };
806 
807 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
808 
809 static void do_bpf_send_signal(struct irq_work *entry)
810 {
811 	struct send_signal_irq_work *work;
812 
813 	work = container_of(entry, struct send_signal_irq_work, irq_work);
814 	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
815 	put_task_struct(work->task);
816 }
817 
818 static int bpf_send_signal_common(u32 sig, enum pid_type type)
819 {
820 	struct send_signal_irq_work *work = NULL;
821 
822 	/* Similar to bpf_probe_write_user, the task needs to be
823 	 * in a sound condition and kernel memory access must be
824 	 * permitted in order to send a signal to the current
825 	 * task.
826 	 */
827 	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
828 		return -EPERM;
829 	if (unlikely(!nmi_uaccess_okay()))
830 		return -EPERM;
831 	/* Task should not be pid=1 to avoid kernel panic. */
832 	if (unlikely(is_global_init(current)))
833 		return -EPERM;
834 
835 	if (irqs_disabled()) {
836 		/* Do an early check on signal validity. Otherwise,
837 		 * the error is lost in deferred irq_work.
838 		 */
839 		if (unlikely(!valid_signal(sig)))
840 			return -EINVAL;
841 
842 		work = this_cpu_ptr(&send_signal_work);
843 		if (irq_work_is_busy(&work->irq_work))
844 			return -EBUSY;
845 
846 		/* Add the current task, which is the target of the signal,
847 		 * to the irq_work. The current task may have changed by the
848 		 * time the queued irq_work is executed.
849 		 */
850 		work->task = get_task_struct(current);
851 		work->sig = sig;
852 		work->type = type;
853 		irq_work_queue(&work->irq_work);
854 		return 0;
855 	}
856 
857 	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
858 }
859 
860 BPF_CALL_1(bpf_send_signal, u32, sig)
861 {
862 	return bpf_send_signal_common(sig, PIDTYPE_TGID);
863 }
864 
865 static const struct bpf_func_proto bpf_send_signal_proto = {
866 	.func		= bpf_send_signal,
867 	.gpl_only	= false,
868 	.ret_type	= RET_INTEGER,
869 	.arg1_type	= ARG_ANYTHING,
870 };
871 
872 BPF_CALL_1(bpf_send_signal_thread, u32, sig)
873 {
874 	return bpf_send_signal_common(sig, PIDTYPE_PID);
875 }
876 
877 static const struct bpf_func_proto bpf_send_signal_thread_proto = {
878 	.func		= bpf_send_signal_thread,
879 	.gpl_only	= false,
880 	.ret_type	= RET_INTEGER,
881 	.arg1_type	= ARG_ANYTHING,
882 };
883 
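/*
 * Illustrative sketch (not part of this file): sending a signal to the
 * current task from a BPF program, e.g. to stop a process that reaches a
 * watched code path. The signal number and condition are only examples.
 *
 *	if (should_block)
 *		bpf_send_signal(9);		// SIGKILL, whole thread group
 *	// bpf_send_signal_thread(9) would target only the current thread
 */
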
884 BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
885 {
886 	struct path copy;
887 	long len;
888 	char *p;
889 
890 	if (!sz)
891 		return 0;
892 
893 	/*
894 	 * The path pointer is verified as trusted and safe to use,
895 	 * but let's double-check that it's valid anyway to work around a
896 	 * potentially broken verifier.
897 	 */
898 	len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
899 	if (len < 0)
900 		return len;
901 
902 	p = d_path(&copy, buf, sz);
903 	if (IS_ERR(p)) {
904 		len = PTR_ERR(p);
905 	} else {
906 		len = buf + sz - p;
907 		memmove(buf, p, len);
908 	}
909 
910 	return len;
911 }
912 
913 BTF_SET_START(btf_allowlist_d_path)
914 #ifdef CONFIG_SECURITY
915 BTF_ID(func, security_file_permission)
916 BTF_ID(func, security_inode_getattr)
917 BTF_ID(func, security_file_open)
918 #endif
919 #ifdef CONFIG_SECURITY_PATH
920 BTF_ID(func, security_path_truncate)
921 #endif
922 BTF_ID(func, vfs_truncate)
923 BTF_ID(func, vfs_fallocate)
924 BTF_ID(func, dentry_open)
925 BTF_ID(func, vfs_getattr)
926 BTF_ID(func, filp_close)
927 BTF_SET_END(btf_allowlist_d_path)
928 
929 static bool bpf_d_path_allowed(const struct bpf_prog *prog)
930 {
931 	if (prog->type == BPF_PROG_TYPE_TRACING &&
932 	    prog->expected_attach_type == BPF_TRACE_ITER)
933 		return true;
934 
935 	if (prog->type == BPF_PROG_TYPE_LSM)
936 		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
937 
938 	return btf_id_set_contains(&btf_allowlist_d_path,
939 				   prog->aux->attach_btf_id);
940 }
941 
942 BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
943 
944 static const struct bpf_func_proto bpf_d_path_proto = {
945 	.func		= bpf_d_path,
946 	.gpl_only	= false,
947 	.ret_type	= RET_INTEGER,
948 	.arg1_type	= ARG_PTR_TO_BTF_ID,
949 	.arg1_btf_id	= &bpf_d_path_btf_ids[0],
950 	.arg2_type	= ARG_PTR_TO_MEM,
951 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
952 	.allowed	= bpf_d_path_allowed,
953 };
954 
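/*
 * Illustrative sketch (not part of this file): calling bpf_d_path() from an
 * allowed attach point, here an fentry program on filp_close(), which is in
 * the allowlist above. The return value includes the trailing NUL.
 *
 *	SEC("fentry/filp_close")
 *	int BPF_PROG(trace_close, struct file *file)
 *	{
 *		char buf[256];
 *		long n = bpf_d_path(&file->f_path, buf, sizeof(buf));
 *
 *		if (n < 0)
 *			return 0;	// path could not be resolved
 *		// buf now holds the NUL-terminated pathname
 *		return 0;
 *	}
 */
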
955 #define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
956 			 BTF_F_PTR_RAW | BTF_F_ZERO)
957 
958 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
959 				  u64 flags, const struct btf **btf,
960 				  s32 *btf_id)
961 {
962 	const struct btf_type *t;
963 
964 	if (unlikely(flags & ~(BTF_F_ALL)))
965 		return -EINVAL;
966 
967 	if (btf_ptr_size != sizeof(struct btf_ptr))
968 		return -EINVAL;
969 
970 	*btf = bpf_get_btf_vmlinux();
971 
972 	if (IS_ERR_OR_NULL(*btf))
973 		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
974 
975 	if (ptr->type_id > 0)
976 		*btf_id = ptr->type_id;
977 	else
978 		return -EINVAL;
979 
980 	if (*btf_id > 0)
981 		t = btf_type_by_id(*btf, *btf_id);
982 	if (*btf_id <= 0 || !t)
983 		return -ENOENT;
984 
985 	return 0;
986 }
987 
988 BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
989 	   u32, btf_ptr_size, u64, flags)
990 {
991 	const struct btf *btf;
992 	s32 btf_id;
993 	int ret;
994 
995 	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
996 	if (ret)
997 		return ret;
998 
999 	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
1000 				      flags);
1001 }
1002 
1003 const struct bpf_func_proto bpf_snprintf_btf_proto = {
1004 	.func		= bpf_snprintf_btf,
1005 	.gpl_only	= false,
1006 	.ret_type	= RET_INTEGER,
1007 	.arg1_type	= ARG_PTR_TO_MEM,
1008 	.arg2_type	= ARG_CONST_SIZE,
1009 	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1010 	.arg4_type	= ARG_CONST_SIZE,
1011 	.arg5_type	= ARG_ANYTHING,
1012 };
1013 
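/*
 * Illustrative sketch (not part of this file): rendering a kernel struct as
 * text from a BPF program. 'task' stands for a pointer such as the one
 * returned by bpf_get_current_task_btf(); the type id would typically come
 * from vmlinux BTF via libbpf's bpf_core_type_id_kernel(). Buffer size and
 * flags are only an example.
 *
 *	char out[256];
 *	struct btf_ptr p = {
 *		.ptr = task,
 *		.type_id = bpf_core_type_id_kernel(struct task_struct),
 *	};
 *
 *	bpf_snprintf_btf(out, sizeof(out), &p, sizeof(p), BTF_F_COMPACT);
 */
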
1014 BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
1015 {
1016 	/* This helper call is inlined by verifier. */
1017 	return ((u64 *)ctx)[-2];
1018 }
1019 
1020 static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
1021 	.func		= bpf_get_func_ip_tracing,
1022 	.gpl_only	= true,
1023 	.ret_type	= RET_INTEGER,
1024 	.arg1_type	= ARG_PTR_TO_CTX,
1025 };
1026 
1027 #ifdef CONFIG_X86_KERNEL_IBT
1028 static unsigned long get_entry_ip(unsigned long fentry_ip)
1029 {
1030 	u32 instr;
1031 
1032 	/* We want to be extra safe in case entry ip is on the page edge,
1033 	 * but otherwise we need to avoid get_kernel_nofault()'s overhead.
1034 	 */
1035 	if ((fentry_ip & ~PAGE_MASK) < ENDBR_INSN_SIZE) {
1036 		if (get_kernel_nofault(instr, (u32 *)(fentry_ip - ENDBR_INSN_SIZE)))
1037 			return fentry_ip;
1038 	} else {
1039 		instr = *(u32 *)(fentry_ip - ENDBR_INSN_SIZE);
1040 	}
1041 	if (is_endbr(instr))
1042 		fentry_ip -= ENDBR_INSN_SIZE;
1043 	return fentry_ip;
1044 }
1045 #else
1046 #define get_entry_ip(fentry_ip) fentry_ip
1047 #endif
1048 
1049 BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
1050 {
1051 	struct bpf_trace_run_ctx *run_ctx __maybe_unused;
1052 	struct kprobe *kp;
1053 
1054 #ifdef CONFIG_UPROBES
1055 	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1056 	if (run_ctx->is_uprobe)
1057 		return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr;
1058 #endif
1059 
1060 	kp = kprobe_running();
1061 
1062 	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
1063 		return 0;
1064 
1065 	return get_entry_ip((uintptr_t)kp->addr);
1066 }
1067 
1068 static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
1069 	.func		= bpf_get_func_ip_kprobe,
1070 	.gpl_only	= true,
1071 	.ret_type	= RET_INTEGER,
1072 	.arg1_type	= ARG_PTR_TO_CTX,
1073 };
1074 
1075 BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
1076 {
1077 	return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
1078 }
1079 
1080 static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
1081 	.func		= bpf_get_func_ip_kprobe_multi,
1082 	.gpl_only	= false,
1083 	.ret_type	= RET_INTEGER,
1084 	.arg1_type	= ARG_PTR_TO_CTX,
1085 };
1086 
1087 BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
1088 {
1089 	return bpf_kprobe_multi_cookie(current->bpf_ctx);
1090 }
1091 
1092 static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
1093 	.func		= bpf_get_attach_cookie_kprobe_multi,
1094 	.gpl_only	= false,
1095 	.ret_type	= RET_INTEGER,
1096 	.arg1_type	= ARG_PTR_TO_CTX,
1097 };
1098 
1099 BPF_CALL_1(bpf_get_func_ip_uprobe_multi, struct pt_regs *, regs)
1100 {
1101 	return bpf_uprobe_multi_entry_ip(current->bpf_ctx);
1102 }
1103 
1104 static const struct bpf_func_proto bpf_get_func_ip_proto_uprobe_multi = {
1105 	.func		= bpf_get_func_ip_uprobe_multi,
1106 	.gpl_only	= false,
1107 	.ret_type	= RET_INTEGER,
1108 	.arg1_type	= ARG_PTR_TO_CTX,
1109 };
1110 
1111 BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi, struct pt_regs *, regs)
1112 {
1113 	return bpf_uprobe_multi_cookie(current->bpf_ctx);
1114 }
1115 
1116 static const struct bpf_func_proto bpf_get_attach_cookie_proto_umulti = {
1117 	.func		= bpf_get_attach_cookie_uprobe_multi,
1118 	.gpl_only	= false,
1119 	.ret_type	= RET_INTEGER,
1120 	.arg1_type	= ARG_PTR_TO_CTX,
1121 };
1122 
1123 BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
1124 {
1125 	struct bpf_trace_run_ctx *run_ctx;
1126 
1127 	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1128 	return run_ctx->bpf_cookie;
1129 }
1130 
1131 static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
1132 	.func		= bpf_get_attach_cookie_trace,
1133 	.gpl_only	= false,
1134 	.ret_type	= RET_INTEGER,
1135 	.arg1_type	= ARG_PTR_TO_CTX,
1136 };
1137 
1138 BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
1139 {
1140 	return ctx->event->bpf_cookie;
1141 }
1142 
1143 static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
1144 	.func		= bpf_get_attach_cookie_pe,
1145 	.gpl_only	= false,
1146 	.ret_type	= RET_INTEGER,
1147 	.arg1_type	= ARG_PTR_TO_CTX,
1148 };
1149 
1150 BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
1151 {
1152 	struct bpf_trace_run_ctx *run_ctx;
1153 
1154 	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1155 	return run_ctx->bpf_cookie;
1156 }
1157 
1158 static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
1159 	.func		= bpf_get_attach_cookie_tracing,
1160 	.gpl_only	= false,
1161 	.ret_type	= RET_INTEGER,
1162 	.arg1_type	= ARG_PTR_TO_CTX,
1163 };
1164 
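/*
 * Illustrative sketch (not part of this file): the cookie returned by these
 * helpers is a u64 supplied by user space at attach time (for example via
 * libbpf's bpf_program__attach_kprobe_opts() with .bpf_cookie set), so one
 * program can tell its attach points apart.
 *
 *	SEC("kprobe")
 *	int probe(struct pt_regs *ctx)
 *	{
 *		u64 id = bpf_get_attach_cookie(ctx);	// value set at attach
 *
 *		bpf_printk("hit attach point %llu", id);
 *		return 0;
 *	}
 */
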
1165 BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
1166 {
1167 	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1168 	u32 entry_cnt = size / br_entry_size;
1169 
1170 	entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);
1171 
1172 	if (unlikely(flags))
1173 		return -EINVAL;
1174 
1175 	if (!entry_cnt)
1176 		return -ENOENT;
1177 
1178 	return entry_cnt * br_entry_size;
1179 }
1180 
1181 static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
1182 	.func		= bpf_get_branch_snapshot,
1183 	.gpl_only	= true,
1184 	.ret_type	= RET_INTEGER,
1185 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
1186 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1187 };
1188 
1189 BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
1190 {
1191 	/* This helper call is inlined by verifier. */
1192 	u64 nr_args = ((u64 *)ctx)[-1];
1193 
1194 	if ((u64) n >= nr_args)
1195 		return -EINVAL;
1196 	*value = ((u64 *)ctx)[n];
1197 	return 0;
1198 }
1199 
1200 static const struct bpf_func_proto bpf_get_func_arg_proto = {
1201 	.func		= get_func_arg,
1202 	.ret_type	= RET_INTEGER,
1203 	.arg1_type	= ARG_PTR_TO_CTX,
1204 	.arg2_type	= ARG_ANYTHING,
1205 	.arg3_type	= ARG_PTR_TO_LONG,
1206 };
1207 
1208 BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
1209 {
1210 	/* This helper call is inlined by verifier. */
1211 	u64 nr_args = ((u64 *)ctx)[-1];
1212 
1213 	*value = ((u64 *)ctx)[nr_args];
1214 	return 0;
1215 }
1216 
1217 static const struct bpf_func_proto bpf_get_func_ret_proto = {
1218 	.func		= get_func_ret,
1219 	.ret_type	= RET_INTEGER,
1220 	.arg1_type	= ARG_PTR_TO_CTX,
1221 	.arg2_type	= ARG_PTR_TO_LONG,
1222 };
1223 
1224 BPF_CALL_1(get_func_arg_cnt, void *, ctx)
1225 {
1226 	/* This helper call is inlined by verifier. */
1227 	return ((u64 *)ctx)[-1];
1228 }
1229 
1230 static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
1231 	.func		= get_func_arg_cnt,
1232 	.ret_type	= RET_INTEGER,
1233 	.arg1_type	= ARG_PTR_TO_CTX,
1234 };
1235 
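/*
 * Illustrative sketch (not part of this file): the fentry/fexit ctx layout
 * that the inlined helpers above rely on is ((u64 *)ctx)[-2] = traced
 * function IP, [-1] = argument count, [0..n-1] = arguments, [n] = return
 * value (fexit only). A hypothetical fexit program using them:
 *
 *	SEC("fexit/vfs_read")
 *	int BPF_PROG(after_read)
 *	{
 *		u64 count, retval;
 *
 *		bpf_get_func_arg(ctx, 2, &count);	// third argument
 *		bpf_get_func_ret(ctx, &retval);
 *		bpf_printk("ip=%lx ret=%ld", bpf_get_func_ip(ctx), retval);
 *		return 0;
 *	}
 */
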
1236 #ifdef CONFIG_KEYS
1237 __bpf_kfunc_start_defs();
1238 
1239 /**
1240  * bpf_lookup_user_key - lookup a key by its serial
1241  * @serial: key handle serial number
1242  * @flags: lookup-specific flags
1243  *
1244  * Search a key with a given *serial* and the provided *flags*.
1245  * If found, increment the reference count of the key by one, and
1246  * return it in the bpf_key structure.
1247  *
1248  * The bpf_key structure must be passed to bpf_key_put() when done
1249  * with it, so that the key reference count is decremented and the
1250  * bpf_key structure is freed.
1251  *
1252  * Permission checks are deferred to the time the key is used by
1253  * one of the available key-specific kfuncs.
1254  *
1255  * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
1256  * special keyring (e.g. session keyring), if it doesn't yet exist.
1257  * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
1258  * for the key construction, and to retrieve uninstantiated keys (keys
1259  * without data attached to them).
1260  *
1261  * Return: a bpf_key pointer with a valid key pointer if the key is found, a
1262  *         NULL pointer otherwise.
1263  */
1264 __bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
1265 {
1266 	key_ref_t key_ref;
1267 	struct bpf_key *bkey;
1268 
1269 	if (flags & ~KEY_LOOKUP_ALL)
1270 		return NULL;
1271 
1272 	/*
1273 	 * Permission check is deferred until the key is used, as the
1274 	 * intent of the caller is unknown here.
1275 	 */
1276 	key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
1277 	if (IS_ERR(key_ref))
1278 		return NULL;
1279 
1280 	bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
1281 	if (!bkey) {
1282 		key_put(key_ref_to_ptr(key_ref));
1283 		return NULL;
1284 	}
1285 
1286 	bkey->key = key_ref_to_ptr(key_ref);
1287 	bkey->has_ref = true;
1288 
1289 	return bkey;
1290 }
1291 
1292 /**
1293  * bpf_lookup_system_key - lookup a key by a system-defined ID
1294  * @id: key ID
1295  *
1296  * Obtain a bpf_key structure with a key pointer set to the passed key ID.
1297  * The key pointer is marked as invalid, to prevent bpf_key_put() from
1298  * attempting to decrement the key reference count on that pointer. The key
1299  * pointer set in such way is currently understood only by
1300  * verify_pkcs7_signature().
1301  *
1302  * Set *id* to one of the values defined in include/linux/verification.h:
1303  * 0 for the primary keyring (immutable keyring of system keys);
1304  * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
1305  * (where keys can be added only if they are vouched for by existing keys
1306  * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
1307  * keyring (primarily used by the integrity subsystem to verify a kexec'ed
1308  * kerned image and, possibly, the initramfs signature).
1309  *
1310  * Return: a bpf_key pointer with an invalid key pointer set from the
1311  *         pre-determined ID on success, a NULL pointer otherwise
1312  */
1313 __bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
1314 {
1315 	struct bpf_key *bkey;
1316 
1317 	if (system_keyring_id_check(id) < 0)
1318 		return NULL;
1319 
1320 	bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
1321 	if (!bkey)
1322 		return NULL;
1323 
1324 	bkey->key = (struct key *)(unsigned long)id;
1325 	bkey->has_ref = false;
1326 
1327 	return bkey;
1328 }
1329 
1330 /**
1331  * bpf_key_put - decrement key reference count if key is valid and free bpf_key
1332  * @bkey: bpf_key structure
1333  *
1334  * Decrement the reference count of the key inside *bkey*, if the pointer
1335  * is valid, and free *bkey*.
1336  */
1337 __bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
1338 {
1339 	if (bkey->has_ref)
1340 		key_put(bkey->key);
1341 
1342 	kfree(bkey);
1343 }
1344 
1345 #ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1346 /**
1347  * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
1348  * @data_p: data to verify
1349  * @sig_p: signature of the data
1350  * @trusted_keyring: keyring with keys trusted for signature verification
1351  *
1352  * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr*
1353  * with keys in a keyring referenced by *trusted_keyring*.
1354  *
1355  * Return: 0 on success, a negative value on error.
1356  */
1357 __bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
1358 			       struct bpf_dynptr *sig_p,
1359 			       struct bpf_key *trusted_keyring)
1360 {
1361 	struct bpf_dynptr_kern *data_ptr = (struct bpf_dynptr_kern *)data_p;
1362 	struct bpf_dynptr_kern *sig_ptr = (struct bpf_dynptr_kern *)sig_p;
1363 	const void *data, *sig;
1364 	u32 data_len, sig_len;
1365 	int ret;
1366 
1367 	if (trusted_keyring->has_ref) {
1368 		/*
1369 		 * Do the permission check deferred in bpf_lookup_user_key().
1370 		 * See bpf_lookup_user_key() for more details.
1371 		 *
1372 		 * A call to key_task_permission() here would be redundant, as
1373 		 * it is already done by keyring_search() called by
1374 		 * find_asymmetric_key().
1375 		 */
1376 		ret = key_validate(trusted_keyring->key);
1377 		if (ret < 0)
1378 			return ret;
1379 	}
1380 
1381 	data_len = __bpf_dynptr_size(data_ptr);
1382 	data = __bpf_dynptr_data(data_ptr, data_len);
1383 	sig_len = __bpf_dynptr_size(sig_ptr);
1384 	sig = __bpf_dynptr_data(sig_ptr, sig_len);
1385 
1386 	return verify_pkcs7_signature(data, data_len, sig, sig_len,
1387 				      trusted_keyring->key,
1388 				      VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
1389 				      NULL);
1390 }
1391 #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
1392 
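/*
 * Illustrative sketch (not part of this file): how a sleepable BPF LSM
 * program might chain the kfuncs above to verify a PKCS#7 signature held in
 * two dynptrs against a user keyring. Setup of the dynptrs and error
 * handling are omitted, and 'serial' is hypothetical.
 *
 *	struct bpf_key *trusted = bpf_lookup_user_key(serial, 0);
 *
 *	if (!trusted)
 *		return -ENOENT;
 *	ret = bpf_verify_pkcs7_signature(&data_ptr, &sig_ptr, trusted);
 *	bpf_key_put(trusted);
 */
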
1393 __bpf_kfunc_end_defs();
1394 
1395 BTF_KFUNCS_START(key_sig_kfunc_set)
1396 BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
1397 BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
1398 BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
1399 #ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1400 BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
1401 #endif
1402 BTF_KFUNCS_END(key_sig_kfunc_set)
1403 
1404 static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
1405 	.owner = THIS_MODULE,
1406 	.set = &key_sig_kfunc_set,
1407 };
1408 
1409 static int __init bpf_key_sig_kfuncs_init(void)
1410 {
1411 	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
1412 					 &bpf_key_sig_kfunc_set);
1413 }
1414 
1415 late_initcall(bpf_key_sig_kfuncs_init);
1416 #endif /* CONFIG_KEYS */
1417 
1418 static const struct bpf_func_proto *
1419 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1420 {
1421 	switch (func_id) {
1422 	case BPF_FUNC_map_lookup_elem:
1423 		return &bpf_map_lookup_elem_proto;
1424 	case BPF_FUNC_map_update_elem:
1425 		return &bpf_map_update_elem_proto;
1426 	case BPF_FUNC_map_delete_elem:
1427 		return &bpf_map_delete_elem_proto;
1428 	case BPF_FUNC_map_push_elem:
1429 		return &bpf_map_push_elem_proto;
1430 	case BPF_FUNC_map_pop_elem:
1431 		return &bpf_map_pop_elem_proto;
1432 	case BPF_FUNC_map_peek_elem:
1433 		return &bpf_map_peek_elem_proto;
1434 	case BPF_FUNC_map_lookup_percpu_elem:
1435 		return &bpf_map_lookup_percpu_elem_proto;
1436 	case BPF_FUNC_ktime_get_ns:
1437 		return &bpf_ktime_get_ns_proto;
1438 	case BPF_FUNC_ktime_get_boot_ns:
1439 		return &bpf_ktime_get_boot_ns_proto;
1440 	case BPF_FUNC_tail_call:
1441 		return &bpf_tail_call_proto;
1442 	case BPF_FUNC_get_current_task:
1443 		return &bpf_get_current_task_proto;
1444 	case BPF_FUNC_get_current_task_btf:
1445 		return &bpf_get_current_task_btf_proto;
1446 	case BPF_FUNC_task_pt_regs:
1447 		return &bpf_task_pt_regs_proto;
1448 	case BPF_FUNC_get_current_uid_gid:
1449 		return &bpf_get_current_uid_gid_proto;
1450 	case BPF_FUNC_get_current_comm:
1451 		return &bpf_get_current_comm_proto;
1452 	case BPF_FUNC_trace_printk:
1453 		return bpf_get_trace_printk_proto();
1454 	case BPF_FUNC_get_smp_processor_id:
1455 		return &bpf_get_smp_processor_id_proto;
1456 	case BPF_FUNC_get_numa_node_id:
1457 		return &bpf_get_numa_node_id_proto;
1458 	case BPF_FUNC_perf_event_read:
1459 		return &bpf_perf_event_read_proto;
1460 	case BPF_FUNC_get_prandom_u32:
1461 		return &bpf_get_prandom_u32_proto;
1462 	case BPF_FUNC_probe_write_user:
1463 		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
1464 		       NULL : bpf_get_probe_write_proto();
1465 	case BPF_FUNC_probe_read_user:
1466 		return &bpf_probe_read_user_proto;
1467 	case BPF_FUNC_probe_read_kernel:
1468 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1469 		       NULL : &bpf_probe_read_kernel_proto;
1470 	case BPF_FUNC_probe_read_user_str:
1471 		return &bpf_probe_read_user_str_proto;
1472 	case BPF_FUNC_probe_read_kernel_str:
1473 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1474 		       NULL : &bpf_probe_read_kernel_str_proto;
1475 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1476 	case BPF_FUNC_probe_read:
1477 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1478 		       NULL : &bpf_probe_read_compat_proto;
1479 	case BPF_FUNC_probe_read_str:
1480 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1481 		       NULL : &bpf_probe_read_compat_str_proto;
1482 #endif
1483 #ifdef CONFIG_CGROUPS
1484 	case BPF_FUNC_cgrp_storage_get:
1485 		return &bpf_cgrp_storage_get_proto;
1486 	case BPF_FUNC_cgrp_storage_delete:
1487 		return &bpf_cgrp_storage_delete_proto;
1488 	case BPF_FUNC_current_task_under_cgroup:
1489 		return &bpf_current_task_under_cgroup_proto;
1490 #endif
1491 	case BPF_FUNC_send_signal:
1492 		return &bpf_send_signal_proto;
1493 	case BPF_FUNC_send_signal_thread:
1494 		return &bpf_send_signal_thread_proto;
1495 	case BPF_FUNC_perf_event_read_value:
1496 		return &bpf_perf_event_read_value_proto;
1497 	case BPF_FUNC_ringbuf_output:
1498 		return &bpf_ringbuf_output_proto;
1499 	case BPF_FUNC_ringbuf_reserve:
1500 		return &bpf_ringbuf_reserve_proto;
1501 	case BPF_FUNC_ringbuf_submit:
1502 		return &bpf_ringbuf_submit_proto;
1503 	case BPF_FUNC_ringbuf_discard:
1504 		return &bpf_ringbuf_discard_proto;
1505 	case BPF_FUNC_ringbuf_query:
1506 		return &bpf_ringbuf_query_proto;
1507 	case BPF_FUNC_jiffies64:
1508 		return &bpf_jiffies64_proto;
1509 	case BPF_FUNC_get_task_stack:
1510 		return &bpf_get_task_stack_proto;
1511 	case BPF_FUNC_copy_from_user:
1512 		return &bpf_copy_from_user_proto;
1513 	case BPF_FUNC_copy_from_user_task:
1514 		return &bpf_copy_from_user_task_proto;
1515 	case BPF_FUNC_snprintf_btf:
1516 		return &bpf_snprintf_btf_proto;
1517 	case BPF_FUNC_per_cpu_ptr:
1518 		return &bpf_per_cpu_ptr_proto;
1519 	case BPF_FUNC_this_cpu_ptr:
1520 		return &bpf_this_cpu_ptr_proto;
1521 	case BPF_FUNC_task_storage_get:
1522 		if (bpf_prog_check_recur(prog))
1523 			return &bpf_task_storage_get_recur_proto;
1524 		return &bpf_task_storage_get_proto;
1525 	case BPF_FUNC_task_storage_delete:
1526 		if (bpf_prog_check_recur(prog))
1527 			return &bpf_task_storage_delete_recur_proto;
1528 		return &bpf_task_storage_delete_proto;
1529 	case BPF_FUNC_for_each_map_elem:
1530 		return &bpf_for_each_map_elem_proto;
1531 	case BPF_FUNC_snprintf:
1532 		return &bpf_snprintf_proto;
1533 	case BPF_FUNC_get_func_ip:
1534 		return &bpf_get_func_ip_proto_tracing;
1535 	case BPF_FUNC_get_branch_snapshot:
1536 		return &bpf_get_branch_snapshot_proto;
1537 	case BPF_FUNC_find_vma:
1538 		return &bpf_find_vma_proto;
1539 	case BPF_FUNC_trace_vprintk:
1540 		return bpf_get_trace_vprintk_proto();
1541 	default:
1542 		return bpf_base_func_proto(func_id, prog);
1543 	}
1544 }
1545 
1546 static bool is_kprobe_multi(const struct bpf_prog *prog)
1547 {
1548 	return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ||
1549 	       prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
1550 }
1551 
1552 static inline bool is_kprobe_session(const struct bpf_prog *prog)
1553 {
1554 	return prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
1555 }
1556 
1557 static const struct bpf_func_proto *
1558 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1559 {
1560 	switch (func_id) {
1561 	case BPF_FUNC_perf_event_output:
1562 		return &bpf_perf_event_output_proto;
1563 	case BPF_FUNC_get_stackid:
1564 		return &bpf_get_stackid_proto;
1565 	case BPF_FUNC_get_stack:
1566 		return &bpf_get_stack_proto;
1567 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
1568 	case BPF_FUNC_override_return:
1569 		return &bpf_override_return_proto;
1570 #endif
1571 	case BPF_FUNC_get_func_ip:
1572 		if (is_kprobe_multi(prog))
1573 			return &bpf_get_func_ip_proto_kprobe_multi;
1574 		if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
1575 			return &bpf_get_func_ip_proto_uprobe_multi;
1576 		return &bpf_get_func_ip_proto_kprobe;
1577 	case BPF_FUNC_get_attach_cookie:
1578 		if (is_kprobe_multi(prog))
1579 			return &bpf_get_attach_cookie_proto_kmulti;
1580 		if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
1581 			return &bpf_get_attach_cookie_proto_umulti;
1582 		return &bpf_get_attach_cookie_proto_trace;
1583 	default:
1584 		return bpf_tracing_func_proto(func_id, prog);
1585 	}
1586 }
1587 
1588 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
1589 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1590 					const struct bpf_prog *prog,
1591 					struct bpf_insn_access_aux *info)
1592 {
1593 	if (off < 0 || off >= sizeof(struct pt_regs))
1594 		return false;
1595 	if (type != BPF_READ)
1596 		return false;
1597 	if (off % size != 0)
1598 		return false;
1599 	/*
1600 	 * Assertion for 32 bit to make sure an 8-byte (BPF_DW) access to
1601 	 * the last 4-byte member is disallowed.
1602 	 */
1603 	if (off + size > sizeof(struct pt_regs))
1604 		return false;
1605 
1606 	return true;
1607 }
1608 
1609 const struct bpf_verifier_ops kprobe_verifier_ops = {
1610 	.get_func_proto  = kprobe_prog_func_proto,
1611 	.is_valid_access = kprobe_prog_is_valid_access,
1612 };
1613 
1614 const struct bpf_prog_ops kprobe_prog_ops = {
1615 };
1616 
1617 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1618 	   u64, flags, void *, data, u64, size)
1619 {
1620 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1621 
1622 	/*
1623 	 * r1 points to the perf tracepoint buffer whose first 8 bytes are hidden
1624 	 * from the bpf program and contain a pointer to 'struct pt_regs'. Fetch it
1625 	 * from there and call the same bpf_perf_event_output() helper inline.
1626 	 */
1627 	return ____bpf_perf_event_output(regs, map, flags, data, size);
1628 }
1629 
1630 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1631 	.func		= bpf_perf_event_output_tp,
1632 	.gpl_only	= true,
1633 	.ret_type	= RET_INTEGER,
1634 	.arg1_type	= ARG_PTR_TO_CTX,
1635 	.arg2_type	= ARG_CONST_MAP_PTR,
1636 	.arg3_type	= ARG_ANYTHING,
1637 	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1638 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1639 };
1640 
1641 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1642 	   u64, flags)
1643 {
1644 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1645 
1646 	/*
1647 	 * Same comment as in bpf_perf_event_output_tp(), only that this time
1648 	 * the other helper's function body cannot be inlined due to being
1649 	 * external, thus we need to call the raw helper function.
1650 	 */
1651 	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1652 			       flags, 0, 0);
1653 }
1654 
1655 static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1656 	.func		= bpf_get_stackid_tp,
1657 	.gpl_only	= true,
1658 	.ret_type	= RET_INTEGER,
1659 	.arg1_type	= ARG_PTR_TO_CTX,
1660 	.arg2_type	= ARG_CONST_MAP_PTR,
1661 	.arg3_type	= ARG_ANYTHING,
1662 };
1663 
1664 BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1665 	   u64, flags)
1666 {
1667 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1668 
1669 	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1670 			     (unsigned long) size, flags, 0);
1671 }
1672 
1673 static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1674 	.func		= bpf_get_stack_tp,
1675 	.gpl_only	= true,
1676 	.ret_type	= RET_INTEGER,
1677 	.arg1_type	= ARG_PTR_TO_CTX,
1678 	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
1679 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1680 	.arg4_type	= ARG_ANYTHING,
1681 };
1682 
1683 static const struct bpf_func_proto *
1684 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1685 {
1686 	switch (func_id) {
1687 	case BPF_FUNC_perf_event_output:
1688 		return &bpf_perf_event_output_proto_tp;
1689 	case BPF_FUNC_get_stackid:
1690 		return &bpf_get_stackid_proto_tp;
1691 	case BPF_FUNC_get_stack:
1692 		return &bpf_get_stack_proto_tp;
1693 	case BPF_FUNC_get_attach_cookie:
1694 		return &bpf_get_attach_cookie_proto_trace;
1695 	default:
1696 		return bpf_tracing_func_proto(func_id, prog);
1697 	}
1698 }
1699 
1700 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1701 				    const struct bpf_prog *prog,
1702 				    struct bpf_insn_access_aux *info)
1703 {
1704 	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1705 		return false;
1706 	if (type != BPF_READ)
1707 		return false;
1708 	if (off % size != 0)
1709 		return false;
1710 
1711 	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1712 	return true;
1713 }
1714 
1715 const struct bpf_verifier_ops tracepoint_verifier_ops = {
1716 	.get_func_proto  = tp_prog_func_proto,
1717 	.is_valid_access = tp_prog_is_valid_access,
1718 };
1719 
1720 const struct bpf_prog_ops tracepoint_prog_ops = {
1721 };
1722 
1723 BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1724 	   struct bpf_perf_event_value *, buf, u32, size)
1725 {
1726 	int err = -EINVAL;
1727 
1728 	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1729 		goto clear;
1730 	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1731 				    &buf->running);
1732 	if (unlikely(err))
1733 		goto clear;
1734 	return 0;
1735 clear:
1736 	memset(buf, 0, size);
1737 	return err;
1738 }
1739 
1740 static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1741 	.func		= bpf_perf_prog_read_value,
1742 	.gpl_only	= true,
1743 	.ret_type	= RET_INTEGER,
1744 	.arg1_type	= ARG_PTR_TO_CTX,
1745 	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
1746 	.arg3_type	= ARG_CONST_SIZE,
1747 };
1748 
1749 BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1750 	   void *, buf, u32, size, u64, flags)
1751 {
1752 	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1753 	struct perf_branch_stack *br_stack = ctx->data->br_stack;
1754 	u32 to_copy;
1755 
1756 	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1757 		return -EINVAL;
1758 
1759 	if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK)))
1760 		return -ENOENT;
1761 
1762 	if (unlikely(!br_stack))
1763 		return -ENOENT;
1764 
1765 	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1766 		return br_stack->nr * br_entry_size;
1767 
1768 	if (!buf || (size % br_entry_size != 0))
1769 		return -EINVAL;
1770 
1771 	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1772 	memcpy(buf, br_stack->entries, to_copy);
1773 
1774 	return to_copy;
1775 }
1776 
1777 static const struct bpf_func_proto bpf_read_branch_records_proto = {
1778 	.func           = bpf_read_branch_records,
1779 	.gpl_only       = true,
1780 	.ret_type       = RET_INTEGER,
1781 	.arg1_type      = ARG_PTR_TO_CTX,
1782 	.arg2_type      = ARG_PTR_TO_MEM_OR_NULL,
1783 	.arg3_type      = ARG_CONST_SIZE_OR_ZERO,
1784 	.arg4_type      = ARG_ANYTHING,
1785 };
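/*
 * A minimal usage sketch implied by the checks above (illustrative only):
 * a perf_event program first queries the size, then copies the records.
 *
 *	struct perf_branch_entry entries[16];
 *	long sz;
 *
 *	sz = bpf_read_branch_records(ctx, NULL, 0,
 *				     BPF_F_GET_BRANCH_RECORDS_SIZE);
 *	if (sz > 0)
 *		sz = bpf_read_branch_records(ctx, entries, sizeof(entries), 0);
 */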
1786 
1787 static const struct bpf_func_proto *
1788 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1789 {
1790 	switch (func_id) {
1791 	case BPF_FUNC_perf_event_output:
1792 		return &bpf_perf_event_output_proto_tp;
1793 	case BPF_FUNC_get_stackid:
1794 		return &bpf_get_stackid_proto_pe;
1795 	case BPF_FUNC_get_stack:
1796 		return &bpf_get_stack_proto_pe;
1797 	case BPF_FUNC_perf_prog_read_value:
1798 		return &bpf_perf_prog_read_value_proto;
1799 	case BPF_FUNC_read_branch_records:
1800 		return &bpf_read_branch_records_proto;
1801 	case BPF_FUNC_get_attach_cookie:
1802 		return &bpf_get_attach_cookie_proto_pe;
1803 	default:
1804 		return bpf_tracing_func_proto(func_id, prog);
1805 	}
1806 }
1807 
1808 /*
1809  * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1810  * to avoid potential recursive reuse issue when/if tracepoints are added
1811  * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1812  *
1813  * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1814  * in normal, irq, and nmi context.
1815  */
1816 struct bpf_raw_tp_regs {
1817 	struct pt_regs regs[3];
1818 };
1819 static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1820 static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1821 static struct pt_regs *get_bpf_raw_tp_regs(void)
1822 {
1823 	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1824 	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1825 
1826 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1827 		this_cpu_dec(bpf_raw_tp_nest_level);
1828 		return ERR_PTR(-EBUSY);
1829 	}
1830 
1831 	return &tp_regs->regs[nest_level - 1];
1832 }
1833 
1834 static void put_bpf_raw_tp_regs(void)
1835 {
1836 	this_cpu_dec(bpf_raw_tp_nest_level);
1837 }
1838 
1839 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1840 	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
1841 {
1842 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1843 	int ret;
1844 
1845 	if (IS_ERR(regs))
1846 		return PTR_ERR(regs);
1847 
1848 	perf_fetch_caller_regs(regs);
1849 	ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1850 
1851 	put_bpf_raw_tp_regs();
1852 	return ret;
1853 }
1854 
1855 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1856 	.func		= bpf_perf_event_output_raw_tp,
1857 	.gpl_only	= true,
1858 	.ret_type	= RET_INTEGER,
1859 	.arg1_type	= ARG_PTR_TO_CTX,
1860 	.arg2_type	= ARG_CONST_MAP_PTR,
1861 	.arg3_type	= ARG_ANYTHING,
1862 	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1863 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1864 };
1865 
1866 extern const struct bpf_func_proto bpf_skb_output_proto;
1867 extern const struct bpf_func_proto bpf_xdp_output_proto;
1868 extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto;
1869 
1870 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1871 	   struct bpf_map *, map, u64, flags)
1872 {
1873 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1874 	int ret;
1875 
1876 	if (IS_ERR(regs))
1877 		return PTR_ERR(regs);
1878 
1879 	perf_fetch_caller_regs(regs);
1880 	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1881 	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1882 			      flags, 0, 0);
1883 	put_bpf_raw_tp_regs();
1884 	return ret;
1885 }
1886 
1887 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1888 	.func		= bpf_get_stackid_raw_tp,
1889 	.gpl_only	= true,
1890 	.ret_type	= RET_INTEGER,
1891 	.arg1_type	= ARG_PTR_TO_CTX,
1892 	.arg2_type	= ARG_CONST_MAP_PTR,
1893 	.arg3_type	= ARG_ANYTHING,
1894 };
1895 
1896 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1897 	   void *, buf, u32, size, u64, flags)
1898 {
1899 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1900 	int ret;
1901 
1902 	if (IS_ERR(regs))
1903 		return PTR_ERR(regs);
1904 
1905 	perf_fetch_caller_regs(regs);
1906 	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1907 			    (unsigned long) size, flags, 0);
1908 	put_bpf_raw_tp_regs();
1909 	return ret;
1910 }
1911 
1912 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1913 	.func		= bpf_get_stack_raw_tp,
1914 	.gpl_only	= true,
1915 	.ret_type	= RET_INTEGER,
1916 	.arg1_type	= ARG_PTR_TO_CTX,
1917 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1918 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1919 	.arg4_type	= ARG_ANYTHING,
1920 };
1921 
1922 static const struct bpf_func_proto *
1923 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1924 {
1925 	switch (func_id) {
1926 	case BPF_FUNC_perf_event_output:
1927 		return &bpf_perf_event_output_proto_raw_tp;
1928 	case BPF_FUNC_get_stackid:
1929 		return &bpf_get_stackid_proto_raw_tp;
1930 	case BPF_FUNC_get_stack:
1931 		return &bpf_get_stack_proto_raw_tp;
1932 	case BPF_FUNC_get_attach_cookie:
1933 		return &bpf_get_attach_cookie_proto_tracing;
1934 	default:
1935 		return bpf_tracing_func_proto(func_id, prog);
1936 	}
1937 }
1938 
1939 const struct bpf_func_proto *
1940 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1941 {
1942 	const struct bpf_func_proto *fn;
1943 
1944 	switch (func_id) {
1945 #ifdef CONFIG_NET
1946 	case BPF_FUNC_skb_output:
1947 		return &bpf_skb_output_proto;
1948 	case BPF_FUNC_xdp_output:
1949 		return &bpf_xdp_output_proto;
1950 	case BPF_FUNC_skc_to_tcp6_sock:
1951 		return &bpf_skc_to_tcp6_sock_proto;
1952 	case BPF_FUNC_skc_to_tcp_sock:
1953 		return &bpf_skc_to_tcp_sock_proto;
1954 	case BPF_FUNC_skc_to_tcp_timewait_sock:
1955 		return &bpf_skc_to_tcp_timewait_sock_proto;
1956 	case BPF_FUNC_skc_to_tcp_request_sock:
1957 		return &bpf_skc_to_tcp_request_sock_proto;
1958 	case BPF_FUNC_skc_to_udp6_sock:
1959 		return &bpf_skc_to_udp6_sock_proto;
1960 	case BPF_FUNC_skc_to_unix_sock:
1961 		return &bpf_skc_to_unix_sock_proto;
1962 	case BPF_FUNC_skc_to_mptcp_sock:
1963 		return &bpf_skc_to_mptcp_sock_proto;
1964 	case BPF_FUNC_sk_storage_get:
1965 		return &bpf_sk_storage_get_tracing_proto;
1966 	case BPF_FUNC_sk_storage_delete:
1967 		return &bpf_sk_storage_delete_tracing_proto;
1968 	case BPF_FUNC_sock_from_file:
1969 		return &bpf_sock_from_file_proto;
1970 	case BPF_FUNC_get_socket_cookie:
1971 		return &bpf_get_socket_ptr_cookie_proto;
1972 	case BPF_FUNC_xdp_get_buff_len:
1973 		return &bpf_xdp_get_buff_len_trace_proto;
1974 #endif
1975 	case BPF_FUNC_seq_printf:
1976 		return prog->expected_attach_type == BPF_TRACE_ITER ?
1977 		       &bpf_seq_printf_proto :
1978 		       NULL;
1979 	case BPF_FUNC_seq_write:
1980 		return prog->expected_attach_type == BPF_TRACE_ITER ?
1981 		       &bpf_seq_write_proto :
1982 		       NULL;
1983 	case BPF_FUNC_seq_printf_btf:
1984 		return prog->expected_attach_type == BPF_TRACE_ITER ?
1985 		       &bpf_seq_printf_btf_proto :
1986 		       NULL;
1987 	case BPF_FUNC_d_path:
1988 		return &bpf_d_path_proto;
1989 	case BPF_FUNC_get_func_arg:
1990 		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
1991 	case BPF_FUNC_get_func_ret:
1992 		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
1993 	case BPF_FUNC_get_func_arg_cnt:
1994 		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
1995 	case BPF_FUNC_get_attach_cookie:
1996 		if (prog->type == BPF_PROG_TYPE_TRACING &&
1997 		    prog->expected_attach_type == BPF_TRACE_RAW_TP)
1998 			return &bpf_get_attach_cookie_proto_tracing;
1999 		return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL;
2000 	default:
2001 		fn = raw_tp_prog_func_proto(func_id, prog);
2002 		if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
2003 			fn = bpf_iter_get_func_proto(func_id, prog);
2004 		return fn;
2005 	}
2006 }
2007 
2008 static bool raw_tp_prog_is_valid_access(int off, int size,
2009 					enum bpf_access_type type,
2010 					const struct bpf_prog *prog,
2011 					struct bpf_insn_access_aux *info)
2012 {
2013 	return bpf_tracing_ctx_access(off, size, type);
2014 }
2015 
2016 static bool tracing_prog_is_valid_access(int off, int size,
2017 					 enum bpf_access_type type,
2018 					 const struct bpf_prog *prog,
2019 					 struct bpf_insn_access_aux *info)
2020 {
2021 	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
2022 }
2023 
2024 int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
2025 				     const union bpf_attr *kattr,
2026 				     union bpf_attr __user *uattr)
2027 {
2028 	return -ENOTSUPP;
2029 }
2030 
2031 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
2032 	.get_func_proto  = raw_tp_prog_func_proto,
2033 	.is_valid_access = raw_tp_prog_is_valid_access,
2034 };
2035 
2036 const struct bpf_prog_ops raw_tracepoint_prog_ops = {
2037 #ifdef CONFIG_NET
2038 	.test_run = bpf_prog_test_run_raw_tp,
2039 #endif
2040 };
2041 
2042 const struct bpf_verifier_ops tracing_verifier_ops = {
2043 	.get_func_proto  = tracing_prog_func_proto,
2044 	.is_valid_access = tracing_prog_is_valid_access,
2045 };
2046 
2047 const struct bpf_prog_ops tracing_prog_ops = {
2048 	.test_run = bpf_prog_test_run_tracing,
2049 };
2050 
2051 static bool raw_tp_writable_prog_is_valid_access(int off, int size,
2052 						 enum bpf_access_type type,
2053 						 const struct bpf_prog *prog,
2054 						 struct bpf_insn_access_aux *info)
2055 {
2056 	if (off == 0) {
2057 		if (size != sizeof(u64) || type != BPF_READ)
2058 			return false;
2059 		info->reg_type = PTR_TO_TP_BUFFER;
2060 	}
2061 	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
2062 }
2063 
2064 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
2065 	.get_func_proto  = raw_tp_prog_func_proto,
2066 	.is_valid_access = raw_tp_writable_prog_is_valid_access,
2067 };
2068 
2069 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
2070 };
2071 
2072 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
2073 				    const struct bpf_prog *prog,
2074 				    struct bpf_insn_access_aux *info)
2075 {
2076 	const int size_u64 = sizeof(u64);
2077 
2078 	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
2079 		return false;
2080 	if (type != BPF_READ)
2081 		return false;
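	/*
	 * The only misaligned access tolerated below is an 8-byte read at a
	 * 4-byte aligned offset on 32-bit architectures -- presumably because
	 * the preceding regs member only guarantees 4-byte alignment there,
	 * so the u64 fields of struct bpf_perf_event_data can land on such
	 * offsets.
	 */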
2082 	if (off % size != 0) {
2083 		if (sizeof(unsigned long) != 4)
2084 			return false;
2085 		if (size != 8)
2086 			return false;
2087 		if (off % size != 4)
2088 			return false;
2089 	}
2090 
2091 	switch (off) {
2092 	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
2093 		bpf_ctx_record_field_size(info, size_u64);
2094 		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2095 			return false;
2096 		break;
2097 	case bpf_ctx_range(struct bpf_perf_event_data, addr):
2098 		bpf_ctx_record_field_size(info, size_u64);
2099 		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2100 			return false;
2101 		break;
2102 	default:
2103 		if (size != sizeof(long))
2104 			return false;
2105 	}
2106 
2107 	return true;
2108 }
2109 
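/*
 * Loads of ctx->sample_period and ctx->addr are rewritten into two loads:
 * first the perf_sample_data pointer is fetched from
 * struct bpf_perf_event_data_kern, then the requested field is read from it.
 * Any other offset is treated as a pt_regs access and goes through the
 * kern->regs pointer instead.
 */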
2110 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
2111 				      const struct bpf_insn *si,
2112 				      struct bpf_insn *insn_buf,
2113 				      struct bpf_prog *prog, u32 *target_size)
2114 {
2115 	struct bpf_insn *insn = insn_buf;
2116 
2117 	switch (si->off) {
2118 	case offsetof(struct bpf_perf_event_data, sample_period):
2119 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2120 						       data), si->dst_reg, si->src_reg,
2121 				      offsetof(struct bpf_perf_event_data_kern, data));
2122 		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2123 				      bpf_target_off(struct perf_sample_data, period, 8,
2124 						     target_size));
2125 		break;
2126 	case offsetof(struct bpf_perf_event_data, addr):
2127 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2128 						       data), si->dst_reg, si->src_reg,
2129 				      offsetof(struct bpf_perf_event_data_kern, data));
2130 		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2131 				      bpf_target_off(struct perf_sample_data, addr, 8,
2132 						     target_size));
2133 		break;
2134 	default:
2135 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2136 						       regs), si->dst_reg, si->src_reg,
2137 				      offsetof(struct bpf_perf_event_data_kern, regs));
2138 		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
2139 				      si->off);
2140 		break;
2141 	}
2142 
2143 	return insn - insn_buf;
2144 }
2145 
2146 const struct bpf_verifier_ops perf_event_verifier_ops = {
2147 	.get_func_proto		= pe_prog_func_proto,
2148 	.is_valid_access	= pe_prog_is_valid_access,
2149 	.convert_ctx_access	= pe_prog_convert_ctx_access,
2150 };
2151 
2152 const struct bpf_prog_ops perf_event_prog_ops = {
2153 };
2154 
2155 static DEFINE_MUTEX(bpf_event_mutex);
2156 
2157 #define BPF_TRACE_MAX_PROGS 64
2158 
2159 int perf_event_attach_bpf_prog(struct perf_event *event,
2160 			       struct bpf_prog *prog,
2161 			       u64 bpf_cookie)
2162 {
2163 	struct bpf_prog_array *old_array;
2164 	struct bpf_prog_array *new_array;
2165 	int ret = -EEXIST;
2166 
2167 	/*
2168 	 * Kprobe override only works if the probe is on the function entry,
2169 	 * and only if the target function is on the error injection opt-in list.
2170 	 */
2171 	if (prog->kprobe_override &&
2172 	    (!trace_kprobe_on_func_entry(event->tp_event) ||
2173 	     !trace_kprobe_error_injectable(event->tp_event)))
2174 		return -EINVAL;
2175 
2176 	mutex_lock(&bpf_event_mutex);
2177 
2178 	if (event->prog)
2179 		goto unlock;
2180 
2181 	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2182 	if (old_array &&
2183 	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
2184 		ret = -E2BIG;
2185 		goto unlock;
2186 	}
2187 
2188 	ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
2189 	if (ret < 0)
2190 		goto unlock;
2191 
2192 	/* set the new array to event->tp_event and set event->prog */
2193 	event->prog = prog;
2194 	event->bpf_cookie = bpf_cookie;
2195 	rcu_assign_pointer(event->tp_event->prog_array, new_array);
2196 	bpf_prog_array_free_sleepable(old_array);
2197 
2198 unlock:
2199 	mutex_unlock(&bpf_event_mutex);
2200 	return ret;
2201 }
2202 
2203 void perf_event_detach_bpf_prog(struct perf_event *event)
2204 {
2205 	struct bpf_prog_array *old_array;
2206 	struct bpf_prog_array *new_array;
2207 	int ret;
2208 
2209 	mutex_lock(&bpf_event_mutex);
2210 
2211 	if (!event->prog)
2212 		goto unlock;
2213 
2214 	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2215 	ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
2216 	if (ret == -ENOENT)
2217 		goto unlock;
2218 	if (ret < 0) {
2219 		bpf_prog_array_delete_safe(old_array, event->prog);
2220 	} else {
2221 		rcu_assign_pointer(event->tp_event->prog_array, new_array);
2222 		bpf_prog_array_free_sleepable(old_array);
2223 	}
2224 
2225 	bpf_prog_put(event->prog);
2226 	event->prog = NULL;
2227 
2228 unlock:
2229 	mutex_unlock(&bpf_event_mutex);
2230 }
2231 
2232 int perf_event_query_prog_array(struct perf_event *event, void __user *info)
2233 {
2234 	struct perf_event_query_bpf __user *uquery = info;
2235 	struct perf_event_query_bpf query = {};
2236 	struct bpf_prog_array *progs;
2237 	u32 *ids, prog_cnt, ids_len;
2238 	int ret;
2239 
2240 	if (!perfmon_capable())
2241 		return -EPERM;
2242 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
2243 		return -EINVAL;
2244 	if (copy_from_user(&query, uquery, sizeof(query)))
2245 		return -EFAULT;
2246 
2247 	ids_len = query.ids_len;
2248 	if (ids_len > BPF_TRACE_MAX_PROGS)
2249 		return -E2BIG;
2250 	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
2251 	if (!ids)
2252 		return -ENOMEM;
2253 	/*
2254 	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
2255 	 * is required when the user only wants to check uquery->prog_cnt.
2256 	 * There is no need to check for it since the case is handled
2257 	 * gracefully in bpf_prog_array_copy_info.
2258 	 */
2259 
2260 	mutex_lock(&bpf_event_mutex);
2261 	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
2262 	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
2263 	mutex_unlock(&bpf_event_mutex);
2264 
2265 	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
2266 	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
2267 		ret = -EFAULT;
2268 
2269 	kfree(ids);
2270 	return ret;
2271 }
2272 
2273 extern struct bpf_raw_event_map __start__bpf_raw_tp[];
2274 extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
2275 
2276 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
2277 {
2278 	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
2279 
2280 	for (; btp < __stop__bpf_raw_tp; btp++) {
2281 		if (!strcmp(btp->tp->name, name))
2282 			return btp;
2283 	}
2284 
2285 	return bpf_get_raw_tracepoint_module(name);
2286 }
2287 
2288 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
2289 {
2290 	struct module *mod;
2291 
2292 	preempt_disable();
2293 	mod = __module_address((unsigned long)btp);
2294 	module_put(mod);
2295 	preempt_enable();
2296 }
2297 
2298 static __always_inline
2299 void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
2300 {
2301 	struct bpf_prog *prog = link->link.prog;
2302 	struct bpf_run_ctx *old_run_ctx;
2303 	struct bpf_trace_run_ctx run_ctx;
2304 
2305 	cant_sleep();
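	/*
	 * prog->active is a per-CPU recursion counter: if this program is
	 * already running on this CPU, count a miss and bail out rather than
	 * re-entering it.
	 */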
2306 	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
2307 		bpf_prog_inc_misses_counter(prog);
2308 		goto out;
2309 	}
2310 
2311 	run_ctx.bpf_cookie = link->cookie;
2312 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
2313 
2314 	rcu_read_lock();
2315 	(void) bpf_prog_run(prog, args);
2316 	rcu_read_unlock();
2317 
2318 	bpf_reset_run_ctx(old_run_ctx);
2319 out:
2320 	this_cpu_dec(*(prog->active));
2321 }
2322 
2323 #define UNPACK(...)			__VA_ARGS__
2324 #define REPEAT_1(FN, DL, X, ...)	FN(X)
2325 #define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
2326 #define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
2327 #define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
2328 #define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
2329 #define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
2330 #define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
2331 #define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
2332 #define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
2333 #define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
2334 #define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
2335 #define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
2336 #define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)
2337 
2338 #define SARG(X)		u64 arg##X
2339 #define COPY(X)		args[X] = arg##X
2340 
2341 #define __DL_COM	(,)
2342 #define __DL_SEM	(;)
2343 
2344 #define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
2345 
2346 #define BPF_TRACE_DEFN_x(x)						\
2347 	void bpf_trace_run##x(struct bpf_raw_tp_link *link,		\
2348 			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
2349 	{								\
2350 		u64 args[x];						\
2351 		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
2352 		__bpf_trace_run(link, args);				\
2353 	}								\
2354 	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
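/*
 * For illustration, BPF_TRACE_DEFN_x(2) expands to roughly:
 *
 *	void bpf_trace_run2(struct bpf_raw_tp_link *link, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(link, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2)
 */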
2355 BPF_TRACE_DEFN_x(1);
2356 BPF_TRACE_DEFN_x(2);
2357 BPF_TRACE_DEFN_x(3);
2358 BPF_TRACE_DEFN_x(4);
2359 BPF_TRACE_DEFN_x(5);
2360 BPF_TRACE_DEFN_x(6);
2361 BPF_TRACE_DEFN_x(7);
2362 BPF_TRACE_DEFN_x(8);
2363 BPF_TRACE_DEFN_x(9);
2364 BPF_TRACE_DEFN_x(10);
2365 BPF_TRACE_DEFN_x(11);
2366 BPF_TRACE_DEFN_x(12);
2367 
2368 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
2369 {
2370 	struct tracepoint *tp = btp->tp;
2371 	struct bpf_prog *prog = link->link.prog;
2372 
2373 	/*
2374 	 * check that the program doesn't access arguments beyond what's
2375 	 * available in this tracepoint
2376 	 */
2377 	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
2378 		return -EINVAL;
2379 
2380 	if (prog->aux->max_tp_access > btp->writable_size)
2381 		return -EINVAL;
2382 
2383 	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func, link);
2384 }
2385 
2386 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
2387 {
2388 	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, link);
2389 }
2390 
2391 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
2392 			    u32 *fd_type, const char **buf,
2393 			    u64 *probe_offset, u64 *probe_addr,
2394 			    unsigned long *missed)
2395 {
2396 	bool is_tracepoint, is_syscall_tp;
2397 	struct bpf_prog *prog;
2398 	int flags, err = 0;
2399 
2400 	prog = event->prog;
2401 	if (!prog)
2402 		return -ENOENT;
2403 
2404 	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
2405 	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
2406 		return -EOPNOTSUPP;
2407 
2408 	*prog_id = prog->aux->id;
2409 	flags = event->tp_event->flags;
2410 	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
2411 	is_syscall_tp = is_syscall_trace_event(event->tp_event);
2412 
2413 	if (is_tracepoint || is_syscall_tp) {
2414 		*buf = is_tracepoint ? event->tp_event->tp->name
2415 				     : event->tp_event->name;
2416 		/* the fd_type/probe_offset/probe_addr out-pointers may be NULL for tracepoints */
2417 		if (fd_type)
2418 			*fd_type = BPF_FD_TYPE_TRACEPOINT;
2419 		if (probe_offset)
2420 			*probe_offset = 0x0;
2421 		if (probe_addr)
2422 			*probe_addr = 0x0;
2423 	} else {
2424 		/* kprobe/uprobe */
2425 		err = -EOPNOTSUPP;
2426 #ifdef CONFIG_KPROBE_EVENTS
2427 		if (flags & TRACE_EVENT_FL_KPROBE)
2428 			err = bpf_get_kprobe_info(event, fd_type, buf,
2429 						  probe_offset, probe_addr, missed,
2430 						  event->attr.type == PERF_TYPE_TRACEPOINT);
2431 #endif
2432 #ifdef CONFIG_UPROBE_EVENTS
2433 		if (flags & TRACE_EVENT_FL_UPROBE)
2434 			err = bpf_get_uprobe_info(event, fd_type, buf,
2435 						  probe_offset, probe_addr,
2436 						  event->attr.type == PERF_TYPE_TRACEPOINT);
2437 #endif
2438 	}
2439 
2440 	return err;
2441 }
2442 
2443 static int __init send_signal_irq_work_init(void)
2444 {
2445 	int cpu;
2446 	struct send_signal_irq_work *work;
2447 
2448 	for_each_possible_cpu(cpu) {
2449 		work = per_cpu_ptr(&send_signal_work, cpu);
2450 		init_irq_work(&work->irq_work, do_bpf_send_signal);
2451 	}
2452 	return 0;
2453 }
2454 
2455 subsys_initcall(send_signal_irq_work_init);
2456 
2457 #ifdef CONFIG_MODULES
2458 static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
2459 			    void *module)
2460 {
2461 	struct bpf_trace_module *btm, *tmp;
2462 	struct module *mod = module;
2463 	int ret = 0;
2464 
2465 	if (mod->num_bpf_raw_events == 0 ||
2466 	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
2467 		goto out;
2468 
2469 	mutex_lock(&bpf_module_mutex);
2470 
2471 	switch (op) {
2472 	case MODULE_STATE_COMING:
2473 		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
2474 		if (btm) {
2475 			btm->module = module;
2476 			list_add(&btm->list, &bpf_trace_modules);
2477 		} else {
2478 			ret = -ENOMEM;
2479 		}
2480 		break;
2481 	case MODULE_STATE_GOING:
2482 		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
2483 			if (btm->module == module) {
2484 				list_del(&btm->list);
2485 				kfree(btm);
2486 				break;
2487 			}
2488 		}
2489 		break;
2490 	}
2491 
2492 	mutex_unlock(&bpf_module_mutex);
2493 
2494 out:
2495 	return notifier_from_errno(ret);
2496 }
2497 
2498 static struct notifier_block bpf_module_nb = {
2499 	.notifier_call = bpf_event_notify,
2500 };
2501 
2502 static int __init bpf_event_init(void)
2503 {
2504 	register_module_notifier(&bpf_module_nb);
2505 	return 0;
2506 }
2507 
2508 fs_initcall(bpf_event_init);
2509 #endif /* CONFIG_MODULES */
2510 
2511 struct bpf_session_run_ctx {
2512 	struct bpf_run_ctx run_ctx;
2513 	bool is_return;
2514 	void *data;
2515 };
2516 
2517 #ifdef CONFIG_FPROBE
2518 struct bpf_kprobe_multi_link {
2519 	struct bpf_link link;
2520 	struct fprobe fp;
2521 	unsigned long *addrs;
2522 	u64 *cookies;
2523 	u32 cnt;
2524 	u32 mods_cnt;
2525 	struct module **mods;
2526 	u32 flags;
2527 };
2528 
2529 struct bpf_kprobe_multi_run_ctx {
2530 	struct bpf_session_run_ctx session_ctx;
2531 	struct bpf_kprobe_multi_link *link;
2532 	unsigned long entry_ip;
2533 };
2534 
2535 struct user_syms {
2536 	const char **syms;
2537 	char *buf;
2538 };
2539 
2540 static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
2541 {
2542 	unsigned long __user usymbol;
2543 	const char **syms = NULL;
2544 	char *buf = NULL, *p;
2545 	int err = -ENOMEM;
2546 	unsigned int i;
2547 
2548 	syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
2549 	if (!syms)
2550 		goto error;
2551 
2552 	buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
2553 	if (!buf)
2554 		goto error;
2555 
2556 	for (p = buf, i = 0; i < cnt; i++) {
2557 		if (__get_user(usymbol, usyms + i)) {
2558 			err = -EFAULT;
2559 			goto error;
2560 		}
2561 		err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN);
2562 		if (err == KSYM_NAME_LEN)
2563 			err = -E2BIG;
2564 		if (err < 0)
2565 			goto error;
2566 		syms[i] = p;
2567 		p += err + 1;
2568 	}
2569 
2570 	us->syms = syms;
2571 	us->buf = buf;
2572 	return 0;
2573 
2574 error:
2575 	if (err) {
2576 		kvfree(syms);
2577 		kvfree(buf);
2578 	}
2579 	return err;
2580 }
2581 
2582 static void kprobe_multi_put_modules(struct module **mods, u32 cnt)
2583 {
2584 	u32 i;
2585 
2586 	for (i = 0; i < cnt; i++)
2587 		module_put(mods[i]);
2588 }
2589 
2590 static void free_user_syms(struct user_syms *us)
2591 {
2592 	kvfree(us->syms);
2593 	kvfree(us->buf);
2594 }
2595 
2596 static void bpf_kprobe_multi_link_release(struct bpf_link *link)
2597 {
2598 	struct bpf_kprobe_multi_link *kmulti_link;
2599 
2600 	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2601 	unregister_fprobe(&kmulti_link->fp);
2602 	kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt);
2603 }
2604 
2605 static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
2606 {
2607 	struct bpf_kprobe_multi_link *kmulti_link;
2608 
2609 	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2610 	kvfree(kmulti_link->addrs);
2611 	kvfree(kmulti_link->cookies);
2612 	kfree(kmulti_link->mods);
2613 	kfree(kmulti_link);
2614 }
2615 
2616 static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
2617 						struct bpf_link_info *info)
2618 {
2619 	u64 __user *ucookies = u64_to_user_ptr(info->kprobe_multi.cookies);
2620 	u64 __user *uaddrs = u64_to_user_ptr(info->kprobe_multi.addrs);
2621 	struct bpf_kprobe_multi_link *kmulti_link;
2622 	u32 ucount = info->kprobe_multi.count;
2623 	int err = 0, i;
2624 
2625 	if (!uaddrs ^ !ucount)
2626 		return -EINVAL;
2627 	if (ucookies && !ucount)
2628 		return -EINVAL;
2629 
2630 	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2631 	info->kprobe_multi.count = kmulti_link->cnt;
2632 	info->kprobe_multi.flags = kmulti_link->flags;
2633 	info->kprobe_multi.missed = kmulti_link->fp.nmissed;
2634 
2635 	if (!uaddrs)
2636 		return 0;
2637 	if (ucount < kmulti_link->cnt)
2638 		err = -ENOSPC;
2639 	else
2640 		ucount = kmulti_link->cnt;
2641 
2642 	if (ucookies) {
2643 		if (kmulti_link->cookies) {
2644 			if (copy_to_user(ucookies, kmulti_link->cookies, ucount * sizeof(u64)))
2645 				return -EFAULT;
2646 		} else {
2647 			for (i = 0; i < ucount; i++) {
2648 				if (put_user(0, ucookies + i))
2649 					return -EFAULT;
2650 			}
2651 		}
2652 	}
2653 
2654 	if (kallsyms_show_value(current_cred())) {
2655 		if (copy_to_user(uaddrs, kmulti_link->addrs, ucount * sizeof(u64)))
2656 			return -EFAULT;
2657 	} else {
2658 		for (i = 0; i < ucount; i++) {
2659 			if (put_user(0, uaddrs + i))
2660 				return -EFAULT;
2661 		}
2662 	}
2663 	return err;
2664 }
2665 
2666 static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
2667 	.release = bpf_kprobe_multi_link_release,
2668 	.dealloc_deferred = bpf_kprobe_multi_link_dealloc,
2669 	.fill_link_info = bpf_kprobe_multi_link_fill_link_info,
2670 };
2671 
2672 static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv)
2673 {
2674 	const struct bpf_kprobe_multi_link *link = priv;
2675 	unsigned long *addr_a = a, *addr_b = b;
2676 	u64 *cookie_a, *cookie_b;
2677 
2678 	cookie_a = link->cookies + (addr_a - link->addrs);
2679 	cookie_b = link->cookies + (addr_b - link->addrs);
2680 
2681 	/* swap addr_a/addr_b and cookie_a/cookie_b values */
2682 	swap(*addr_a, *addr_b);
2683 	swap(*cookie_a, *cookie_b);
2684 }
2685 
2686 static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b)
2687 {
2688 	const unsigned long *addr_a = a, *addr_b = b;
2689 
2690 	if (*addr_a == *addr_b)
2691 		return 0;
2692 	return *addr_a < *addr_b ? -1 : 1;
2693 }
2694 
2695 static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv)
2696 {
2697 	return bpf_kprobe_multi_addrs_cmp(a, b);
2698 }
2699 
2700 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
2701 {
2702 	struct bpf_kprobe_multi_run_ctx *run_ctx;
2703 	struct bpf_kprobe_multi_link *link;
2704 	u64 *cookie, entry_ip;
2705 	unsigned long *addr;
2706 
2707 	if (WARN_ON_ONCE(!ctx))
2708 		return 0;
2709 	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx,
2710 			       session_ctx.run_ctx);
2711 	link = run_ctx->link;
2712 	if (!link->cookies)
2713 		return 0;
2714 	entry_ip = run_ctx->entry_ip;
2715 	addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
2716 		       bpf_kprobe_multi_addrs_cmp);
2717 	if (!addr)
2718 		return 0;
2719 	cookie = link->cookies + (addr - link->addrs);
2720 	return *cookie;
2721 }
2722 
2723 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
2724 {
2725 	struct bpf_kprobe_multi_run_ctx *run_ctx;
2726 
2727 	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx,
2728 			       session_ctx.run_ctx);
2729 	return run_ctx->entry_ip;
2730 }
2731 
2732 static int
2733 kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
2734 			   unsigned long entry_ip, struct pt_regs *regs,
2735 			   bool is_return, void *data)
2736 {
2737 	struct bpf_kprobe_multi_run_ctx run_ctx = {
2738 		.session_ctx = {
2739 			.is_return = is_return,
2740 			.data = data,
2741 		},
2742 		.link = link,
2743 		.entry_ip = entry_ip,
2744 	};
2745 	struct bpf_run_ctx *old_run_ctx;
2746 	int err;
2747 
2748 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
2749 		bpf_prog_inc_misses_counter(link->link.prog);
2750 		err = 0;
2751 		goto out;
2752 	}
2753 
2754 	migrate_disable();
2755 	rcu_read_lock();
2756 	old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx);
2757 	err = bpf_prog_run(link->link.prog, regs);
2758 	bpf_reset_run_ctx(old_run_ctx);
2759 	rcu_read_unlock();
2760 	migrate_enable();
2761 
2762  out:
2763 	__this_cpu_dec(bpf_prog_active);
2764 	return err;
2765 }
2766 
2767 static int
2768 kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
2769 			  unsigned long ret_ip, struct pt_regs *regs,
2770 			  void *data)
2771 {
2772 	struct bpf_kprobe_multi_link *link;
2773 	int err;
2774 
2775 	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2776 	err = kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs, false, data);
2777 	return is_kprobe_session(link->link.prog) ? err : 0;
2778 }
2779 
2780 static void
2781 kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip,
2782 			       unsigned long ret_ip, struct pt_regs *regs,
2783 			       void *data)
2784 {
2785 	struct bpf_kprobe_multi_link *link;
2786 
2787 	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2788 	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs, true, data);
2789 }
2790 
2791 static int symbols_cmp_r(const void *a, const void *b, const void *priv)
2792 {
2793 	const char **str_a = (const char **) a;
2794 	const char **str_b = (const char **) b;
2795 
2796 	return strcmp(*str_a, *str_b);
2797 }
2798 
2799 struct multi_symbols_sort {
2800 	const char **funcs;
2801 	u64 *cookies;
2802 };
2803 
2804 static void symbols_swap_r(void *a, void *b, int size, const void *priv)
2805 {
2806 	const struct multi_symbols_sort *data = priv;
2807 	const char **name_a = a, **name_b = b;
2808 
2809 	swap(*name_a, *name_b);
2810 
2811 	/* If defined, swap also related cookies. */
2812 	/* If cookies are defined, swap the matching cookies as well. */
2813 		u64 *cookie_a, *cookie_b;
2814 
2815 		cookie_a = data->cookies + (name_a - data->funcs);
2816 		cookie_b = data->cookies + (name_b - data->funcs);
2817 		swap(*cookie_a, *cookie_b);
2818 	}
2819 }
2820 
2821 struct modules_array {
2822 	struct module **mods;
2823 	int mods_cnt;
2824 	int mods_cap;
2825 };
2826 
2827 static int add_module(struct modules_array *arr, struct module *mod)
2828 {
2829 	struct module **mods;
2830 
2831 	if (arr->mods_cnt == arr->mods_cap) {
2832 		arr->mods_cap = max(16, arr->mods_cap * 3 / 2);
2833 		mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL);
2834 		if (!mods)
2835 			return -ENOMEM;
2836 		arr->mods = mods;
2837 	}
2838 
2839 	arr->mods[arr->mods_cnt] = mod;
2840 	arr->mods_cnt++;
2841 	return 0;
2842 }
2843 
2844 static bool has_module(struct modules_array *arr, struct module *mod)
2845 {
2846 	int i;
2847 
2848 	for (i = arr->mods_cnt - 1; i >= 0; i--) {
2849 		if (arr->mods[i] == mod)
2850 			return true;
2851 	}
2852 	return false;
2853 }
2854 
2855 static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
2856 {
2857 	struct modules_array arr = {};
2858 	u32 i, err = 0;
2859 
2860 	for (i = 0; i < addrs_cnt; i++) {
2861 		struct module *mod;
2862 
2863 		preempt_disable();
2864 		mod = __module_address(addrs[i]);
2865 		/* Either no module or it's already stored */
2866 		if (!mod || has_module(&arr, mod)) {
2867 			preempt_enable();
2868 			continue;
2869 		}
2870 		if (!try_module_get(mod))
2871 			err = -EINVAL;
2872 		preempt_enable();
2873 		if (err)
2874 			break;
2875 		err = add_module(&arr, mod);
2876 		if (err) {
2877 			module_put(mod);
2878 			break;
2879 		}
2880 	}
2881 
2882 	/* We return either err < 0 in case of error, ... */
2883 	if (err) {
2884 		kprobe_multi_put_modules(arr.mods, arr.mods_cnt);
2885 		kfree(arr.mods);
2886 		return err;
2887 	}
2888 
2889 	/* or number of modules found if everything is ok. */
2890 	*mods = arr.mods;
2891 	return arr.mods_cnt;
2892 }
2893 
2894 static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt)
2895 {
2896 	u32 i;
2897 
2898 	for (i = 0; i < cnt; i++) {
2899 		if (!within_error_injection_list(addrs[i]))
2900 			return -EINVAL;
2901 	}
2902 	return 0;
2903 }
2904 
2905 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
2906 {
2907 	struct bpf_kprobe_multi_link *link = NULL;
2908 	struct bpf_link_primer link_primer;
2909 	void __user *ucookies;
2910 	unsigned long *addrs;
2911 	u32 flags, cnt, size;
2912 	void __user *uaddrs;
2913 	u64 *cookies = NULL;
2914 	void __user *usyms;
2915 	int err;
2916 
2917 	/* no support for 32bit archs yet */
2918 	if (sizeof(u64) != sizeof(void *))
2919 		return -EOPNOTSUPP;
2920 
2921 	if (!is_kprobe_multi(prog))
2922 		return -EINVAL;
2923 
2924 	flags = attr->link_create.kprobe_multi.flags;
2925 	if (flags & ~BPF_F_KPROBE_MULTI_RETURN)
2926 		return -EINVAL;
2927 
2928 	uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
2929 	usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
2930 	if (!!uaddrs == !!usyms)
2931 		return -EINVAL;
2932 
2933 	cnt = attr->link_create.kprobe_multi.cnt;
2934 	if (!cnt)
2935 		return -EINVAL;
2936 	if (cnt > MAX_KPROBE_MULTI_CNT)
2937 		return -E2BIG;
2938 
2939 	size = cnt * sizeof(*addrs);
2940 	addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
2941 	if (!addrs)
2942 		return -ENOMEM;
2943 
2944 	ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
2945 	if (ucookies) {
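		/*
		 * sizeof(*addrs) equals sizeof(*cookies) here: 32-bit
		 * architectures were rejected above, so both are 8 bytes.
		 */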
2946 		cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
2947 		if (!cookies) {
2948 			err = -ENOMEM;
2949 			goto error;
2950 		}
2951 		if (copy_from_user(cookies, ucookies, size)) {
2952 			err = -EFAULT;
2953 			goto error;
2954 		}
2955 	}
2956 
2957 	if (uaddrs) {
2958 		if (copy_from_user(addrs, uaddrs, size)) {
2959 			err = -EFAULT;
2960 			goto error;
2961 		}
2962 	} else {
2963 		struct multi_symbols_sort data = {
2964 			.cookies = cookies,
2965 		};
2966 		struct user_syms us;
2967 
2968 		err = copy_user_syms(&us, usyms, cnt);
2969 		if (err)
2970 			goto error;
2971 
2972 		if (cookies)
2973 			data.funcs = us.syms;
2974 
2975 		sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
2976 		       symbols_swap_r, &data);
2977 
2978 		err = ftrace_lookup_symbols(us.syms, cnt, addrs);
2979 		free_user_syms(&us);
2980 		if (err)
2981 			goto error;
2982 	}
2983 
2984 	if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) {
2985 		err = -EINVAL;
2986 		goto error;
2987 	}
2988 
2989 	link = kzalloc(sizeof(*link), GFP_KERNEL);
2990 	if (!link) {
2991 		err = -ENOMEM;
2992 		goto error;
2993 	}
2994 
2995 	bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
2996 		      &bpf_kprobe_multi_link_lops, prog);
2997 
2998 	err = bpf_link_prime(&link->link, &link_primer);
2999 	if (err)
3000 		goto error;
3001 
3002 	if (!(flags & BPF_F_KPROBE_MULTI_RETURN))
3003 		link->fp.entry_handler = kprobe_multi_link_handler;
3004 	if ((flags & BPF_F_KPROBE_MULTI_RETURN) || is_kprobe_session(prog))
3005 		link->fp.exit_handler = kprobe_multi_link_exit_handler;
3006 	if (is_kprobe_session(prog))
3007 		link->fp.entry_data_size = sizeof(u64);
3008 
3009 	link->addrs = addrs;
3010 	link->cookies = cookies;
3011 	link->cnt = cnt;
3012 	link->flags = flags;
3013 
3014 	if (cookies) {
3015 		/*
3016 		 * Sorting addresses will trigger sorting cookies as well
3017 		 * (check bpf_kprobe_multi_cookie_swap). This way we can
3018 		 * find cookie based on the address in bpf_get_attach_cookie
3019 		 * helper.
3020 		 */
3021 		sort_r(addrs, cnt, sizeof(*addrs),
3022 		       bpf_kprobe_multi_cookie_cmp,
3023 		       bpf_kprobe_multi_cookie_swap,
3024 		       link);
3025 	}
3026 
3027 	err = get_modules_for_addrs(&link->mods, addrs, cnt);
3028 	if (err < 0) {
3029 		bpf_link_cleanup(&link_primer);
3030 		return err;
3031 	}
3032 	link->mods_cnt = err;
3033 
3034 	err = register_fprobe_ips(&link->fp, addrs, cnt);
3035 	if (err) {
3036 		kprobe_multi_put_modules(link->mods, link->mods_cnt);
3037 		bpf_link_cleanup(&link_primer);
3038 		return err;
3039 	}
3040 
3041 	return bpf_link_settle(&link_primer);
3042 
3043 error:
3044 	kfree(link);
3045 	kvfree(addrs);
3046 	kvfree(cookies);
3047 	return err;
3048 }
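/*
 * A minimal userspace sketch (illustrative only; the symbol name is an
 * arbitrary example) filling the link_create.kprobe_multi fields consumed
 * above via the generic BPF_LINK_CREATE command:
 *
 *	const char *syms[] = { "do_sys_openat2" };
 *	union bpf_attr attr = {};
 *
 *	attr.link_create.prog_fd = prog_fd;
 *	attr.link_create.attach_type = BPF_TRACE_KPROBE_MULTI;
 *	attr.link_create.kprobe_multi.syms = (__u64)(unsigned long)syms;
 *	attr.link_create.kprobe_multi.cnt = 1;
 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 */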
3049 #else /* !CONFIG_FPROBE */
3050 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3051 {
3052 	return -EOPNOTSUPP;
3053 }
3054 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
3055 {
3056 	return 0;
3057 }
3058 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3059 {
3060 	return 0;
3061 }
3062 #endif
3063 
3064 #ifdef CONFIG_UPROBES
3065 struct bpf_uprobe_multi_link;
3066 
3067 struct bpf_uprobe {
3068 	struct bpf_uprobe_multi_link *link;
3069 	loff_t offset;
3070 	unsigned long ref_ctr_offset;
3071 	u64 cookie;
3072 	struct uprobe_consumer consumer;
3073 };
3074 
3075 struct bpf_uprobe_multi_link {
3076 	struct path path;
3077 	struct bpf_link link;
3078 	u32 cnt;
3079 	u32 flags;
3080 	struct bpf_uprobe *uprobes;
3081 	struct task_struct *task;
3082 };
3083 
3084 struct bpf_uprobe_multi_run_ctx {
3085 	struct bpf_run_ctx run_ctx;
3086 	unsigned long entry_ip;
3087 	struct bpf_uprobe *uprobe;
3088 };
3089 
3090 static void bpf_uprobe_unregister(struct path *path, struct bpf_uprobe *uprobes,
3091 				  u32 cnt)
3092 {
3093 	u32 i;
3094 
3095 	for (i = 0; i < cnt; i++) {
3096 		uprobe_unregister(d_real_inode(path->dentry), uprobes[i].offset,
3097 				  &uprobes[i].consumer);
3098 	}
3099 }
3100 
3101 static void bpf_uprobe_multi_link_release(struct bpf_link *link)
3102 {
3103 	struct bpf_uprobe_multi_link *umulti_link;
3104 
3105 	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3106 	bpf_uprobe_unregister(&umulti_link->path, umulti_link->uprobes, umulti_link->cnt);
3107 	if (umulti_link->task)
3108 		put_task_struct(umulti_link->task);
3109 	path_put(&umulti_link->path);
3110 }
3111 
3112 static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
3113 {
3114 	struct bpf_uprobe_multi_link *umulti_link;
3115 
3116 	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3117 	kvfree(umulti_link->uprobes);
3118 	kfree(umulti_link);
3119 }
3120 
3121 static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link,
3122 						struct bpf_link_info *info)
3123 {
3124 	u64 __user *uref_ctr_offsets = u64_to_user_ptr(info->uprobe_multi.ref_ctr_offsets);
3125 	u64 __user *ucookies = u64_to_user_ptr(info->uprobe_multi.cookies);
3126 	u64 __user *uoffsets = u64_to_user_ptr(info->uprobe_multi.offsets);
3127 	u64 __user *upath = u64_to_user_ptr(info->uprobe_multi.path);
3128 	u32 upath_size = info->uprobe_multi.path_size;
3129 	struct bpf_uprobe_multi_link *umulti_link;
3130 	u32 ucount = info->uprobe_multi.count;
3131 	int err = 0, i;
3132 	long left;
3133 
3134 	if (!upath ^ !upath_size)
3135 		return -EINVAL;
3136 
3137 	if ((uoffsets || uref_ctr_offsets || ucookies) && !ucount)
3138 		return -EINVAL;
3139 
3140 	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3141 	info->uprobe_multi.count = umulti_link->cnt;
3142 	info->uprobe_multi.flags = umulti_link->flags;
3143 	info->uprobe_multi.pid = umulti_link->task ?
3144 				 task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0;
3145 
3146 	if (upath) {
3147 		char *p, *buf;
3148 
3149 		upath_size = min_t(u32, upath_size, PATH_MAX);
3150 
3151 		buf = kmalloc(upath_size, GFP_KERNEL);
3152 		if (!buf)
3153 			return -ENOMEM;
3154 		p = d_path(&umulti_link->path, buf, upath_size);
3155 		if (IS_ERR(p)) {
3156 			kfree(buf);
3157 			return PTR_ERR(p);
3158 		}
3159 		upath_size = buf + upath_size - p;
3160 		left = copy_to_user(upath, p, upath_size);
3161 		kfree(buf);
3162 		if (left)
3163 			return -EFAULT;
3164 		info->uprobe_multi.path_size = upath_size;
3165 	}
3166 
3167 	if (!uoffsets && !ucookies && !uref_ctr_offsets)
3168 		return 0;
3169 
3170 	if (ucount < umulti_link->cnt)
3171 		err = -ENOSPC;
3172 	else
3173 		ucount = umulti_link->cnt;
3174 
3175 	for (i = 0; i < ucount; i++) {
3176 		if (uoffsets &&
3177 		    put_user(umulti_link->uprobes[i].offset, uoffsets + i))
3178 			return -EFAULT;
3179 		if (uref_ctr_offsets &&
3180 		    put_user(umulti_link->uprobes[i].ref_ctr_offset, uref_ctr_offsets + i))
3181 			return -EFAULT;
3182 		if (ucookies &&
3183 		    put_user(umulti_link->uprobes[i].cookie, ucookies + i))
3184 			return -EFAULT;
3185 	}
3186 
3187 	return err;
3188 }
3189 
3190 static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
3191 	.release = bpf_uprobe_multi_link_release,
3192 	.dealloc_deferred = bpf_uprobe_multi_link_dealloc,
3193 	.fill_link_info = bpf_uprobe_multi_link_fill_link_info,
3194 };
3195 
3196 static int uprobe_prog_run(struct bpf_uprobe *uprobe,
3197 			   unsigned long entry_ip,
3198 			   struct pt_regs *regs)
3199 {
3200 	struct bpf_uprobe_multi_link *link = uprobe->link;
3201 	struct bpf_uprobe_multi_run_ctx run_ctx = {
3202 		.entry_ip = entry_ip,
3203 		.uprobe = uprobe,
3204 	};
3205 	struct bpf_prog *prog = link->link.prog;
3206 	bool sleepable = prog->sleepable;
3207 	struct bpf_run_ctx *old_run_ctx;
3208 	int err = 0;
3209 
3210 	if (link->task && current->mm != link->task->mm)
3211 		return 0;
3212 
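	/*
	 * Sleepable programs are protected by RCU-tasks-trace, non-sleepable
	 * ones by normal RCU; take the matching read-side lock here.
	 */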
3213 	if (sleepable)
3214 		rcu_read_lock_trace();
3215 	else
3216 		rcu_read_lock();
3217 
3218 	migrate_disable();
3219 
3220 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
3221 	err = bpf_prog_run(link->link.prog, regs);
3222 	bpf_reset_run_ctx(old_run_ctx);
3223 
3224 	migrate_enable();
3225 
3226 	if (sleepable)
3227 		rcu_read_unlock_trace();
3228 	else
3229 		rcu_read_unlock();
3230 	return err;
3231 }
3232 
3233 static bool
3234 uprobe_multi_link_filter(struct uprobe_consumer *con, enum uprobe_filter_ctx ctx,
3235 			 struct mm_struct *mm)
3236 {
3237 	struct bpf_uprobe *uprobe;
3238 
3239 	uprobe = container_of(con, struct bpf_uprobe, consumer);
3240 	return uprobe->link->task->mm == mm;
3241 }
3242 
3243 static int
3244 uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs)
3245 {
3246 	struct bpf_uprobe *uprobe;
3247 
3248 	uprobe = container_of(con, struct bpf_uprobe, consumer);
3249 	return uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
3250 }
3251 
3252 static int
3253 uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs)
3254 {
3255 	struct bpf_uprobe *uprobe;
3256 
3257 	uprobe = container_of(con, struct bpf_uprobe, consumer);
3258 	return uprobe_prog_run(uprobe, func, regs);
3259 }
3260 
3261 static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3262 {
3263 	struct bpf_uprobe_multi_run_ctx *run_ctx;
3264 
3265 	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
3266 	return run_ctx->entry_ip;
3267 }
3268 
3269 static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3270 {
3271 	struct bpf_uprobe_multi_run_ctx *run_ctx;
3272 
3273 	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
3274 	return run_ctx->uprobe->cookie;
3275 }
3276 
3277 int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3278 {
3279 	struct bpf_uprobe_multi_link *link = NULL;
3280 	unsigned long __user *uref_ctr_offsets;
3281 	struct bpf_link_primer link_primer;
3282 	struct bpf_uprobe *uprobes = NULL;
3283 	struct task_struct *task = NULL;
3284 	unsigned long __user *uoffsets;
3285 	u64 __user *ucookies;
3286 	void __user *upath;
3287 	u32 flags, cnt, i;
3288 	struct path path;
3289 	char *name;
3290 	pid_t pid;
3291 	int err;
3292 
3293 	/* no support for 32bit archs yet */
3294 	if (sizeof(u64) != sizeof(void *))
3295 		return -EOPNOTSUPP;
3296 
3297 	if (prog->expected_attach_type != BPF_TRACE_UPROBE_MULTI)
3298 		return -EINVAL;
3299 
3300 	flags = attr->link_create.uprobe_multi.flags;
3301 	if (flags & ~BPF_F_UPROBE_MULTI_RETURN)
3302 		return -EINVAL;
3303 
3304 	/*
3305 	 * path, offsets and cnt are mandatory,
3306 	 * ref_ctr_offsets and cookies are optional
3307 	 */
3308 	upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
3309 	uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
3310 	cnt = attr->link_create.uprobe_multi.cnt;
3311 	pid = attr->link_create.uprobe_multi.pid;
3312 
3313 	if (!upath || !uoffsets || !cnt || pid < 0)
3314 		return -EINVAL;
3315 	if (cnt > MAX_UPROBE_MULTI_CNT)
3316 		return -E2BIG;
3317 
3318 	uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets);
3319 	ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies);
3320 
3321 	name = strndup_user(upath, PATH_MAX);
3322 	if (IS_ERR(name)) {
3323 		err = PTR_ERR(name);
3324 		return err;
3325 	}
3326 
3327 	err = kern_path(name, LOOKUP_FOLLOW, &path);
3328 	kfree(name);
3329 	if (err)
3330 		return err;
3331 
3332 	if (!d_is_reg(path.dentry)) {
3333 		err = -EBADF;
3334 		goto error_path_put;
3335 	}
3336 
3337 	if (pid) {
3338 		task = get_pid_task(find_vpid(pid), PIDTYPE_TGID);
3339 		if (!task) {
3340 			err = -ESRCH;
3341 			goto error_path_put;
3342 		}
3343 	}
3344 
3345 	err = -ENOMEM;
3346 
3347 	link = kzalloc(sizeof(*link), GFP_KERNEL);
3348 	uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL);
3349 
3350 	if (!uprobes || !link)
3351 		goto error_free;
3352 
3353 	for (i = 0; i < cnt; i++) {
3354 		if (__get_user(uprobes[i].offset, uoffsets + i)) {
3355 			err = -EFAULT;
3356 			goto error_free;
3357 		}
3358 		if (uprobes[i].offset < 0) {
3359 			err = -EINVAL;
3360 			goto error_free;
3361 		}
3362 		if (uref_ctr_offsets && __get_user(uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) {
3363 			err = -EFAULT;
3364 			goto error_free;
3365 		}
3366 		if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
3367 			err = -EFAULT;
3368 			goto error_free;
3369 		}
3370 
3371 		uprobes[i].link = link;
3372 
3373 		if (flags & BPF_F_UPROBE_MULTI_RETURN)
3374 			uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
3375 		else
3376 			uprobes[i].consumer.handler = uprobe_multi_link_handler;
3377 
3378 		if (pid)
3379 			uprobes[i].consumer.filter = uprobe_multi_link_filter;
3380 	}
3381 
3382 	link->cnt = cnt;
3383 	link->uprobes = uprobes;
3384 	link->path = path;
3385 	link->task = task;
3386 	link->flags = flags;
3387 
3388 	bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI,
3389 		      &bpf_uprobe_multi_link_lops, prog);
3390 
3391 	for (i = 0; i < cnt; i++) {
3392 		err = uprobe_register_refctr(d_real_inode(link->path.dentry),
3393 					     uprobes[i].offset,
3394 					     uprobes[i].ref_ctr_offset,
3395 					     &uprobes[i].consumer);
3396 		if (err) {
3397 			bpf_uprobe_unregister(&path, uprobes, i);
3398 			goto error_free;
3399 		}
3400 	}
3401 
3402 	err = bpf_link_prime(&link->link, &link_primer);
3403 	if (err)
3404 		goto error_free;
3405 
3406 	return bpf_link_settle(&link_primer);
3407 
3408 error_free:
3409 	kvfree(uprobes);
3410 	kfree(link);
3411 	if (task)
3412 		put_task_struct(task);
3413 error_path_put:
3414 	path_put(&path);
3415 	return err;
3416 }
3417 #else /* !CONFIG_UPROBES */
3418 int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3419 {
3420 	return -EOPNOTSUPP;
3421 }
3422 static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3423 {
3424 	return 0;
3425 }
3426 static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3427 {
3428 	return 0;
3429 }
3430 #endif /* CONFIG_UPROBES */
3431 
3432 __bpf_kfunc_start_defs();
3433 
3434 __bpf_kfunc bool bpf_session_is_return(void)
3435 {
3436 	struct bpf_session_run_ctx *session_ctx;
3437 
3438 	session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx);
3439 	return session_ctx->is_return;
3440 }
3441 
3442 __bpf_kfunc __u64 *bpf_session_cookie(void)
3443 {
3444 	struct bpf_session_run_ctx *session_ctx;
3445 
3446 	session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx);
3447 	return session_ctx->data;
3448 }
3449 
3450 __bpf_kfunc_end_defs();
3451 
3452 BTF_KFUNCS_START(kprobe_multi_kfunc_set_ids)
3453 BTF_ID_FLAGS(func, bpf_session_is_return)
3454 BTF_ID_FLAGS(func, bpf_session_cookie)
3455 BTF_KFUNCS_END(kprobe_multi_kfunc_set_ids)
3456 
3457 static int bpf_kprobe_multi_filter(const struct bpf_prog *prog, u32 kfunc_id)
3458 {
3459 	if (!btf_id_set8_contains(&kprobe_multi_kfunc_set_ids, kfunc_id))
3460 		return 0;
3461 
3462 	if (!is_kprobe_session(prog))
3463 		return -EACCES;
3464 
3465 	return 0;
3466 }
3467 
3468 static const struct btf_kfunc_id_set bpf_kprobe_multi_kfunc_set = {
3469 	.owner = THIS_MODULE,
3470 	.set = &kprobe_multi_kfunc_set_ids,
3471 	.filter = bpf_kprobe_multi_filter,
3472 };
3473 
3474 static int __init bpf_kprobe_multi_kfuncs_init(void)
3475 {
3476 	return register_btf_kfunc_id_set(BPF_PROG_TYPE_KPROBE, &bpf_kprobe_multi_kfunc_set);
3477 }
3478 
3479 late_initcall(bpf_kprobe_multi_kfuncs_init);
3480