xref: /linux/kernel/trace/bpf_trace.c (revision bdd1a21b52557ea8f61d0a5dc2f77151b576eb70)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  */
5 #include <linux/kernel.h>
6 #include <linux/types.h>
7 #include <linux/slab.h>
8 #include <linux/bpf.h>
9 #include <linux/bpf_perf_event.h>
10 #include <linux/btf.h>
11 #include <linux/filter.h>
12 #include <linux/uaccess.h>
13 #include <linux/ctype.h>
14 #include <linux/kprobes.h>
15 #include <linux/spinlock.h>
16 #include <linux/syscalls.h>
17 #include <linux/error-injection.h>
18 #include <linux/btf_ids.h>
19 #include <linux/bpf_lsm.h>
20 
21 #include <net/bpf_sk_storage.h>
22 
23 #include <uapi/linux/bpf.h>
24 #include <uapi/linux/btf.h>
25 
26 #include <asm/tlb.h>
27 
28 #include "trace_probe.h"
29 #include "trace.h"
30 
31 #define CREATE_TRACE_POINTS
32 #include "bpf_trace.h"
33 
34 #define bpf_event_rcu_dereference(p)					\
35 	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
36 
37 #ifdef CONFIG_MODULES
38 struct bpf_trace_module {
39 	struct module *module;
40 	struct list_head list;
41 };
42 
43 static LIST_HEAD(bpf_trace_modules);
44 static DEFINE_MUTEX(bpf_module_mutex);
45 
46 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
47 {
48 	struct bpf_raw_event_map *btp, *ret = NULL;
49 	struct bpf_trace_module *btm;
50 	unsigned int i;
51 
52 	mutex_lock(&bpf_module_mutex);
53 	list_for_each_entry(btm, &bpf_trace_modules, list) {
54 		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
55 			btp = &btm->module->bpf_raw_events[i];
56 			if (!strcmp(btp->tp->name, name)) {
57 				if (try_module_get(btm->module))
58 					ret = btp;
59 				goto out;
60 			}
61 		}
62 	}
63 out:
64 	mutex_unlock(&bpf_module_mutex);
65 	return ret;
66 }
67 #else
68 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
69 {
70 	return NULL;
71 }
72 #endif /* CONFIG_MODULES */
73 
74 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
75 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
76 
77 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
78 				  u64 flags, const struct btf **btf,
79 				  s32 *btf_id);
80 
81 /**
82  * trace_call_bpf - invoke BPF program
83  * @call: tracepoint event
84  * @ctx: opaque context pointer
85  *
86  * kprobe handlers execute BPF programs via this helper.
87  * Can be used from static tracepoints in the future.
88  *
89  * Return: BPF programs always return an integer which is interpreted by
90  * the kprobe handler as:
91  * 0 - return from kprobe (event is filtered out)
92  * 1 - store kprobe event into ring buffer
93  * Other values are reserved and currently alias to 1
94  */
95 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
96 {
97 	unsigned int ret;
98 
99 	cant_sleep();
100 
101 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
102 		/*
103 		 * since some bpf program is already running on this cpu,
104 		 * don't call into another bpf program (same or different)
105 		 * and don't send kprobe event into ring-buffer,
106 		 * so return zero here
107 		 */
108 		ret = 0;
109 		goto out;
110 	}
111 
112 	/*
113 	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
114 	 * to all call sites, callers do a bpf_prog_array_valid() check
115 	 * first to see whether call->prog_array is empty or not, which is
116 	 * a heuristic to speed up execution.
117 	 *
118 	 * If the prog_array fetched by bpf_prog_array_valid() was
119 	 * non-NULL, we enter trace_call_bpf() and do the actual,
120 	 * proper rcu_dereference() under the RCU lock.
121 	 * If it turns out that prog_array is NULL here, we bail out.
122 	 * Conversely, if the pointer fetched by bpf_prog_array_valid()
123 	 * was NULL, the caller skips trace_call_bpf() and risks missing
124 	 * events if prog_array was updated between that check and the
125 	 * rcu_dereference(); this is an accepted risk.
126 	 */
127 	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);
128 
129  out:
130 	__this_cpu_dec(bpf_prog_active);
131 
132 	return ret;
133 }
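
/*
 * Example (illustrative sketch, not part of this file): a minimal kprobe
 * program whose return value is interpreted as described in the kernel-doc
 * above -- 0 drops the event, 1 stores it in the ring buffer.  Assumes a
 * libbpf-style build (bpf_helpers.h); the probed function and filtered PID
 * are arbitrary.
 */
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

SEC("kprobe/vfs_write")
int sample_vfs_write(struct pt_regs *ctx)
{
	/* Keep the kprobe sample only for PID 1; filter everything else. */
	return (bpf_get_current_pid_tgid() >> 32) == 1 ? 1 : 0;
}

char _license[] SEC("license") = "GPL";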
134 
135 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
136 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
137 {
138 	regs_set_return_value(regs, rc);
139 	override_function_with_return(regs);
140 	return 0;
141 }
142 
143 static const struct bpf_func_proto bpf_override_return_proto = {
144 	.func		= bpf_override_return,
145 	.gpl_only	= true,
146 	.ret_type	= RET_INTEGER,
147 	.arg1_type	= ARG_PTR_TO_CTX,
148 	.arg2_type	= ARG_ANYTHING,
149 };
150 #endif
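
/*
 * Example (sketch, not part of this file): with CONFIG_BPF_KPROBE_OVERRIDE a
 * kprobe program placed at a function's entry may force an early return via
 * bpf_override_return(), but only for functions opted in with
 * ALLOW_ERROR_INJECTION().  should_failslab() is such a function upstream;
 * the target and error value here are only for illustration.
 */
#include <errno.h>
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

SEC("kprobe/should_failslab")
int inject_enomem(struct pt_regs *ctx)
{
	/* Make the probed allocation path fail with -ENOMEM. */
	bpf_override_return(ctx, -ENOMEM);
	return 0;
}

char _license[] SEC("license") = "GPL";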
151 
152 static __always_inline int
153 bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
154 {
155 	int ret;
156 
157 	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
158 	if (unlikely(ret < 0))
159 		memset(dst, 0, size);
160 	return ret;
161 }
162 
163 BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
164 	   const void __user *, unsafe_ptr)
165 {
166 	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
167 }
168 
169 const struct bpf_func_proto bpf_probe_read_user_proto = {
170 	.func		= bpf_probe_read_user,
171 	.gpl_only	= true,
172 	.ret_type	= RET_INTEGER,
173 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
174 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
175 	.arg3_type	= ARG_ANYTHING,
176 };
177 
178 static __always_inline int
179 bpf_probe_read_user_str_common(void *dst, u32 size,
180 			       const void __user *unsafe_ptr)
181 {
182 	int ret;
183 
184 	/*
185 	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
186 	 * terminator into `dst`.
187 	 *
188 	 * strncpy_from_user() does long-sized strides in the fast path. If the
189 	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
190 	 * then there could be junk after the NUL in `dst`. If user takes `dst`
191 	 * then there could be junk after the NUL in `dst`. If the user then
192 	 * uses `dst` as a hash map key, semantically identical strings can
193 	 */
194 	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
195 	if (unlikely(ret < 0))
196 		memset(dst, 0, size);
197 	return ret;
198 }
199 
200 BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
201 	   const void __user *, unsafe_ptr)
202 {
203 	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
204 }
205 
206 const struct bpf_func_proto bpf_probe_read_user_str_proto = {
207 	.func		= bpf_probe_read_user_str,
208 	.gpl_only	= true,
209 	.ret_type	= RET_INTEGER,
210 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
211 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
212 	.arg3_type	= ARG_ANYTHING,
213 };
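
/*
 * Example (sketch, not part of this file): read a NUL-terminated user string
 * with bpf_probe_read_user_str() and use it as a hash map key, which is safe
 * precisely because of the no-junk-past-NUL guarantee documented above.
 * Assumes libbpf's bpf_helpers.h/bpf_tracing.h with the target architecture
 * defined (for PT_REGS_PARM2); the probed function and sizes are illustrative.
 */
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct filename_key {
	char name[256];
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1024);
	__type(key, struct filename_key);
	__type(value, __u64);
} open_counts SEC(".maps");

SEC("kprobe/do_sys_openat2")
int count_opens(struct pt_regs *ctx)
{
	const char *upath = (const char *)PT_REGS_PARM2(ctx);
	struct filename_key key = {};
	__u64 one = 1, *cnt;
	long len;

	/* On success, len includes the trailing NUL. */
	len = bpf_probe_read_user_str(key.name, sizeof(key.name), upath);
	if (len < 0)
		return 0;

	cnt = bpf_map_lookup_elem(&open_counts, &key);
	if (cnt)
		__sync_fetch_and_add(cnt, 1);
	else
		bpf_map_update_elem(&open_counts, &key, &one, BPF_ANY);
	return 0;
}

char _license[] SEC("license") = "GPL";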
214 
215 static __always_inline int
216 bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
217 {
218 	int ret;
219 
220 	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
221 	if (unlikely(ret < 0))
222 		memset(dst, 0, size);
223 	return ret;
224 }
225 
226 BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
227 	   const void *, unsafe_ptr)
228 {
229 	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
230 }
231 
232 const struct bpf_func_proto bpf_probe_read_kernel_proto = {
233 	.func		= bpf_probe_read_kernel,
234 	.gpl_only	= true,
235 	.ret_type	= RET_INTEGER,
236 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
237 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
238 	.arg3_type	= ARG_ANYTHING,
239 };
240 
241 static __always_inline int
242 bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
243 {
244 	int ret;
245 
246 	/*
247 	 * The strncpy_from_kernel_nofault() call will likely not fill the
248 	 * entire buffer, but that's okay in this circumstance as we're probing
249 	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
250 	 * as well probe the stack. Thus, memory is explicitly cleared
251 	 * only in error case, so that improper users ignoring return
252 	 * code altogether don't copy garbage; otherwise length of string
253 	 * is returned that can be used for bpf_perf_event_output() et al.
254 	 */
255 	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
256 	if (unlikely(ret < 0))
257 		memset(dst, 0, size);
258 	return ret;
259 }
260 
261 BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
262 	   const void *, unsafe_ptr)
263 {
264 	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
265 }
266 
267 const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
268 	.func		= bpf_probe_read_kernel_str,
269 	.gpl_only	= true,
270 	.ret_type	= RET_INTEGER,
271 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
272 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
273 	.arg3_type	= ARG_ANYTHING,
274 };
275 
276 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
277 BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
278 	   const void *, unsafe_ptr)
279 {
280 	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
281 		return bpf_probe_read_user_common(dst, size,
282 				(__force void __user *)unsafe_ptr);
283 	}
284 	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
285 }
286 
287 static const struct bpf_func_proto bpf_probe_read_compat_proto = {
288 	.func		= bpf_probe_read_compat,
289 	.gpl_only	= true,
290 	.ret_type	= RET_INTEGER,
291 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
292 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
293 	.arg3_type	= ARG_ANYTHING,
294 };
295 
296 BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
297 	   const void *, unsafe_ptr)
298 {
299 	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
300 		return bpf_probe_read_user_str_common(dst, size,
301 				(__force void __user *)unsafe_ptr);
302 	}
303 	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
304 }
305 
306 static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
307 	.func		= bpf_probe_read_compat_str,
308 	.gpl_only	= true,
309 	.ret_type	= RET_INTEGER,
310 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
311 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
312 	.arg3_type	= ARG_ANYTHING,
313 };
314 #endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
315 
316 BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
317 	   u32, size)
318 {
319 	/*
320 	 * Ensure we're in user context which is safe for the helper to
321 	 * run. This helper has no business in a kthread.
322 	 *
323 	 * access_ok() should prevent writing to non-user memory, but in
324 	 * some situations (nommu, temporary switch, etc) access_ok() does
325 	 * not provide enough validation, hence the check on KERNEL_DS.
326 	 *
327 	 * nmi_uaccess_okay() ensures the probe is not run in an interim
328 	 * state, when the task or mm are switched. This is specifically
329 	 * required to prevent the use of temporary mm.
330 	 */
331 
332 	if (unlikely(in_interrupt() ||
333 		     current->flags & (PF_KTHREAD | PF_EXITING)))
334 		return -EPERM;
335 	if (unlikely(uaccess_kernel()))
336 		return -EPERM;
337 	if (unlikely(!nmi_uaccess_okay()))
338 		return -EPERM;
339 
340 	return copy_to_user_nofault(unsafe_ptr, src, size);
341 }
342 
343 static const struct bpf_func_proto bpf_probe_write_user_proto = {
344 	.func		= bpf_probe_write_user,
345 	.gpl_only	= true,
346 	.ret_type	= RET_INTEGER,
347 	.arg1_type	= ARG_ANYTHING,
348 	.arg2_type	= ARG_PTR_TO_MEM,
349 	.arg3_type	= ARG_CONST_SIZE,
350 };
351 
352 static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
353 {
354 	if (!capable(CAP_SYS_ADMIN))
355 		return NULL;
356 
357 	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
358 			    current->comm, task_pid_nr(current));
359 
360 	return &bpf_probe_write_user_proto;
361 }
362 
363 static DEFINE_RAW_SPINLOCK(trace_printk_lock);
364 
365 #define MAX_TRACE_PRINTK_VARARGS	3
366 #define BPF_TRACE_PRINTK_SIZE		1024
367 
368 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
369 	   u64, arg2, u64, arg3)
370 {
371 	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
372 	u32 *bin_args;
373 	static char buf[BPF_TRACE_PRINTK_SIZE];
374 	unsigned long flags;
375 	int ret;
376 
377 	ret = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args,
378 				  MAX_TRACE_PRINTK_VARARGS);
379 	if (ret < 0)
380 		return ret;
381 
382 	raw_spin_lock_irqsave(&trace_printk_lock, flags);
383 	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);
384 
385 	trace_bpf_trace_printk(buf);
386 	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
387 
388 	bpf_bprintf_cleanup();
389 
390 	return ret;
391 }
392 
393 static const struct bpf_func_proto bpf_trace_printk_proto = {
394 	.func		= bpf_trace_printk,
395 	.gpl_only	= true,
396 	.ret_type	= RET_INTEGER,
397 	.arg1_type	= ARG_PTR_TO_MEM,
398 	.arg2_type	= ARG_CONST_SIZE,
399 };
400 
401 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
402 {
403 	/*
404 	 * This program might be calling bpf_trace_printk,
405 	 * so enable the associated bpf_trace/bpf_trace_printk event.
406 	 * Repeat this each time, as it is possible that a user has
407 	 * disabled bpf_trace_printk events.  However, by loading a
408 	 * program that calls bpf_trace_printk() the user has expressed
409 	 * the intent to see such events.
410 	 */
411 	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
412 		pr_warn_ratelimited("could not enable bpf_trace_printk events");
413 
414 	return &bpf_trace_printk_proto;
415 }
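
/*
 * Example (sketch, not part of this file): bpf_trace_printk() is normally
 * reached through the bpf_printk() convenience macro from libbpf's
 * bpf_helpers.h; at most three arguments are formatted, matching
 * MAX_TRACE_PRINTK_VARARGS above.  The probed function is arbitrary.
 */
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

SEC("kprobe/do_nanosleep")
int log_nanosleep(struct pt_regs *ctx)
{
	bpf_printk("pid %d entered do_nanosleep\n",
		   (int)(bpf_get_current_pid_tgid() >> 32));
	return 0;
}

char _license[] SEC("license") = "GPL";

/* The formatted text shows up in the bpf_trace/bpf_trace_printk event, e.g.
 * via "cat /sys/kernel/debug/tracing/trace_pipe". */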
416 
417 #define MAX_SEQ_PRINTF_VARARGS		12
418 
419 BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
420 	   const void *, data, u32, data_len)
421 {
422 	int err, num_args;
423 	u32 *bin_args;
424 
425 	if (data_len & 7 || data_len > MAX_SEQ_PRINTF_VARARGS * 8 ||
426 	    (data_len && !data))
427 		return -EINVAL;
428 	num_args = data_len / 8;
429 
430 	err = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
431 	if (err < 0)
432 		return err;
433 
434 	seq_bprintf(m, fmt, bin_args);
435 
436 	bpf_bprintf_cleanup();
437 
438 	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
439 }
440 
441 BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
442 
443 static const struct bpf_func_proto bpf_seq_printf_proto = {
444 	.func		= bpf_seq_printf,
445 	.gpl_only	= true,
446 	.ret_type	= RET_INTEGER,
447 	.arg1_type	= ARG_PTR_TO_BTF_ID,
448 	.arg1_btf_id	= &btf_seq_file_ids[0],
449 	.arg2_type	= ARG_PTR_TO_MEM,
450 	.arg3_type	= ARG_CONST_SIZE,
451 	.arg4_type      = ARG_PTR_TO_MEM_OR_NULL,
452 	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
453 };
454 
455 BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
456 {
457 	return seq_write(m, data, len) ? -EOVERFLOW : 0;
458 }
459 
460 static const struct bpf_func_proto bpf_seq_write_proto = {
461 	.func		= bpf_seq_write,
462 	.gpl_only	= true,
463 	.ret_type	= RET_INTEGER,
464 	.arg1_type	= ARG_PTR_TO_BTF_ID,
465 	.arg1_btf_id	= &btf_seq_file_ids[0],
466 	.arg2_type	= ARG_PTR_TO_MEM,
467 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
468 };
469 
470 BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
471 	   u32, btf_ptr_size, u64, flags)
472 {
473 	const struct btf *btf;
474 	s32 btf_id;
475 	int ret;
476 
477 	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
478 	if (ret)
479 		return ret;
480 
481 	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
482 }
483 
484 static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
485 	.func		= bpf_seq_printf_btf,
486 	.gpl_only	= true,
487 	.ret_type	= RET_INTEGER,
488 	.arg1_type	= ARG_PTR_TO_BTF_ID,
489 	.arg1_btf_id	= &btf_seq_file_ids[0],
490 	.arg2_type	= ARG_PTR_TO_MEM,
491 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
492 	.arg4_type	= ARG_ANYTHING,
493 };
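
/*
 * Example (sketch, not part of this file): the bpf_seq_*() helpers above are
 * only offered to BPF_TRACE_ITER programs (see tracing_prog_func_proto()
 * below).  A task iterator can emit one line per task through the seq_file
 * it is given.  Assumes a CO-RE build with vmlinux.h; libbpf's
 * BPF_SEQ_PRINTF() macro would normally hide the argument packing shown here.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("iter/task")
int dump_tasks(struct bpf_iter__task *ctx)
{
	static const char fmt[] = "pid %d comm %s\n";
	struct seq_file *m = ctx->meta->seq;
	struct task_struct *task = ctx->task;
	__u64 data[2];

	if (!task)
		return 0;

	/* bpf_seq_printf() takes the varargs as an array of u64 words. */
	data[0] = task->pid;
	data[1] = (__u64)task->comm;
	bpf_seq_printf(m, fmt, sizeof(fmt), data, sizeof(data));
	return 0;
}

char _license[] SEC("license") = "GPL";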
494 
495 static __always_inline int
496 get_map_perf_counter(struct bpf_map *map, u64 flags,
497 		     u64 *value, u64 *enabled, u64 *running)
498 {
499 	struct bpf_array *array = container_of(map, struct bpf_array, map);
500 	unsigned int cpu = smp_processor_id();
501 	u64 index = flags & BPF_F_INDEX_MASK;
502 	struct bpf_event_entry *ee;
503 
504 	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
505 		return -EINVAL;
506 	if (index == BPF_F_CURRENT_CPU)
507 		index = cpu;
508 	if (unlikely(index >= array->map.max_entries))
509 		return -E2BIG;
510 
511 	ee = READ_ONCE(array->ptrs[index]);
512 	if (!ee)
513 		return -ENOENT;
514 
515 	return perf_event_read_local(ee->event, value, enabled, running);
516 }
517 
518 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
519 {
520 	u64 value = 0;
521 	int err;
522 
523 	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
524 	/*
525 	 * This API is awkward: error codes shadow the [-22..-2] range of
526 	 * otherwise valid counter values, but that's UAPI.
527 	 */
528 	if (err)
529 		return err;
530 	return value;
531 }
532 
533 static const struct bpf_func_proto bpf_perf_event_read_proto = {
534 	.func		= bpf_perf_event_read,
535 	.gpl_only	= true,
536 	.ret_type	= RET_INTEGER,
537 	.arg1_type	= ARG_CONST_MAP_PTR,
538 	.arg2_type	= ARG_ANYTHING,
539 };
540 
541 BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
542 	   struct bpf_perf_event_value *, buf, u32, size)
543 {
544 	int err = -EINVAL;
545 
546 	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
547 		goto clear;
548 	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
549 				   &buf->running);
550 	if (unlikely(err))
551 		goto clear;
552 	return 0;
553 clear:
554 	memset(buf, 0, size);
555 	return err;
556 }
557 
558 static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
559 	.func		= bpf_perf_event_read_value,
560 	.gpl_only	= true,
561 	.ret_type	= RET_INTEGER,
562 	.arg1_type	= ARG_CONST_MAP_PTR,
563 	.arg2_type	= ARG_ANYTHING,
564 	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
565 	.arg4_type	= ARG_CONST_SIZE,
566 };
567 
568 static __always_inline u64
569 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
570 			u64 flags, struct perf_sample_data *sd)
571 {
572 	struct bpf_array *array = container_of(map, struct bpf_array, map);
573 	unsigned int cpu = smp_processor_id();
574 	u64 index = flags & BPF_F_INDEX_MASK;
575 	struct bpf_event_entry *ee;
576 	struct perf_event *event;
577 
578 	if (index == BPF_F_CURRENT_CPU)
579 		index = cpu;
580 	if (unlikely(index >= array->map.max_entries))
581 		return -E2BIG;
582 
583 	ee = READ_ONCE(array->ptrs[index]);
584 	if (!ee)
585 		return -ENOENT;
586 
587 	event = ee->event;
588 	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
589 		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
590 		return -EINVAL;
591 
592 	if (unlikely(event->oncpu != cpu))
593 		return -EOPNOTSUPP;
594 
595 	return perf_event_output(event, sd, regs);
596 }
597 
598 /*
599  * Support executing tracepoints in normal, irq, and nmi context, each of
600  * which may call bpf_perf_event_output().
601  */
602 struct bpf_trace_sample_data {
603 	struct perf_sample_data sds[3];
604 };
605 
606 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
607 static DEFINE_PER_CPU(int, bpf_trace_nest_level);
608 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
609 	   u64, flags, void *, data, u64, size)
610 {
611 	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
612 	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
613 	struct perf_raw_record raw = {
614 		.frag = {
615 			.size = size,
616 			.data = data,
617 		},
618 	};
619 	struct perf_sample_data *sd;
620 	int err;
621 
622 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
623 		err = -EBUSY;
624 		goto out;
625 	}
626 
627 	sd = &sds->sds[nest_level - 1];
628 
629 	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
630 		err = -EINVAL;
631 		goto out;
632 	}
633 
634 	perf_sample_data_init(sd, 0, 0);
635 	sd->raw = &raw;
636 
637 	err = __bpf_perf_event_output(regs, map, flags, sd);
638 
639 out:
640 	this_cpu_dec(bpf_trace_nest_level);
641 	return err;
642 }
643 
644 static const struct bpf_func_proto bpf_perf_event_output_proto = {
645 	.func		= bpf_perf_event_output,
646 	.gpl_only	= true,
647 	.ret_type	= RET_INTEGER,
648 	.arg1_type	= ARG_PTR_TO_CTX,
649 	.arg2_type	= ARG_CONST_MAP_PTR,
650 	.arg3_type	= ARG_ANYTHING,
651 	.arg4_type	= ARG_PTR_TO_MEM,
652 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
653 };
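
/*
 * Example (sketch, not part of this file): stream a fixed-size record to user
 * space through a BPF_MAP_TYPE_PERF_EVENT_ARRAY; BPF_F_CURRENT_CPU selects
 * the current CPU's ring buffer.  User space typically consumes the records
 * with libbpf's perf_buffer API.  The record layout and probed function are
 * illustrative.
 */
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

struct exit_event {
	__u32 pid;
	char comm[16];
};

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} events SEC(".maps");

SEC("kprobe/do_exit")
int on_exit(struct pt_regs *ctx)
{
	struct exit_event e = {};

	e.pid = bpf_get_current_pid_tgid() >> 32;
	bpf_get_current_comm(e.comm, sizeof(e.comm));
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
	return 0;
}

char _license[] SEC("license") = "GPL";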
654 
655 static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
656 struct bpf_nested_pt_regs {
657 	struct pt_regs regs[3];
658 };
659 static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
660 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
661 
662 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
663 		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
664 {
665 	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
666 	struct perf_raw_frag frag = {
667 		.copy		= ctx_copy,
668 		.size		= ctx_size,
669 		.data		= ctx,
670 	};
671 	struct perf_raw_record raw = {
672 		.frag = {
673 			{
674 				.next	= ctx_size ? &frag : NULL,
675 			},
676 			.size	= meta_size,
677 			.data	= meta,
678 		},
679 	};
680 	struct perf_sample_data *sd;
681 	struct pt_regs *regs;
682 	u64 ret;
683 
684 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
685 		ret = -EBUSY;
686 		goto out;
687 	}
688 	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
689 	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
690 
691 	perf_fetch_caller_regs(regs);
692 	perf_sample_data_init(sd, 0, 0);
693 	sd->raw = &raw;
694 
695 	ret = __bpf_perf_event_output(regs, map, flags, sd);
696 out:
697 	this_cpu_dec(bpf_event_output_nest_level);
698 	return ret;
699 }
700 
701 BPF_CALL_0(bpf_get_current_task)
702 {
703 	return (long) current;
704 }
705 
706 const struct bpf_func_proto bpf_get_current_task_proto = {
707 	.func		= bpf_get_current_task,
708 	.gpl_only	= true,
709 	.ret_type	= RET_INTEGER,
710 };
711 
712 BPF_CALL_0(bpf_get_current_task_btf)
713 {
714 	return (unsigned long) current;
715 }
716 
717 BTF_ID_LIST_SINGLE(bpf_get_current_btf_ids, struct, task_struct)
718 
719 static const struct bpf_func_proto bpf_get_current_task_btf_proto = {
720 	.func		= bpf_get_current_task_btf,
721 	.gpl_only	= true,
722 	.ret_type	= RET_PTR_TO_BTF_ID,
723 	.ret_btf_id	= &bpf_get_current_btf_ids[0],
724 };
725 
726 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
727 {
728 	struct bpf_array *array = container_of(map, struct bpf_array, map);
729 	struct cgroup *cgrp;
730 
731 	if (unlikely(idx >= array->map.max_entries))
732 		return -E2BIG;
733 
734 	cgrp = READ_ONCE(array->ptrs[idx]);
735 	if (unlikely(!cgrp))
736 		return -EAGAIN;
737 
738 	return task_under_cgroup_hierarchy(current, cgrp);
739 }
740 
741 static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
742 	.func           = bpf_current_task_under_cgroup,
743 	.gpl_only       = false,
744 	.ret_type       = RET_INTEGER,
745 	.arg1_type      = ARG_CONST_MAP_PTR,
746 	.arg2_type      = ARG_ANYTHING,
747 };
748 
749 struct send_signal_irq_work {
750 	struct irq_work irq_work;
751 	struct task_struct *task;
752 	u32 sig;
753 	enum pid_type type;
754 };
755 
756 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
757 
758 static void do_bpf_send_signal(struct irq_work *entry)
759 {
760 	struct send_signal_irq_work *work;
761 
762 	work = container_of(entry, struct send_signal_irq_work, irq_work);
763 	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
764 }
765 
766 static int bpf_send_signal_common(u32 sig, enum pid_type type)
767 {
768 	struct send_signal_irq_work *work = NULL;
769 
770 	/* Similar to bpf_probe_write_user(), the task needs to be
771 	 * in a sound condition and kernel memory access must be
772 	 * permitted in order to send a signal to the current
773 	 * task.
774 	 */
775 	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
776 		return -EPERM;
777 	if (unlikely(uaccess_kernel()))
778 		return -EPERM;
779 	if (unlikely(!nmi_uaccess_okay()))
780 		return -EPERM;
781 
782 	if (irqs_disabled()) {
783 		/* Do an early check on signal validity. Otherwise,
784 		 * the error is lost in deferred irq_work.
785 		 */
786 		if (unlikely(!valid_signal(sig)))
787 			return -EINVAL;
788 
789 		work = this_cpu_ptr(&send_signal_work);
790 		if (irq_work_is_busy(&work->irq_work))
791 			return -EBUSY;
792 
793 		/* Add the current task, which is the target of the signal,
794 		 * to the irq_work. The current task may have changed by the
795 		 * time the queued irq_work gets executed.
796 		 */
797 		work->task = current;
798 		work->sig = sig;
799 		work->type = type;
800 		irq_work_queue(&work->irq_work);
801 		return 0;
802 	}
803 
804 	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
805 }
806 
807 BPF_CALL_1(bpf_send_signal, u32, sig)
808 {
809 	return bpf_send_signal_common(sig, PIDTYPE_TGID);
810 }
811 
812 static const struct bpf_func_proto bpf_send_signal_proto = {
813 	.func		= bpf_send_signal,
814 	.gpl_only	= false,
815 	.ret_type	= RET_INTEGER,
816 	.arg1_type	= ARG_ANYTHING,
817 };
818 
819 BPF_CALL_1(bpf_send_signal_thread, u32, sig)
820 {
821 	return bpf_send_signal_common(sig, PIDTYPE_PID);
822 }
823 
824 static const struct bpf_func_proto bpf_send_signal_thread_proto = {
825 	.func		= bpf_send_signal_thread,
826 	.gpl_only	= false,
827 	.ret_type	= RET_INTEGER,
828 	.arg1_type	= ARG_ANYTHING,
829 };
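
/*
 * Example (sketch, not part of this file): bpf_send_signal() signals the
 * whole thread group of the current task (PIDTYPE_TGID), while
 * bpf_send_signal_thread() targets only the calling thread (PIDTYPE_PID).
 * The probed function and signal choice are illustrative.
 */
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

SEC("kprobe/do_unlinkat")
int kill_unlinker(struct pt_regs *ctx)
{
	/* 9 == SIGKILL: terminate whichever process reached this kprobe. */
	bpf_send_signal(9);
	return 0;
}

char _license[] SEC("license") = "GPL";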
830 
831 BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
832 {
833 	long len;
834 	char *p;
835 
836 	if (!sz)
837 		return 0;
838 
839 	p = d_path(path, buf, sz);
840 	if (IS_ERR(p)) {
841 		len = PTR_ERR(p);
842 	} else {
843 		len = buf + sz - p;
844 		memmove(buf, p, len);
845 	}
846 
847 	return len;
848 }
849 
850 BTF_SET_START(btf_allowlist_d_path)
851 #ifdef CONFIG_SECURITY
852 BTF_ID(func, security_file_permission)
853 BTF_ID(func, security_inode_getattr)
854 BTF_ID(func, security_file_open)
855 #endif
856 #ifdef CONFIG_SECURITY_PATH
857 BTF_ID(func, security_path_truncate)
858 #endif
859 BTF_ID(func, vfs_truncate)
860 BTF_ID(func, vfs_fallocate)
861 BTF_ID(func, dentry_open)
862 BTF_ID(func, vfs_getattr)
863 BTF_ID(func, filp_close)
864 BTF_SET_END(btf_allowlist_d_path)
865 
866 static bool bpf_d_path_allowed(const struct bpf_prog *prog)
867 {
868 	if (prog->type == BPF_PROG_TYPE_TRACING &&
869 	    prog->expected_attach_type == BPF_TRACE_ITER)
870 		return true;
871 
872 	if (prog->type == BPF_PROG_TYPE_LSM)
873 		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
874 
875 	return btf_id_set_contains(&btf_allowlist_d_path,
876 				   prog->aux->attach_btf_id);
877 }
878 
879 BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
880 
881 static const struct bpf_func_proto bpf_d_path_proto = {
882 	.func		= bpf_d_path,
883 	.gpl_only	= false,
884 	.ret_type	= RET_INTEGER,
885 	.arg1_type	= ARG_PTR_TO_BTF_ID,
886 	.arg1_btf_id	= &bpf_d_path_btf_ids[0],
887 	.arg2_type	= ARG_PTR_TO_MEM,
888 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
889 	.allowed	= bpf_d_path_allowed,
890 };
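
/*
 * Example (sketch, not part of this file): bpf_d_path() is restricted by
 * bpf_d_path_allowed() above to tracing iterators, sleepable LSM hooks and
 * the allowlisted functions; security_file_open() is on that allowlist.
 * Assumes a CO-RE build with vmlinux.h and libbpf's BPF_PROG() macro.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("fentry/security_file_open")
int BPF_PROG(trace_file_open, struct file *file)
{
	char buf[256];
	long n;

	n = bpf_d_path(&file->f_path, buf, sizeof(buf));
	if (n > 0)
		bpf_printk("open: %s\n", buf);
	return 0;
}

char _license[] SEC("license") = "GPL";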
891 
892 #define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
893 			 BTF_F_PTR_RAW | BTF_F_ZERO)
894 
895 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
896 				  u64 flags, const struct btf **btf,
897 				  s32 *btf_id)
898 {
899 	const struct btf_type *t;
900 
901 	if (unlikely(flags & ~(BTF_F_ALL)))
902 		return -EINVAL;
903 
904 	if (btf_ptr_size != sizeof(struct btf_ptr))
905 		return -EINVAL;
906 
907 	*btf = bpf_get_btf_vmlinux();
908 
909 	if (IS_ERR_OR_NULL(*btf))
910 		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
911 
912 	if (ptr->type_id > 0)
913 		*btf_id = ptr->type_id;
914 	else
915 		return -EINVAL;
916 
917 	if (*btf_id > 0)
918 		t = btf_type_by_id(*btf, *btf_id);
919 	if (*btf_id <= 0 || !t)
920 		return -ENOENT;
921 
922 	return 0;
923 }
924 
925 BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
926 	   u32, btf_ptr_size, u64, flags)
927 {
928 	const struct btf *btf;
929 	s32 btf_id;
930 	int ret;
931 
932 	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
933 	if (ret)
934 		return ret;
935 
936 	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
937 				      flags);
938 }
939 
940 const struct bpf_func_proto bpf_snprintf_btf_proto = {
941 	.func		= bpf_snprintf_btf,
942 	.gpl_only	= false,
943 	.ret_type	= RET_INTEGER,
944 	.arg1_type	= ARG_PTR_TO_MEM,
945 	.arg2_type	= ARG_CONST_SIZE,
946 	.arg3_type	= ARG_PTR_TO_MEM,
947 	.arg4_type	= ARG_CONST_SIZE,
948 	.arg5_type	= ARG_ANYTHING,
949 };
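
/*
 * Example (sketch, not part of this file): bpf_snprintf_btf() renders an
 * arbitrary kernel object as text using its BTF type description.  The BTF
 * type id is resolved at load time here with libbpf's
 * bpf_core_type_id_kernel(), which assumes a CO-RE build with vmlinux.h;
 * flags of 0 request the default output.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

static char out[4096];	/* in .bss: far too big for the BPF stack */

SEC("fentry/do_nanosleep")
int BPF_PROG(dump_current_task)
{
	struct btf_ptr p = {};

	p.ptr = bpf_get_current_task_btf();
	p.type_id = bpf_core_type_id_kernel(struct task_struct);
	bpf_snprintf_btf(out, sizeof(out), &p, sizeof(p), 0);
	return 0;
}

char _license[] SEC("license") = "GPL";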
950 
951 const struct bpf_func_proto *
952 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
953 {
954 	switch (func_id) {
955 	case BPF_FUNC_map_lookup_elem:
956 		return &bpf_map_lookup_elem_proto;
957 	case BPF_FUNC_map_update_elem:
958 		return &bpf_map_update_elem_proto;
959 	case BPF_FUNC_map_delete_elem:
960 		return &bpf_map_delete_elem_proto;
961 	case BPF_FUNC_map_push_elem:
962 		return &bpf_map_push_elem_proto;
963 	case BPF_FUNC_map_pop_elem:
964 		return &bpf_map_pop_elem_proto;
965 	case BPF_FUNC_map_peek_elem:
966 		return &bpf_map_peek_elem_proto;
967 	case BPF_FUNC_ktime_get_ns:
968 		return &bpf_ktime_get_ns_proto;
969 	case BPF_FUNC_ktime_get_boot_ns:
970 		return &bpf_ktime_get_boot_ns_proto;
971 	case BPF_FUNC_ktime_get_coarse_ns:
972 		return &bpf_ktime_get_coarse_ns_proto;
973 	case BPF_FUNC_tail_call:
974 		return &bpf_tail_call_proto;
975 	case BPF_FUNC_get_current_pid_tgid:
976 		return &bpf_get_current_pid_tgid_proto;
977 	case BPF_FUNC_get_current_task:
978 		return &bpf_get_current_task_proto;
979 	case BPF_FUNC_get_current_task_btf:
980 		return &bpf_get_current_task_btf_proto;
981 	case BPF_FUNC_get_current_uid_gid:
982 		return &bpf_get_current_uid_gid_proto;
983 	case BPF_FUNC_get_current_comm:
984 		return &bpf_get_current_comm_proto;
985 	case BPF_FUNC_trace_printk:
986 		return bpf_get_trace_printk_proto();
987 	case BPF_FUNC_get_smp_processor_id:
988 		return &bpf_get_smp_processor_id_proto;
989 	case BPF_FUNC_get_numa_node_id:
990 		return &bpf_get_numa_node_id_proto;
991 	case BPF_FUNC_perf_event_read:
992 		return &bpf_perf_event_read_proto;
993 	case BPF_FUNC_probe_write_user:
994 		return bpf_get_probe_write_proto();
995 	case BPF_FUNC_current_task_under_cgroup:
996 		return &bpf_current_task_under_cgroup_proto;
997 	case BPF_FUNC_get_prandom_u32:
998 		return &bpf_get_prandom_u32_proto;
999 	case BPF_FUNC_probe_read_user:
1000 		return &bpf_probe_read_user_proto;
1001 	case BPF_FUNC_probe_read_kernel:
1002 		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
1003 		       NULL : &bpf_probe_read_kernel_proto;
1004 	case BPF_FUNC_probe_read_user_str:
1005 		return &bpf_probe_read_user_str_proto;
1006 	case BPF_FUNC_probe_read_kernel_str:
1007 		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
1008 		       NULL : &bpf_probe_read_kernel_str_proto;
1009 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1010 	case BPF_FUNC_probe_read:
1011 		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
1012 		       NULL : &bpf_probe_read_compat_proto;
1013 	case BPF_FUNC_probe_read_str:
1014 		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
1015 		       NULL : &bpf_probe_read_compat_str_proto;
1016 #endif
1017 #ifdef CONFIG_CGROUPS
1018 	case BPF_FUNC_get_current_cgroup_id:
1019 		return &bpf_get_current_cgroup_id_proto;
1020 	case BPF_FUNC_get_current_ancestor_cgroup_id:
1021 		return &bpf_get_current_ancestor_cgroup_id_proto;
1022 #endif
1023 	case BPF_FUNC_send_signal:
1024 		return &bpf_send_signal_proto;
1025 	case BPF_FUNC_send_signal_thread:
1026 		return &bpf_send_signal_thread_proto;
1027 	case BPF_FUNC_perf_event_read_value:
1028 		return &bpf_perf_event_read_value_proto;
1029 	case BPF_FUNC_get_ns_current_pid_tgid:
1030 		return &bpf_get_ns_current_pid_tgid_proto;
1031 	case BPF_FUNC_ringbuf_output:
1032 		return &bpf_ringbuf_output_proto;
1033 	case BPF_FUNC_ringbuf_reserve:
1034 		return &bpf_ringbuf_reserve_proto;
1035 	case BPF_FUNC_ringbuf_submit:
1036 		return &bpf_ringbuf_submit_proto;
1037 	case BPF_FUNC_ringbuf_discard:
1038 		return &bpf_ringbuf_discard_proto;
1039 	case BPF_FUNC_ringbuf_query:
1040 		return &bpf_ringbuf_query_proto;
1041 	case BPF_FUNC_jiffies64:
1042 		return &bpf_jiffies64_proto;
1043 	case BPF_FUNC_get_task_stack:
1044 		return &bpf_get_task_stack_proto;
1045 	case BPF_FUNC_copy_from_user:
1046 		return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
1047 	case BPF_FUNC_snprintf_btf:
1048 		return &bpf_snprintf_btf_proto;
1049 	case BPF_FUNC_per_cpu_ptr:
1050 		return &bpf_per_cpu_ptr_proto;
1051 	case BPF_FUNC_this_cpu_ptr:
1052 		return &bpf_this_cpu_ptr_proto;
1053 	case BPF_FUNC_task_storage_get:
1054 		return &bpf_task_storage_get_proto;
1055 	case BPF_FUNC_task_storage_delete:
1056 		return &bpf_task_storage_delete_proto;
1057 	case BPF_FUNC_for_each_map_elem:
1058 		return &bpf_for_each_map_elem_proto;
1059 	case BPF_FUNC_snprintf:
1060 		return &bpf_snprintf_proto;
1061 	default:
1062 		return NULL;
1063 	}
1064 }
1065 
1066 static const struct bpf_func_proto *
1067 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1068 {
1069 	switch (func_id) {
1070 	case BPF_FUNC_perf_event_output:
1071 		return &bpf_perf_event_output_proto;
1072 	case BPF_FUNC_get_stackid:
1073 		return &bpf_get_stackid_proto;
1074 	case BPF_FUNC_get_stack:
1075 		return &bpf_get_stack_proto;
1076 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
1077 	case BPF_FUNC_override_return:
1078 		return &bpf_override_return_proto;
1079 #endif
1080 	default:
1081 		return bpf_tracing_func_proto(func_id, prog);
1082 	}
1083 }
1084 
1085 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
1086 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1087 					const struct bpf_prog *prog,
1088 					struct bpf_insn_access_aux *info)
1089 {
1090 	if (off < 0 || off >= sizeof(struct pt_regs))
1091 		return false;
1092 	if (type != BPF_READ)
1093 		return false;
1094 	if (off % size != 0)
1095 		return false;
1096 	/*
1097 	 * Assertion for 32 bit to make sure last 8 byte access
1098 	 * (BPF_DW) to the last 4 byte member is disallowed.
1099 	 */
1100 	if (off + size > sizeof(struct pt_regs))
1101 		return false;
1102 
1103 	return true;
1104 }
1105 
1106 const struct bpf_verifier_ops kprobe_verifier_ops = {
1107 	.get_func_proto  = kprobe_prog_func_proto,
1108 	.is_valid_access = kprobe_prog_is_valid_access,
1109 };
1110 
1111 const struct bpf_prog_ops kprobe_prog_ops = {
1112 };
1113 
1114 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1115 	   u64, flags, void *, data, u64, size)
1116 {
1117 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1118 
1119 	/*
1120 	 * r1 points to the perf tracepoint buffer, whose first 8 bytes are
1121 	 * hidden from the BPF program and contain a pointer to 'struct pt_regs'.
1122 	 * Fetch it and call the same bpf_perf_event_output() helper inline.
1123 	 */
1124 	return ____bpf_perf_event_output(regs, map, flags, data, size);
1125 }
1126 
1127 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1128 	.func		= bpf_perf_event_output_tp,
1129 	.gpl_only	= true,
1130 	.ret_type	= RET_INTEGER,
1131 	.arg1_type	= ARG_PTR_TO_CTX,
1132 	.arg2_type	= ARG_CONST_MAP_PTR,
1133 	.arg3_type	= ARG_ANYTHING,
1134 	.arg4_type	= ARG_PTR_TO_MEM,
1135 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1136 };
1137 
1138 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1139 	   u64, flags)
1140 {
1141 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1142 
1143 	/*
1144 	 * Same comment as in bpf_perf_event_output_tp(), only that this time
1145 	 * the other helper's function body cannot be inlined due to being
1146 	 * external, thus we need to call raw helper function.
1147 	 */
1148 	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1149 			       flags, 0, 0);
1150 }
1151 
1152 static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1153 	.func		= bpf_get_stackid_tp,
1154 	.gpl_only	= true,
1155 	.ret_type	= RET_INTEGER,
1156 	.arg1_type	= ARG_PTR_TO_CTX,
1157 	.arg2_type	= ARG_CONST_MAP_PTR,
1158 	.arg3_type	= ARG_ANYTHING,
1159 };
1160 
1161 BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1162 	   u64, flags)
1163 {
1164 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1165 
1166 	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1167 			     (unsigned long) size, flags, 0);
1168 }
1169 
1170 static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1171 	.func		= bpf_get_stack_tp,
1172 	.gpl_only	= true,
1173 	.ret_type	= RET_INTEGER,
1174 	.arg1_type	= ARG_PTR_TO_CTX,
1175 	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
1176 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1177 	.arg4_type	= ARG_ANYTHING,
1178 };
1179 
1180 static const struct bpf_func_proto *
1181 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1182 {
1183 	switch (func_id) {
1184 	case BPF_FUNC_perf_event_output:
1185 		return &bpf_perf_event_output_proto_tp;
1186 	case BPF_FUNC_get_stackid:
1187 		return &bpf_get_stackid_proto_tp;
1188 	case BPF_FUNC_get_stack:
1189 		return &bpf_get_stack_proto_tp;
1190 	default:
1191 		return bpf_tracing_func_proto(func_id, prog);
1192 	}
1193 }
1194 
1195 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1196 				    const struct bpf_prog *prog,
1197 				    struct bpf_insn_access_aux *info)
1198 {
1199 	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1200 		return false;
1201 	if (type != BPF_READ)
1202 		return false;
1203 	if (off % size != 0)
1204 		return false;
1205 
1206 	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1207 	return true;
1208 }
1209 
1210 const struct bpf_verifier_ops tracepoint_verifier_ops = {
1211 	.get_func_proto  = tp_prog_func_proto,
1212 	.is_valid_access = tp_prog_is_valid_access,
1213 };
1214 
1215 const struct bpf_prog_ops tracepoint_prog_ops = {
1216 };
1217 
1218 BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1219 	   struct bpf_perf_event_value *, buf, u32, size)
1220 {
1221 	int err = -EINVAL;
1222 
1223 	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1224 		goto clear;
1225 	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1226 				    &buf->running);
1227 	if (unlikely(err))
1228 		goto clear;
1229 	return 0;
1230 clear:
1231 	memset(buf, 0, size);
1232 	return err;
1233 }
1234 
1235 static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1236 	.func           = bpf_perf_prog_read_value,
1237 	.gpl_only       = true,
1238 	.ret_type       = RET_INTEGER,
1239 	.arg1_type      = ARG_PTR_TO_CTX,
1240 	.arg2_type      = ARG_PTR_TO_UNINIT_MEM,
1241 	.arg3_type      = ARG_CONST_SIZE,
1242 };
1243 
1244 BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1245 	   void *, buf, u32, size, u64, flags)
1246 {
1247 #ifndef CONFIG_X86
1248 	return -ENOENT;
1249 #else
1250 	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1251 	struct perf_branch_stack *br_stack = ctx->data->br_stack;
1252 	u32 to_copy;
1253 
1254 	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1255 		return -EINVAL;
1256 
1257 	if (unlikely(!br_stack))
1258 		return -EINVAL;
1259 
1260 	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1261 		return br_stack->nr * br_entry_size;
1262 
1263 	if (!buf || (size % br_entry_size != 0))
1264 		return -EINVAL;
1265 
1266 	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1267 	memcpy(buf, br_stack->entries, to_copy);
1268 
1269 	return to_copy;
1270 #endif
1271 }
1272 
1273 static const struct bpf_func_proto bpf_read_branch_records_proto = {
1274 	.func           = bpf_read_branch_records,
1275 	.gpl_only       = true,
1276 	.ret_type       = RET_INTEGER,
1277 	.arg1_type      = ARG_PTR_TO_CTX,
1278 	.arg2_type      = ARG_PTR_TO_MEM_OR_NULL,
1279 	.arg3_type      = ARG_CONST_SIZE_OR_ZERO,
1280 	.arg4_type      = ARG_ANYTHING,
1281 };
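
/*
 * Example (sketch, not part of this file): a perf_event program attached to a
 * hardware event configured with branch sampling can size the available
 * records first via BPF_F_GET_BRANCH_RECORDS_SIZE and then copy them out.
 * The buffer lives in .bss because it is larger than the BPF stack allows.
 */
#include <linux/bpf.h>
#include <linux/perf_event.h>
#include <bpf/bpf_helpers.h>

static struct perf_branch_entry slots[32];

SEC("perf_event")
int lbr_snapshot(void *ctx)
{
	long total, copied;

	/* First call: only report how many bytes of records are available. */
	total = bpf_read_branch_records(ctx, NULL, 0,
					BPF_F_GET_BRANCH_RECORDS_SIZE);
	if (total <= 0)
		return 0;

	copied = bpf_read_branch_records(ctx, slots, sizeof(slots), 0);
	if (copied > 0)
		bpf_printk("%ld of %ld branch bytes copied\n", copied, total);
	return 0;
}

char _license[] SEC("license") = "GPL";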
1282 
1283 static const struct bpf_func_proto *
1284 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1285 {
1286 	switch (func_id) {
1287 	case BPF_FUNC_perf_event_output:
1288 		return &bpf_perf_event_output_proto_tp;
1289 	case BPF_FUNC_get_stackid:
1290 		return &bpf_get_stackid_proto_pe;
1291 	case BPF_FUNC_get_stack:
1292 		return &bpf_get_stack_proto_pe;
1293 	case BPF_FUNC_perf_prog_read_value:
1294 		return &bpf_perf_prog_read_value_proto;
1295 	case BPF_FUNC_read_branch_records:
1296 		return &bpf_read_branch_records_proto;
1297 	default:
1298 		return bpf_tracing_func_proto(func_id, prog);
1299 	}
1300 }
1301 
1302 /*
1303  * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1304  * to avoid potential recursive reuse issue when/if tracepoints are added
1305  * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1306  *
1307  * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1308  * in normal, irq, and nmi context.
1309  */
1310 struct bpf_raw_tp_regs {
1311 	struct pt_regs regs[3];
1312 };
1313 static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1314 static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1315 static struct pt_regs *get_bpf_raw_tp_regs(void)
1316 {
1317 	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1318 	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1319 
1320 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1321 		this_cpu_dec(bpf_raw_tp_nest_level);
1322 		return ERR_PTR(-EBUSY);
1323 	}
1324 
1325 	return &tp_regs->regs[nest_level - 1];
1326 }
1327 
1328 static void put_bpf_raw_tp_regs(void)
1329 {
1330 	this_cpu_dec(bpf_raw_tp_nest_level);
1331 }
1332 
1333 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1334 	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
1335 {
1336 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1337 	int ret;
1338 
1339 	if (IS_ERR(regs))
1340 		return PTR_ERR(regs);
1341 
1342 	perf_fetch_caller_regs(regs);
1343 	ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1344 
1345 	put_bpf_raw_tp_regs();
1346 	return ret;
1347 }
1348 
1349 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1350 	.func		= bpf_perf_event_output_raw_tp,
1351 	.gpl_only	= true,
1352 	.ret_type	= RET_INTEGER,
1353 	.arg1_type	= ARG_PTR_TO_CTX,
1354 	.arg2_type	= ARG_CONST_MAP_PTR,
1355 	.arg3_type	= ARG_ANYTHING,
1356 	.arg4_type	= ARG_PTR_TO_MEM,
1357 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1358 };
1359 
1360 extern const struct bpf_func_proto bpf_skb_output_proto;
1361 extern const struct bpf_func_proto bpf_xdp_output_proto;
1362 
1363 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1364 	   struct bpf_map *, map, u64, flags)
1365 {
1366 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1367 	int ret;
1368 
1369 	if (IS_ERR(regs))
1370 		return PTR_ERR(regs);
1371 
1372 	perf_fetch_caller_regs(regs);
1373 	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1374 	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1375 			      flags, 0, 0);
1376 	put_bpf_raw_tp_regs();
1377 	return ret;
1378 }
1379 
1380 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1381 	.func		= bpf_get_stackid_raw_tp,
1382 	.gpl_only	= true,
1383 	.ret_type	= RET_INTEGER,
1384 	.arg1_type	= ARG_PTR_TO_CTX,
1385 	.arg2_type	= ARG_CONST_MAP_PTR,
1386 	.arg3_type	= ARG_ANYTHING,
1387 };
1388 
1389 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1390 	   void *, buf, u32, size, u64, flags)
1391 {
1392 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1393 	int ret;
1394 
1395 	if (IS_ERR(regs))
1396 		return PTR_ERR(regs);
1397 
1398 	perf_fetch_caller_regs(regs);
1399 	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1400 			    (unsigned long) size, flags, 0);
1401 	put_bpf_raw_tp_regs();
1402 	return ret;
1403 }
1404 
1405 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1406 	.func		= bpf_get_stack_raw_tp,
1407 	.gpl_only	= true,
1408 	.ret_type	= RET_INTEGER,
1409 	.arg1_type	= ARG_PTR_TO_CTX,
1410 	.arg2_type	= ARG_PTR_TO_MEM,
1411 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1412 	.arg4_type	= ARG_ANYTHING,
1413 };
1414 
1415 static const struct bpf_func_proto *
1416 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1417 {
1418 	switch (func_id) {
1419 	case BPF_FUNC_perf_event_output:
1420 		return &bpf_perf_event_output_proto_raw_tp;
1421 	case BPF_FUNC_get_stackid:
1422 		return &bpf_get_stackid_proto_raw_tp;
1423 	case BPF_FUNC_get_stack:
1424 		return &bpf_get_stack_proto_raw_tp;
1425 	default:
1426 		return bpf_tracing_func_proto(func_id, prog);
1427 	}
1428 }
1429 
1430 const struct bpf_func_proto *
1431 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1432 {
1433 	switch (func_id) {
1434 #ifdef CONFIG_NET
1435 	case BPF_FUNC_skb_output:
1436 		return &bpf_skb_output_proto;
1437 	case BPF_FUNC_xdp_output:
1438 		return &bpf_xdp_output_proto;
1439 	case BPF_FUNC_skc_to_tcp6_sock:
1440 		return &bpf_skc_to_tcp6_sock_proto;
1441 	case BPF_FUNC_skc_to_tcp_sock:
1442 		return &bpf_skc_to_tcp_sock_proto;
1443 	case BPF_FUNC_skc_to_tcp_timewait_sock:
1444 		return &bpf_skc_to_tcp_timewait_sock_proto;
1445 	case BPF_FUNC_skc_to_tcp_request_sock:
1446 		return &bpf_skc_to_tcp_request_sock_proto;
1447 	case BPF_FUNC_skc_to_udp6_sock:
1448 		return &bpf_skc_to_udp6_sock_proto;
1449 	case BPF_FUNC_sk_storage_get:
1450 		return &bpf_sk_storage_get_tracing_proto;
1451 	case BPF_FUNC_sk_storage_delete:
1452 		return &bpf_sk_storage_delete_tracing_proto;
1453 	case BPF_FUNC_sock_from_file:
1454 		return &bpf_sock_from_file_proto;
1455 	case BPF_FUNC_get_socket_cookie:
1456 		return &bpf_get_socket_ptr_cookie_proto;
1457 #endif
1458 	case BPF_FUNC_seq_printf:
1459 		return prog->expected_attach_type == BPF_TRACE_ITER ?
1460 		       &bpf_seq_printf_proto :
1461 		       NULL;
1462 	case BPF_FUNC_seq_write:
1463 		return prog->expected_attach_type == BPF_TRACE_ITER ?
1464 		       &bpf_seq_write_proto :
1465 		       NULL;
1466 	case BPF_FUNC_seq_printf_btf:
1467 		return prog->expected_attach_type == BPF_TRACE_ITER ?
1468 		       &bpf_seq_printf_btf_proto :
1469 		       NULL;
1470 	case BPF_FUNC_d_path:
1471 		return &bpf_d_path_proto;
1472 	default:
1473 		return raw_tp_prog_func_proto(func_id, prog);
1474 	}
1475 }
1476 
1477 static bool raw_tp_prog_is_valid_access(int off, int size,
1478 					enum bpf_access_type type,
1479 					const struct bpf_prog *prog,
1480 					struct bpf_insn_access_aux *info)
1481 {
1482 	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1483 		return false;
1484 	if (type != BPF_READ)
1485 		return false;
1486 	if (off % size != 0)
1487 		return false;
1488 	return true;
1489 }
1490 
1491 static bool tracing_prog_is_valid_access(int off, int size,
1492 					 enum bpf_access_type type,
1493 					 const struct bpf_prog *prog,
1494 					 struct bpf_insn_access_aux *info)
1495 {
1496 	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1497 		return false;
1498 	if (type != BPF_READ)
1499 		return false;
1500 	if (off % size != 0)
1501 		return false;
1502 	return btf_ctx_access(off, size, type, prog, info);
1503 }
1504 
1505 int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
1506 				     const union bpf_attr *kattr,
1507 				     union bpf_attr __user *uattr)
1508 {
1509 	return -ENOTSUPP;
1510 }
1511 
1512 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
1513 	.get_func_proto  = raw_tp_prog_func_proto,
1514 	.is_valid_access = raw_tp_prog_is_valid_access,
1515 };
1516 
1517 const struct bpf_prog_ops raw_tracepoint_prog_ops = {
1518 #ifdef CONFIG_NET
1519 	.test_run = bpf_prog_test_run_raw_tp,
1520 #endif
1521 };
1522 
1523 const struct bpf_verifier_ops tracing_verifier_ops = {
1524 	.get_func_proto  = tracing_prog_func_proto,
1525 	.is_valid_access = tracing_prog_is_valid_access,
1526 };
1527 
1528 const struct bpf_prog_ops tracing_prog_ops = {
1529 	.test_run = bpf_prog_test_run_tracing,
1530 };
1531 
1532 static bool raw_tp_writable_prog_is_valid_access(int off, int size,
1533 						 enum bpf_access_type type,
1534 						 const struct bpf_prog *prog,
1535 						 struct bpf_insn_access_aux *info)
1536 {
1537 	if (off == 0) {
1538 		if (size != sizeof(u64) || type != BPF_READ)
1539 			return false;
1540 		info->reg_type = PTR_TO_TP_BUFFER;
1541 	}
1542 	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
1543 }
1544 
1545 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
1546 	.get_func_proto  = raw_tp_prog_func_proto,
1547 	.is_valid_access = raw_tp_writable_prog_is_valid_access,
1548 };
1549 
1550 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
1551 };
1552 
1553 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1554 				    const struct bpf_prog *prog,
1555 				    struct bpf_insn_access_aux *info)
1556 {
1557 	const int size_u64 = sizeof(u64);
1558 
1559 	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
1560 		return false;
1561 	if (type != BPF_READ)
1562 		return false;
1563 	if (off % size != 0) {
1564 		if (sizeof(unsigned long) != 4)
1565 			return false;
1566 		if (size != 8)
1567 			return false;
1568 		if (off % size != 4)
1569 			return false;
1570 	}
1571 
1572 	switch (off) {
1573 	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
1574 		bpf_ctx_record_field_size(info, size_u64);
1575 		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1576 			return false;
1577 		break;
1578 	case bpf_ctx_range(struct bpf_perf_event_data, addr):
1579 		bpf_ctx_record_field_size(info, size_u64);
1580 		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1581 			return false;
1582 		break;
1583 	default:
1584 		if (size != sizeof(long))
1585 			return false;
1586 	}
1587 
1588 	return true;
1589 }
1590 
1591 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
1592 				      const struct bpf_insn *si,
1593 				      struct bpf_insn *insn_buf,
1594 				      struct bpf_prog *prog, u32 *target_size)
1595 {
1596 	struct bpf_insn *insn = insn_buf;
1597 
1598 	switch (si->off) {
1599 	case offsetof(struct bpf_perf_event_data, sample_period):
1600 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1601 						       data), si->dst_reg, si->src_reg,
1602 				      offsetof(struct bpf_perf_event_data_kern, data));
1603 		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1604 				      bpf_target_off(struct perf_sample_data, period, 8,
1605 						     target_size));
1606 		break;
1607 	case offsetof(struct bpf_perf_event_data, addr):
1608 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1609 						       data), si->dst_reg, si->src_reg,
1610 				      offsetof(struct bpf_perf_event_data_kern, data));
1611 		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1612 				      bpf_target_off(struct perf_sample_data, addr, 8,
1613 						     target_size));
1614 		break;
1615 	default:
1616 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1617 						       regs), si->dst_reg, si->src_reg,
1618 				      offsetof(struct bpf_perf_event_data_kern, regs));
1619 		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
1620 				      si->off);
1621 		break;
1622 	}
1623 
1624 	return insn - insn_buf;
1625 }
1626 
1627 const struct bpf_verifier_ops perf_event_verifier_ops = {
1628 	.get_func_proto		= pe_prog_func_proto,
1629 	.is_valid_access	= pe_prog_is_valid_access,
1630 	.convert_ctx_access	= pe_prog_convert_ctx_access,
1631 };
1632 
1633 const struct bpf_prog_ops perf_event_prog_ops = {
1634 };
1635 
1636 static DEFINE_MUTEX(bpf_event_mutex);
1637 
1638 #define BPF_TRACE_MAX_PROGS 64
1639 
1640 int perf_event_attach_bpf_prog(struct perf_event *event,
1641 			       struct bpf_prog *prog)
1642 {
1643 	struct bpf_prog_array *old_array;
1644 	struct bpf_prog_array *new_array;
1645 	int ret = -EEXIST;
1646 
1647 	/*
1648 	 * Kprobe override only works if the kprobe is on the function entry,
1649 	 * and only if the target function is on the error-injection opt-in list.
1650 	 */
1651 	if (prog->kprobe_override &&
1652 	    (!trace_kprobe_on_func_entry(event->tp_event) ||
1653 	     !trace_kprobe_error_injectable(event->tp_event)))
1654 		return -EINVAL;
1655 
1656 	mutex_lock(&bpf_event_mutex);
1657 
1658 	if (event->prog)
1659 		goto unlock;
1660 
1661 	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1662 	if (old_array &&
1663 	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
1664 		ret = -E2BIG;
1665 		goto unlock;
1666 	}
1667 
1668 	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
1669 	if (ret < 0)
1670 		goto unlock;
1671 
1672 	/* set the new array to event->tp_event and set event->prog */
1673 	event->prog = prog;
1674 	rcu_assign_pointer(event->tp_event->prog_array, new_array);
1675 	bpf_prog_array_free(old_array);
1676 
1677 unlock:
1678 	mutex_unlock(&bpf_event_mutex);
1679 	return ret;
1680 }
1681 
1682 void perf_event_detach_bpf_prog(struct perf_event *event)
1683 {
1684 	struct bpf_prog_array *old_array;
1685 	struct bpf_prog_array *new_array;
1686 	int ret;
1687 
1688 	mutex_lock(&bpf_event_mutex);
1689 
1690 	if (!event->prog)
1691 		goto unlock;
1692 
1693 	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1694 	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
1695 	if (ret == -ENOENT)
1696 		goto unlock;
1697 	if (ret < 0) {
1698 		bpf_prog_array_delete_safe(old_array, event->prog);
1699 	} else {
1700 		rcu_assign_pointer(event->tp_event->prog_array, new_array);
1701 		bpf_prog_array_free(old_array);
1702 	}
1703 
1704 	bpf_prog_put(event->prog);
1705 	event->prog = NULL;
1706 
1707 unlock:
1708 	mutex_unlock(&bpf_event_mutex);
1709 }
1710 
1711 int perf_event_query_prog_array(struct perf_event *event, void __user *info)
1712 {
1713 	struct perf_event_query_bpf __user *uquery = info;
1714 	struct perf_event_query_bpf query = {};
1715 	struct bpf_prog_array *progs;
1716 	u32 *ids, prog_cnt, ids_len;
1717 	int ret;
1718 
1719 	if (!perfmon_capable())
1720 		return -EPERM;
1721 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
1722 		return -EINVAL;
1723 	if (copy_from_user(&query, uquery, sizeof(query)))
1724 		return -EFAULT;
1725 
1726 	ids_len = query.ids_len;
1727 	if (ids_len > BPF_TRACE_MAX_PROGS)
1728 		return -E2BIG;
1729 	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
1730 	if (!ids)
1731 		return -ENOMEM;
1732 	/*
1733 	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
1734 	 * is required when user only wants to check for uquery->prog_cnt.
1735 	 * There is no need to check for it since the case is handled
1736 	 * gracefully in bpf_prog_array_copy_info.
1737 	 */
1738 
1739 	mutex_lock(&bpf_event_mutex);
1740 	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
1741 	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
1742 	mutex_unlock(&bpf_event_mutex);
1743 
1744 	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
1745 	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
1746 		ret = -EFAULT;
1747 
1748 	kfree(ids);
1749 	return ret;
1750 }
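
/*
 * Example (sketch, not part of this file): the user-space side of the query
 * above.  Given a perf event fd for a tracepoint/kprobe (perfmon capability
 * required), PERF_EVENT_IOC_QUERY_BPF returns the ids of the attached BPF
 * programs; 64 here is an arbitrary upper bound chosen by the caller.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static int query_attached_progs(int perf_fd)
{
	struct perf_event_query_bpf *q;
	unsigned int i;

	q = calloc(1, sizeof(*q) + 64 * sizeof(__u32));
	if (!q)
		return -1;

	q->ids_len = 64;
	if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, q)) {
		perror("PERF_EVENT_IOC_QUERY_BPF");
		free(q);
		return -1;
	}

	for (i = 0; i < q->prog_cnt && i < q->ids_len; i++)
		printf("attached bpf prog id %u\n", q->ids[i]);
	free(q);
	return 0;
}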
1751 
1752 extern struct bpf_raw_event_map __start__bpf_raw_tp[];
1753 extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
1754 
1755 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
1756 {
1757 	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
1758 
1759 	for (; btp < __stop__bpf_raw_tp; btp++) {
1760 		if (!strcmp(btp->tp->name, name))
1761 			return btp;
1762 	}
1763 
1764 	return bpf_get_raw_tracepoint_module(name);
1765 }
1766 
1767 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
1768 {
1769 	struct module *mod;
1770 
1771 	preempt_disable();
1772 	mod = __module_address((unsigned long)btp);
1773 	module_put(mod);
1774 	preempt_enable();
1775 }
1776 
1777 static __always_inline
1778 void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
1779 {
1780 	cant_sleep();
1781 	rcu_read_lock();
1782 	(void) BPF_PROG_RUN(prog, args);
1783 	rcu_read_unlock();
1784 }
1785 
1786 #define UNPACK(...)			__VA_ARGS__
1787 #define REPEAT_1(FN, DL, X, ...)	FN(X)
1788 #define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
1789 #define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
1790 #define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
1791 #define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
1792 #define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
1793 #define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
1794 #define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
1795 #define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
1796 #define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
1797 #define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
1798 #define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
1799 #define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)
1800 
1801 #define SARG(X)		u64 arg##X
1802 #define COPY(X)		args[X] = arg##X
1803 
1804 #define __DL_COM	(,)
1805 #define __DL_SEM	(;)
1806 
1807 #define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
1808 
1809 #define BPF_TRACE_DEFN_x(x)						\
1810 	void bpf_trace_run##x(struct bpf_prog *prog,			\
1811 			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
1812 	{								\
1813 		u64 args[x];						\
1814 		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
1815 		__bpf_trace_run(prog, args);				\
1816 	}								\
1817 	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
1818 BPF_TRACE_DEFN_x(1);
1819 BPF_TRACE_DEFN_x(2);
1820 BPF_TRACE_DEFN_x(3);
1821 BPF_TRACE_DEFN_x(4);
1822 BPF_TRACE_DEFN_x(5);
1823 BPF_TRACE_DEFN_x(6);
1824 BPF_TRACE_DEFN_x(7);
1825 BPF_TRACE_DEFN_x(8);
1826 BPF_TRACE_DEFN_x(9);
1827 BPF_TRACE_DEFN_x(10);
1828 BPF_TRACE_DEFN_x(11);
1829 BPF_TRACE_DEFN_x(12);
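
/*
 * For reference, the macro machinery above expands BPF_TRACE_DEFN_x(2) into
 * (roughly) the following, which is what a two-argument raw tracepoint's
 * __bpf_trace_##call() ends up calling:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */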
1830 
1831 static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1832 {
1833 	struct tracepoint *tp = btp->tp;
1834 
1835 	/*
1836 	 * check that program doesn't access arguments beyond what's
1837 	 * available in this tracepoint
1838 	 */
1839 	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
1840 		return -EINVAL;
1841 
1842 	if (prog->aux->max_tp_access > btp->writable_size)
1843 		return -EINVAL;
1844 
1845 	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
1846 						   prog);
1847 }
1848 
1849 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1850 {
1851 	return __bpf_probe_register(btp, prog);
1852 }
1853 
1854 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1855 {
1856 	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
1857 }
1858 
1859 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
1860 			    u32 *fd_type, const char **buf,
1861 			    u64 *probe_offset, u64 *probe_addr)
1862 {
1863 	bool is_tracepoint, is_syscall_tp;
1864 	struct bpf_prog *prog;
1865 	int flags, err = 0;
1866 
1867 	prog = event->prog;
1868 	if (!prog)
1869 		return -ENOENT;
1870 
1871 	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
1872 	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
1873 		return -EOPNOTSUPP;
1874 
1875 	*prog_id = prog->aux->id;
1876 	flags = event->tp_event->flags;
1877 	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
1878 	is_syscall_tp = is_syscall_trace_event(event->tp_event);
1879 
1880 	if (is_tracepoint || is_syscall_tp) {
1881 		*buf = is_tracepoint ? event->tp_event->tp->name
1882 				     : event->tp_event->name;
1883 		*fd_type = BPF_FD_TYPE_TRACEPOINT;
1884 		*probe_offset = 0x0;
1885 		*probe_addr = 0x0;
1886 	} else {
1887 		/* kprobe/uprobe */
1888 		err = -EOPNOTSUPP;
1889 #ifdef CONFIG_KPROBE_EVENTS
1890 		if (flags & TRACE_EVENT_FL_KPROBE)
1891 			err = bpf_get_kprobe_info(event, fd_type, buf,
1892 						  probe_offset, probe_addr,
1893 						  event->attr.type == PERF_TYPE_TRACEPOINT);
1894 #endif
1895 #ifdef CONFIG_UPROBE_EVENTS
1896 		if (flags & TRACE_EVENT_FL_UPROBE)
1897 			err = bpf_get_uprobe_info(event, fd_type, buf,
1898 						  probe_offset,
1899 						  event->attr.type == PERF_TYPE_TRACEPOINT);
1900 #endif
1901 	}
1902 
1903 	return err;
1904 }
1905 
1906 static int __init send_signal_irq_work_init(void)
1907 {
1908 	int cpu;
1909 	struct send_signal_irq_work *work;
1910 
1911 	for_each_possible_cpu(cpu) {
1912 		work = per_cpu_ptr(&send_signal_work, cpu);
1913 		init_irq_work(&work->irq_work, do_bpf_send_signal);
1914 	}
1915 	return 0;
1916 }
1917 
1918 subsys_initcall(send_signal_irq_work_init);
1919 
1920 #ifdef CONFIG_MODULES
1921 static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
1922 			    void *module)
1923 {
1924 	struct bpf_trace_module *btm, *tmp;
1925 	struct module *mod = module;
1926 	int ret = 0;
1927 
1928 	if (mod->num_bpf_raw_events == 0 ||
1929 	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
1930 		goto out;
1931 
1932 	mutex_lock(&bpf_module_mutex);
1933 
1934 	switch (op) {
1935 	case MODULE_STATE_COMING:
1936 		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
1937 		if (btm) {
1938 			btm->module = module;
1939 			list_add(&btm->list, &bpf_trace_modules);
1940 		} else {
1941 			ret = -ENOMEM;
1942 		}
1943 		break;
1944 	case MODULE_STATE_GOING:
1945 		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
1946 			if (btm->module == module) {
1947 				list_del(&btm->list);
1948 				kfree(btm);
1949 				break;
1950 			}
1951 		}
1952 		break;
1953 	}
1954 
1955 	mutex_unlock(&bpf_module_mutex);
1956 
1957 out:
1958 	return notifier_from_errno(ret);
1959 }
1960 
1961 static struct notifier_block bpf_module_nb = {
1962 	.notifier_call = bpf_event_notify,
1963 };
1964 
1965 static int __init bpf_event_init(void)
1966 {
1967 	register_module_notifier(&bpf_module_nb);
1968 	return 0;
1969 }
1970 
1971 fs_initcall(bpf_event_init);
1972 #endif /* CONFIG_MODULES */
1973