xref: /linux/kernel/trace/bpf_trace.c (revision 7bb377107c72a40ab7505341f8626c8eb79a0cb7)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  */
5 #include <linux/kernel.h>
6 #include <linux/types.h>
7 #include <linux/slab.h>
8 #include <linux/bpf.h>
9 #include <linux/bpf_perf_event.h>
10 #include <linux/filter.h>
11 #include <linux/uaccess.h>
12 #include <linux/ctype.h>
13 #include <linux/kprobes.h>
14 #include <linux/syscalls.h>
15 #include <linux/error-injection.h>
16 
17 #include <asm/tlb.h>
18 
19 #include "trace_probe.h"
20 #include "trace.h"
21 
22 #define bpf_event_rcu_dereference(p)					\
23 	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
24 
25 #ifdef CONFIG_MODULES
26 struct bpf_trace_module {
27 	struct module *module;
28 	struct list_head list;
29 };
30 
31 static LIST_HEAD(bpf_trace_modules);
32 static DEFINE_MUTEX(bpf_module_mutex);
33 
34 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
35 {
36 	struct bpf_raw_event_map *btp, *ret = NULL;
37 	struct bpf_trace_module *btm;
38 	unsigned int i;
39 
40 	mutex_lock(&bpf_module_mutex);
41 	list_for_each_entry(btm, &bpf_trace_modules, list) {
42 		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
43 			btp = &btm->module->bpf_raw_events[i];
44 			if (!strcmp(btp->tp->name, name)) {
45 				if (try_module_get(btm->module))
46 					ret = btp;
47 				goto out;
48 			}
49 		}
50 	}
51 out:
52 	mutex_unlock(&bpf_module_mutex);
53 	return ret;
54 }
55 #else
56 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
57 {
58 	return NULL;
59 }
60 #endif /* CONFIG_MODULES */
61 
62 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
63 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
64 
65 /**
66  * trace_call_bpf - invoke BPF program
67  * @call: tracepoint event
68  * @ctx: opaque context pointer
69  *
70  * kprobe handlers execute BPF programs via this helper.
71  * It can also be used from static tracepoints in the future.
72  *
73  * Return: BPF programs always return an integer which is interpreted by
74  * the kprobe handler as:
75  * 0 - return from kprobe (event is filtered out)
76  * 1 - store kprobe event into ring buffer
77  * Other values are reserved and currently alias to 1
78  */
79 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
80 {
81 	unsigned int ret;
82 
83 	if (in_nmi()) /* not supported yet */
84 		return 1;
85 
86 	cant_sleep();
87 
88 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
89 		/*
90 		 * Since some bpf program is already running on this cpu,
91 		 * don't call into another bpf program (same or different)
92 		 * and don't send the kprobe event into the ring buffer,
93 		 * so return zero here.
94 		 */
95 		ret = 0;
96 		goto out;
97 	}
98 
99 	/*
100 	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
101 	 * to all call sites, we do a bpf_prog_array_valid() check there to
102 	 * see whether call->prog_array is empty or not, which is
103 	 * a heuristic to speed up execution.
104 	 *
105 	 * If the prog_array fetched by bpf_prog_array_valid() was
106 	 * non-NULL, we go into trace_call_bpf() and do the actual
107 	 * proper rcu_dereference() under RCU lock.
108 	 * If it turns out that prog_array is NULL, we bail out.
109 	 * Conversely, if the pointer fetched by bpf_prog_array_valid()
110 	 * was NULL, the prog_array is skipped at the risk of missing
111 	 * out on events added between that check and the
112 	 * rcu_dereference(), which is an accepted risk.
113 	 */
114 	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);
115 
116  out:
117 	__this_cpu_dec(bpf_prog_active);
118 
119 	return ret;
120 }
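
/*
 * Example (illustrative sketch, not part of the original file): a kprobe
 * BPF program as one might write it with clang/libbpf, whose return
 * value drives the filtering described above -- return 0 to drop the
 * event, 1 to store it in the ring buffer:
 *
 *	SEC("kprobe/do_sys_open")
 *	int filter_open(struct pt_regs *ctx)
 *	{
 *		// keep events from tgid 1234 only (made-up value)
 *		return (bpf_get_current_pid_tgid() >> 32) == 1234;
 *	}
 */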
121 
122 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
123 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
124 {
125 	regs_set_return_value(regs, rc);
126 	override_function_with_return(regs);
127 	return 0;
128 }
129 
130 static const struct bpf_func_proto bpf_override_return_proto = {
131 	.func		= bpf_override_return,
132 	.gpl_only	= true,
133 	.ret_type	= RET_INTEGER,
134 	.arg1_type	= ARG_PTR_TO_CTX,
135 	.arg2_type	= ARG_ANYTHING,
136 };
137 #endif
138 
139 BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
140 	   const void __user *, unsafe_ptr)
141 {
142 	int ret = probe_user_read(dst, unsafe_ptr, size);
143 
144 	if (unlikely(ret < 0))
145 		memset(dst, 0, size);
146 
147 	return ret;
148 }
149 
150 static const struct bpf_func_proto bpf_probe_read_user_proto = {
151 	.func		= bpf_probe_read_user,
152 	.gpl_only	= true,
153 	.ret_type	= RET_INTEGER,
154 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
155 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
156 	.arg3_type	= ARG_ANYTHING,
157 };
158 
159 BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
160 	   const void __user *, unsafe_ptr)
161 {
162 	int ret = strncpy_from_unsafe_user(dst, unsafe_ptr, size);
163 
164 	if (unlikely(ret < 0))
165 		memset(dst, 0, size);
166 
167 	return ret;
168 }
169 
170 static const struct bpf_func_proto bpf_probe_read_user_str_proto = {
171 	.func		= bpf_probe_read_user_str,
172 	.gpl_only	= true,
173 	.ret_type	= RET_INTEGER,
174 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
175 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
176 	.arg3_type	= ARG_ANYTHING,
177 };
178 
179 static __always_inline int
180 bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr,
181 			     const bool compat)
182 {
183 	int ret = security_locked_down(LOCKDOWN_BPF_READ);
184 
185 	if (unlikely(ret < 0))
186 		goto out;
187 	ret = compat ? probe_kernel_read(dst, unsafe_ptr, size) :
188 	      probe_kernel_read_strict(dst, unsafe_ptr, size);
189 	if (unlikely(ret < 0))
190 out:
191 		memset(dst, 0, size);
192 	return ret;
193 }
194 
195 BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
196 	   const void *, unsafe_ptr)
197 {
198 	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, false);
199 }
200 
201 static const struct bpf_func_proto bpf_probe_read_kernel_proto = {
202 	.func		= bpf_probe_read_kernel,
203 	.gpl_only	= true,
204 	.ret_type	= RET_INTEGER,
205 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
206 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
207 	.arg3_type	= ARG_ANYTHING,
208 };
209 
210 BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
211 	   const void *, unsafe_ptr)
212 {
213 	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, true);
214 }
215 
216 static const struct bpf_func_proto bpf_probe_read_compat_proto = {
217 	.func		= bpf_probe_read_compat,
218 	.gpl_only	= true,
219 	.ret_type	= RET_INTEGER,
220 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
221 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
222 	.arg3_type	= ARG_ANYTHING,
223 };
224 
225 static __always_inline int
226 bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr,
227 				 const bool compat)
228 {
229 	int ret = security_locked_down(LOCKDOWN_BPF_READ);
230 
231 	if (unlikely(ret < 0))
232 		goto out;
233 	/*
234 	 * The strncpy_from_unsafe_*() call will likely not fill the entire
235 	 * buffer, but that's okay in this circumstance as we're probing
236 	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
237 	 * as well probe the stack. Thus, memory is explicitly cleared
238 	 * only in the error case, so that improper users who ignore the
239 	 * return code altogether don't copy garbage; otherwise the length of
240 	 * the string is returned and can be used for bpf_perf_event_output() et al.
241 	 */
242 	ret = compat ? strncpy_from_unsafe(dst, unsafe_ptr, size) :
243 	      strncpy_from_unsafe_strict(dst, unsafe_ptr, size);
244 	if (unlikely(ret < 0))
245 out:
246 		memset(dst, 0, size);
247 	return ret;
248 }
249 
250 BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
251 	   const void *, unsafe_ptr)
252 {
253 	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, false);
254 }
255 
256 static const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
257 	.func		= bpf_probe_read_kernel_str,
258 	.gpl_only	= true,
259 	.ret_type	= RET_INTEGER,
260 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
261 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
262 	.arg3_type	= ARG_ANYTHING,
263 };
264 
265 BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
266 	   const void *, unsafe_ptr)
267 {
268 	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, true);
269 }
270 
271 static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
272 	.func		= bpf_probe_read_compat_str,
273 	.gpl_only	= true,
274 	.ret_type	= RET_INTEGER,
275 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
276 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
277 	.arg3_type	= ARG_ANYTHING,
278 };
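
/*
 * Example (illustrative sketch): using the address-space-specific
 * helpers above from a BPF program; "tsk" and "user_buf" are assumed
 * kernel and user pointers obtained elsewhere:
 *
 *	char comm[16];
 *	char path[64];
 *
 *	bpf_probe_read_kernel(comm, sizeof(comm), tsk->comm);
 *	bpf_probe_read_user_str(path, sizeof(path), user_buf);
 */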
279 
280 BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
281 	   u32, size)
282 {
283 	/*
284 	 * Ensure we're in user context which is safe for the helper to
285 	 * run. This helper has no business in a kthread.
286 	 *
287 	 * access_ok() should prevent writing to non-user memory, but in
288 	 * some situations (nommu, temporary switch, etc) access_ok() does
289 	 * not provide enough validation, hence the check on KERNEL_DS.
290 	 *
291 	 * nmi_uaccess_okay() ensures the probe is not run in an interim
292 	 * state, when the task or mm are switched. This is specifically
293 	 * required to prevent the use of temporary mm.
294 	 */
295 
296 	if (unlikely(in_interrupt() ||
297 		     current->flags & (PF_KTHREAD | PF_EXITING)))
298 		return -EPERM;
299 	if (unlikely(uaccess_kernel()))
300 		return -EPERM;
301 	if (unlikely(!nmi_uaccess_okay()))
302 		return -EPERM;
303 
304 	return probe_user_write(unsafe_ptr, src, size);
305 }
306 
307 static const struct bpf_func_proto bpf_probe_write_user_proto = {
308 	.func		= bpf_probe_write_user,
309 	.gpl_only	= true,
310 	.ret_type	= RET_INTEGER,
311 	.arg1_type	= ARG_ANYTHING,
312 	.arg2_type	= ARG_PTR_TO_MEM,
313 	.arg3_type	= ARG_CONST_SIZE,
314 };
315 
316 static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
317 {
318 	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
319 			    current->comm, task_pid_nr(current));
320 
321 	return &bpf_probe_write_user_proto;
322 }
323 
324 /*
325  * Only limited trace_printk() conversion specifiers allowed:
326  * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
327  */
328 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
329 	   u64, arg2, u64, arg3)
330 {
331 	bool str_seen = false;
332 	int mod[3] = {};
333 	int fmt_cnt = 0;
334 	u64 unsafe_addr;
335 	char buf[64];
336 	int i;
337 
338 	/*
339 	 * bpf_check()->check_func_arg()->check_stack_boundary()
340 	 * guarantees that fmt points to the bpf program stack, that
341 	 * fmt_size bytes of it are initialized, and that fmt_size > 0
342 	 */
343 	if (fmt[--fmt_size] != 0)
344 		return -EINVAL;
345 
346 	/* check format string for allowed specifiers */
347 	for (i = 0; i < fmt_size; i++) {
348 		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
349 			return -EINVAL;
350 
351 		if (fmt[i] != '%')
352 			continue;
353 
354 		if (fmt_cnt >= 3)
355 			return -EINVAL;
356 
357 		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
358 		i++;
359 		if (fmt[i] == 'l') {
360 			mod[fmt_cnt]++;
361 			i++;
362 		} else if (fmt[i] == 'p' || fmt[i] == 's') {
363 			mod[fmt_cnt]++;
364 			/* disallow any further format extensions */
365 			if (fmt[i + 1] != 0 &&
366 			    !isspace(fmt[i + 1]) &&
367 			    !ispunct(fmt[i + 1]))
368 				return -EINVAL;
369 			fmt_cnt++;
370 			if (fmt[i] == 's') {
371 				if (str_seen)
372 					/* allow only one '%s' per fmt string */
373 					return -EINVAL;
374 				str_seen = true;
375 
376 				switch (fmt_cnt) {
377 				case 1:
378 					unsafe_addr = arg1;
379 					arg1 = (long) buf;
380 					break;
381 				case 2:
382 					unsafe_addr = arg2;
383 					arg2 = (long) buf;
384 					break;
385 				case 3:
386 					unsafe_addr = arg3;
387 					arg3 = (long) buf;
388 					break;
389 				}
390 				buf[0] = 0;
391 				strncpy_from_unsafe(buf,
392 						    (void *) (long) unsafe_addr,
393 						    sizeof(buf));
394 			}
395 			continue;
396 		}
397 
398 		if (fmt[i] == 'l') {
399 			mod[fmt_cnt]++;
400 			i++;
401 		}
402 
403 		if (fmt[i] != 'i' && fmt[i] != 'd' &&
404 		    fmt[i] != 'u' && fmt[i] != 'x')
405 			return -EINVAL;
406 		fmt_cnt++;
407 	}
408 
409 /* Horrid workaround for getting va_list handling working with different
410  * argument type combinations generically for 32 and 64 bit archs.
411  */
412 #define __BPF_TP_EMIT()	__BPF_ARG3_TP()
413 #define __BPF_TP(...)							\
414 	__trace_printk(0 /* Fake ip */,					\
415 		       fmt, ##__VA_ARGS__)
416 
417 #define __BPF_ARG1_TP(...)						\
418 	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
419 	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
420 	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
421 	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
422 	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))
423 
424 #define __BPF_ARG2_TP(...)						\
425 	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
426 	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
427 	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
428 	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
429 	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))
430 
431 #define __BPF_ARG3_TP(...)						\
432 	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
433 	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
434 	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
435 	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
436 	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))
437 
438 	return __BPF_TP_EMIT();
439 }
440 
441 static const struct bpf_func_proto bpf_trace_printk_proto = {
442 	.func		= bpf_trace_printk,
443 	.gpl_only	= true,
444 	.ret_type	= RET_INTEGER,
445 	.arg1_type	= ARG_PTR_TO_MEM,
446 	.arg2_type	= ARG_CONST_SIZE,
447 };
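
/*
 * Example (illustrative sketch): a call that passes the checks above.
 * The format string must live on the BPF stack, be NUL-terminated, and
 * use at most three specifiers with at most one %s; "pid" and
 * "filename" are assumed locals:
 *
 *	char fmt[] = "pid %d opened %s\n";
 *
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, filename);
 *
 * The output is emitted via __trace_printk() and can be read from
 * /sys/kernel/debug/tracing/trace_pipe.
 */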
448 
449 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
450 {
451 	/*
452 	 * this program might be calling bpf_trace_printk,
453 	 * so allocate per-cpu printk buffers
454 	 */
455 	trace_printk_init_buffers();
456 
457 	return &bpf_trace_printk_proto;
458 }
459 
460 static __always_inline int
461 get_map_perf_counter(struct bpf_map *map, u64 flags,
462 		     u64 *value, u64 *enabled, u64 *running)
463 {
464 	struct bpf_array *array = container_of(map, struct bpf_array, map);
465 	unsigned int cpu = smp_processor_id();
466 	u64 index = flags & BPF_F_INDEX_MASK;
467 	struct bpf_event_entry *ee;
468 
469 	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
470 		return -EINVAL;
471 	if (index == BPF_F_CURRENT_CPU)
472 		index = cpu;
473 	if (unlikely(index >= array->map.max_entries))
474 		return -E2BIG;
475 
476 	ee = READ_ONCE(array->ptrs[index]);
477 	if (!ee)
478 		return -ENOENT;
479 
480 	return perf_event_read_local(ee->event, value, enabled, running);
481 }
482 
483 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
484 {
485 	u64 value = 0;
486 	int err;
487 
488 	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
489 	/*
490 	 * This API is ugly since counter values in the [-22..-2] range
491 	 * cannot be distinguished from errors, but that's UAPI.
492 	 */
493 	if (err)
494 		return err;
495 	return value;
496 }
497 
498 static const struct bpf_func_proto bpf_perf_event_read_proto = {
499 	.func		= bpf_perf_event_read,
500 	.gpl_only	= true,
501 	.ret_type	= RET_INTEGER,
502 	.arg1_type	= ARG_CONST_MAP_PTR,
503 	.arg2_type	= ARG_ANYTHING,
504 };
505 
506 BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
507 	   struct bpf_perf_event_value *, buf, u32, size)
508 {
509 	int err = -EINVAL;
510 
511 	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
512 		goto clear;
513 	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
514 				   &buf->running);
515 	if (unlikely(err))
516 		goto clear;
517 	return 0;
518 clear:
519 	memset(buf, 0, size);
520 	return err;
521 }
522 
523 static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
524 	.func		= bpf_perf_event_read_value,
525 	.gpl_only	= true,
526 	.ret_type	= RET_INTEGER,
527 	.arg1_type	= ARG_CONST_MAP_PTR,
528 	.arg2_type	= ARG_ANYTHING,
529 	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
530 	.arg4_type	= ARG_CONST_SIZE,
531 };
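
/*
 * Example (illustrative sketch): reading a counter for the current CPU
 * from a BPF_MAP_TYPE_PERF_EVENT_ARRAY named "counters" (hypothetical).
 * Unlike bpf_perf_event_read(), errors and counter values stay apart:
 *
 *	struct bpf_perf_event_value v = {};
 *
 *	int err = bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
 *					    &v, sizeof(v));
 *	// on success (err == 0), v.counter/v.enabled/v.running are valid
 */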
532 
533 static __always_inline u64
534 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
535 			u64 flags, struct perf_sample_data *sd)
536 {
537 	struct bpf_array *array = container_of(map, struct bpf_array, map);
538 	unsigned int cpu = smp_processor_id();
539 	u64 index = flags & BPF_F_INDEX_MASK;
540 	struct bpf_event_entry *ee;
541 	struct perf_event *event;
542 
543 	if (index == BPF_F_CURRENT_CPU)
544 		index = cpu;
545 	if (unlikely(index >= array->map.max_entries))
546 		return -E2BIG;
547 
548 	ee = READ_ONCE(array->ptrs[index]);
549 	if (!ee)
550 		return -ENOENT;
551 
552 	event = ee->event;
553 	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
554 		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
555 		return -EINVAL;
556 
557 	if (unlikely(event->oncpu != cpu))
558 		return -EOPNOTSUPP;
559 
560 	return perf_event_output(event, sd, regs);
561 }
562 
563 /*
564  * Support tracepoints firing in normal, irq, and nmi context, each of
565  * which may call bpf_perf_event_output().
566  */
567 struct bpf_trace_sample_data {
568 	struct perf_sample_data sds[3];
569 };
570 
571 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
572 static DEFINE_PER_CPU(int, bpf_trace_nest_level);
573 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
574 	   u64, flags, void *, data, u64, size)
575 {
576 	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
577 	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
578 	struct perf_raw_record raw = {
579 		.frag = {
580 			.size = size,
581 			.data = data,
582 		},
583 	};
584 	struct perf_sample_data *sd;
585 	int err;
586 
587 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
588 		err = -EBUSY;
589 		goto out;
590 	}
591 
592 	sd = &sds->sds[nest_level - 1];
593 
594 	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
595 		err = -EINVAL;
596 		goto out;
597 	}
598 
599 	perf_sample_data_init(sd, 0, 0);
600 	sd->raw = &raw;
601 
602 	err = __bpf_perf_event_output(regs, map, flags, sd);
603 
604 out:
605 	this_cpu_dec(bpf_trace_nest_level);
606 	return err;
607 }
608 
609 static const struct bpf_func_proto bpf_perf_event_output_proto = {
610 	.func		= bpf_perf_event_output,
611 	.gpl_only	= true,
612 	.ret_type	= RET_INTEGER,
613 	.arg1_type	= ARG_PTR_TO_CTX,
614 	.arg2_type	= ARG_CONST_MAP_PTR,
615 	.arg3_type	= ARG_ANYTHING,
616 	.arg4_type	= ARG_PTR_TO_MEM,
617 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
618 };
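
/*
 * Example (illustrative sketch): streaming a sample to user space
 * through a BPF_MAP_TYPE_PERF_EVENT_ARRAY named "events" (hypothetical),
 * using the perf event opened for the current CPU:
 *
 *	struct { u32 pid; } d = {
 *		.pid = bpf_get_current_pid_tgid() >> 32,
 *	};
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &d, sizeof(d));
 */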
619 
620 static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
621 struct bpf_nested_pt_regs {
622 	struct pt_regs regs[3];
623 };
624 static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
625 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
626 
627 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
628 		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
629 {
630 	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
631 	struct perf_raw_frag frag = {
632 		.copy		= ctx_copy,
633 		.size		= ctx_size,
634 		.data		= ctx,
635 	};
636 	struct perf_raw_record raw = {
637 		.frag = {
638 			{
639 				.next	= ctx_size ? &frag : NULL,
640 			},
641 			.size	= meta_size,
642 			.data	= meta,
643 		},
644 	};
645 	struct perf_sample_data *sd;
646 	struct pt_regs *regs;
647 	u64 ret;
648 
649 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
650 		ret = -EBUSY;
651 		goto out;
652 	}
653 	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
654 	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
655 
656 	perf_fetch_caller_regs(regs);
657 	perf_sample_data_init(sd, 0, 0);
658 	sd->raw = &raw;
659 
660 	ret = __bpf_perf_event_output(regs, map, flags, sd);
661 out:
662 	this_cpu_dec(bpf_event_output_nest_level);
663 	return ret;
664 }
665 
666 BPF_CALL_0(bpf_get_current_task)
667 {
668 	return (long) current;
669 }
670 
671 static const struct bpf_func_proto bpf_get_current_task_proto = {
672 	.func		= bpf_get_current_task,
673 	.gpl_only	= true,
674 	.ret_type	= RET_INTEGER,
675 };
676 
677 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
678 {
679 	struct bpf_array *array = container_of(map, struct bpf_array, map);
680 	struct cgroup *cgrp;
681 
682 	if (unlikely(idx >= array->map.max_entries))
683 		return -E2BIG;
684 
685 	cgrp = READ_ONCE(array->ptrs[idx]);
686 	if (unlikely(!cgrp))
687 		return -EAGAIN;
688 
689 	return task_under_cgroup_hierarchy(current, cgrp);
690 }
691 
692 static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
693 	.func           = bpf_current_task_under_cgroup,
694 	.gpl_only       = false,
695 	.ret_type       = RET_INTEGER,
696 	.arg1_type      = ARG_CONST_MAP_PTR,
697 	.arg2_type      = ARG_ANYTHING,
698 };
699 
700 struct send_signal_irq_work {
701 	struct irq_work irq_work;
702 	struct task_struct *task;
703 	u32 sig;
704 	enum pid_type type;
705 };
706 
707 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
708 
709 static void do_bpf_send_signal(struct irq_work *entry)
710 {
711 	struct send_signal_irq_work *work;
712 
713 	work = container_of(entry, struct send_signal_irq_work, irq_work);
714 	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
715 }
716 
717 static int bpf_send_signal_common(u32 sig, enum pid_type type)
718 {
719 	struct send_signal_irq_work *work = NULL;
720 
721 	/* Similar to bpf_probe_write_user, the task needs to be
722 	 * in a sound condition and kernel memory access must be
723 	 * permitted in order to send a signal to the current
724 	 * task.
725 	 */
726 	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
727 		return -EPERM;
728 	if (unlikely(uaccess_kernel()))
729 		return -EPERM;
730 	if (unlikely(!nmi_uaccess_okay()))
731 		return -EPERM;
732 
733 	if (irqs_disabled()) {
734 		/* Do an early check on signal validity. Otherwise,
735 		 * the error is lost in deferred irq_work.
736 		 */
737 		if (unlikely(!valid_signal(sig)))
738 			return -EINVAL;
739 
740 		work = this_cpu_ptr(&send_signal_work);
741 		if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
742 			return -EBUSY;
743 
744 		/* Add the current task, which is the target of the signal,
745 		 * to the irq_work. The current task may have changed by the
746 		 * time the queued irq_work gets executed.
747 		 */
748 		work->task = current;
749 		work->sig = sig;
750 		work->type = type;
751 		irq_work_queue(&work->irq_work);
752 		return 0;
753 	}
754 
755 	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
756 }
757 
758 BPF_CALL_1(bpf_send_signal, u32, sig)
759 {
760 	return bpf_send_signal_common(sig, PIDTYPE_TGID);
761 }
762 
763 static const struct bpf_func_proto bpf_send_signal_proto = {
764 	.func		= bpf_send_signal,
765 	.gpl_only	= false,
766 	.ret_type	= RET_INTEGER,
767 	.arg1_type	= ARG_ANYTHING,
768 };
769 
770 BPF_CALL_1(bpf_send_signal_thread, u32, sig)
771 {
772 	return bpf_send_signal_common(sig, PIDTYPE_PID);
773 }
774 
775 static const struct bpf_func_proto bpf_send_signal_thread_proto = {
776 	.func		= bpf_send_signal_thread,
777 	.gpl_only	= false,
778 	.ret_type	= RET_INTEGER,
779 	.arg1_type	= ARG_ANYTHING,
780 };
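
/*
 * Example (illustrative sketch): from a tracing program, deliver
 * SIGUSR1 either to the whole thread group of the current task or to
 * the current thread only:
 *
 *	bpf_send_signal(SIGUSR1);		// PIDTYPE_TGID
 *	bpf_send_signal_thread(SIGUSR1);	// PIDTYPE_PID
 */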
781 
782 const struct bpf_func_proto *
783 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
784 {
785 	switch (func_id) {
786 	case BPF_FUNC_map_lookup_elem:
787 		return &bpf_map_lookup_elem_proto;
788 	case BPF_FUNC_map_update_elem:
789 		return &bpf_map_update_elem_proto;
790 	case BPF_FUNC_map_delete_elem:
791 		return &bpf_map_delete_elem_proto;
792 	case BPF_FUNC_map_push_elem:
793 		return &bpf_map_push_elem_proto;
794 	case BPF_FUNC_map_pop_elem:
795 		return &bpf_map_pop_elem_proto;
796 	case BPF_FUNC_map_peek_elem:
797 		return &bpf_map_peek_elem_proto;
798 	case BPF_FUNC_ktime_get_ns:
799 		return &bpf_ktime_get_ns_proto;
800 	case BPF_FUNC_ktime_get_boot_ns:
801 		return &bpf_ktime_get_boot_ns_proto;
802 	case BPF_FUNC_tail_call:
803 		return &bpf_tail_call_proto;
804 	case BPF_FUNC_get_current_pid_tgid:
805 		return &bpf_get_current_pid_tgid_proto;
806 	case BPF_FUNC_get_current_task:
807 		return &bpf_get_current_task_proto;
808 	case BPF_FUNC_get_current_uid_gid:
809 		return &bpf_get_current_uid_gid_proto;
810 	case BPF_FUNC_get_current_comm:
811 		return &bpf_get_current_comm_proto;
812 	case BPF_FUNC_trace_printk:
813 		return bpf_get_trace_printk_proto();
814 	case BPF_FUNC_get_smp_processor_id:
815 		return &bpf_get_smp_processor_id_proto;
816 	case BPF_FUNC_get_numa_node_id:
817 		return &bpf_get_numa_node_id_proto;
818 	case BPF_FUNC_perf_event_read:
819 		return &bpf_perf_event_read_proto;
820 	case BPF_FUNC_probe_write_user:
821 		return bpf_get_probe_write_proto();
822 	case BPF_FUNC_current_task_under_cgroup:
823 		return &bpf_current_task_under_cgroup_proto;
824 	case BPF_FUNC_get_prandom_u32:
825 		return &bpf_get_prandom_u32_proto;
826 	case BPF_FUNC_probe_read_user:
827 		return &bpf_probe_read_user_proto;
828 	case BPF_FUNC_probe_read_kernel:
829 		return &bpf_probe_read_kernel_proto;
830 	case BPF_FUNC_probe_read:
831 		return &bpf_probe_read_compat_proto;
832 	case BPF_FUNC_probe_read_user_str:
833 		return &bpf_probe_read_user_str_proto;
834 	case BPF_FUNC_probe_read_kernel_str:
835 		return &bpf_probe_read_kernel_str_proto;
836 	case BPF_FUNC_probe_read_str:
837 		return &bpf_probe_read_compat_str_proto;
838 #ifdef CONFIG_CGROUPS
839 	case BPF_FUNC_get_current_cgroup_id:
840 		return &bpf_get_current_cgroup_id_proto;
841 #endif
842 	case BPF_FUNC_send_signal:
843 		return &bpf_send_signal_proto;
844 	case BPF_FUNC_send_signal_thread:
845 		return &bpf_send_signal_thread_proto;
846 	case BPF_FUNC_perf_event_read_value:
847 		return &bpf_perf_event_read_value_proto;
848 	case BPF_FUNC_get_ns_current_pid_tgid:
849 		return &bpf_get_ns_current_pid_tgid_proto;
850 	default:
851 		return NULL;
852 	}
853 }
854 
855 static const struct bpf_func_proto *
856 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
857 {
858 	switch (func_id) {
859 	case BPF_FUNC_perf_event_output:
860 		return &bpf_perf_event_output_proto;
861 	case BPF_FUNC_get_stackid:
862 		return &bpf_get_stackid_proto;
863 	case BPF_FUNC_get_stack:
864 		return &bpf_get_stack_proto;
865 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
866 	case BPF_FUNC_override_return:
867 		return &bpf_override_return_proto;
868 #endif
869 	default:
870 		return bpf_tracing_func_proto(func_id, prog);
871 	}
872 }
873 
874 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
875 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
876 					const struct bpf_prog *prog,
877 					struct bpf_insn_access_aux *info)
878 {
879 	if (off < 0 || off >= sizeof(struct pt_regs))
880 		return false;
881 	if (type != BPF_READ)
882 		return false;
883 	if (off % size != 0)
884 		return false;
885 	/*
886 	 * Assertion for 32 bit to make sure that the last 8-byte access
887 	 * (BPF_DW) to the last 4-byte member is disallowed.
888 	 */
889 	if (off + size > sizeof(struct pt_regs))
890 		return false;
891 
892 	return true;
893 }
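
/*
 * Example (illustrative sketch): since ctx is the raw pt_regs, a kprobe
 * program may load any aligned field from it, typically through
 * libbpf's arch-specific PT_REGS_PARM*() accessor macros:
 *
 *	SEC("kprobe/vfs_read")
 *	int probe(struct pt_regs *ctx)
 *	{
 *		void *file = (void *)PT_REGS_PARM1(ctx);
 *
 *		return 0;
 *	}
 */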
894 
895 const struct bpf_verifier_ops kprobe_verifier_ops = {
896 	.get_func_proto  = kprobe_prog_func_proto,
897 	.is_valid_access = kprobe_prog_is_valid_access,
898 };
899 
900 const struct bpf_prog_ops kprobe_prog_ops = {
901 };
902 
903 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
904 	   u64, flags, void *, data, u64, size)
905 {
906 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
907 
908 	/*
909 	 * r1 points to the perf tracepoint buffer, whose first 8 bytes are
910 	 * hidden from the bpf program and contain a pointer to 'struct pt_regs'.
911 	 * Fetch it from there and call the same bpf_perf_event_output() helper inline.
912 	 */
913 	return ____bpf_perf_event_output(regs, map, flags, data, size);
914 }
915 
916 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
917 	.func		= bpf_perf_event_output_tp,
918 	.gpl_only	= true,
919 	.ret_type	= RET_INTEGER,
920 	.arg1_type	= ARG_PTR_TO_CTX,
921 	.arg2_type	= ARG_CONST_MAP_PTR,
922 	.arg3_type	= ARG_ANYTHING,
923 	.arg4_type	= ARG_PTR_TO_MEM,
924 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
925 };
926 
927 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
928 	   u64, flags)
929 {
930 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
931 
932 	/*
933 	 * Same comment as in bpf_perf_event_output_tp(), only that this time
934 	 * the other helper's function body cannot be inlined due to being
935 	 * external, thus we need to call the raw helper function.
936 	 */
937 	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
938 			       flags, 0, 0);
939 }
940 
941 static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
942 	.func		= bpf_get_stackid_tp,
943 	.gpl_only	= true,
944 	.ret_type	= RET_INTEGER,
945 	.arg1_type	= ARG_PTR_TO_CTX,
946 	.arg2_type	= ARG_CONST_MAP_PTR,
947 	.arg3_type	= ARG_ANYTHING,
948 };
949 
950 BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
951 	   u64, flags)
952 {
953 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
954 
955 	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
956 			     (unsigned long) size, flags, 0);
957 }
958 
959 static const struct bpf_func_proto bpf_get_stack_proto_tp = {
960 	.func		= bpf_get_stack_tp,
961 	.gpl_only	= true,
962 	.ret_type	= RET_INTEGER,
963 	.arg1_type	= ARG_PTR_TO_CTX,
964 	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
965 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
966 	.arg4_type	= ARG_ANYTHING,
967 };
968 
969 static const struct bpf_func_proto *
970 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
971 {
972 	switch (func_id) {
973 	case BPF_FUNC_perf_event_output:
974 		return &bpf_perf_event_output_proto_tp;
975 	case BPF_FUNC_get_stackid:
976 		return &bpf_get_stackid_proto_tp;
977 	case BPF_FUNC_get_stack:
978 		return &bpf_get_stack_proto_tp;
979 	default:
980 		return bpf_tracing_func_proto(func_id, prog);
981 	}
982 }
983 
984 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
985 				    const struct bpf_prog *prog,
986 				    struct bpf_insn_access_aux *info)
987 {
988 	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
989 		return false;
990 	if (type != BPF_READ)
991 		return false;
992 	if (off % size != 0)
993 		return false;
994 
995 	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
996 	return true;
997 }
998 
999 const struct bpf_verifier_ops tracepoint_verifier_ops = {
1000 	.get_func_proto  = tp_prog_func_proto,
1001 	.is_valid_access = tp_prog_is_valid_access,
1002 };
1003 
1004 const struct bpf_prog_ops tracepoint_prog_ops = {
1005 };
1006 
1007 BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1008 	   struct bpf_perf_event_value *, buf, u32, size)
1009 {
1010 	int err = -EINVAL;
1011 
1012 	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1013 		goto clear;
1014 	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1015 				    &buf->running);
1016 	if (unlikely(err))
1017 		goto clear;
1018 	return 0;
1019 clear:
1020 	memset(buf, 0, size);
1021 	return err;
1022 }
1023 
1024 static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1025 	.func		= bpf_perf_prog_read_value,
1026 	.gpl_only	= true,
1027 	.ret_type	= RET_INTEGER,
1028 	.arg1_type	= ARG_PTR_TO_CTX,
1029 	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
1030 	.arg3_type	= ARG_CONST_SIZE,
1031 };
1032 
1033 BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1034 	   void *, buf, u32, size, u64, flags)
1035 {
1036 #ifndef CONFIG_X86
1037 	return -ENOENT;
1038 #else
1039 	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1040 	struct perf_branch_stack *br_stack = ctx->data->br_stack;
1041 	u32 to_copy;
1042 
1043 	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1044 		return -EINVAL;
1045 
1046 	if (unlikely(!br_stack))
1047 		return -EINVAL;
1048 
1049 	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1050 		return br_stack->nr * br_entry_size;
1051 
1052 	if (!buf || (size % br_entry_size != 0))
1053 		return -EINVAL;
1054 
1055 	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1056 	memcpy(buf, br_stack->entries, to_copy);
1057 
1058 	return to_copy;
1059 #endif
1060 }
1061 
1062 static const struct bpf_func_proto bpf_read_branch_records_proto = {
1063 	.func           = bpf_read_branch_records,
1064 	.gpl_only       = true,
1065 	.ret_type       = RET_INTEGER,
1066 	.arg1_type      = ARG_PTR_TO_CTX,
1067 	.arg2_type      = ARG_PTR_TO_MEM_OR_NULL,
1068 	.arg3_type      = ARG_CONST_SIZE_OR_ZERO,
1069 	.arg4_type      = ARG_ANYTHING,
1070 };
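
/*
 * Example (illustrative sketch): the intended two-step use -- query the
 * size with BPF_F_GET_BRANCH_RECORDS_SIZE first, then copy into "buf",
 * an assumed local array sized as a multiple of
 * sizeof(struct perf_branch_entry):
 *
 *	int sz = bpf_read_branch_records(ctx, NULL, 0,
 *					 BPF_F_GET_BRANCH_RECORDS_SIZE);
 *	if (sz > 0)
 *		sz = bpf_read_branch_records(ctx, buf, sizeof(buf), 0);
 */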
1071 
1072 static const struct bpf_func_proto *
1073 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1074 {
1075 	switch (func_id) {
1076 	case BPF_FUNC_perf_event_output:
1077 		return &bpf_perf_event_output_proto_tp;
1078 	case BPF_FUNC_get_stackid:
1079 		return &bpf_get_stackid_proto_tp;
1080 	case BPF_FUNC_get_stack:
1081 		return &bpf_get_stack_proto_tp;
1082 	case BPF_FUNC_perf_prog_read_value:
1083 		return &bpf_perf_prog_read_value_proto;
1084 	case BPF_FUNC_read_branch_records:
1085 		return &bpf_read_branch_records_proto;
1086 	default:
1087 		return bpf_tracing_func_proto(func_id, prog);
1088 	}
1089 }
1090 
1091 /*
1092  * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1093  * to avoid a potential recursive reuse issue when/if tracepoints are added
1094  * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1095  *
1096  * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1097  * in normal, irq, and nmi context.
1098  */
1099 struct bpf_raw_tp_regs {
1100 	struct pt_regs regs[3];
1101 };
1102 static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1103 static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1104 static struct pt_regs *get_bpf_raw_tp_regs(void)
1105 {
1106 	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1107 	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1108 
1109 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1110 		this_cpu_dec(bpf_raw_tp_nest_level);
1111 		return ERR_PTR(-EBUSY);
1112 	}
1113 
1114 	return &tp_regs->regs[nest_level - 1];
1115 }
1116 
1117 static void put_bpf_raw_tp_regs(void)
1118 {
1119 	this_cpu_dec(bpf_raw_tp_nest_level);
1120 }
1121 
1122 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1123 	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
1124 {
1125 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1126 	int ret;
1127 
1128 	if (IS_ERR(regs))
1129 		return PTR_ERR(regs);
1130 
1131 	perf_fetch_caller_regs(regs);
1132 	ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1133 
1134 	put_bpf_raw_tp_regs();
1135 	return ret;
1136 }
1137 
1138 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1139 	.func		= bpf_perf_event_output_raw_tp,
1140 	.gpl_only	= true,
1141 	.ret_type	= RET_INTEGER,
1142 	.arg1_type	= ARG_PTR_TO_CTX,
1143 	.arg2_type	= ARG_CONST_MAP_PTR,
1144 	.arg3_type	= ARG_ANYTHING,
1145 	.arg4_type	= ARG_PTR_TO_MEM,
1146 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1147 };
1148 
1149 extern const struct bpf_func_proto bpf_skb_output_proto;
1150 extern const struct bpf_func_proto bpf_xdp_output_proto;
1151 
1152 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1153 	   struct bpf_map *, map, u64, flags)
1154 {
1155 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1156 	int ret;
1157 
1158 	if (IS_ERR(regs))
1159 		return PTR_ERR(regs);
1160 
1161 	perf_fetch_caller_regs(regs);
1162 	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1163 	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1164 			      flags, 0, 0);
1165 	put_bpf_raw_tp_regs();
1166 	return ret;
1167 }
1168 
1169 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1170 	.func		= bpf_get_stackid_raw_tp,
1171 	.gpl_only	= true,
1172 	.ret_type	= RET_INTEGER,
1173 	.arg1_type	= ARG_PTR_TO_CTX,
1174 	.arg2_type	= ARG_CONST_MAP_PTR,
1175 	.arg3_type	= ARG_ANYTHING,
1176 };
1177 
1178 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1179 	   void *, buf, u32, size, u64, flags)
1180 {
1181 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1182 	int ret;
1183 
1184 	if (IS_ERR(regs))
1185 		return PTR_ERR(regs);
1186 
1187 	perf_fetch_caller_regs(regs);
1188 	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1189 			    (unsigned long) size, flags, 0);
1190 	put_bpf_raw_tp_regs();
1191 	return ret;
1192 }
1193 
1194 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1195 	.func		= bpf_get_stack_raw_tp,
1196 	.gpl_only	= true,
1197 	.ret_type	= RET_INTEGER,
1198 	.arg1_type	= ARG_PTR_TO_CTX,
1199 	.arg2_type	= ARG_PTR_TO_MEM,
1200 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1201 	.arg4_type	= ARG_ANYTHING,
1202 };
1203 
1204 static const struct bpf_func_proto *
1205 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1206 {
1207 	switch (func_id) {
1208 	case BPF_FUNC_perf_event_output:
1209 		return &bpf_perf_event_output_proto_raw_tp;
1210 	case BPF_FUNC_get_stackid:
1211 		return &bpf_get_stackid_proto_raw_tp;
1212 	case BPF_FUNC_get_stack:
1213 		return &bpf_get_stack_proto_raw_tp;
1214 	default:
1215 		return bpf_tracing_func_proto(func_id, prog);
1216 	}
1217 }
1218 
1219 static const struct bpf_func_proto *
1220 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1221 {
1222 	switch (func_id) {
1223 #ifdef CONFIG_NET
1224 	case BPF_FUNC_skb_output:
1225 		return &bpf_skb_output_proto;
1226 	case BPF_FUNC_xdp_output:
1227 		return &bpf_xdp_output_proto;
1228 #endif
1229 	default:
1230 		return raw_tp_prog_func_proto(func_id, prog);
1231 	}
1232 }
1233 
1234 static bool raw_tp_prog_is_valid_access(int off, int size,
1235 					enum bpf_access_type type,
1236 					const struct bpf_prog *prog,
1237 					struct bpf_insn_access_aux *info)
1238 {
1239 	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1240 		return false;
1241 	if (type != BPF_READ)
1242 		return false;
1243 	if (off % size != 0)
1244 		return false;
1245 	return true;
1246 }
1247 
1248 static bool tracing_prog_is_valid_access(int off, int size,
1249 					 enum bpf_access_type type,
1250 					 const struct bpf_prog *prog,
1251 					 struct bpf_insn_access_aux *info)
1252 {
1253 	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1254 		return false;
1255 	if (type != BPF_READ)
1256 		return false;
1257 	if (off % size != 0)
1258 		return false;
1259 	return btf_ctx_access(off, size, type, prog, info);
1260 }
1261 
1262 int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
1263 				     const union bpf_attr *kattr,
1264 				     union bpf_attr __user *uattr)
1265 {
1266 	return -ENOTSUPP;
1267 }
1268 
1269 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
1270 	.get_func_proto  = raw_tp_prog_func_proto,
1271 	.is_valid_access = raw_tp_prog_is_valid_access,
1272 };
1273 
1274 const struct bpf_prog_ops raw_tracepoint_prog_ops = {
1275 };
1276 
1277 const struct bpf_verifier_ops tracing_verifier_ops = {
1278 	.get_func_proto  = tracing_prog_func_proto,
1279 	.is_valid_access = tracing_prog_is_valid_access,
1280 };
1281 
1282 const struct bpf_prog_ops tracing_prog_ops = {
1283 	.test_run = bpf_prog_test_run_tracing,
1284 };
1285 
1286 static bool raw_tp_writable_prog_is_valid_access(int off, int size,
1287 						 enum bpf_access_type type,
1288 						 const struct bpf_prog *prog,
1289 						 struct bpf_insn_access_aux *info)
1290 {
1291 	if (off == 0) {
1292 		if (size != sizeof(u64) || type != BPF_READ)
1293 			return false;
1294 		info->reg_type = PTR_TO_TP_BUFFER;
1295 	}
1296 	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
1297 }
1298 
1299 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
1300 	.get_func_proto  = raw_tp_prog_func_proto,
1301 	.is_valid_access = raw_tp_writable_prog_is_valid_access,
1302 };
1303 
1304 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
1305 };
1306 
1307 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1308 				    const struct bpf_prog *prog,
1309 				    struct bpf_insn_access_aux *info)
1310 {
1311 	const int size_u64 = sizeof(u64);
1312 
1313 	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
1314 		return false;
1315 	if (type != BPF_READ)
1316 		return false;
1317 	if (off % size != 0) {
1318 		if (sizeof(unsigned long) != 4)
1319 			return false;
1320 		if (size != 8)
1321 			return false;
1322 		if (off % size != 4)
1323 			return false;
1324 	}
1325 
1326 	switch (off) {
1327 	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
1328 		bpf_ctx_record_field_size(info, size_u64);
1329 		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1330 			return false;
1331 		break;
1332 	case bpf_ctx_range(struct bpf_perf_event_data, addr):
1333 		bpf_ctx_record_field_size(info, size_u64);
1334 		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1335 			return false;
1336 		break;
1337 	default:
1338 		if (size != sizeof(long))
1339 			return false;
1340 	}
1341 
1342 	return true;
1343 }
1344 
1345 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
1346 				      const struct bpf_insn *si,
1347 				      struct bpf_insn *insn_buf,
1348 				      struct bpf_prog *prog, u32 *target_size)
1349 {
1350 	struct bpf_insn *insn = insn_buf;
1351 
1352 	switch (si->off) {
1353 	case offsetof(struct bpf_perf_event_data, sample_period):
1354 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1355 						       data), si->dst_reg, si->src_reg,
1356 				      offsetof(struct bpf_perf_event_data_kern, data));
1357 		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1358 				      bpf_target_off(struct perf_sample_data, period, 8,
1359 						     target_size));
1360 		break;
1361 	case offsetof(struct bpf_perf_event_data, addr):
1362 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1363 						       data), si->dst_reg, si->src_reg,
1364 				      offsetof(struct bpf_perf_event_data_kern, data));
1365 		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1366 				      bpf_target_off(struct perf_sample_data, addr, 8,
1367 						     target_size));
1368 		break;
1369 	default:
1370 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1371 						       regs), si->dst_reg, si->src_reg,
1372 				      offsetof(struct bpf_perf_event_data_kern, regs));
1373 		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
1374 				      si->off);
1375 		break;
1376 	}
1377 
1378 	return insn - insn_buf;
1379 }
1380 
1381 const struct bpf_verifier_ops perf_event_verifier_ops = {
1382 	.get_func_proto		= pe_prog_func_proto,
1383 	.is_valid_access	= pe_prog_is_valid_access,
1384 	.convert_ctx_access	= pe_prog_convert_ctx_access,
1385 };
1386 
1387 const struct bpf_prog_ops perf_event_prog_ops = {
1388 };
1389 
1390 static DEFINE_MUTEX(bpf_event_mutex);
1391 
1392 #define BPF_TRACE_MAX_PROGS 64
1393 
1394 int perf_event_attach_bpf_prog(struct perf_event *event,
1395 			       struct bpf_prog *prog)
1396 {
1397 	struct bpf_prog_array *old_array;
1398 	struct bpf_prog_array *new_array;
1399 	int ret = -EEXIST;
1400 
1401 	/*
1402 	 * Kprobe override only works if the kprobe is on the function entry,
1403 	 * and only if the target function is on the error-injection opt-in list.
1404 	 */
1405 	if (prog->kprobe_override &&
1406 	    (!trace_kprobe_on_func_entry(event->tp_event) ||
1407 	     !trace_kprobe_error_injectable(event->tp_event)))
1408 		return -EINVAL;
1409 
1410 	mutex_lock(&bpf_event_mutex);
1411 
1412 	if (event->prog)
1413 		goto unlock;
1414 
1415 	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1416 	if (old_array &&
1417 	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
1418 		ret = -E2BIG;
1419 		goto unlock;
1420 	}
1421 
1422 	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
1423 	if (ret < 0)
1424 		goto unlock;
1425 
1426 	/* set the new array to event->tp_event and set event->prog */
1427 	event->prog = prog;
1428 	rcu_assign_pointer(event->tp_event->prog_array, new_array);
1429 	bpf_prog_array_free(old_array);
1430 
1431 unlock:
1432 	mutex_unlock(&bpf_event_mutex);
1433 	return ret;
1434 }
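
/*
 * Example (illustrative sketch): user space reaches the function above
 * via the PERF_EVENT_IOC_SET_BPF ioctl on a perf event fd; "attr" and
 * "bpf_prog_fd" are assumed to be prepared elsewhere:
 *
 *	int pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *
 *	ioctl(pfd, PERF_EVENT_IOC_SET_BPF, bpf_prog_fd);
 */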
1435 
1436 void perf_event_detach_bpf_prog(struct perf_event *event)
1437 {
1438 	struct bpf_prog_array *old_array;
1439 	struct bpf_prog_array *new_array;
1440 	int ret;
1441 
1442 	mutex_lock(&bpf_event_mutex);
1443 
1444 	if (!event->prog)
1445 		goto unlock;
1446 
1447 	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1448 	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
1449 	if (ret == -ENOENT)
1450 		goto unlock;
1451 	if (ret < 0) {
1452 		bpf_prog_array_delete_safe(old_array, event->prog);
1453 	} else {
1454 		rcu_assign_pointer(event->tp_event->prog_array, new_array);
1455 		bpf_prog_array_free(old_array);
1456 	}
1457 
1458 	bpf_prog_put(event->prog);
1459 	event->prog = NULL;
1460 
1461 unlock:
1462 	mutex_unlock(&bpf_event_mutex);
1463 }
1464 
1465 int perf_event_query_prog_array(struct perf_event *event, void __user *info)
1466 {
1467 	struct perf_event_query_bpf __user *uquery = info;
1468 	struct perf_event_query_bpf query = {};
1469 	struct bpf_prog_array *progs;
1470 	u32 *ids, prog_cnt, ids_len;
1471 	int ret;
1472 
1473 	if (!capable(CAP_SYS_ADMIN))
1474 		return -EPERM;
1475 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
1476 		return -EINVAL;
1477 	if (copy_from_user(&query, uquery, sizeof(query)))
1478 		return -EFAULT;
1479 
1480 	ids_len = query.ids_len;
1481 	if (ids_len > BPF_TRACE_MAX_PROGS)
1482 		return -E2BIG;
1483 	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
1484 	if (!ids)
1485 		return -ENOMEM;
1486 	/*
1487 	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
1488 	 * is required when the user only wants to check uquery->prog_cnt.
1489 	 * There is no need to check for it here since the case is handled
1490 	 * gracefully in bpf_prog_array_copy_info.
1491 	 */
1492 
1493 	mutex_lock(&bpf_event_mutex);
1494 	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
1495 	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
1496 	mutex_unlock(&bpf_event_mutex);
1497 
1498 	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
1499 	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
1500 		ret = -EFAULT;
1501 
1502 	kfree(ids);
1503 	return ret;
1504 }
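
/*
 * Example (illustrative sketch): querying the attached program IDs from
 * user space with PERF_EVENT_IOC_QUERY_BPF on the same perf event fd:
 *
 *	struct perf_event_query_bpf *q;
 *
 *	q = calloc(1, sizeof(*q) + 8 * sizeof(__u32));
 *	q->ids_len = 8;
 *	if (ioctl(pfd, PERF_EVENT_IOC_QUERY_BPF, q) == 0)
 *		printf("%u programs attached\n", q->prog_cnt);
 */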
1505 
1506 extern struct bpf_raw_event_map __start__bpf_raw_tp[];
1507 extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
1508 
1509 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
1510 {
1511 	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
1512 
1513 	for (; btp < __stop__bpf_raw_tp; btp++) {
1514 		if (!strcmp(btp->tp->name, name))
1515 			return btp;
1516 	}
1517 
1518 	return bpf_get_raw_tracepoint_module(name);
1519 }
1520 
1521 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
1522 {
1523 	struct module *mod = __module_address((unsigned long)btp);
1524 
1525 	if (mod)
1526 		module_put(mod);
1527 }
1528 
1529 static __always_inline
1530 void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
1531 {
1532 	cant_sleep();
1533 	rcu_read_lock();
1534 	(void) BPF_PROG_RUN(prog, args);
1535 	rcu_read_unlock();
1536 }
1537 
1538 #define UNPACK(...)			__VA_ARGS__
1539 #define REPEAT_1(FN, DL, X, ...)	FN(X)
1540 #define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
1541 #define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
1542 #define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
1543 #define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
1544 #define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
1545 #define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
1546 #define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
1547 #define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
1548 #define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
1549 #define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
1550 #define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
1551 #define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)
1552 
1553 #define SARG(X)		u64 arg##X
1554 #define COPY(X)		args[X] = arg##X
1555 
1556 #define __DL_COM	(,)
1557 #define __DL_SEM	(;)
1558 
1559 #define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
1560 
1561 #define BPF_TRACE_DEFN_x(x)						\
1562 	void bpf_trace_run##x(struct bpf_prog *prog,			\
1563 			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
1564 	{								\
1565 		u64 args[x];						\
1566 		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
1567 		__bpf_trace_run(prog, args);				\
1568 	}								\
1569 	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
1570 BPF_TRACE_DEFN_x(1);
1571 BPF_TRACE_DEFN_x(2);
1572 BPF_TRACE_DEFN_x(3);
1573 BPF_TRACE_DEFN_x(4);
1574 BPF_TRACE_DEFN_x(5);
1575 BPF_TRACE_DEFN_x(6);
1576 BPF_TRACE_DEFN_x(7);
1577 BPF_TRACE_DEFN_x(8);
1578 BPF_TRACE_DEFN_x(9);
1579 BPF_TRACE_DEFN_x(10);
1580 BPF_TRACE_DEFN_x(11);
1581 BPF_TRACE_DEFN_x(12);
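
/*
 * For reference, BPF_TRACE_DEFN_x(2) expands roughly to:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */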
1582 
1583 static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1584 {
1585 	struct tracepoint *tp = btp->tp;
1586 
1587 	/*
1588 	 * check that the program doesn't access arguments beyond what's
1589 	 * available in this tracepoint
1590 	 */
1591 	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
1592 		return -EINVAL;
1593 
1594 	if (prog->aux->max_tp_access > btp->writable_size)
1595 		return -EINVAL;
1596 
1597 	return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
1598 }
1599 
1600 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1601 {
1602 	return __bpf_probe_register(btp, prog);
1603 }
1604 
1605 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1606 {
1607 	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
1608 }
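
/*
 * Example (illustrative sketch): user space reaches bpf_probe_register()
 * via the BPF_RAW_TRACEPOINT_OPEN command; ptr_to_u64() stands in for a
 * cast helper and "prog_fd" for a loaded raw_tracepoint program:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.raw_tracepoint.name = ptr_to_u64("sched_switch");
 *	attr.raw_tracepoint.prog_fd = prog_fd;
 *	syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
 */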
1609 
1610 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
1611 			    u32 *fd_type, const char **buf,
1612 			    u64 *probe_offset, u64 *probe_addr)
1613 {
1614 	bool is_tracepoint, is_syscall_tp;
1615 	struct bpf_prog *prog;
1616 	int flags, err = 0;
1617 
1618 	prog = event->prog;
1619 	if (!prog)
1620 		return -ENOENT;
1621 
1622 	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
1623 	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
1624 		return -EOPNOTSUPP;
1625 
1626 	*prog_id = prog->aux->id;
1627 	flags = event->tp_event->flags;
1628 	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
1629 	is_syscall_tp = is_syscall_trace_event(event->tp_event);
1630 
1631 	if (is_tracepoint || is_syscall_tp) {
1632 		*buf = is_tracepoint ? event->tp_event->tp->name
1633 				     : event->tp_event->name;
1634 		*fd_type = BPF_FD_TYPE_TRACEPOINT;
1635 		*probe_offset = 0x0;
1636 		*probe_addr = 0x0;
1637 	} else {
1638 		/* kprobe/uprobe */
1639 		err = -EOPNOTSUPP;
1640 #ifdef CONFIG_KPROBE_EVENTS
1641 		if (flags & TRACE_EVENT_FL_KPROBE)
1642 			err = bpf_get_kprobe_info(event, fd_type, buf,
1643 						  probe_offset, probe_addr,
1644 						  event->attr.type == PERF_TYPE_TRACEPOINT);
1645 #endif
1646 #ifdef CONFIG_UPROBE_EVENTS
1647 		if (flags & TRACE_EVENT_FL_UPROBE)
1648 			err = bpf_get_uprobe_info(event, fd_type, buf,
1649 						  probe_offset,
1650 						  event->attr.type == PERF_TYPE_TRACEPOINT);
1651 #endif
1652 	}
1653 
1654 	return err;
1655 }
1656 
1657 static int __init send_signal_irq_work_init(void)
1658 {
1659 	int cpu;
1660 	struct send_signal_irq_work *work;
1661 
1662 	for_each_possible_cpu(cpu) {
1663 		work = per_cpu_ptr(&send_signal_work, cpu);
1664 		init_irq_work(&work->irq_work, do_bpf_send_signal);
1665 	}
1666 	return 0;
1667 }
1668 
1669 subsys_initcall(send_signal_irq_work_init);
1670 
1671 #ifdef CONFIG_MODULES
1672 static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
1673 			    void *module)
1674 {
1675 	struct bpf_trace_module *btm, *tmp;
1676 	struct module *mod = module;
1677 
1678 	if (mod->num_bpf_raw_events == 0 ||
1679 	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
1680 		return 0;
1681 
1682 	mutex_lock(&bpf_module_mutex);
1683 
1684 	switch (op) {
1685 	case MODULE_STATE_COMING:
1686 		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
1687 		if (btm) {
1688 			btm->module = module;
1689 			list_add(&btm->list, &bpf_trace_modules);
1690 		}
1691 		break;
1692 	case MODULE_STATE_GOING:
1693 		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
1694 			if (btm->module == module) {
1695 				list_del(&btm->list);
1696 				kfree(btm);
1697 				break;
1698 			}
1699 		}
1700 		break;
1701 	}
1702 
1703 	mutex_unlock(&bpf_module_mutex);
1704 
1705 	return 0;
1706 }
1707 
1708 static struct notifier_block bpf_module_nb = {
1709 	.notifier_call = bpf_event_notify,
1710 };
1711 
1712 static int __init bpf_event_init(void)
1713 {
1714 	register_module_notifier(&bpf_module_nb);
1715 	return 0;
1716 }
1717 
1718 fs_initcall(bpf_event_init);
1719 #endif /* CONFIG_MODULES */
1720