// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf_perf_event.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>
#include <linux/bpf_lsm.h>
#include <linux/fprobe.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/key.h>
#include <linux/verification.h>
#include <linux/namei.h>
#include <linux/fileattr.h>

#include <net/bpf_sk_storage.h>

#include <uapi/linux/bpf.h>
#include <uapi/linux/btf.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"

#define bpf_event_rcu_dereference(p)					\
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#define MAX_UPROBE_MULTI_CNT (1U << 20)
#define MAX_KPROBE_MULTI_CNT (1U << 20)

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id);
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);

static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx);
static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * Since some BPF program is already running on this CPU,
		 * don't call into another BPF program (same or different)
		 * and don't send the kprobe event into the ring buffer,
		 * so return zero here.
		 */
		rcu_read_lock();
		bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array));
		rcu_read_unlock();
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we check there with bpf_prog_array_valid()
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If the prog_array fetched by bpf_prog_array_valid() was
	 * non-NULL, we enter trace_call_bpf() and do the actual
	 * proper rcu_dereference() under the RCU lock.
	 * If it turns out that prog_array is NULL here, we bail out.
	 * Conversely, if the fetched pointer was NULL, the prog_array is
	 * skipped, with the accepted risk of missing out on events if it
	 * was updated between that check and the rcu_dereference().
	 */
	rcu_read_lock();
	ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
				 ctx, bpf_prog_run);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}
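
/*
 * Example (illustrative sketch, not part of this file): a minimal
 * libbpf-style kprobe program whose return value trace_call_bpf()
 * above interprets -- 0 filters the event out, 1 stores it into the
 * ring buffer. The attach point and the pid filter are hypothetical.
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int filter_opens(struct pt_regs *ctx)
 *	{
 *		// keep the event only for one (made-up) pid of interest
 *		return (bpf_get_current_pid_tgid() >> 32) == 1234;
 *	}
 */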

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func		= bpf_override_return,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
#endif

static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
	int ret;

	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func		= bpf_probe_read_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
			       const void __user *unsafe_ptr)
{
	int ret;

	/*
	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
	 * terminator into `dst`.
	 *
	 * strncpy_from_user() does long-sized strides in the fast path. If the
	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
	 * then there could be junk after the NUL in `dst`. If user takes `dst`
	 * and keys a hash map with it, then semantically identical strings can
	 * occupy multiple entries in the map.
	 */
	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func		= bpf_probe_read_user_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
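
/*
 * Example (sketch): why the "no junk past the NUL" guarantee above
 * matters to callers. A program that keys a hash map with the copied
 * string depends on equal strings having identical key bytes. The map
 * and buffer below are hypothetical.
 *
 *	char name[32] = {};
 *	long n = bpf_probe_read_user_str(name, sizeof(name), user_ptr);
 *
 *	if (n > 0)
 *		bpf_map_update_elem(&counts, name, &one, BPF_ANY);
 */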

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func		= bpf_probe_read_kernel,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_kernel_nofault() call will likely not fill the
	 * entire buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func		= bpf_probe_read_kernel_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func		= bpf_probe_read_compat,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_str_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func		= bpf_probe_read_compat_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */

BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return copy_to_user_nofault(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return NULL;

	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

#define MAX_TRACE_PRINTK_VARARGS	3
#define BPF_TRACE_PRINTK_SIZE		1024

BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
	struct bpf_bprintf_data data = {
		.get_bin_args	= true,
		.get_buf	= true,
	};
	int ret;

	ret = bpf_bprintf_prepare(fmt, fmt_size, args,
				  MAX_TRACE_PRINTK_VARARGS, &data);
	if (ret < 0)
		return ret;

	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);

	trace_bpf_trace_printk(data.buf);

	bpf_bprintf_cleanup(&data);

	return ret;
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
};

static void __set_printk_clr_event(void)
{
	/*
	 * This program might be calling bpf_trace_printk,
	 * so enable the associated bpf_trace/bpf_trace_printk event.
	 * Repeat this each time as it is possible a user has
	 * disabled bpf_trace_printk events.  By loading a program
	 * calling bpf_trace_printk() however the user has expressed
	 * the intent to see such events.
	 */
	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
		pr_warn_ratelimited("could not enable bpf_trace_printk events");
}

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_printk_proto;
}
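
/*
 * Example (sketch): typical bpf_trace_printk() use from a program. At
 * most MAX_TRACE_PRINTK_VARARGS (3) arguments are accepted, and the
 * output is emitted through the bpf_trace/bpf_trace_printk event
 * enabled by __set_printk_clr_event() above, typically readable from
 * /sys/kernel/debug/tracing/trace_pipe.
 *
 *	const char fmt[] = "pid %d opened fd %d\n";
 *
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, fd);
 */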

BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args,
	   u32, data_len)
{
	struct bpf_bprintf_data data = {
		.get_bin_args	= true,
		.get_buf	= true,
	};
	int ret, num_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !args))
		return -EINVAL;
	num_args = data_len / 8;

	ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
	if (ret < 0)
		return ret;

	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);

	trace_bpf_trace_printk(data.buf);

	bpf_bprintf_cleanup(&data);

	return ret;
}

static const struct bpf_func_proto bpf_trace_vprintk_proto = {
	.func		= bpf_trace_vprintk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
};

const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_vprintk_proto;
}

BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
	   const void *, args, u32, data_len)
{
	struct bpf_bprintf_data data = {
		.get_bin_args	= true,
	};
	int err, num_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !args))
		return -EINVAL;
	num_args = data_len / 8;

	err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
	if (err < 0)
		return err;

	seq_bprintf(m, fmt, data.bin_args);

	bpf_bprintf_cleanup(&data);

	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
}

BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto = {
	.func		= bpf_seq_printf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type      = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
{
	return seq_write(m, data, len) ? -EOVERFLOW : 0;
}

static const struct bpf_func_proto bpf_seq_write_proto = {
	.func		= bpf_seq_write,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
}

static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
	.func		= bpf_seq_printf_btf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
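
/*
 * Example (sketch): the seq_file helpers above back BPF iterator
 * programs, which receive a seq_file through their context. A
 * hypothetical task iterator built with libbpf's BPF_SEQ_PRINTF
 * wrapper (which feeds bpf_seq_printf()) might look like:
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *m = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *
 *		if (task)
 *			BPF_SEQ_PRINTF(m, "%d %s\n", task->pid, task->comm);
 *		return 0;
 *	}
 */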

static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func		= bpf_perf_event_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}

/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds;
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int nest_level, err;

	preempt_disable();
	sds = this_cpu_ptr(&bpf_trace_sds);
	nest_level = this_cpu_inc_return(bpf_trace_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	perf_sample_save_raw_data(sd, &raw);

	err = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_trace_nest_level);
	preempt_enable();
	return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
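
/*
 * Example (sketch): program-side use of bpf_perf_event_output() with a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map, which lands in
 * __bpf_perf_event_output() above. BPF_F_CURRENT_CPU selects the ring
 * of the CPU the program runs on. The map and event struct are
 * hypothetical.
 *
 *	struct event e = { .pid = bpf_get_current_pid_tgid() >> 32 };
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &e, sizeof(e));
 */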

static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	int nest_level;
	u64 ret;

	preempt_disable();
	nest_level = this_cpu_inc_return(bpf_event_output_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	perf_sample_save_raw_data(sd, &raw);

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	preempt_enable();
	return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_task_btf)
{
	return (unsigned long) current;
}

const struct bpf_func_proto bpf_get_current_task_btf_proto = {
	.func		= bpf_get_current_task_btf,
	.gpl_only	= true,
	.ret_type	= RET_PTR_TO_BTF_ID_TRUSTED,
	.ret_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};

BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
{
	return (unsigned long) task_pt_regs(task);
}

BTF_ID_LIST(bpf_task_pt_regs_ids)
BTF_ID(struct, pt_regs)

const struct bpf_func_proto bpf_task_pt_regs_proto = {
	.func		= bpf_task_pt_regs,
	.gpl_only	= true,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.ret_type	= RET_PTR_TO_BTF_ID,
	.ret_btf_id	= &bpf_task_pt_regs_ids[0],
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func           = bpf_current_task_under_cgroup,
	.gpl_only       = false,
	.ret_type       = RET_INTEGER,
	.arg1_type      = ARG_CONST_MAP_PTR,
	.arg2_type      = ARG_ANYTHING,
};

struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
	enum pid_type type;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
	put_task_struct(work->task);
}

static int bpf_send_signal_common(u32 sig, enum pid_type type)
{
	struct send_signal_irq_work *work = NULL;

	/* Similar to bpf_probe_write_user, the task needs to be
	 * in a sound condition and kernel memory access must be
	 * permitted in order to send a signal to the current
	 * task.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;
	/* Task should not be pid=1 to avoid kernel panic. */
	if (unlikely(is_global_init(current)))
		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (irq_work_is_busy(&work->irq_work))
			return -EBUSY;

		/* Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
		work->task = get_task_struct(current);
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func		= bpf_send_signal,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_PID);
}

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func		= bpf_send_signal_thread,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};
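
/*
 * Example (sketch): delivering SIGUSR1 from a program to the current
 * process. bpf_send_signal() targets the whole thread group
 * (PIDTYPE_TGID), bpf_send_signal_thread() only the current thread
 * (PIDTYPE_PID), mirroring the split above.
 *
 *	if (bpf_send_signal(SIGUSR1))
 *		bpf_printk("signal delivery failed");
 */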

BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
{
	struct path copy;
	long len;
	char *p;

	if (!sz)
		return 0;

	/*
	 * The path pointer is verified as trusted and safe to use,
	 * but let's double-check that it's valid anyway, to work
	 * around a potentially broken verifier.
	 */
	len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
	if (len < 0)
		return len;

	p = d_path(&copy, buf, sz);
	if (IS_ERR(p)) {
		len = PTR_ERR(p);
	} else {
		len = buf + sz - p;
		memmove(buf, p, len);
	}

	return len;
}

BTF_SET_START(btf_allowlist_d_path)
#ifdef CONFIG_SECURITY
BTF_ID(func, security_file_permission)
BTF_ID(func, security_inode_getattr)
BTF_ID(func, security_file_open)
#endif
#ifdef CONFIG_SECURITY_PATH
BTF_ID(func, security_path_truncate)
#endif
BTF_ID(func, vfs_truncate)
BTF_ID(func, vfs_fallocate)
BTF_ID(func, dentry_open)
BTF_ID(func, vfs_getattr)
BTF_ID(func, filp_close)
BTF_SET_END(btf_allowlist_d_path)

static bool bpf_d_path_allowed(const struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_TRACING &&
	    prog->expected_attach_type == BPF_TRACE_ITER)
		return true;

	if (prog->type == BPF_PROG_TYPE_LSM)
		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);

	return btf_id_set_contains(&btf_allowlist_d_path,
				   prog->aux->attach_btf_id);
}

BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)

static const struct bpf_func_proto bpf_d_path_proto = {
	.func		= bpf_d_path,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &bpf_d_path_btf_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.allowed	= bpf_d_path_allowed,
};
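
/*
 * Example (sketch): bpf_d_path() called from one of the allowlisted
 * attach points, here an LSM program on file_open (the hook choice is
 * illustrative; it must satisfy bpf_d_path_allowed() above).
 *
 *	SEC("lsm/file_open")
 *	int BPF_PROG(log_open, struct file *file)
 *	{
 *		char buf[256];
 *		long n = bpf_d_path(&file->f_path, buf, sizeof(buf));
 *
 *		if (n > 0)
 *			bpf_printk("open: %s", buf);
 *		return 0;
 *	}
 */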

#define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
			 BTF_F_PTR_RAW | BTF_F_ZERO)

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id)
{
	const struct btf_type *t;

	if (unlikely(flags & ~(BTF_F_ALL)))
		return -EINVAL;

	if (btf_ptr_size != sizeof(struct btf_ptr))
		return -EINVAL;

	*btf = bpf_get_btf_vmlinux();

	if (IS_ERR_OR_NULL(*btf))
		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;

	if (ptr->type_id > 0)
		*btf_id = ptr->type_id;
	else
		return -EINVAL;

	if (*btf_id > 0)
		t = btf_type_by_id(*btf, *btf_id);
	if (*btf_id <= 0 || !t)
		return -ENOENT;

	return 0;
}

BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
				      flags);
}

const struct bpf_func_proto bpf_snprintf_btf_proto = {
	.func		= bpf_snprintf_btf,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
{
	/* This helper call is inlined by verifier. */
	return ((u64 *)ctx)[-2];
}

static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
	.func		= bpf_get_func_ip_tracing,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

#ifdef CONFIG_X86_KERNEL_IBT
static unsigned long get_entry_ip(unsigned long fentry_ip)
{
	u32 instr;

	/* We want to be extra safe in case entry ip is on the page edge,
	 * but otherwise we need to avoid get_kernel_nofault()'s overhead.
	 */
	if ((fentry_ip & ~PAGE_MASK) < ENDBR_INSN_SIZE) {
		if (get_kernel_nofault(instr, (u32 *)(fentry_ip - ENDBR_INSN_SIZE)))
			return fentry_ip;
	} else {
		instr = *(u32 *)(fentry_ip - ENDBR_INSN_SIZE);
	}
	if (is_endbr(instr))
		fentry_ip -= ENDBR_INSN_SIZE;
	return fentry_ip;
}
#else
#define get_entry_ip(fentry_ip) fentry_ip
#endif

BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
{
	struct bpf_trace_run_ctx *run_ctx __maybe_unused;
	struct kprobe *kp;

#ifdef CONFIG_UPROBES
	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	if (run_ctx->is_uprobe)
		return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr;
#endif

	kp = kprobe_running();

	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
		return 0;

	return get_entry_ip((uintptr_t)kp->addr);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
	.func		= bpf_get_func_ip_kprobe,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
{
	return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
	.func		= bpf_get_func_ip_kprobe_multi,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
{
	return bpf_kprobe_multi_cookie(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
	.func		= bpf_get_attach_cookie_kprobe_multi,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_func_ip_uprobe_multi, struct pt_regs *, regs)
{
	return bpf_uprobe_multi_entry_ip(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_uprobe_multi = {
	.func		= bpf_get_func_ip_uprobe_multi,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi, struct pt_regs *, regs)
{
	return bpf_uprobe_multi_cookie(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_umulti = {
	.func		= bpf_get_attach_cookie_uprobe_multi,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
	.func		= bpf_get_attach_cookie_trace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
{
	return ctx->event->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
	.func		= bpf_get_attach_cookie_pe,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
	.func		= bpf_get_attach_cookie_tracing,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
{
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	u32 entry_cnt = size / br_entry_size;

	entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);

	if (unlikely(flags))
		return -EINVAL;

	if (!entry_cnt)
		return -ENOENT;

	return entry_cnt * br_entry_size;
}

static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
	.func		= bpf_get_branch_snapshot,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
{
	/* This helper call is inlined by verifier. */
	u64 nr_args = ((u64 *)ctx)[-1];

	if ((u64) n >= nr_args)
		return -EINVAL;
	*value = ((u64 *)ctx)[n];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_arg_proto = {
	.func		= get_func_arg,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
{
	/* This helper call is inlined by verifier. */
	u64 nr_args = ((u64 *)ctx)[-1];

	*value = ((u64 *)ctx)[nr_args];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_ret_proto = {
	.func		= get_func_ret,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_1(get_func_arg_cnt, void *, ctx)
{
	/* This helper call is inlined by verifier. */
	return ((u64 *)ctx)[-1];
}

static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
	.func		= get_func_arg_cnt,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};
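
/*
 * Example (sketch): the three helpers above let a fentry/fexit program
 * inspect its traced function generically; the target function below
 * is hypothetical.
 *
 *	SEC("fexit/vfs_read")
 *	int BPF_PROG(read_ret)
 *	{
 *		u64 nr_args = bpf_get_func_arg_cnt(ctx);
 *		u64 arg0, ret;
 *
 *		bpf_get_func_arg(ctx, 0, &arg0);	// first argument
 *		bpf_get_func_ret(ctx, &ret);		// return value
 *		bpf_printk("%llu args, ret %llu", nr_args, ret);
 *		return 0;
 *	}
 */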

#ifdef CONFIG_KEYS
__bpf_kfunc_start_defs();

/**
 * bpf_lookup_user_key - lookup a key by its serial
 * @serial: key handle serial number
 * @flags: lookup-specific flags
 *
 * Search a key with a given *serial* and the provided *flags*.
 * If found, increment the reference count of the key by one, and
 * return it in the bpf_key structure.
 *
 * The bpf_key structure must be passed to bpf_key_put() when done
 * with it, so that the key reference count is decremented and the
 * bpf_key structure is freed.
 *
 * Permission checks are deferred to the time the key is used by
 * one of the available key-specific kfuncs.
 *
 * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
 * special keyring (e.g. session keyring), if it doesn't yet exist.
 * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
 * for the key construction, and to retrieve uninstantiated keys (keys
 * without data attached to them).
 *
 * Return: a bpf_key pointer with a valid key pointer if the key is found, a
 *         NULL pointer otherwise.
 */
__bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
{
	key_ref_t key_ref;
	struct bpf_key *bkey;

	if (flags & ~KEY_LOOKUP_ALL)
		return NULL;

	/*
	 * Permission check is deferred until the key is used, as the
	 * intent of the caller is unknown here.
	 */
	key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
	if (IS_ERR(key_ref))
		return NULL;

	bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
	if (!bkey) {
		key_put(key_ref_to_ptr(key_ref));
		return NULL;
	}

	bkey->key = key_ref_to_ptr(key_ref);
	bkey->has_ref = true;

	return bkey;
}

/**
 * bpf_lookup_system_key - lookup a key by a system-defined ID
 * @id: key ID
 *
 * Obtain a bpf_key structure with a key pointer set to the passed key ID.
 * The key pointer is marked as invalid, to prevent bpf_key_put() from
 * attempting to decrement the key reference count on that pointer. The key
 * pointer set in this way is currently understood only by
 * verify_pkcs7_signature().
 *
 * Set *id* to one of the values defined in include/linux/verification.h:
 * 0 for the primary keyring (immutable keyring of system keys);
 * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
 * (where keys can be added only if they are vouched for by existing keys
 * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
 * keyring (primarily used by the integrity subsystem to verify a kexec'ed
 * kernel image and, possibly, the initramfs signature).
 *
 * Return: a bpf_key pointer with an invalid key pointer set from the
 *         pre-determined ID on success, a NULL pointer otherwise
 */
__bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
{
	struct bpf_key *bkey;

	if (system_keyring_id_check(id) < 0)
		return NULL;

	bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
	if (!bkey)
		return NULL;

	bkey->key = (struct key *)(unsigned long)id;
	bkey->has_ref = false;

	return bkey;
}

/**
 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
 * @bkey: bpf_key structure
 *
 * Decrement the reference count of the key inside *bkey*, if the pointer
 * is valid, and free *bkey*.
 */
__bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
{
	if (bkey->has_ref)
		key_put(bkey->key);

	kfree(bkey);
}

#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
/**
 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
 * @data_ptr: data to verify
 * @sig_ptr: signature of the data
 * @trusted_keyring: keyring with keys trusted for signature verification
 *
 * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr*
 * with keys in a keyring referenced by *trusted_keyring*.
 *
 * Return: 0 on success, a negative value on error.
 */
__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
			       struct bpf_dynptr_kern *sig_ptr,
			       struct bpf_key *trusted_keyring)
{
	const void *data, *sig;
	u32 data_len, sig_len;
	int ret;

	if (trusted_keyring->has_ref) {
		/*
		 * Do the permission check deferred in bpf_lookup_user_key().
		 * See bpf_lookup_user_key() for more details.
		 *
		 * A call to key_task_permission() here would be redundant, as
		 * it is already done by keyring_search() called by
		 * find_asymmetric_key().
		 */
		ret = key_validate(trusted_keyring->key);
		if (ret < 0)
			return ret;
	}

	data_len = __bpf_dynptr_size(data_ptr);
	data = __bpf_dynptr_data(data_ptr, data_len);
	sig_len = __bpf_dynptr_size(sig_ptr);
	sig = __bpf_dynptr_data(sig_ptr, sig_len);

	return verify_pkcs7_signature(data, data_len, sig, sig_len,
				      trusted_keyring->key,
				      VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
				      NULL);
}
#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(key_sig_kfunc_set)
BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
#endif
BTF_KFUNCS_END(key_sig_kfunc_set)

static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &key_sig_kfunc_set,
};

static int __init bpf_key_sig_kfuncs_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
					 &bpf_key_sig_kfunc_set);
}

late_initcall(bpf_key_sig_kfuncs_init);
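
/*
 * Example (sketch): the intended pairing of the kfuncs above in a
 * sleepable program -- look up a keyring, verify a PKCS#7 signature
 * against it, and drop the reference. The dynptrs are assumed to have
 * been initialized over the data and signature buffers beforehand.
 *
 *	struct bpf_key *kr = bpf_lookup_system_key(VERIFY_USE_SECONDARY_KEYRING);
 *	int err = -ENOENT;
 *
 *	if (kr) {
 *		err = bpf_verify_pkcs7_signature(&data_ptr, &sig_ptr, kr);
 *		bpf_key_put(kr);
 *	}
 */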
#endif /* CONFIG_KEYS */

/* filesystem kfuncs */
__bpf_kfunc_start_defs();

/**
 * bpf_get_file_xattr - get xattr of a file
 * @file: file to get xattr from
 * @name__str: name of the xattr
 * @value_ptr: output buffer of the xattr value
 *
 * Get xattr *name__str* of *file* and store the output in *value_ptr*.
 *
 * For security reasons, only *name__str* with prefix "user." is allowed.
 *
 * Return: 0 on success, a negative value on error.
 */
__bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str,
				   struct bpf_dynptr_kern *value_ptr)
{
	struct dentry *dentry;
	u32 value_len;
	void *value;
	int ret;

	if (strncmp(name__str, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
		return -EPERM;

	value_len = __bpf_dynptr_size(value_ptr);
	value = __bpf_dynptr_data_rw(value_ptr, value_len);
	if (!value)
		return -EINVAL;

	dentry = file_dentry(file);
	ret = inode_permission(&nop_mnt_idmap, dentry->d_inode, MAY_READ);
	if (ret)
		return ret;
	return __vfs_getxattr(dentry, dentry->d_inode, name__str, value, value_len);
}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(fs_kfunc_set_ids)
BTF_ID_FLAGS(func, bpf_get_file_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
BTF_KFUNCS_END(fs_kfunc_set_ids)

static int bpf_get_file_xattr_filter(const struct bpf_prog *prog, u32 kfunc_id)
{
	if (!btf_id_set8_contains(&fs_kfunc_set_ids, kfunc_id))
		return 0;

	/* Only allow to attach from LSM hooks, to avoid recursion */
	return prog->type != BPF_PROG_TYPE_LSM ? -EACCES : 0;
}

static const struct btf_kfunc_id_set bpf_fs_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &fs_kfunc_set_ids,
	.filter = bpf_get_file_xattr_filter,
};

static int __init bpf_fs_kfuncs_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, &bpf_fs_kfunc_set);
}

late_initcall(bpf_fs_kfuncs_init);
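
/*
 * Example (sketch): calling bpf_get_file_xattr() from an LSM program,
 * the only program type the filter above admits. The hook, xattr name
 * and buffer size are illustrative.
 *
 *	SEC("lsm.s/file_open")
 *	int BPF_PROG(check_xattr, struct file *file)
 *	{
 *		struct bpf_dynptr value_ptr;
 *		char buf[64];
 *
 *		bpf_dynptr_from_mem(buf, sizeof(buf), 0, &value_ptr);
 *		if (bpf_get_file_xattr(file, "user.policy", &value_ptr) < 0)
 *			return 0;
 *		// act on buf here
 *		return 0;
 *	}
 */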
1505 
1506 static const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id,const struct bpf_prog * prog)1507 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1508 {
1509 	switch (func_id) {
1510 	case BPF_FUNC_map_lookup_elem:
1511 		return &bpf_map_lookup_elem_proto;
1512 	case BPF_FUNC_map_update_elem:
1513 		return &bpf_map_update_elem_proto;
1514 	case BPF_FUNC_map_delete_elem:
1515 		return &bpf_map_delete_elem_proto;
1516 	case BPF_FUNC_map_push_elem:
1517 		return &bpf_map_push_elem_proto;
1518 	case BPF_FUNC_map_pop_elem:
1519 		return &bpf_map_pop_elem_proto;
1520 	case BPF_FUNC_map_peek_elem:
1521 		return &bpf_map_peek_elem_proto;
1522 	case BPF_FUNC_map_lookup_percpu_elem:
1523 		return &bpf_map_lookup_percpu_elem_proto;
1524 	case BPF_FUNC_ktime_get_ns:
1525 		return &bpf_ktime_get_ns_proto;
1526 	case BPF_FUNC_ktime_get_boot_ns:
1527 		return &bpf_ktime_get_boot_ns_proto;
1528 	case BPF_FUNC_tail_call:
1529 		return &bpf_tail_call_proto;
1530 	case BPF_FUNC_get_current_task:
1531 		return &bpf_get_current_task_proto;
1532 	case BPF_FUNC_get_current_task_btf:
1533 		return &bpf_get_current_task_btf_proto;
1534 	case BPF_FUNC_task_pt_regs:
1535 		return &bpf_task_pt_regs_proto;
1536 	case BPF_FUNC_get_current_uid_gid:
1537 		return &bpf_get_current_uid_gid_proto;
1538 	case BPF_FUNC_get_current_comm:
1539 		return &bpf_get_current_comm_proto;
1540 	case BPF_FUNC_trace_printk:
1541 		return bpf_get_trace_printk_proto();
1542 	case BPF_FUNC_get_smp_processor_id:
1543 		return &bpf_get_smp_processor_id_proto;
1544 	case BPF_FUNC_get_numa_node_id:
1545 		return &bpf_get_numa_node_id_proto;
1546 	case BPF_FUNC_perf_event_read:
1547 		return &bpf_perf_event_read_proto;
1548 	case BPF_FUNC_current_task_under_cgroup:
1549 		return &bpf_current_task_under_cgroup_proto;
1550 	case BPF_FUNC_get_prandom_u32:
1551 		return &bpf_get_prandom_u32_proto;
1552 	case BPF_FUNC_probe_write_user:
1553 		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
1554 		       NULL : bpf_get_probe_write_proto();
1555 	case BPF_FUNC_probe_read_user:
1556 		return &bpf_probe_read_user_proto;
1557 	case BPF_FUNC_probe_read_kernel:
1558 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1559 		       NULL : &bpf_probe_read_kernel_proto;
1560 	case BPF_FUNC_probe_read_user_str:
1561 		return &bpf_probe_read_user_str_proto;
1562 	case BPF_FUNC_probe_read_kernel_str:
1563 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1564 		       NULL : &bpf_probe_read_kernel_str_proto;
1565 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1566 	case BPF_FUNC_probe_read:
1567 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1568 		       NULL : &bpf_probe_read_compat_proto;
1569 	case BPF_FUNC_probe_read_str:
1570 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1571 		       NULL : &bpf_probe_read_compat_str_proto;
1572 #endif
1573 #ifdef CONFIG_CGROUPS
1574 	case BPF_FUNC_cgrp_storage_get:
1575 		return &bpf_cgrp_storage_get_proto;
1576 	case BPF_FUNC_cgrp_storage_delete:
1577 		return &bpf_cgrp_storage_delete_proto;
1578 #endif
1579 	case BPF_FUNC_send_signal:
1580 		return &bpf_send_signal_proto;
1581 	case BPF_FUNC_send_signal_thread:
1582 		return &bpf_send_signal_thread_proto;
1583 	case BPF_FUNC_perf_event_read_value:
1584 		return &bpf_perf_event_read_value_proto;
1585 	case BPF_FUNC_ringbuf_output:
1586 		return &bpf_ringbuf_output_proto;
1587 	case BPF_FUNC_ringbuf_reserve:
1588 		return &bpf_ringbuf_reserve_proto;
1589 	case BPF_FUNC_ringbuf_submit:
1590 		return &bpf_ringbuf_submit_proto;
1591 	case BPF_FUNC_ringbuf_discard:
1592 		return &bpf_ringbuf_discard_proto;
1593 	case BPF_FUNC_ringbuf_query:
1594 		return &bpf_ringbuf_query_proto;
1595 	case BPF_FUNC_jiffies64:
1596 		return &bpf_jiffies64_proto;
1597 	case BPF_FUNC_get_task_stack:
1598 		return &bpf_get_task_stack_proto;
1599 	case BPF_FUNC_copy_from_user:
1600 		return &bpf_copy_from_user_proto;
1601 	case BPF_FUNC_copy_from_user_task:
1602 		return &bpf_copy_from_user_task_proto;
1603 	case BPF_FUNC_snprintf_btf:
1604 		return &bpf_snprintf_btf_proto;
1605 	case BPF_FUNC_per_cpu_ptr:
1606 		return &bpf_per_cpu_ptr_proto;
1607 	case BPF_FUNC_this_cpu_ptr:
1608 		return &bpf_this_cpu_ptr_proto;
1609 	case BPF_FUNC_task_storage_get:
1610 		if (bpf_prog_check_recur(prog))
1611 			return &bpf_task_storage_get_recur_proto;
1612 		return &bpf_task_storage_get_proto;
1613 	case BPF_FUNC_task_storage_delete:
1614 		if (bpf_prog_check_recur(prog))
1615 			return &bpf_task_storage_delete_recur_proto;
1616 		return &bpf_task_storage_delete_proto;
1617 	case BPF_FUNC_for_each_map_elem:
1618 		return &bpf_for_each_map_elem_proto;
1619 	case BPF_FUNC_snprintf:
1620 		return &bpf_snprintf_proto;
1621 	case BPF_FUNC_get_func_ip:
1622 		return &bpf_get_func_ip_proto_tracing;
1623 	case BPF_FUNC_get_branch_snapshot:
1624 		return &bpf_get_branch_snapshot_proto;
1625 	case BPF_FUNC_find_vma:
1626 		return &bpf_find_vma_proto;
1627 	case BPF_FUNC_trace_vprintk:
1628 		return bpf_get_trace_vprintk_proto();
1629 	default:
1630 		return bpf_base_func_proto(func_id, prog);
1631 	}
1632 }
1633 
is_kprobe_multi(const struct bpf_prog * prog)1634 static bool is_kprobe_multi(const struct bpf_prog *prog)
1635 {
1636 	return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ||
1637 	       prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
1638 }
1639 
is_kprobe_session(const struct bpf_prog * prog)1640 static inline bool is_kprobe_session(const struct bpf_prog *prog)
1641 {
1642 	return prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
1643 }
1644 
1645 static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id,const struct bpf_prog * prog)1646 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1647 {
1648 	switch (func_id) {
1649 	case BPF_FUNC_perf_event_output:
1650 		return &bpf_perf_event_output_proto;
1651 	case BPF_FUNC_get_stackid:
1652 		return &bpf_get_stackid_proto;
1653 	case BPF_FUNC_get_stack:
1654 		return &bpf_get_stack_proto;
1655 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
1656 	case BPF_FUNC_override_return:
1657 		return &bpf_override_return_proto;
1658 #endif
1659 	case BPF_FUNC_get_func_ip:
1660 		if (is_kprobe_multi(prog))
1661 			return &bpf_get_func_ip_proto_kprobe_multi;
1662 		if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
1663 			return &bpf_get_func_ip_proto_uprobe_multi;
1664 		return &bpf_get_func_ip_proto_kprobe;
1665 	case BPF_FUNC_get_attach_cookie:
1666 		if (is_kprobe_multi(prog))
1667 			return &bpf_get_attach_cookie_proto_kmulti;
1668 		if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
1669 			return &bpf_get_attach_cookie_proto_umulti;
1670 		return &bpf_get_attach_cookie_proto_trace;
1671 	default:
1672 		return bpf_tracing_func_proto(func_id, prog);
1673 	}
1674 }
1675 
1676 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
1677 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1678 					const struct bpf_prog *prog,
1679 					struct bpf_insn_access_aux *info)
1680 {
1681 	if (off < 0 || off >= sizeof(struct pt_regs))
1682 		return false;
1683 	if (type != BPF_READ)
1684 		return false;
1685 	if (off % size != 0)
1686 		return false;
1687 	/*
1688 	 * Assertion for 32 bit to make sure last 8 byte access
1689 	 * (BPF_DW) to the last 4 byte member is disallowed.
1690 	 */
1691 	if (off + size > sizeof(struct pt_regs))
1692 		return false;
1693 
1694 	return true;
1695 }
1696 
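/*
 * With the checks above a kprobe program can do aligned reads anywhere
 * inside struct pt_regs, e.g. on x86_64 (BPF C, ctx being the program's
 * context argument):
 *
 *	u64 ip = ctx->ip;
 *
 * while writes to the context, misaligned reads, and reads extending
 * past sizeof(struct pt_regs) are all rejected at verification time.
 */
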
1697 const struct bpf_verifier_ops kprobe_verifier_ops = {
1698 	.get_func_proto  = kprobe_prog_func_proto,
1699 	.is_valid_access = kprobe_prog_is_valid_access,
1700 };
1701 
1702 const struct bpf_prog_ops kprobe_prog_ops = {
1703 };
1704 
1705 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1706 	   u64, flags, void *, data, u64, size)
1707 {
1708 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1709 
1710 	/*
1711 	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
1712 	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
1713 	 * from there and call the same bpf_perf_event_output() helper inline.
1714 	 */
1715 	return ____bpf_perf_event_output(regs, map, flags, data, size);
1716 }
1717 
1718 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1719 	.func		= bpf_perf_event_output_tp,
1720 	.gpl_only	= true,
1721 	.ret_type	= RET_INTEGER,
1722 	.arg1_type	= ARG_PTR_TO_CTX,
1723 	.arg2_type	= ARG_CONST_MAP_PTR,
1724 	.arg3_type	= ARG_ANYTHING,
1725 	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1726 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1727 };
1728 
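/*
 * From a tracepoint program the helper above is used exactly like the
 * generic bpf_perf_event_output(); only the hidden pt_regs fetch
 * differs.  Illustrative BPF C sketch (map and attach names are
 * assumptions):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *	} events SEC(".maps");
 *
 *	SEC("tracepoint/sched/sched_switch")
 *	int tp(void *ctx)
 *	{
 *		u32 val = 0;
 *
 *		return bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *					     &val, sizeof(val));
 *	}
 */
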
1729 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1730 	   u64, flags)
1731 {
1732 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1733 
1734 	/*
1735 	 * Same comment as in bpf_perf_event_output_tp(), only that this time
1736 	 * the other helper's function body cannot be inlined due to being
1737 	 * external, thus we need to call the raw helper function.
1738 	 */
1739 	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1740 			       flags, 0, 0);
1741 }
1742 
1743 static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1744 	.func		= bpf_get_stackid_tp,
1745 	.gpl_only	= true,
1746 	.ret_type	= RET_INTEGER,
1747 	.arg1_type	= ARG_PTR_TO_CTX,
1748 	.arg2_type	= ARG_CONST_MAP_PTR,
1749 	.arg3_type	= ARG_ANYTHING,
1750 };
1751 
1752 BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1753 	   u64, flags)
1754 {
1755 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1756 
1757 	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1758 			     (unsigned long) size, flags, 0);
1759 }
1760 
1761 static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1762 	.func		= bpf_get_stack_tp,
1763 	.gpl_only	= true,
1764 	.ret_type	= RET_INTEGER,
1765 	.arg1_type	= ARG_PTR_TO_CTX,
1766 	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
1767 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1768 	.arg4_type	= ARG_ANYTHING,
1769 };
1770 
1771 static const struct bpf_func_proto *
1772 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1773 {
1774 	switch (func_id) {
1775 	case BPF_FUNC_perf_event_output:
1776 		return &bpf_perf_event_output_proto_tp;
1777 	case BPF_FUNC_get_stackid:
1778 		return &bpf_get_stackid_proto_tp;
1779 	case BPF_FUNC_get_stack:
1780 		return &bpf_get_stack_proto_tp;
1781 	case BPF_FUNC_get_attach_cookie:
1782 		return &bpf_get_attach_cookie_proto_trace;
1783 	default:
1784 		return bpf_tracing_func_proto(func_id, prog);
1785 	}
1786 }
1787 
1788 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1789 				    const struct bpf_prog *prog,
1790 				    struct bpf_insn_access_aux *info)
1791 {
1792 	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1793 		return false;
1794 	if (type != BPF_READ)
1795 		return false;
1796 	if (off % size != 0)
1797 		return false;
1798 
1799 	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1800 	return true;
1801 }
1802 
1803 const struct bpf_verifier_ops tracepoint_verifier_ops = {
1804 	.get_func_proto  = tp_prog_func_proto,
1805 	.is_valid_access = tp_prog_is_valid_access,
1806 };
1807 
1808 const struct bpf_prog_ops tracepoint_prog_ops = {
1809 };
1810 
1811 BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1812 	   struct bpf_perf_event_value *, buf, u32, size)
1813 {
1814 	int err = -EINVAL;
1815 
1816 	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1817 		goto clear;
1818 	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1819 				    &buf->running);
1820 	if (unlikely(err))
1821 		goto clear;
1822 	return 0;
1823 clear:
1824 	memset(buf, 0, size);
1825 	return err;
1826 }
1827 
1828 static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1829 	.func           = bpf_perf_prog_read_value,
1830 	.gpl_only       = true,
1831 	.ret_type       = RET_INTEGER,
1832 	.arg1_type      = ARG_PTR_TO_CTX,
1833 	.arg2_type      = ARG_PTR_TO_UNINIT_MEM,
1834 	.arg3_type      = ARG_CONST_SIZE,
1835 };
1836 
1837 BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1838 	   void *, buf, u32, size, u64, flags)
1839 {
1840 	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1841 	struct perf_branch_stack *br_stack = ctx->data->br_stack;
1842 	u32 to_copy;
1843 
1844 	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1845 		return -EINVAL;
1846 
1847 	if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK)))
1848 		return -ENOENT;
1849 
1850 	if (unlikely(!br_stack))
1851 		return -ENOENT;
1852 
1853 	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1854 		return br_stack->nr * br_entry_size;
1855 
1856 	if (!buf || (size % br_entry_size != 0))
1857 		return -EINVAL;
1858 
1859 	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1860 	memcpy(buf, br_stack->entries, to_copy);
1861 
1862 	return to_copy;
1863 }
1864 
1865 static const struct bpf_func_proto bpf_read_branch_records_proto = {
1866 	.func           = bpf_read_branch_records,
1867 	.gpl_only       = true,
1868 	.ret_type       = RET_INTEGER,
1869 	.arg1_type      = ARG_PTR_TO_CTX,
1870 	.arg2_type      = ARG_PTR_TO_MEM_OR_NULL,
1871 	.arg3_type      = ARG_CONST_SIZE_OR_ZERO,
1872 	.arg4_type      = ARG_ANYTHING,
1873 };
1874 
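/*
 * bpf_read_branch_records() is typically called twice: once to learn the
 * size, once to copy the entries.  Sketch of the expected BPF-side use:
 *
 *	struct perf_branch_entry entries[32];
 *	int total, copied;
 *
 *	total = bpf_read_branch_records(ctx, NULL, 0,
 *					BPF_F_GET_BRANCH_RECORDS_SIZE);
 *	if (total > 0)
 *		copied = bpf_read_branch_records(ctx, entries,
 *						 sizeof(entries), 0);
 *
 * The second call returns the number of bytes copied, which may be less
 * than the buffer size.
 */
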
1875 static const struct bpf_func_proto *
1876 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1877 {
1878 	switch (func_id) {
1879 	case BPF_FUNC_perf_event_output:
1880 		return &bpf_perf_event_output_proto_tp;
1881 	case BPF_FUNC_get_stackid:
1882 		return &bpf_get_stackid_proto_pe;
1883 	case BPF_FUNC_get_stack:
1884 		return &bpf_get_stack_proto_pe;
1885 	case BPF_FUNC_perf_prog_read_value:
1886 		return &bpf_perf_prog_read_value_proto;
1887 	case BPF_FUNC_read_branch_records:
1888 		return &bpf_read_branch_records_proto;
1889 	case BPF_FUNC_get_attach_cookie:
1890 		return &bpf_get_attach_cookie_proto_pe;
1891 	default:
1892 		return bpf_tracing_func_proto(func_id, prog);
1893 	}
1894 }
1895 
1896 /*
1897  * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1898  * to avoid potential recursive reuse issue when/if tracepoints are added
1899  * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1900  *
1901  * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1902  * in normal, irq, and nmi context.
1903  */
1904 struct bpf_raw_tp_regs {
1905 	struct pt_regs regs[3];
1906 };
1907 static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1908 static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1909 static struct pt_regs *get_bpf_raw_tp_regs(void)
1910 {
1911 	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1912 	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1913 
1914 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1915 		this_cpu_dec(bpf_raw_tp_nest_level);
1916 		return ERR_PTR(-EBUSY);
1917 	}
1918 
1919 	return &tp_regs->regs[nest_level - 1];
1920 }
1921 
1922 static void put_bpf_raw_tp_regs(void)
1923 {
1924 	this_cpu_dec(bpf_raw_tp_nest_level);
1925 }
1926 
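/*
 * Three slots allow one user per context class: a program running in
 * task context (regs[0]) may be interrupted by one in hard irq context
 * (regs[1]), which may in turn be interrupted by one in nmi context
 * (regs[2]).  A fourth concurrent user on the same CPU trips the
 * WARN_ON_ONCE() above and gets ERR_PTR(-EBUSY).
 */
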
1927 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1928 	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
1929 {
1930 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1931 	int ret;
1932 
1933 	if (IS_ERR(regs))
1934 		return PTR_ERR(regs);
1935 
1936 	perf_fetch_caller_regs(regs);
1937 	ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1938 
1939 	put_bpf_raw_tp_regs();
1940 	return ret;
1941 }
1942 
1943 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1944 	.func		= bpf_perf_event_output_raw_tp,
1945 	.gpl_only	= true,
1946 	.ret_type	= RET_INTEGER,
1947 	.arg1_type	= ARG_PTR_TO_CTX,
1948 	.arg2_type	= ARG_CONST_MAP_PTR,
1949 	.arg3_type	= ARG_ANYTHING,
1950 	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1951 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1952 };
1953 
1954 extern const struct bpf_func_proto bpf_skb_output_proto;
1955 extern const struct bpf_func_proto bpf_xdp_output_proto;
1956 extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto;
1957 
1958 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1959 	   struct bpf_map *, map, u64, flags)
1960 {
1961 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1962 	int ret;
1963 
1964 	if (IS_ERR(regs))
1965 		return PTR_ERR(regs);
1966 
1967 	perf_fetch_caller_regs(regs);
1968 	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1969 	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1970 			      flags, 0, 0);
1971 	put_bpf_raw_tp_regs();
1972 	return ret;
1973 }
1974 
1975 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1976 	.func		= bpf_get_stackid_raw_tp,
1977 	.gpl_only	= true,
1978 	.ret_type	= RET_INTEGER,
1979 	.arg1_type	= ARG_PTR_TO_CTX,
1980 	.arg2_type	= ARG_CONST_MAP_PTR,
1981 	.arg3_type	= ARG_ANYTHING,
1982 };
1983 
1984 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1985 	   void *, buf, u32, size, u64, flags)
1986 {
1987 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1988 	int ret;
1989 
1990 	if (IS_ERR(regs))
1991 		return PTR_ERR(regs);
1992 
1993 	perf_fetch_caller_regs(regs);
1994 	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1995 			    (unsigned long) size, flags, 0);
1996 	put_bpf_raw_tp_regs();
1997 	return ret;
1998 }
1999 
2000 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
2001 	.func		= bpf_get_stack_raw_tp,
2002 	.gpl_only	= true,
2003 	.ret_type	= RET_INTEGER,
2004 	.arg1_type	= ARG_PTR_TO_CTX,
2005 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
2006 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
2007 	.arg4_type	= ARG_ANYTHING,
2008 };
2009 
2010 static const struct bpf_func_proto *
2011 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2012 {
2013 	switch (func_id) {
2014 	case BPF_FUNC_perf_event_output:
2015 		return &bpf_perf_event_output_proto_raw_tp;
2016 	case BPF_FUNC_get_stackid:
2017 		return &bpf_get_stackid_proto_raw_tp;
2018 	case BPF_FUNC_get_stack:
2019 		return &bpf_get_stack_proto_raw_tp;
2020 	case BPF_FUNC_get_attach_cookie:
2021 		return &bpf_get_attach_cookie_proto_tracing;
2022 	default:
2023 		return bpf_tracing_func_proto(func_id, prog);
2024 	}
2025 }
2026 
2027 const struct bpf_func_proto *
2028 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2029 {
2030 	const struct bpf_func_proto *fn;
2031 
2032 	switch (func_id) {
2033 #ifdef CONFIG_NET
2034 	case BPF_FUNC_skb_output:
2035 		return &bpf_skb_output_proto;
2036 	case BPF_FUNC_xdp_output:
2037 		return &bpf_xdp_output_proto;
2038 	case BPF_FUNC_skc_to_tcp6_sock:
2039 		return &bpf_skc_to_tcp6_sock_proto;
2040 	case BPF_FUNC_skc_to_tcp_sock:
2041 		return &bpf_skc_to_tcp_sock_proto;
2042 	case BPF_FUNC_skc_to_tcp_timewait_sock:
2043 		return &bpf_skc_to_tcp_timewait_sock_proto;
2044 	case BPF_FUNC_skc_to_tcp_request_sock:
2045 		return &bpf_skc_to_tcp_request_sock_proto;
2046 	case BPF_FUNC_skc_to_udp6_sock:
2047 		return &bpf_skc_to_udp6_sock_proto;
2048 	case BPF_FUNC_skc_to_unix_sock:
2049 		return &bpf_skc_to_unix_sock_proto;
2050 	case BPF_FUNC_skc_to_mptcp_sock:
2051 		return &bpf_skc_to_mptcp_sock_proto;
2052 	case BPF_FUNC_sk_storage_get:
2053 		return &bpf_sk_storage_get_tracing_proto;
2054 	case BPF_FUNC_sk_storage_delete:
2055 		return &bpf_sk_storage_delete_tracing_proto;
2056 	case BPF_FUNC_sock_from_file:
2057 		return &bpf_sock_from_file_proto;
2058 	case BPF_FUNC_get_socket_cookie:
2059 		return &bpf_get_socket_ptr_cookie_proto;
2060 	case BPF_FUNC_xdp_get_buff_len:
2061 		return &bpf_xdp_get_buff_len_trace_proto;
2062 #endif
2063 	case BPF_FUNC_seq_printf:
2064 		return prog->expected_attach_type == BPF_TRACE_ITER ?
2065 		       &bpf_seq_printf_proto :
2066 		       NULL;
2067 	case BPF_FUNC_seq_write:
2068 		return prog->expected_attach_type == BPF_TRACE_ITER ?
2069 		       &bpf_seq_write_proto :
2070 		       NULL;
2071 	case BPF_FUNC_seq_printf_btf:
2072 		return prog->expected_attach_type == BPF_TRACE_ITER ?
2073 		       &bpf_seq_printf_btf_proto :
2074 		       NULL;
2075 	case BPF_FUNC_d_path:
2076 		return &bpf_d_path_proto;
2077 	case BPF_FUNC_get_func_arg:
2078 		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
2079 	case BPF_FUNC_get_func_ret:
2080 		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
2081 	case BPF_FUNC_get_func_arg_cnt:
2082 		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
2083 	case BPF_FUNC_get_attach_cookie:
2084 		if (prog->type == BPF_PROG_TYPE_TRACING &&
2085 		    prog->expected_attach_type == BPF_TRACE_RAW_TP)
2086 			return &bpf_get_attach_cookie_proto_tracing;
2087 		return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL;
2088 	default:
2089 		fn = raw_tp_prog_func_proto(func_id, prog);
2090 		if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
2091 			fn = bpf_iter_get_func_proto(func_id, prog);
2092 		return fn;
2093 	}
2094 }
2095 
2096 static bool raw_tp_prog_is_valid_access(int off, int size,
2097 					enum bpf_access_type type,
2098 					const struct bpf_prog *prog,
2099 					struct bpf_insn_access_aux *info)
2100 {
2101 	return bpf_tracing_ctx_access(off, size, type);
2102 }
2103 
2104 static bool tracing_prog_is_valid_access(int off, int size,
2105 					 enum bpf_access_type type,
2106 					 const struct bpf_prog *prog,
2107 					 struct bpf_insn_access_aux *info)
2108 {
2109 	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
2110 }
2111 
2112 int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
2113 				     const union bpf_attr *kattr,
2114 				     union bpf_attr __user *uattr)
2115 {
2116 	return -ENOTSUPP;
2117 }
2118 
2119 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
2120 	.get_func_proto  = raw_tp_prog_func_proto,
2121 	.is_valid_access = raw_tp_prog_is_valid_access,
2122 };
2123 
2124 const struct bpf_prog_ops raw_tracepoint_prog_ops = {
2125 #ifdef CONFIG_NET
2126 	.test_run = bpf_prog_test_run_raw_tp,
2127 #endif
2128 };
2129 
2130 const struct bpf_verifier_ops tracing_verifier_ops = {
2131 	.get_func_proto  = tracing_prog_func_proto,
2132 	.is_valid_access = tracing_prog_is_valid_access,
2133 };
2134 
2135 const struct bpf_prog_ops tracing_prog_ops = {
2136 	.test_run = bpf_prog_test_run_tracing,
2137 };
2138 
2139 static bool raw_tp_writable_prog_is_valid_access(int off, int size,
2140 						 enum bpf_access_type type,
2141 						 const struct bpf_prog *prog,
2142 						 struct bpf_insn_access_aux *info)
2143 {
2144 	if (off == 0) {
2145 		if (size != sizeof(u64) || type != BPF_READ)
2146 			return false;
2147 		info->reg_type = PTR_TO_TP_BUFFER;
2148 	}
2149 	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
2150 }
2151 
2152 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
2153 	.get_func_proto  = raw_tp_prog_func_proto,
2154 	.is_valid_access = raw_tp_writable_prog_is_valid_access,
2155 };
2156 
2157 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
2158 };
2159 
2160 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
2161 				    const struct bpf_prog *prog,
2162 				    struct bpf_insn_access_aux *info)
2163 {
2164 	const int size_u64 = sizeof(u64);
2165 
2166 	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
2167 		return false;
2168 	if (type != BPF_READ)
2169 		return false;
2170 	if (off % size != 0) {
2171 		if (sizeof(unsigned long) != 4)
2172 			return false;
2173 		if (size != 8)
2174 			return false;
2175 		if (off % size != 4)
2176 			return false;
2177 	}
2178 
2179 	switch (off) {
2180 	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
2181 		bpf_ctx_record_field_size(info, size_u64);
2182 		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2183 			return false;
2184 		break;
2185 	case bpf_ctx_range(struct bpf_perf_event_data, addr):
2186 		bpf_ctx_record_field_size(info, size_u64);
2187 		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2188 			return false;
2189 		break;
2190 	default:
2191 		if (size != sizeof(long))
2192 			return false;
2193 	}
2194 
2195 	return true;
2196 }
2197 
2198 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
2199 				      const struct bpf_insn *si,
2200 				      struct bpf_insn *insn_buf,
2201 				      struct bpf_prog *prog, u32 *target_size)
2202 {
2203 	struct bpf_insn *insn = insn_buf;
2204 
2205 	switch (si->off) {
2206 	case offsetof(struct bpf_perf_event_data, sample_period):
2207 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2208 						       data), si->dst_reg, si->src_reg,
2209 				      offsetof(struct bpf_perf_event_data_kern, data));
2210 		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2211 				      bpf_target_off(struct perf_sample_data, period, 8,
2212 						     target_size));
2213 		break;
2214 	case offsetof(struct bpf_perf_event_data, addr):
2215 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2216 						       data), si->dst_reg, si->src_reg,
2217 				      offsetof(struct bpf_perf_event_data_kern, data));
2218 		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2219 				      bpf_target_off(struct perf_sample_data, addr, 8,
2220 						     target_size));
2221 		break;
2222 	default:
2223 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2224 						       regs), si->dst_reg, si->src_reg,
2225 				      offsetof(struct bpf_perf_event_data_kern, regs));
2226 		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
2227 				      si->off);
2228 		break;
2229 	}
2230 
2231 	return insn - insn_buf;
2232 }
2233 
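/*
 * As a concrete example of the conversion above, a context load such as
 *
 *	r0 = *(u64 *)(r1 + offsetof(struct bpf_perf_event_data, sample_period));
 *
 * is rewritten into two loads through the kernel-side layout:
 *
 *	r0 = *(r1 + offsetof(struct bpf_perf_event_data_kern, data));
 *	r0 = *(u64 *)(r0 + offsetof(struct perf_sample_data, period));
 *
 * while any other offset is redirected through the 'regs' pointer.
 */
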
2234 const struct bpf_verifier_ops perf_event_verifier_ops = {
2235 	.get_func_proto		= pe_prog_func_proto,
2236 	.is_valid_access	= pe_prog_is_valid_access,
2237 	.convert_ctx_access	= pe_prog_convert_ctx_access,
2238 };
2239 
2240 const struct bpf_prog_ops perf_event_prog_ops = {
2241 };
2242 
2243 static DEFINE_MUTEX(bpf_event_mutex);
2244 
2245 #define BPF_TRACE_MAX_PROGS 64
2246 
2247 int perf_event_attach_bpf_prog(struct perf_event *event,
2248 			       struct bpf_prog *prog,
2249 			       u64 bpf_cookie)
2250 {
2251 	struct bpf_prog_array *old_array;
2252 	struct bpf_prog_array *new_array;
2253 	int ret = -EEXIST;
2254 
2255 	/*
2256 	 * Kprobe override only works if the kprobe is on the function entry,
2257 	 * and only if the probed function is on the error-injection opt-in list.
2258 	 */
2259 	if (prog->kprobe_override &&
2260 	    (!trace_kprobe_on_func_entry(event->tp_event) ||
2261 	     !trace_kprobe_error_injectable(event->tp_event)))
2262 		return -EINVAL;
2263 
2264 	mutex_lock(&bpf_event_mutex);
2265 
2266 	if (event->prog)
2267 		goto unlock;
2268 
2269 	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2270 	if (old_array &&
2271 	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
2272 		ret = -E2BIG;
2273 		goto unlock;
2274 	}
2275 
2276 	ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
2277 	if (ret < 0)
2278 		goto unlock;
2279 
2280 	/* set the new array to event->tp_event and set event->prog */
2281 	event->prog = prog;
2282 	event->bpf_cookie = bpf_cookie;
2283 	rcu_assign_pointer(event->tp_event->prog_array, new_array);
2284 	bpf_prog_array_free_sleepable(old_array);
2285 
2286 unlock:
2287 	mutex_unlock(&bpf_event_mutex);
2288 	return ret;
2289 }
2290 
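/*
 * Userspace reaches this function through the perf ioctl interface or,
 * since the addition of bpf_cookie, through a BPF_LINK_TYPE_PERF_EVENT
 * link created with bpf_link_create().  Hedged sketch of the classic
 * ioctl path (event_fd is assumed to come from perf_event_open() on a
 * kprobe event, attr to describe a BPF_PROG_TYPE_KPROBE program; error
 * handling omitted):
 *
 *	int prog_fd = bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
 *
 *	ioctl(event_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
 */
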
2291 void perf_event_detach_bpf_prog(struct perf_event *event)
2292 {
2293 	struct bpf_prog_array *old_array;
2294 	struct bpf_prog_array *new_array;
2295 	int ret;
2296 
2297 	mutex_lock(&bpf_event_mutex);
2298 
2299 	if (!event->prog)
2300 		goto unlock;
2301 
2302 	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2303 	ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
2304 	if (ret == -ENOENT)
2305 		goto unlock;
2306 	if (ret < 0) {
2307 		bpf_prog_array_delete_safe(old_array, event->prog);
2308 	} else {
2309 		rcu_assign_pointer(event->tp_event->prog_array, new_array);
2310 		bpf_prog_array_free_sleepable(old_array);
2311 	}
2312 
2313 	bpf_prog_put(event->prog);
2314 	event->prog = NULL;
2315 
2316 unlock:
2317 	mutex_unlock(&bpf_event_mutex);
2318 }
2319 
2320 int perf_event_query_prog_array(struct perf_event *event, void __user *info)
2321 {
2322 	struct perf_event_query_bpf __user *uquery = info;
2323 	struct perf_event_query_bpf query = {};
2324 	struct bpf_prog_array *progs;
2325 	u32 *ids, prog_cnt, ids_len;
2326 	int ret;
2327 
2328 	if (!perfmon_capable())
2329 		return -EPERM;
2330 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
2331 		return -EINVAL;
2332 	if (copy_from_user(&query, uquery, sizeof(query)))
2333 		return -EFAULT;
2334 
2335 	ids_len = query.ids_len;
2336 	if (ids_len > BPF_TRACE_MAX_PROGS)
2337 		return -E2BIG;
2338 	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
2339 	if (!ids)
2340 		return -ENOMEM;
2341 	/*
2342 	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
2343 	 * is required when the user only wants to check uquery->prog_cnt.
2344 	 * There is no need to check for it since the case is handled
2345 	 * gracefully in bpf_prog_array_copy_info.
2346 	 */
2347 
2348 	mutex_lock(&bpf_event_mutex);
2349 	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
2350 	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
2351 	mutex_unlock(&bpf_event_mutex);
2352 
2353 	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
2354 	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
2355 		ret = -EFAULT;
2356 
2357 	kfree(ids);
2358 	return ret;
2359 }
2360 
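/*
 * Matching userspace sketch for the query above (room for two program
 * ids; names are assumptions):
 *
 *	char buf[sizeof(struct perf_event_query_bpf) + 2 * sizeof(__u32)] = {};
 *	struct perf_event_query_bpf *query = (void *)buf;
 *
 *	query->ids_len = 2;
 *	if (!ioctl(event_fd, PERF_EVENT_IOC_QUERY_BPF, query))
 *		printf("%u programs attached\n", query->prog_cnt);
 */
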
2361 extern struct bpf_raw_event_map __start__bpf_raw_tp[];
2362 extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
2363 
2364 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
2365 {
2366 	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
2367 
2368 	for (; btp < __stop__bpf_raw_tp; btp++) {
2369 		if (!strcmp(btp->tp->name, name))
2370 			return btp;
2371 	}
2372 
2373 	return bpf_get_raw_tracepoint_module(name);
2374 }
2375 
2376 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
2377 {
2378 	struct module *mod;
2379 
2380 	preempt_disable();
2381 	mod = __module_address((unsigned long)btp);
2382 	module_put(mod);
2383 	preempt_enable();
2384 }
2385 
2386 static __always_inline
2387 void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
2388 {
2389 	struct bpf_prog *prog = link->link.prog;
2390 	struct bpf_run_ctx *old_run_ctx;
2391 	struct bpf_trace_run_ctx run_ctx;
2392 
2393 	cant_sleep();
2394 	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
2395 		bpf_prog_inc_misses_counter(prog);
2396 		goto out;
2397 	}
2398 
2399 	run_ctx.bpf_cookie = link->cookie;
2400 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
2401 
2402 	rcu_read_lock();
2403 	(void) bpf_prog_run(prog, args);
2404 	rcu_read_unlock();
2405 
2406 	bpf_reset_run_ctx(old_run_ctx);
2407 out:
2408 	this_cpu_dec(*(prog->active));
2409 }
2410 
2411 #define UNPACK(...)			__VA_ARGS__
2412 #define REPEAT_1(FN, DL, X, ...)	FN(X)
2413 #define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
2414 #define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
2415 #define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
2416 #define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
2417 #define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
2418 #define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
2419 #define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
2420 #define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
2421 #define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
2422 #define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
2423 #define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
2424 #define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)
2425 
2426 #define SARG(X)		u64 arg##X
2427 #define COPY(X)		args[X] = arg##X
2428 
2429 #define __DL_COM	(,)
2430 #define __DL_SEM	(;)
2431 
2432 #define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
2433 
2434 #define BPF_TRACE_DEFN_x(x)						\
2435 	void bpf_trace_run##x(struct bpf_raw_tp_link *link,		\
2436 			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
2437 	{								\
2438 		u64 args[x];						\
2439 		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
2440 		__bpf_trace_run(link, args);				\
2441 	}								\
2442 	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
2443 BPF_TRACE_DEFN_x(1);
2444 BPF_TRACE_DEFN_x(2);
2445 BPF_TRACE_DEFN_x(3);
2446 BPF_TRACE_DEFN_x(4);
2447 BPF_TRACE_DEFN_x(5);
2448 BPF_TRACE_DEFN_x(6);
2449 BPF_TRACE_DEFN_x(7);
2450 BPF_TRACE_DEFN_x(8);
2451 BPF_TRACE_DEFN_x(9);
2452 BPF_TRACE_DEFN_x(10);
2453 BPF_TRACE_DEFN_x(11);
2454 BPF_TRACE_DEFN_x(12);
2455 
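/*
 * For reference, BPF_TRACE_DEFN_x(2) above expands (roughly) to:
 *
 *	void bpf_trace_run2(struct bpf_raw_tp_link *link, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(link, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */
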
2456 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
2457 {
2458 	struct tracepoint *tp = btp->tp;
2459 	struct bpf_prog *prog = link->link.prog;
2460 
2461 	/*
2462 	 * check that program doesn't access arguments beyond what's
2463 	 * available in this tracepoint
2464 	 */
2465 	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
2466 		return -EINVAL;
2467 
2468 	if (prog->aux->max_tp_access > btp->writable_size)
2469 		return -EINVAL;
2470 
2471 	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func, link);
2472 }
2473 
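/*
 * The max_ctx_offset check above means, for instance, that a program
 * whose verified accesses reach args[2] (max_ctx_offset == 24) cannot
 * be attached to a raw tracepoint with only two u64 arguments.
 */
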
2474 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
2475 {
2476 	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, link);
2477 }
2478 
2479 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
2480 			    u32 *fd_type, const char **buf,
2481 			    u64 *probe_offset, u64 *probe_addr,
2482 			    unsigned long *missed)
2483 {
2484 	bool is_tracepoint, is_syscall_tp;
2485 	struct bpf_prog *prog;
2486 	int flags, err = 0;
2487 
2488 	prog = event->prog;
2489 	if (!prog)
2490 		return -ENOENT;
2491 
2492 	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
2493 	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
2494 		return -EOPNOTSUPP;
2495 
2496 	*prog_id = prog->aux->id;
2497 	flags = event->tp_event->flags;
2498 	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
2499 	is_syscall_tp = is_syscall_trace_event(event->tp_event);
2500 
2501 	if (is_tracepoint || is_syscall_tp) {
2502 		*buf = is_tracepoint ? event->tp_event->tp->name
2503 				     : event->tp_event->name;
2504 		/* We allow NULL pointer for tracepoint */
2505 		if (fd_type)
2506 			*fd_type = BPF_FD_TYPE_TRACEPOINT;
2507 		if (probe_offset)
2508 			*probe_offset = 0x0;
2509 		if (probe_addr)
2510 			*probe_addr = 0x0;
2511 	} else {
2512 		/* kprobe/uprobe */
2513 		err = -EOPNOTSUPP;
2514 #ifdef CONFIG_KPROBE_EVENTS
2515 		if (flags & TRACE_EVENT_FL_KPROBE)
2516 			err = bpf_get_kprobe_info(event, fd_type, buf,
2517 						  probe_offset, probe_addr, missed,
2518 						  event->attr.type == PERF_TYPE_TRACEPOINT);
2519 #endif
2520 #ifdef CONFIG_UPROBE_EVENTS
2521 		if (flags & TRACE_EVENT_FL_UPROBE)
2522 			err = bpf_get_uprobe_info(event, fd_type, buf,
2523 						  probe_offset, probe_addr,
2524 						  event->attr.type == PERF_TYPE_TRACEPOINT);
2525 #endif
2526 	}
2527 
2528 	return err;
2529 }
2530 
2531 static int __init send_signal_irq_work_init(void)
2532 {
2533 	int cpu;
2534 	struct send_signal_irq_work *work;
2535 
2536 	for_each_possible_cpu(cpu) {
2537 		work = per_cpu_ptr(&send_signal_work, cpu);
2538 		init_irq_work(&work->irq_work, do_bpf_send_signal);
2539 	}
2540 	return 0;
2541 }
2542 
2543 subsys_initcall(send_signal_irq_work_init);
2544 
2545 #ifdef CONFIG_MODULES
2546 static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
2547 			    void *module)
2548 {
2549 	struct bpf_trace_module *btm, *tmp;
2550 	struct module *mod = module;
2551 	int ret = 0;
2552 
2553 	if (mod->num_bpf_raw_events == 0 ||
2554 	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
2555 		goto out;
2556 
2557 	mutex_lock(&bpf_module_mutex);
2558 
2559 	switch (op) {
2560 	case MODULE_STATE_COMING:
2561 		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
2562 		if (btm) {
2563 			btm->module = module;
2564 			list_add(&btm->list, &bpf_trace_modules);
2565 		} else {
2566 			ret = -ENOMEM;
2567 		}
2568 		break;
2569 	case MODULE_STATE_GOING:
2570 		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
2571 			if (btm->module == module) {
2572 				list_del(&btm->list);
2573 				kfree(btm);
2574 				break;
2575 			}
2576 		}
2577 		break;
2578 	}
2579 
2580 	mutex_unlock(&bpf_module_mutex);
2581 
2582 out:
2583 	return notifier_from_errno(ret);
2584 }
2585 
2586 static struct notifier_block bpf_module_nb = {
2587 	.notifier_call = bpf_event_notify,
2588 };
2589 
2590 static int __init bpf_event_init(void)
2591 {
2592 	register_module_notifier(&bpf_module_nb);
2593 	return 0;
2594 }
2595 
2596 fs_initcall(bpf_event_init);
2597 #endif /* CONFIG_MODULES */
2598 
2599 struct bpf_session_run_ctx {
2600 	struct bpf_run_ctx run_ctx;
2601 	bool is_return;
2602 	void *data;
2603 };
2604 
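/*
 * For kprobe session programs the same program runs on both function
 * entry and return: is_return tells the two runs apart, 'data' points
 * at per-invocation storage shared between them, and a nonzero return
 * from the entry run suppresses the return run (see
 * kprobe_multi_link_handler() below).
 */
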
2605 #ifdef CONFIG_FPROBE
2606 struct bpf_kprobe_multi_link {
2607 	struct bpf_link link;
2608 	struct fprobe fp;
2609 	unsigned long *addrs;
2610 	u64 *cookies;
2611 	u32 cnt;
2612 	u32 mods_cnt;
2613 	struct module **mods;
2614 	u32 flags;
2615 };
2616 
2617 struct bpf_kprobe_multi_run_ctx {
2618 	struct bpf_session_run_ctx session_ctx;
2619 	struct bpf_kprobe_multi_link *link;
2620 	unsigned long entry_ip;
2621 };
2622 
2623 struct user_syms {
2624 	const char **syms;
2625 	char *buf;
2626 };
2627 
2628 static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
2629 {
2630 	unsigned long __user usymbol;
2631 	const char **syms = NULL;
2632 	char *buf = NULL, *p;
2633 	int err = -ENOMEM;
2634 	unsigned int i;
2635 
2636 	syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
2637 	if (!syms)
2638 		goto error;
2639 
2640 	buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
2641 	if (!buf)
2642 		goto error;
2643 
2644 	for (p = buf, i = 0; i < cnt; i++) {
2645 		if (__get_user(usymbol, usyms + i)) {
2646 			err = -EFAULT;
2647 			goto error;
2648 		}
2649 		err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN);
2650 		if (err == KSYM_NAME_LEN)
2651 			err = -E2BIG;
2652 		if (err < 0)
2653 			goto error;
2654 		syms[i] = p;
2655 		p += err + 1;
2656 	}
2657 
2658 	us->syms = syms;
2659 	us->buf = buf;
2660 	return 0;
2661 
2662 error:
2663 	if (err) {
2664 		kvfree(syms);
2665 		kvfree(buf);
2666 	}
2667 	return err;
2668 }
2669 
2670 static void kprobe_multi_put_modules(struct module **mods, u32 cnt)
2671 {
2672 	u32 i;
2673 
2674 	for (i = 0; i < cnt; i++)
2675 		module_put(mods[i]);
2676 }
2677 
2678 static void free_user_syms(struct user_syms *us)
2679 {
2680 	kvfree(us->syms);
2681 	kvfree(us->buf);
2682 }
2683 
2684 static void bpf_kprobe_multi_link_release(struct bpf_link *link)
2685 {
2686 	struct bpf_kprobe_multi_link *kmulti_link;
2687 
2688 	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2689 	unregister_fprobe(&kmulti_link->fp);
2690 	kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt);
2691 }
2692 
2693 static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
2694 {
2695 	struct bpf_kprobe_multi_link *kmulti_link;
2696 
2697 	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2698 	kvfree(kmulti_link->addrs);
2699 	kvfree(kmulti_link->cookies);
2700 	kfree(kmulti_link->mods);
2701 	kfree(kmulti_link);
2702 }
2703 
2704 static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
2705 						struct bpf_link_info *info)
2706 {
2707 	u64 __user *ucookies = u64_to_user_ptr(info->kprobe_multi.cookies);
2708 	u64 __user *uaddrs = u64_to_user_ptr(info->kprobe_multi.addrs);
2709 	struct bpf_kprobe_multi_link *kmulti_link;
2710 	u32 ucount = info->kprobe_multi.count;
2711 	int err = 0, i;
2712 
2713 	if (!uaddrs ^ !ucount)
2714 		return -EINVAL;
2715 	if (ucookies && !ucount)
2716 		return -EINVAL;
2717 
2718 	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2719 	info->kprobe_multi.count = kmulti_link->cnt;
2720 	info->kprobe_multi.flags = kmulti_link->flags;
2721 	info->kprobe_multi.missed = kmulti_link->fp.nmissed;
2722 
2723 	if (!uaddrs)
2724 		return 0;
2725 	if (ucount < kmulti_link->cnt)
2726 		err = -ENOSPC;
2727 	else
2728 		ucount = kmulti_link->cnt;
2729 
2730 	if (ucookies) {
2731 		if (kmulti_link->cookies) {
2732 			if (copy_to_user(ucookies, kmulti_link->cookies, ucount * sizeof(u64)))
2733 				return -EFAULT;
2734 		} else {
2735 			for (i = 0; i < ucount; i++) {
2736 				if (put_user(0, ucookies + i))
2737 					return -EFAULT;
2738 			}
2739 		}
2740 	}
2741 
2742 	if (kallsyms_show_value(current_cred())) {
2743 		if (copy_to_user(uaddrs, kmulti_link->addrs, ucount * sizeof(u64)))
2744 			return -EFAULT;
2745 	} else {
2746 		for (i = 0; i < ucount; i++) {
2747 			if (put_user(0, uaddrs + i))
2748 				return -EFAULT;
2749 		}
2750 	}
2751 	return err;
2752 }
2753 
2754 static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
2755 	.release = bpf_kprobe_multi_link_release,
2756 	.dealloc_deferred = bpf_kprobe_multi_link_dealloc,
2757 	.fill_link_info = bpf_kprobe_multi_link_fill_link_info,
2758 };
2759 
2760 static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv)
2761 {
2762 	const struct bpf_kprobe_multi_link *link = priv;
2763 	unsigned long *addr_a = a, *addr_b = b;
2764 	u64 *cookie_a, *cookie_b;
2765 
2766 	cookie_a = link->cookies + (addr_a - link->addrs);
2767 	cookie_b = link->cookies + (addr_b - link->addrs);
2768 
2769 	/* swap addr_a/addr_b and cookie_a/cookie_b values */
2770 	swap(*addr_a, *addr_b);
2771 	swap(*cookie_a, *cookie_b);
2772 }
2773 
2774 static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b)
2775 {
2776 	const unsigned long *addr_a = a, *addr_b = b;
2777 
2778 	if (*addr_a == *addr_b)
2779 		return 0;
2780 	return *addr_a < *addr_b ? -1 : 1;
2781 }
2782 
2783 static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv)
2784 {
2785 	return bpf_kprobe_multi_addrs_cmp(a, b);
2786 }
2787 
2788 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
2789 {
2790 	struct bpf_kprobe_multi_run_ctx *run_ctx;
2791 	struct bpf_kprobe_multi_link *link;
2792 	u64 *cookie, entry_ip;
2793 	unsigned long *addr;
2794 
2795 	if (WARN_ON_ONCE(!ctx))
2796 		return 0;
2797 	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx,
2798 			       session_ctx.run_ctx);
2799 	link = run_ctx->link;
2800 	if (!link->cookies)
2801 		return 0;
2802 	entry_ip = run_ctx->entry_ip;
2803 	addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
2804 		       bpf_kprobe_multi_addrs_cmp);
2805 	if (!addr)
2806 		return 0;
2807 	cookie = link->cookies + (addr - link->addrs);
2808 	return *cookie;
2809 }
2810 
2811 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
2812 {
2813 	struct bpf_kprobe_multi_run_ctx *run_ctx;
2814 
2815 	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx,
2816 			       session_ctx.run_ctx);
2817 	return run_ctx->entry_ip;
2818 }
2819 
2820 static int
2821 kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
2822 			   unsigned long entry_ip, struct pt_regs *regs,
2823 			   bool is_return, void *data)
2824 {
2825 	struct bpf_kprobe_multi_run_ctx run_ctx = {
2826 		.session_ctx = {
2827 			.is_return = is_return,
2828 			.data = data,
2829 		},
2830 		.link = link,
2831 		.entry_ip = entry_ip,
2832 	};
2833 	struct bpf_run_ctx *old_run_ctx;
2834 	int err;
2835 
2836 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
2837 		bpf_prog_inc_misses_counter(link->link.prog);
2838 		err = 0;
2839 		goto out;
2840 	}
2841 
2842 	migrate_disable();
2843 	rcu_read_lock();
2844 	old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx);
2845 	err = bpf_prog_run(link->link.prog, regs);
2846 	bpf_reset_run_ctx(old_run_ctx);
2847 	rcu_read_unlock();
2848 	migrate_enable();
2849 
2850  out:
2851 	__this_cpu_dec(bpf_prog_active);
2852 	return err;
2853 }
2854 
2855 static int
2856 kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
2857 			  unsigned long ret_ip, struct pt_regs *regs,
2858 			  void *data)
2859 {
2860 	struct bpf_kprobe_multi_link *link;
2861 	int err;
2862 
2863 	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2864 	err = kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs, false, data);
2865 	return is_kprobe_session(link->link.prog) ? err : 0;
2866 }
2867 
2868 static void
2869 kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip,
2870 			       unsigned long ret_ip, struct pt_regs *regs,
2871 			       void *data)
2872 {
2873 	struct bpf_kprobe_multi_link *link;
2874 
2875 	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2876 	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs, true, data);
2877 }
2878 
2879 static int symbols_cmp_r(const void *a, const void *b, const void *priv)
2880 {
2881 	const char **str_a = (const char **) a;
2882 	const char **str_b = (const char **) b;
2883 
2884 	return strcmp(*str_a, *str_b);
2885 }
2886 
2887 struct multi_symbols_sort {
2888 	const char **funcs;
2889 	u64 *cookies;
2890 };
2891 
2892 static void symbols_swap_r(void *a, void *b, int size, const void *priv)
2893 {
2894 	const struct multi_symbols_sort *data = priv;
2895 	const char **name_a = a, **name_b = b;
2896 
2897 	swap(*name_a, *name_b);
2898 
2899 	/* If defined, also swap the related cookies. */
2900 	if (data->cookies) {
2901 		u64 *cookie_a, *cookie_b;
2902 
2903 		cookie_a = data->cookies + (name_a - data->funcs);
2904 		cookie_b = data->cookies + (name_b - data->funcs);
2905 		swap(*cookie_a, *cookie_b);
2906 	}
2907 }
2908 
2909 struct modules_array {
2910 	struct module **mods;
2911 	int mods_cnt;
2912 	int mods_cap;
2913 };
2914 
2915 static int add_module(struct modules_array *arr, struct module *mod)
2916 {
2917 	struct module **mods;
2918 
2919 	if (arr->mods_cnt == arr->mods_cap) {
2920 		arr->mods_cap = max(16, arr->mods_cap * 3 / 2);
2921 		mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL);
2922 		if (!mods)
2923 			return -ENOMEM;
2924 		arr->mods = mods;
2925 	}
2926 
2927 	arr->mods[arr->mods_cnt] = mod;
2928 	arr->mods_cnt++;
2929 	return 0;
2930 }
2931 
2932 static bool has_module(struct modules_array *arr, struct module *mod)
2933 {
2934 	int i;
2935 
2936 	for (i = arr->mods_cnt - 1; i >= 0; i--) {
2937 		if (arr->mods[i] == mod)
2938 			return true;
2939 	}
2940 	return false;
2941 }
2942 
2943 static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
2944 {
2945 	struct modules_array arr = {};
2946 	u32 i, err = 0;
2947 
2948 	for (i = 0; i < addrs_cnt; i++) {
2949 		struct module *mod;
2950 
2951 		preempt_disable();
2952 		mod = __module_address(addrs[i]);
2953 		/* Either no module, or it's already stored. */
2954 		if (!mod || has_module(&arr, mod)) {
2955 			preempt_enable();
2956 			continue;
2957 		}
2958 		if (!try_module_get(mod))
2959 			err = -EINVAL;
2960 		preempt_enable();
2961 		if (err)
2962 			break;
2963 		err = add_module(&arr, mod);
2964 		if (err) {
2965 			module_put(mod);
2966 			break;
2967 		}
2968 	}
2969 
2970 	/* We return either err < 0 in case of error, ... */
2971 	if (err) {
2972 		kprobe_multi_put_modules(arr.mods, arr.mods_cnt);
2973 		kfree(arr.mods);
2974 		return err;
2975 	}
2976 
2977 	/* or number of modules found if everything is ok. */
2978 	*mods = arr.mods;
2979 	return arr.mods_cnt;
2980 }
2981 
2982 static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt)
2983 {
2984 	u32 i;
2985 
2986 	for (i = 0; i < cnt; i++) {
2987 		if (!within_error_injection_list(addrs[i]))
2988 			return -EINVAL;
2989 	}
2990 	return 0;
2991 }
2992 
2993 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
2994 {
2995 	struct bpf_kprobe_multi_link *link = NULL;
2996 	struct bpf_link_primer link_primer;
2997 	void __user *ucookies;
2998 	unsigned long *addrs;
2999 	u32 flags, cnt, size;
3000 	void __user *uaddrs;
3001 	u64 *cookies = NULL;
3002 	void __user *usyms;
3003 	int err;
3004 
3005 	/* no support for 32bit archs yet */
3006 	if (sizeof(u64) != sizeof(void *))
3007 		return -EOPNOTSUPP;
3008 
3009 	if (!is_kprobe_multi(prog))
3010 		return -EINVAL;
3011 
3012 	flags = attr->link_create.kprobe_multi.flags;
3013 	if (flags & ~BPF_F_KPROBE_MULTI_RETURN)
3014 		return -EINVAL;
3015 
3016 	uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
3017 	usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
3018 	if (!!uaddrs == !!usyms)
3019 		return -EINVAL;
3020 
3021 	cnt = attr->link_create.kprobe_multi.cnt;
3022 	if (!cnt)
3023 		return -EINVAL;
3024 	if (cnt > MAX_KPROBE_MULTI_CNT)
3025 		return -E2BIG;
3026 
3027 	size = cnt * sizeof(*addrs);
3028 	addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
3029 	if (!addrs)
3030 		return -ENOMEM;
3031 
3032 	ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
3033 	if (ucookies) {
3034 		cookies = kvmalloc_array(cnt, sizeof(*cookies), GFP_KERNEL);
3035 		if (!cookies) {
3036 			err = -ENOMEM;
3037 			goto error;
3038 		}
3039 		if (copy_from_user(cookies, ucookies, size)) {
3040 			err = -EFAULT;
3041 			goto error;
3042 		}
3043 	}
3044 
3045 	if (uaddrs) {
3046 		if (copy_from_user(addrs, uaddrs, size)) {
3047 			err = -EFAULT;
3048 			goto error;
3049 		}
3050 	} else {
3051 		struct multi_symbols_sort data = {
3052 			.cookies = cookies,
3053 		};
3054 		struct user_syms us;
3055 
3056 		err = copy_user_syms(&us, usyms, cnt);
3057 		if (err)
3058 			goto error;
3059 
3060 		if (cookies)
3061 			data.funcs = us.syms;
3062 
3063 		sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
3064 		       symbols_swap_r, &data);
3065 
3066 		err = ftrace_lookup_symbols(us.syms, cnt, addrs);
3067 		free_user_syms(&us);
3068 		if (err)
3069 			goto error;
3070 	}
3071 
3072 	if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) {
3073 		err = -EINVAL;
3074 		goto error;
3075 	}
3076 
3077 	link = kzalloc(sizeof(*link), GFP_KERNEL);
3078 	if (!link) {
3079 		err = -ENOMEM;
3080 		goto error;
3081 	}
3082 
3083 	bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
3084 		      &bpf_kprobe_multi_link_lops, prog);
3085 
3086 	err = bpf_link_prime(&link->link, &link_primer);
3087 	if (err)
3088 		goto error;
3089 
3090 	if (!(flags & BPF_F_KPROBE_MULTI_RETURN))
3091 		link->fp.entry_handler = kprobe_multi_link_handler;
3092 	if ((flags & BPF_F_KPROBE_MULTI_RETURN) || is_kprobe_session(prog))
3093 		link->fp.exit_handler = kprobe_multi_link_exit_handler;
3094 	if (is_kprobe_session(prog))
3095 		link->fp.entry_data_size = sizeof(u64);
3096 
3097 	link->addrs = addrs;
3098 	link->cookies = cookies;
3099 	link->cnt = cnt;
3100 	link->flags = flags;
3101 
3102 	if (cookies) {
3103 		/*
3104 		 * Sorting addresses will trigger sorting cookies as well
3105 		 * (check bpf_kprobe_multi_cookie_swap). This way we can
3106 		 * find cookie based on the address in bpf_get_attach_cookie
3107 		 * helper.
3108 		 */
3109 		sort_r(addrs, cnt, sizeof(*addrs),
3110 		       bpf_kprobe_multi_cookie_cmp,
3111 		       bpf_kprobe_multi_cookie_swap,
3112 		       link);
3113 	}
3114 
3115 	err = get_modules_for_addrs(&link->mods, addrs, cnt);
3116 	if (err < 0) {
3117 		bpf_link_cleanup(&link_primer);
3118 		return err;
3119 	}
3120 	link->mods_cnt = err;
3121 
3122 	err = register_fprobe_ips(&link->fp, addrs, cnt);
3123 	if (err) {
3124 		kprobe_multi_put_modules(link->mods, link->mods_cnt);
3125 		bpf_link_cleanup(&link_primer);
3126 		return err;
3127 	}
3128 
3129 	return bpf_link_settle(&link_primer);
3130 
3131 error:
3132 	kfree(link);
3133 	kvfree(addrs);
3134 	kvfree(cookies);
3135 	return err;
3136 }
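
/*
 * The usual userspace entry point for the attach above is libbpf's
 * kprobe.multi support.  Hedged sketch (skeleton, symbols and cookie
 * values are assumptions):
 *
 *	const char *syms[] = { "ksys_read", "ksys_write" };
 *	__u64 cookies[] = { 1, 2 };
 *	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
 *		.syms = syms,
 *		.cookies = cookies,
 *		.cnt = 2,
 *	);
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe_multi_opts(skel->progs.probe,
 *						     NULL, &opts);
 */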
3137 #else /* !CONFIG_FPROBE */
3138 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3139 {
3140 	return -EOPNOTSUPP;
3141 }
3142 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
3143 {
3144 	return 0;
3145 }
3146 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3147 {
3148 	return 0;
3149 }
3150 #endif
3151 
3152 #ifdef CONFIG_UPROBES
3153 struct bpf_uprobe_multi_link;
3154 
3155 struct bpf_uprobe {
3156 	struct bpf_uprobe_multi_link *link;
3157 	loff_t offset;
3158 	unsigned long ref_ctr_offset;
3159 	u64 cookie;
3160 	struct uprobe_consumer consumer;
3161 };
3162 
3163 struct bpf_uprobe_multi_link {
3164 	struct path path;
3165 	struct bpf_link link;
3166 	u32 cnt;
3167 	u32 flags;
3168 	struct bpf_uprobe *uprobes;
3169 	struct task_struct *task;
3170 };
3171 
3172 struct bpf_uprobe_multi_run_ctx {
3173 	struct bpf_run_ctx run_ctx;
3174 	unsigned long entry_ip;
3175 	struct bpf_uprobe *uprobe;
3176 };
3177 
3178 static void bpf_uprobe_unregister(struct path *path, struct bpf_uprobe *uprobes,
3179 				  u32 cnt)
3180 {
3181 	u32 i;
3182 
3183 	for (i = 0; i < cnt; i++) {
3184 		uprobe_unregister(d_real_inode(path->dentry), uprobes[i].offset,
3185 				  &uprobes[i].consumer);
3186 	}
3187 }
3188 
3189 static void bpf_uprobe_multi_link_release(struct bpf_link *link)
3190 {
3191 	struct bpf_uprobe_multi_link *umulti_link;
3192 
3193 	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3194 	bpf_uprobe_unregister(&umulti_link->path, umulti_link->uprobes, umulti_link->cnt);
3195 	if (umulti_link->task)
3196 		put_task_struct(umulti_link->task);
3197 	path_put(&umulti_link->path);
3198 }
3199 
3200 static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
3201 {
3202 	struct bpf_uprobe_multi_link *umulti_link;
3203 
3204 	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3205 	kvfree(umulti_link->uprobes);
3206 	kfree(umulti_link);
3207 }
3208 
3209 static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link,
3210 						struct bpf_link_info *info)
3211 {
3212 	u64 __user *uref_ctr_offsets = u64_to_user_ptr(info->uprobe_multi.ref_ctr_offsets);
3213 	u64 __user *ucookies = u64_to_user_ptr(info->uprobe_multi.cookies);
3214 	u64 __user *uoffsets = u64_to_user_ptr(info->uprobe_multi.offsets);
3215 	u64 __user *upath = u64_to_user_ptr(info->uprobe_multi.path);
3216 	u32 upath_size = info->uprobe_multi.path_size;
3217 	struct bpf_uprobe_multi_link *umulti_link;
3218 	u32 ucount = info->uprobe_multi.count;
3219 	int err = 0, i;
3220 	long left;
3221 
3222 	if (!upath ^ !upath_size)
3223 		return -EINVAL;
3224 
3225 	if ((uoffsets || uref_ctr_offsets || ucookies) && !ucount)
3226 		return -EINVAL;
3227 
3228 	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3229 	info->uprobe_multi.count = umulti_link->cnt;
3230 	info->uprobe_multi.flags = umulti_link->flags;
3231 	info->uprobe_multi.pid = umulti_link->task ?
3232 				 task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0;
3233 
3234 	if (upath) {
3235 		char *p, *buf;
3236 
3237 		upath_size = min_t(u32, upath_size, PATH_MAX);
3238 
3239 		buf = kmalloc(upath_size, GFP_KERNEL);
3240 		if (!buf)
3241 			return -ENOMEM;
3242 		p = d_path(&umulti_link->path, buf, upath_size);
3243 		if (IS_ERR(p)) {
3244 			kfree(buf);
3245 			return PTR_ERR(p);
3246 		}
3247 		upath_size = buf + upath_size - p;
3248 		left = copy_to_user(upath, p, upath_size);
3249 		kfree(buf);
3250 		if (left)
3251 			return -EFAULT;
3252 		info->uprobe_multi.path_size = upath_size;
3253 	}
3254 
3255 	if (!uoffsets && !ucookies && !uref_ctr_offsets)
3256 		return 0;
3257 
3258 	if (ucount < umulti_link->cnt)
3259 		err = -ENOSPC;
3260 	else
3261 		ucount = umulti_link->cnt;
3262 
3263 	for (i = 0; i < ucount; i++) {
3264 		if (uoffsets &&
3265 		    put_user(umulti_link->uprobes[i].offset, uoffsets + i))
3266 			return -EFAULT;
3267 		if (uref_ctr_offsets &&
3268 		    put_user(umulti_link->uprobes[i].ref_ctr_offset, uref_ctr_offsets + i))
3269 			return -EFAULT;
3270 		if (ucookies &&
3271 		    put_user(umulti_link->uprobes[i].cookie, ucookies + i))
3272 			return -EFAULT;
3273 	}
3274 
3275 	return err;
3276 }
3277 
3278 static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
3279 	.release = bpf_uprobe_multi_link_release,
3280 	.dealloc_deferred = bpf_uprobe_multi_link_dealloc,
3281 	.fill_link_info = bpf_uprobe_multi_link_fill_link_info,
3282 };
3283 
3284 static int uprobe_prog_run(struct bpf_uprobe *uprobe,
3285 			   unsigned long entry_ip,
3286 			   struct pt_regs *regs)
3287 {
3288 	struct bpf_uprobe_multi_link *link = uprobe->link;
3289 	struct bpf_uprobe_multi_run_ctx run_ctx = {
3290 		.entry_ip = entry_ip,
3291 		.uprobe = uprobe,
3292 	};
3293 	struct bpf_prog *prog = link->link.prog;
3294 	bool sleepable = prog->sleepable;
3295 	struct bpf_run_ctx *old_run_ctx;
3296 	int err = 0;
3297 
3298 	if (link->task && current != link->task)
3299 		return 0;
3300 
3301 	if (sleepable)
3302 		rcu_read_lock_trace();
3303 	else
3304 		rcu_read_lock();
3305 
3306 	migrate_disable();
3307 
3308 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
3309 	err = bpf_prog_run(link->link.prog, regs);
3310 	bpf_reset_run_ctx(old_run_ctx);
3311 
3312 	migrate_enable();
3313 
3314 	if (sleepable)
3315 		rcu_read_unlock_trace();
3316 	else
3317 		rcu_read_unlock();
3318 	return err;
3319 }
3320 
3321 static bool
3322 uprobe_multi_link_filter(struct uprobe_consumer *con, enum uprobe_filter_ctx ctx,
3323 			 struct mm_struct *mm)
3324 {
3325 	struct bpf_uprobe *uprobe;
3326 
3327 	uprobe = container_of(con, struct bpf_uprobe, consumer);
3328 	return uprobe->link->task->mm == mm;
3329 }
3330 
3331 static int
3332 uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs)
3333 {
3334 	struct bpf_uprobe *uprobe;
3335 
3336 	uprobe = container_of(con, struct bpf_uprobe, consumer);
3337 	return uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
3338 }
3339 
3340 static int
3341 uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs)
3342 {
3343 	struct bpf_uprobe *uprobe;
3344 
3345 	uprobe = container_of(con, struct bpf_uprobe, consumer);
3346 	return uprobe_prog_run(uprobe, func, regs);
3347 }
3348 
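/*
 * The two helpers below recover per-probe data from the run context
 * stashed in current->bpf_ctx by uprobe_prog_run(); they back the
 * bpf_get_func_ip() and bpf_get_attach_cookie() helpers for
 * uprobe-multi programs.
 */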
3349 static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3350 {
3351 	struct bpf_uprobe_multi_run_ctx *run_ctx;
3352 
3353 	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
3354 	return run_ctx->entry_ip;
3355 }
3356 
3357 static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3358 {
3359 	struct bpf_uprobe_multi_run_ctx *run_ctx;
3360 
3361 	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
3362 	return run_ctx->uprobe->cookie;
3363 }
3364 
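/*
 * Attach a BPF program to many uprobes in one go: validate the attr,
 * resolve the binary's path, optionally pin the link to one task,
 * copy the per-probe arrays in from userspace, then register one
 * uprobe consumer per offset before priming the link fd.
 *
 * A minimal raw-syscall sketch from userspace (hypothetical
 * ptr_to_u64() helper, error handling elided):
 *
 *	union bpf_attr a = {};
 *
 *	a.link_create.prog_fd = prog_fd;
 *	a.link_create.attach_type = BPF_TRACE_UPROBE_MULTI;
 *	a.link_create.uprobe_multi.path = ptr_to_u64(binary_path);
 *	a.link_create.uprobe_multi.offsets = ptr_to_u64(offsets);
 *	a.link_create.uprobe_multi.cnt = n;
 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &a, sizeof(a));
 */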
3365 int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3366 {
3367 	struct bpf_uprobe_multi_link *link = NULL;
3368 	unsigned long __user *uref_ctr_offsets;
3369 	struct bpf_link_primer link_primer;
3370 	struct bpf_uprobe *uprobes = NULL;
3371 	struct task_struct *task = NULL;
3372 	unsigned long __user *uoffsets;
3373 	u64 __user *ucookies;
3374 	void __user *upath;
3375 	u32 flags, cnt, i;
3376 	struct path path;
3377 	char *name;
3378 	pid_t pid;
3379 	int err;
3380 
3381 	/* no support for 32bit archs yet */
3382 	if (sizeof(u64) != sizeof(void *))
3383 		return -EOPNOTSUPP;
3384 
3385 	if (prog->expected_attach_type != BPF_TRACE_UPROBE_MULTI)
3386 		return -EINVAL;
3387 
3388 	flags = attr->link_create.uprobe_multi.flags;
3389 	if (flags & ~BPF_F_UPROBE_MULTI_RETURN)
3390 		return -EINVAL;
3391 
3392 	/*
3393 	 * path, offsets and cnt are mandatory,
3394 	 * ref_ctr_offsets and cookies are optional
3395 	 */
3396 	upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
3397 	uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
3398 	cnt = attr->link_create.uprobe_multi.cnt;
3399 
3400 	if (!upath || !uoffsets || !cnt)
3401 		return -EINVAL;
3402 	if (cnt > MAX_UPROBE_MULTI_CNT)
3403 		return -E2BIG;
3404 
3405 	uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets);
3406 	ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies);
3407 
3408 	name = strndup_user(upath, PATH_MAX);
3409 	if (IS_ERR(name)) {
3410 		err = PTR_ERR(name);
3411 		return err;
3412 	}
3413 
3414 	err = kern_path(name, LOOKUP_FOLLOW, &path);
3415 	kfree(name);
3416 	if (err)
3417 		return err;
3418 
3419 	if (!d_is_reg(path.dentry)) {
3420 		err = -EBADF;
3421 		goto error_path_put;
3422 	}
3423 
3424 	pid = attr->link_create.uprobe_multi.pid;
3425 	if (pid) {
3426 		rcu_read_lock();
3427 		task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
3428 		rcu_read_unlock();
3429 		if (!task) {
3430 			err = -ESRCH;
3431 			goto error_path_put;
3432 		}
3433 	}
3434 
3435 	err = -ENOMEM;
3436 
3437 	link = kzalloc(sizeof(*link), GFP_KERNEL);
3438 	uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL);
3439 
3440 	if (!uprobes || !link)
3441 		goto error_free;
3442 
3443 	for (i = 0; i < cnt; i++) {
3444 		if (__get_user(uprobes[i].offset, uoffsets + i)) {
3445 			err = -EFAULT;
3446 			goto error_free;
3447 		}
3448 		if (uprobes[i].offset < 0) {
3449 			err = -EINVAL;
3450 			goto error_free;
3451 		}
3452 		if (uref_ctr_offsets && __get_user(uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) {
3453 			err = -EFAULT;
3454 			goto error_free;
3455 		}
3456 		if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
3457 			err = -EFAULT;
3458 			goto error_free;
3459 		}
3460 
3461 		uprobes[i].link = link;
3462 
3463 		if (flags & BPF_F_UPROBE_MULTI_RETURN)
3464 			uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
3465 		else
3466 			uprobes[i].consumer.handler = uprobe_multi_link_handler;
3467 
3468 		if (pid)
3469 			uprobes[i].consumer.filter = uprobe_multi_link_filter;
3470 	}
3471 
3472 	link->cnt = cnt;
3473 	link->uprobes = uprobes;
3474 	link->path = path;
3475 	link->task = task;
3476 	link->flags = flags;
3477 
3478 	bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI,
3479 		      &bpf_uprobe_multi_link_lops, prog);
3480 
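	/*
	 * Register one consumer per probe; if any registration fails,
	 * unregister the i consumers already installed before bailing.
	 */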
3481 	for (i = 0; i < cnt; i++) {
3482 		err = uprobe_register_refctr(d_real_inode(link->path.dentry),
3483 					     uprobes[i].offset,
3484 					     uprobes[i].ref_ctr_offset,
3485 					     &uprobes[i].consumer);
3486 		if (err) {
3487 			bpf_uprobe_unregister(&path, uprobes, i);
3488 			goto error_free;
3489 		}
3490 	}
3491 
3492 	err = bpf_link_prime(&link->link, &link_primer);
3493 	if (err)
3494 		goto error_free;
3495 
3496 	return bpf_link_settle(&link_primer);
3497 
3498 error_free:
3499 	kvfree(uprobes);
3500 	kfree(link);
3501 	if (task)
3502 		put_task_struct(task);
3503 error_path_put:
3504 	path_put(&path);
3505 	return err;
3506 }
3507 #else /* !CONFIG_UPROBES */
3508 int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3509 {
3510 	return -EOPNOTSUPP;
3511 }
3512 static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3513 {
3514 	return 0;
3515 }
3516 static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3517 {
3518 	return 0;
3519 }
3520 #endif /* CONFIG_UPROBES */
3521 
3522 #ifdef CONFIG_FPROBE
3523 __bpf_kfunc_start_defs();
3524 
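/*
 * Session kfuncs for kprobe-session programs, which run both on entry
 * and on return of the traced function. A BPF-side sketch (libbpf
 * section naming assumed, kfunc declarations elided):
 *
 *	SEC("kprobe.session/do_sys_openat2")
 *	int handler(struct pt_regs *ctx)
 *	{
 *		__u64 *cookie = bpf_session_cookie();
 *
 *		if (bpf_session_is_return())
 *			bpf_printk("took %llu ns",
 *				   bpf_ktime_get_ns() - *cookie);
 *		else
 *			*cookie = bpf_ktime_get_ns();
 *		return 0;
 *	}
 */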
3525 __bpf_kfunc bool bpf_session_is_return(void)
3526 {
3527 	struct bpf_session_run_ctx *session_ctx;
3528 
3529 	session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx);
3530 	return session_ctx->is_return;
3531 }
3532 
3533 __bpf_kfunc __u64 *bpf_session_cookie(void)
3534 {
3535 	struct bpf_session_run_ctx *session_ctx;
3536 
3537 	session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx);
3538 	return session_ctx->data;
3539 }
3540 
3541 __bpf_kfunc_end_defs();
3542 
3543 BTF_KFUNCS_START(kprobe_multi_kfunc_set_ids)
3544 BTF_ID_FLAGS(func, bpf_session_is_return)
3545 BTF_ID_FLAGS(func, bpf_session_cookie)
3546 BTF_KFUNCS_END(kprobe_multi_kfunc_set_ids)
3547 
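/*
 * The verifier-time filter below makes the session kfuncs visible only
 * to kprobe-session programs; other kprobe-type programs that try to
 * call them are rejected with -EACCES.
 */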
3548 static int bpf_kprobe_multi_filter(const struct bpf_prog *prog, u32 kfunc_id)
3549 {
3550 	if (!btf_id_set8_contains(&kprobe_multi_kfunc_set_ids, kfunc_id))
3551 		return 0;
3552 
3553 	if (!is_kprobe_session(prog))
3554 		return -EACCES;
3555 
3556 	return 0;
3557 }
3558 
3559 static const struct btf_kfunc_id_set bpf_kprobe_multi_kfunc_set = {
3560 	.owner = THIS_MODULE,
3561 	.set = &kprobe_multi_kfunc_set_ids,
3562 	.filter = bpf_kprobe_multi_filter,
3563 };
3564 
3565 static int __init bpf_kprobe_multi_kfuncs_init(void)
3566 {
3567 	return register_btf_kfunc_id_set(BPF_PROG_TYPE_KPROBE, &bpf_kprobe_multi_kfunc_set);
3568 }
3569 
3570 late_initcall(bpf_kprobe_multi_kfuncs_init);
3571 #endif
3572