xref: /linux/kernel/trace/bpf_trace.c (revision 69137ea60c9dad58773a1918de6c1b00b088520c)
1 /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
2  * Copyright (c) 2016 Facebook
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  */
8 #include <linux/kernel.h>
9 #include <linux/types.h>
10 #include <linux/slab.h>
11 #include <linux/bpf.h>
12 #include <linux/bpf_perf_event.h>
13 #include <linux/filter.h>
14 #include <linux/uaccess.h>
15 #include <linux/ctype.h>
16 #include "trace.h"
17 
18 /**
19  * trace_call_bpf - invoke BPF program
20  * @prog: BPF program
21  * @ctx: opaque context pointer
22  *
23  * kprobe handlers execute BPF programs via this helper.
24  * Can be used from static tracepoints in the future.
25  *
26  * Return: BPF programs always return an integer which is interpreted by
27  * kprobe handler as:
28  * 0 - return from kprobe (event is filtered out)
29  * 1 - store kprobe event into ring buffer
30  * Other values are reserved and currently alias to 1
31  */
32 unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
33 {
34 	unsigned int ret;
35 
36 	if (in_nmi()) /* not supported yet */
37 		return 1;
38 
39 	preempt_disable();
40 
41 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
42 		/*
43 		 * since some bpf program is already running on this cpu,
44 		 * don't call into another bpf program (same or different)
45 		 * and don't send kprobe event into ring-buffer,
46 		 * so return zero here
47 		 */
48 		ret = 0;
49 		goto out;
50 	}
51 
52 	rcu_read_lock();
53 	ret = BPF_PROG_RUN(prog, ctx);
54 	rcu_read_unlock();
55 
56  out:
57 	__this_cpu_dec(bpf_prog_active);
58 	preempt_enable();
59 
60 	return ret;
61 }
62 EXPORT_SYMBOL_GPL(trace_call_bpf);
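/*
 * Illustrative sketch (not part of bpf_trace.c): the return-value contract
 * documented above, seen from the program side.  A kprobe program that
 * returns 0 makes trace_call_bpf() report 0 and the event is filtered out;
 * returning 1 lets the kprobe event reach the ring buffer.  This would live
 * in a separate object built with clang -target bpf; the SEC()/license
 * conventions follow samples/bpf and the attach point is only an example.
 */
#include <uapi/linux/bpf.h>
#include <uapi/linux/ptrace.h>

#define SEC(name) __attribute__((section(name), used))

static unsigned long long (*bpf_get_current_uid_gid)(void) =
	(void *) BPF_FUNC_get_current_uid_gid;

SEC("kprobe/do_sys_open")
int keep_root_opens_only(struct pt_regs *ctx)
{
	/* uid lives in the lower 32 bits; 1 keeps the event, 0 drops it */
	return (__u32) bpf_get_current_uid_gid() == 0;
}

char _license[] SEC("license") = "GPL";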
63 
64 BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
65 {
66 	int ret;
67 
68 	ret = probe_kernel_read(dst, unsafe_ptr, size);
69 	if (unlikely(ret < 0))
70 		memset(dst, 0, size);
71 
72 	return ret;
73 }
74 
75 static const struct bpf_func_proto bpf_probe_read_proto = {
76 	.func		= bpf_probe_read,
77 	.gpl_only	= true,
78 	.ret_type	= RET_INTEGER,
79 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
80 	.arg2_type	= ARG_CONST_SIZE,
81 	.arg3_type	= ARG_ANYTHING,
82 };
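/*
 * Illustrative sketch (not part of bpf_trace.c): from a program, this helper
 * is the sanctioned way to dereference an arbitrary kernel pointer; on fault
 * it returns a negative errno and, as above, zeroes the destination buffer.
 * Assumptions: SEC(), PT_REGS_PARM1() and the bpf_probe_read() declaration
 * come from the tree's bpf_helpers.h, the object is built with kernel
 * include paths (samples/bpf-style kbuild), and the struct layout is only
 * valid for the kernel it is built against.
 */
#include <uapi/linux/bpf.h>
#include <uapi/linux/ptrace.h>
#include <linux/skbuff.h>
#include "bpf_helpers.h"

SEC("kprobe/kfree_skb")
int read_skb_len(struct pt_regs *ctx)
{
	/* first argument register of the probed function, arch-specific */
	struct sk_buff *skb = (struct sk_buff *) PT_REGS_PARM1(ctx);
	unsigned int len = 0;

	/* skb is unsafe here: read it via the helper, never dereference it */
	bpf_probe_read(&len, sizeof(len), &skb->len);
	return 0;
}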
83 
84 BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
85 	   u32, size)
86 {
87 	/*
88 	 * Ensure we're in user context which is safe for the helper to
89 	 * run. This helper has no business in a kthread.
90 	 *
91 	 * access_ok() should prevent writing to non-user memory, but in
92 	 * some situations (nommu, temporary switch, etc) access_ok() does
93 	 * not provide enough validation, hence the check on KERNEL_DS.
94 	 */
95 
96 	if (unlikely(in_interrupt() ||
97 		     current->flags & (PF_KTHREAD | PF_EXITING)))
98 		return -EPERM;
99 	if (unlikely(uaccess_kernel()))
100 		return -EPERM;
101 	if (!access_ok(VERIFY_WRITE, unsafe_ptr, size))
102 		return -EPERM;
103 
104 	return probe_kernel_write(unsafe_ptr, src, size);
105 }
106 
107 static const struct bpf_func_proto bpf_probe_write_user_proto = {
108 	.func		= bpf_probe_write_user,
109 	.gpl_only	= true,
110 	.ret_type	= RET_INTEGER,
111 	.arg1_type	= ARG_ANYTHING,
112 	.arg2_type	= ARG_PTR_TO_MEM,
113 	.arg3_type	= ARG_CONST_SIZE,
114 };
115 
116 static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
117 {
118 	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
119 			    current->comm, task_pid_nr(current));
120 
121 	return &bpf_probe_write_user_proto;
122 }
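/*
 * Illustrative sketch (not part of bpf_trace.c, and deliberately intrusive):
 * every program using bpf_probe_write_user() triggers the rate-limited
 * warning above at load time, and the call itself only succeeds from real
 * process context on an address that passes access_ok().  The probed
 * function below is a placeholder; it is assumed to take a user-space buffer
 * as its second argument (PT_REGS_PARM2() from bpf_helpers.h).  Scaffolding
 * (includes, SEC(), license section) as in the earlier sketches.
 */
static int (*bpf_probe_write_user)(void *unsafe_ptr, const void *src, int size) =
	(void *) BPF_FUNC_probe_write_user;

SEC("kprobe/hypothetical_copyin_path")		/* placeholder attach point */
int patch_user_buffer(struct pt_regs *ctx)
{
	char msg[] = "bpf was here";
	void *ubuf = (void *) PT_REGS_PARM2(ctx);	/* user pointer from the task */

	/* returns -EPERM from irq/kthread context or when access_ok() fails */
	bpf_probe_write_user(ubuf, msg, sizeof(msg));
	return 0;
}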
123 
124 /*
125  * limited trace_printk()
126  * only %d %u %x %ld %lu %lx %lld %llu %llx %p %s conversion specifiers allowed
127  */
128 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
129 	   u64, arg2, u64, arg3)
130 {
131 	bool str_seen = false;
132 	int mod[3] = {};
133 	int fmt_cnt = 0;
134 	u64 unsafe_addr;
135 	char buf[64];
136 	int i;
137 
138 	/*
139 	 * bpf_check()->check_func_arg()->check_stack_boundary()
140 	 * guarantees that fmt points to bpf program stack,
141 	 * fmt_size bytes of it were initialized and fmt_size > 0
142 	 */
143 	if (fmt[--fmt_size] != 0)
144 		return -EINVAL;
145 
146 	/* check format string for allowed specifiers */
147 	for (i = 0; i < fmt_size; i++) {
148 		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
149 			return -EINVAL;
150 
151 		if (fmt[i] != '%')
152 			continue;
153 
154 		if (fmt_cnt >= 3)
155 			return -EINVAL;
156 
157 		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
158 		i++;
159 		if (fmt[i] == 'l') {
160 			mod[fmt_cnt]++;
161 			i++;
162 		} else if (fmt[i] == 'p' || fmt[i] == 's') {
163 			mod[fmt_cnt]++;
164 			i++;
165 			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
166 				return -EINVAL;
167 			fmt_cnt++;
168 			if (fmt[i - 1] == 's') {
169 				if (str_seen)
170 					/* allow only one '%s' per fmt string */
171 					return -EINVAL;
172 				str_seen = true;
173 
174 				switch (fmt_cnt) {
175 				case 1:
176 					unsafe_addr = arg1;
177 					arg1 = (long) buf;
178 					break;
179 				case 2:
180 					unsafe_addr = arg2;
181 					arg2 = (long) buf;
182 					break;
183 				case 3:
184 					unsafe_addr = arg3;
185 					arg3 = (long) buf;
186 					break;
187 				}
188 				buf[0] = 0;
189 				strncpy_from_unsafe(buf,
190 						    (void *) (long) unsafe_addr,
191 						    sizeof(buf));
192 			}
193 			continue;
194 		}
195 
196 		if (fmt[i] == 'l') {
197 			mod[fmt_cnt]++;
198 			i++;
199 		}
200 
201 		if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x')
202 			return -EINVAL;
203 		fmt_cnt++;
204 	}
205 
206 	return __trace_printk(1/* fake ip will not be printed */, fmt,
207 			      mod[0] == 2 ? arg1 : mod[0] == 1 ? (long) arg1 : (u32) arg1,
208 			      mod[1] == 2 ? arg2 : mod[1] == 1 ? (long) arg2 : (u32) arg2,
209 			      mod[2] == 2 ? arg3 : mod[2] == 1 ? (long) arg3 : (u32) arg3);
210 }
211 
212 static const struct bpf_func_proto bpf_trace_printk_proto = {
213 	.func		= bpf_trace_printk,
214 	.gpl_only	= true,
215 	.ret_type	= RET_INTEGER,
216 	.arg1_type	= ARG_PTR_TO_MEM,
217 	.arg2_type	= ARG_CONST_SIZE,
218 };
219 
220 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
221 {
222 	/*
223 	 * this program might be calling bpf_trace_printk,
224 	 * so allocate per-cpu printk buffers
225 	 */
226 	trace_printk_init_buffers();
227 
228 	return &bpf_trace_printk_proto;
229 }
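/*
 * Illustrative sketch (not part of bpf_trace.c) of the constraints enforced
 * above: at most three arguments, only the listed conversion specifiers, at
 * most one %s, and the format string must be NUL-terminated and live on the
 * BPF stack (hence the local fmt[] array).  Output appears in
 * /sys/kernel/debug/tracing/trace_pipe.  Scaffolding as in the earlier
 * sketches; the attach point is only an example.
 */
static int (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) =
	(void *) BPF_FUNC_trace_printk;
static unsigned long long (*bpf_get_current_pid_tgid)(void) =
	(void *) BPF_FUNC_get_current_pid_tgid;

SEC("kprobe/do_sys_open")
int print_opener(struct pt_regs *ctx)
{
	char fmt[] = "open() called by tgid %d\n";	/* must be on the BPF stack */
	__u32 tgid = bpf_get_current_pid_tgid() >> 32;

	bpf_trace_printk(fmt, sizeof(fmt), tgid);
	return 0;
}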
230 
231 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
232 {
233 	struct bpf_array *array = container_of(map, struct bpf_array, map);
234 	unsigned int cpu = smp_processor_id();
235 	u64 index = flags & BPF_F_INDEX_MASK;
236 	struct bpf_event_entry *ee;
237 	u64 value = 0;
238 	int err;
239 
240 	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
241 		return -EINVAL;
242 	if (index == BPF_F_CURRENT_CPU)
243 		index = cpu;
244 	if (unlikely(index >= array->map.max_entries))
245 		return -E2BIG;
246 
247 	ee = READ_ONCE(array->ptrs[index]);
248 	if (!ee)
249 		return -ENOENT;
250 
251 	err = perf_event_read_local(ee->event, &value);
252 	/*
253 	 * this api is ugly since we miss [-22..-2] range of valid
254 	 * counter values, but that's uapi
255 	 */
256 	if (err)
257 		return err;
258 	return value;
259 }
260 
261 static const struct bpf_func_proto bpf_perf_event_read_proto = {
262 	.func		= bpf_perf_event_read,
263 	.gpl_only	= true,
264 	.ret_type	= RET_INTEGER,
265 	.arg1_type	= ARG_CONST_MAP_PTR,
266 	.arg2_type	= ARG_ANYTHING,
267 };
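/*
 * Illustrative sketch (not part of bpf_trace.c): the counters read here come
 * from a BPF_MAP_TYPE_PERF_EVENT_ARRAY that user space fills with one
 * perf_event_open() fd per CPU, which is why BPF_F_CURRENT_CPU is the usual
 * flags value.  As the comment above notes, the u64 return multiplexes
 * counter values and errnos, so small negative results are ambiguous.  The
 * struct bpf_map_def layout is the samples/bpf ELF-loader convention; other
 * scaffolding as in the earlier sketches.
 */
struct bpf_map_def {
	unsigned int type;
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
	unsigned int map_flags;
};

struct bpf_map_def SEC("maps") counters = {
	.type		= BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size	= sizeof(int),
	.value_size	= sizeof(__u32),
	.max_entries	= 64,		/* >= number of possible CPUs */
};

static unsigned long long (*bpf_perf_event_read)(void *map, unsigned long long flags) =
	(void *) BPF_FUNC_perf_event_read;

SEC("kprobe/do_sys_open")
int sample_counter(struct pt_regs *ctx)
{
	/* raw counter value for this CPU's slot, or a (small) negative errno */
	__u64 count = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);

	(void) count;
	return 0;
}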
268 
269 static DEFINE_PER_CPU(struct perf_sample_data, bpf_sd);
270 
271 static __always_inline u64
272 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
273 			u64 flags, struct perf_raw_record *raw)
274 {
275 	struct bpf_array *array = container_of(map, struct bpf_array, map);
276 	struct perf_sample_data *sd = this_cpu_ptr(&bpf_sd);
277 	unsigned int cpu = smp_processor_id();
278 	u64 index = flags & BPF_F_INDEX_MASK;
279 	struct bpf_event_entry *ee;
280 	struct perf_event *event;
281 
282 	if (index == BPF_F_CURRENT_CPU)
283 		index = cpu;
284 	if (unlikely(index >= array->map.max_entries))
285 		return -E2BIG;
286 
287 	ee = READ_ONCE(array->ptrs[index]);
288 	if (!ee)
289 		return -ENOENT;
290 
291 	event = ee->event;
292 	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
293 		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
294 		return -EINVAL;
295 
296 	if (unlikely(event->oncpu != cpu))
297 		return -EOPNOTSUPP;
298 
299 	perf_sample_data_init(sd, 0, 0);
300 	sd->raw = raw;
301 	perf_event_output(event, sd, regs);
302 	return 0;
303 }
304 
305 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
306 	   u64, flags, void *, data, u64, size)
307 {
308 	struct perf_raw_record raw = {
309 		.frag = {
310 			.size = size,
311 			.data = data,
312 		},
313 	};
314 
315 	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
316 		return -EINVAL;
317 
318 	return __bpf_perf_event_output(regs, map, flags, &raw);
319 }
320 
321 static const struct bpf_func_proto bpf_perf_event_output_proto = {
322 	.func		= bpf_perf_event_output,
323 	.gpl_only	= true,
324 	.ret_type	= RET_INTEGER,
325 	.arg1_type	= ARG_PTR_TO_CTX,
326 	.arg2_type	= ARG_CONST_MAP_PTR,
327 	.arg3_type	= ARG_ANYTHING,
328 	.arg4_type	= ARG_PTR_TO_MEM,
329 	.arg5_type	= ARG_CONST_SIZE,
330 };
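/*
 * Illustrative sketch (not part of bpf_trace.c): pushing a record to user
 * space.  Per the checks in __bpf_perf_event_output(), the map slot must
 * hold a PERF_TYPE_SOFTWARE/PERF_COUNT_SW_BPF_OUTPUT event opened for the
 * CPU the program runs on, which is why BPF_F_CURRENT_CPU is the common
 * flags value; user space reads the samples from the mmap'ed perf ring
 * buffers.  The bpf_map_def convention, the pid/tgid helper declaration and
 * the rest of the scaffolding are as in the earlier sketches.
 */
struct bpf_map_def SEC("maps") events = {
	.type		= BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size	= sizeof(int),
	.value_size	= sizeof(__u32),
	.max_entries	= 64,
};

struct open_event {
	__u64 tgid;
	__u64 ktime_ns;
};

static int (*bpf_perf_event_output)(void *ctx, void *map, unsigned long long flags,
				    void *data, int size) =
	(void *) BPF_FUNC_perf_event_output;
static unsigned long long (*bpf_ktime_get_ns)(void) =
	(void *) BPF_FUNC_ktime_get_ns;

SEC("kprobe/do_sys_open")
int emit_open_event(struct pt_regs *ctx)
{
	struct open_event e = {
		.tgid		= bpf_get_current_pid_tgid() >> 32,
		.ktime_ns	= bpf_ktime_get_ns(),
	};

	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
	return 0;
}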
331 
332 static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
333 
334 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
335 		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
336 {
337 	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
338 	struct perf_raw_frag frag = {
339 		.copy		= ctx_copy,
340 		.size		= ctx_size,
341 		.data		= ctx,
342 	};
343 	struct perf_raw_record raw = {
344 		.frag = {
345 			{
346 				.next	= ctx_size ? &frag : NULL,
347 			},
348 			.size	= meta_size,
349 			.data	= meta,
350 		},
351 	};
352 
353 	perf_fetch_caller_regs(regs);
354 
355 	return __bpf_perf_event_output(regs, map, flags, &raw);
356 }
357 
358 BPF_CALL_0(bpf_get_current_task)
359 {
360 	return (long) current;
361 }
362 
363 static const struct bpf_func_proto bpf_get_current_task_proto = {
364 	.func		= bpf_get_current_task,
365 	.gpl_only	= true,
366 	.ret_type	= RET_INTEGER,
367 };
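/*
 * Illustrative sketch (not part of bpf_trace.c): the helper only hands back
 * the current task_struct pointer as an integer; every dereference of it has
 * to go through bpf_probe_read().  Assumes the object is built with kernel
 * include paths so that struct task_struct's layout is visible (as
 * samples/bpf does); scaffolding as in the earlier sketches.
 */
#include <linux/sched.h>

static unsigned long long (*bpf_get_current_task)(void) =
	(void *) BPF_FUNC_get_current_task;

SEC("kprobe/do_sys_open")
int read_parent_tgid(struct pt_regs *ctx)
{
	struct task_struct *task = (struct task_struct *) bpf_get_current_task();
	struct task_struct *parent = NULL;
	int ptgid = 0;

	/* each pointer chase through task_struct is an unsafe read */
	bpf_probe_read(&parent, sizeof(parent), &task->real_parent);
	bpf_probe_read(&ptgid, sizeof(ptgid), &parent->tgid);
	return 0;
}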
368 
369 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
370 {
371 	struct bpf_array *array = container_of(map, struct bpf_array, map);
372 	struct cgroup *cgrp;
373 
374 	if (unlikely(in_interrupt()))
375 		return -EINVAL;
376 	if (unlikely(idx >= array->map.max_entries))
377 		return -E2BIG;
378 
379 	cgrp = READ_ONCE(array->ptrs[idx]);
380 	if (unlikely(!cgrp))
381 		return -EAGAIN;
382 
383 	return task_under_cgroup_hierarchy(current, cgrp);
384 }
385 
386 static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
387 	.func           = bpf_current_task_under_cgroup,
388 	.gpl_only       = false,
389 	.ret_type       = RET_INTEGER,
390 	.arg1_type      = ARG_CONST_MAP_PTR,
391 	.arg2_type      = ARG_ANYTHING,
392 };
393 
394 BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
395 	   const void *, unsafe_ptr)
396 {
397 	int ret;
398 
399 	/*
400 	 * The strncpy_from_unsafe() call will likely not fill the entire
401 	 * buffer, but that's okay in this circumstance as we're probing
402 	 * arbitrary memory anyway similar to bpf_probe_read() and might
403 	 * as well probe the stack. Thus, memory is explicitly cleared
404 	 * only in error case, so that improper users ignoring return
405 	 * code altogether don't copy garbage; otherwise length of string
406 	 * is returned that can be used for bpf_perf_event_output() et al.
407 	 */
408 	ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
409 	if (unlikely(ret < 0))
410 		memset(dst, 0, size);
411 
412 	return ret;
413 }
414 
415 static const struct bpf_func_proto bpf_probe_read_str_proto = {
416 	.func		= bpf_probe_read_str,
417 	.gpl_only	= true,
418 	.ret_type	= RET_INTEGER,
419 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
420 	.arg2_type	= ARG_CONST_SIZE,
421 	.arg3_type	= ARG_ANYTHING,
422 };
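/*
 * Illustrative sketch (not part of bpf_trace.c): unlike bpf_probe_read(),
 * the return value here is the number of bytes copied including the
 * terminating NUL (or a negative errno, with dst zeroed), so the program
 * knows how much of the buffer is meaningful, e.g. when handing it to
 * bpf_perf_event_output() as in the earlier sketch.  do_sys_open()'s second
 * argument is its filename pointer; PT_REGS_PARM2() and the rest of the
 * scaffolding are assumed from bpf_helpers.h as before.
 */
static int (*bpf_probe_read_str)(void *dst, int size, const void *unsafe_ptr) =
	(void *) BPF_FUNC_probe_read_str;

SEC("kprobe/do_sys_open")
int log_filename(struct pt_regs *ctx)
{
	char buf[128];
	int len;

	len = bpf_probe_read_str(buf, sizeof(buf), (void *) PT_REGS_PARM2(ctx));
	if (len <= 0)
		return 0;	/* fault: buf was zeroed, len is a negative errno */

	/* len counts the NUL, so buf[0..len-1] is the valid part of the string */
	return 0;
}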
423 
424 static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
425 {
426 	switch (func_id) {
427 	case BPF_FUNC_map_lookup_elem:
428 		return &bpf_map_lookup_elem_proto;
429 	case BPF_FUNC_map_update_elem:
430 		return &bpf_map_update_elem_proto;
431 	case BPF_FUNC_map_delete_elem:
432 		return &bpf_map_delete_elem_proto;
433 	case BPF_FUNC_probe_read:
434 		return &bpf_probe_read_proto;
435 	case BPF_FUNC_ktime_get_ns:
436 		return &bpf_ktime_get_ns_proto;
437 	case BPF_FUNC_tail_call:
438 		return &bpf_tail_call_proto;
439 	case BPF_FUNC_get_current_pid_tgid:
440 		return &bpf_get_current_pid_tgid_proto;
441 	case BPF_FUNC_get_current_task:
442 		return &bpf_get_current_task_proto;
443 	case BPF_FUNC_get_current_uid_gid:
444 		return &bpf_get_current_uid_gid_proto;
445 	case BPF_FUNC_get_current_comm:
446 		return &bpf_get_current_comm_proto;
447 	case BPF_FUNC_trace_printk:
448 		return bpf_get_trace_printk_proto();
449 	case BPF_FUNC_get_smp_processor_id:
450 		return &bpf_get_smp_processor_id_proto;
451 	case BPF_FUNC_get_numa_node_id:
452 		return &bpf_get_numa_node_id_proto;
453 	case BPF_FUNC_perf_event_read:
454 		return &bpf_perf_event_read_proto;
455 	case BPF_FUNC_probe_write_user:
456 		return bpf_get_probe_write_proto();
457 	case BPF_FUNC_current_task_under_cgroup:
458 		return &bpf_current_task_under_cgroup_proto;
459 	case BPF_FUNC_get_prandom_u32:
460 		return &bpf_get_prandom_u32_proto;
461 	case BPF_FUNC_probe_read_str:
462 		return &bpf_probe_read_str_proto;
463 	default:
464 		return NULL;
465 	}
466 }
467 
468 static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
469 {
470 	switch (func_id) {
471 	case BPF_FUNC_perf_event_output:
472 		return &bpf_perf_event_output_proto;
473 	case BPF_FUNC_get_stackid:
474 		return &bpf_get_stackid_proto;
475 	default:
476 		return tracing_func_proto(func_id);
477 	}
478 }
479 
480 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
481 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
482 					enum bpf_reg_type *reg_type, int *ctx_field_size)
483 {
484 	if (off < 0 || off >= sizeof(struct pt_regs))
485 		return false;
486 	if (type != BPF_READ)
487 		return false;
488 	if (off % size != 0)
489 		return false;
490 	/*
491 	 * Assertion for 32 bit to make sure last 8 byte access
492 	 * (BPF_DW) to the last 4 byte member is disallowed.
493 	 */
494 	if (off + size > sizeof(struct pt_regs))
495 		return false;
496 
497 	return true;
498 }
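/*
 * Illustrative examples (not part of this file) of what the check above
 * means for a kprobe program's ctx, using x86-64 field names as an
 * assumption:
 *
 *	__u64 ip = ctx->ip;	accepted: aligned read inside struct pt_regs
 *	ctx->ip = 0;		rejected at load time: type != BPF_READ
 *	an 8-byte read starting 4 bytes before the end of struct pt_regs
 *				rejected: off + size > sizeof(struct pt_regs)
 *				(and off % size != 0)
 */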
499 
500 const struct bpf_verifier_ops kprobe_prog_ops = {
501 	.get_func_proto  = kprobe_prog_func_proto,
502 	.is_valid_access = kprobe_prog_is_valid_access,
503 };
504 
505 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
506 	   u64, flags, void *, data, u64, size)
507 {
508 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
509 
510 	/*
511 	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
512 	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
513 	 * from there and call the same bpf_perf_event_output() helper inline.
514 	 */
515 	return ____bpf_perf_event_output(regs, map, flags, data, size);
516 }
517 
518 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
519 	.func		= bpf_perf_event_output_tp,
520 	.gpl_only	= true,
521 	.ret_type	= RET_INTEGER,
522 	.arg1_type	= ARG_PTR_TO_CTX,
523 	.arg2_type	= ARG_CONST_MAP_PTR,
524 	.arg3_type	= ARG_ANYTHING,
525 	.arg4_type	= ARG_PTR_TO_MEM,
526 	.arg5_type	= ARG_CONST_SIZE,
527 };
528 
529 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
530 	   u64, flags)
531 {
532 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
533 
534 	/*
535 	 * Same comment as in bpf_perf_event_output_tp(), only that this time
536 	 * the other helper's function body cannot be inlined due to being
537 	 * external, thus we need to call raw helper function.
538 	 */
539 	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
540 			       flags, 0, 0);
541 }
542 
543 static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
544 	.func		= bpf_get_stackid_tp,
545 	.gpl_only	= true,
546 	.ret_type	= RET_INTEGER,
547 	.arg1_type	= ARG_PTR_TO_CTX,
548 	.arg2_type	= ARG_CONST_MAP_PTR,
549 	.arg3_type	= ARG_ANYTHING,
550 };
551 
552 static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
553 {
554 	switch (func_id) {
555 	case BPF_FUNC_perf_event_output:
556 		return &bpf_perf_event_output_proto_tp;
557 	case BPF_FUNC_get_stackid:
558 		return &bpf_get_stackid_proto_tp;
559 	default:
560 		return tracing_func_proto(func_id);
561 	}
562 }
563 
564 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
565 				    enum bpf_reg_type *reg_type, int *ctx_field_size)
566 {
567 	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
568 		return false;
569 	if (type != BPF_READ)
570 		return false;
571 	if (off % size != 0)
572 		return false;
573 
574 	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
575 	return true;
576 }
577 
578 const struct bpf_verifier_ops tracepoint_prog_ops = {
579 	.get_func_proto  = tp_prog_func_proto,
580 	.is_valid_access = tp_prog_is_valid_access,
581 };
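/*
 * Illustrative sketch (not part of bpf_trace.c): a tracepoint program's ctx
 * is the perf tracepoint buffer described above, so the first 8 bytes (the
 * hidden pt_regs pointer) are off limits -- which is exactly what
 * tp_prog_is_valid_access() enforces with its off < sizeof(void *) check.
 * Programs therefore mirror the event layout with a leading placeholder
 * field; the real field offsets must be taken from the event's
 * .../tracing/events/.../format file (the layout below is only an example).
 * Scaffolding as in the earlier sketches.
 */
struct sys_enter_open_args {
	unsigned long long unused;	/* first 8 bytes are not readable */
	long syscall_nr;
	long filename_ptr;
	long flags;
	long mode;
};

SEC("tracepoint/syscalls/sys_enter_open")
int count_opens(struct sys_enter_open_args *ctx)
{
	/* offset of 'flags' is >= 8, so this read passes the check above */
	return ctx->flags != 0;
}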
582 
583 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
584 				    enum bpf_reg_type *reg_type, int *ctx_field_size)
585 {
586 	int sample_period_off;
587 
588 	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
589 		return false;
590 	if (type != BPF_READ)
591 		return false;
592 	if (off % size != 0)
593 		return false;
594 
595 	/* permit 1, 2, 4 byte narrower and 8 normal read access to sample_period */
596 	sample_period_off = offsetof(struct bpf_perf_event_data, sample_period);
597 	if (off >= sample_period_off && off < sample_period_off + sizeof(__u64)) {
598 		*ctx_field_size = 8;
599 #ifdef __LITTLE_ENDIAN
600 		return (off & 0x7) == 0 && size <= 8 && (size & (size - 1)) == 0;
601 #else
602 		return ((off & 0x7) + size) == 8 && size <= 8 && (size & (size - 1)) == 0;
603 #endif
604 	} else {
605 		if (size != sizeof(long))
606 			return false;
607 	}
608 	return true;
609 }
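/*
 * Illustrative sketch (not part of bpf_trace.c): context access from a
 * BPF_PROG_TYPE_PERF_EVENT program, attached to a perf event with
 * PERF_EVENT_IOC_SET_BPF.  Per the check above, sample_period may be read
 * with 1/2/4/8-byte loads while everything else (the saved registers) must
 * be read at the natural long width; the register field name below is
 * x86-64 specific and only an example.  Scaffolding as in the earlier
 * sketches.
 */
#include <uapi/linux/bpf_perf_event.h>

SEC("perf_event")
int on_sample(struct bpf_perf_event_data *ctx)
{
	__u64 period = ctx->sample_period;	/* rewritten by pe_prog_convert_ctx_access() */
	__u64 ip = ctx->regs.ip;		/* long-sized read of a pt_regs field */

	return period > 0 && ip != 0;
}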
610 
611 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
612 				      const struct bpf_insn *si,
613 				      struct bpf_insn *insn_buf,
614 				      struct bpf_prog *prog)
615 {
616 	struct bpf_insn *insn = insn_buf;
617 
618 	switch (si->off) {
619 	case offsetof(struct bpf_perf_event_data, sample_period):
620 		BUILD_BUG_ON(FIELD_SIZEOF(struct perf_sample_data, period) != sizeof(u64));
621 
622 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
623 						       data), si->dst_reg, si->src_reg,
624 				      offsetof(struct bpf_perf_event_data_kern, data));
625 		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
626 				      offsetof(struct perf_sample_data, period));
627 		break;
628 	default:
629 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
630 						       regs), si->dst_reg, si->src_reg,
631 				      offsetof(struct bpf_perf_event_data_kern, regs));
632 		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
633 				      si->off);
634 		break;
635 	}
636 
637 	return insn - insn_buf;
638 }
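/*
 * Illustrative expansion (not part of this file) of the rewrite above for a
 * program load of ctx->sample_period, in rough BPF assembler terms:
 *
 *	before:	rD = *(u64 *)(rS + offsetof(struct bpf_perf_event_data, sample_period))
 *	after:	rD = *(u64 *)(rS + offsetof(struct bpf_perf_event_data_kern, data))
 *		rD = *(u64 *)(rD + offsetof(struct perf_sample_data, period))
 *
 * Any other offset is redirected the same way through the 'regs' pointer and
 * then loaded at the original offset with a long-sized access, so the
 * program-visible struct bpf_perf_event_data is never materialized.
 */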
639 
640 const struct bpf_verifier_ops perf_event_prog_ops = {
641 	.get_func_proto		= tp_prog_func_proto,
642 	.is_valid_access	= pe_prog_is_valid_access,
643 	.convert_ctx_access	= pe_prog_convert_ctx_access,
644 };
645