xref: /linux/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c (revision f6d08d9d8543c8ee494b307804b28e2750ffedb9)
// SPDX-License-Identifier: GPL-2.0

#include <linux/bpf.h>
#include "bpf_helpers.h"

/* Permit pretty deep stack traces */
#define MAX_STACK_RAWTP 100
struct stack_trace_t {
	int pid;
	int kern_stack_size;
	int user_stack_size;
	int user_stack_buildid_size;
	__u64 kern_stack[MAX_STACK_RAWTP];
	__u64 user_stack[MAX_STACK_RAWTP];
	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};

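/* Rough size math for one value of this type (assuming the 32-byte
 * struct bpf_stack_build_id layout: 4-byte status, 20-byte build id,
 * 8-byte offset/ip union):
 *   4 * sizeof(int)                     =   16 bytes
 *   2 * MAX_STACK_RAWTP * sizeof(__u64) = 1600 bytes
 *   MAX_STACK_RAWTP * 32                = 3200 bytes
 * i.e. about 4816 bytes per CPU once stored in stackdata_map below.
 */
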
struct {
	__u32 type;
	__u32 max_entries;
	__u32 key_size;
	__u32 value_size;
} perfmap SEC(".maps") = {
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.max_entries = 2,
	.key_size = sizeof(int),
	.value_size = sizeof(__u32),
};

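/* Each slot in a BPF_MAP_TYPE_PERF_EVENT_ARRAY holds a perf event fd
 * that user space must install before samples can be delivered. A
 * minimal sketch of that setup (hypothetical fd variables; the selftest
 * harness normally does this for us):
 *
 *   int idx = 0;
 *   // pmu_fd from perf_event_open(), perfmap_fd from libbpf
 *   bpf_map_update_elem(perfmap_fd, &idx, &pmu_fd, BPF_ANY);
 *
 * bpf_prog1 below then emits samples to slot 0 via
 * bpf_perf_event_output().
 */
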
struct {
	__u32 type;
	__u32 max_entries;
	__u32 *key;
	struct stack_trace_t *value;
} stackdata_map SEC(".maps") = {
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.max_entries = 1,
};

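/* For comparison, a roughly equivalent definition in the older,
 * pre-BTF map style (a sketch only; the BTF-style declaration above
 * additionally exposes the key/value types to libbpf and the verifier):
 *
 *   struct bpf_map_def SEC("maps") stackdata_map = {
 *       .type        = BPF_MAP_TYPE_PERCPU_ARRAY,
 *       .key_size    = sizeof(__u32),
 *       .value_size  = sizeof(struct stack_trace_t),
 *       .max_entries = 1,
 *   };
 */
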
/* Allocate twice the per-cpu space actually needed. For the code below:
 *   usize = bpf_get_stack(ctx, raw_data, max_len, BPF_F_USER_STACK);
 *   if (usize < 0)
 *     return 0;
 *   ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0);
 *
 * If we had value_size = MAX_STACK_RAWTP * sizeof(__u64), the verifier
 * would complain that the access "raw_data + usize" with size
 * "max_len - usize" may be out of bounds. The maximum "raw_data + usize"
 * is "raw_data + max_len" and the maximum "max_len - usize" is
 * "max_len", so the verifier concludes that the worst-case buffer
 * access range is "raw_data[0 ... max_len * 2 - 1]" and rejects the
 * program.
 *
 * Doubling the to-be-used max buffer size sidesteps this verifier
 * limitation and avoids convoluted massaging of the C code. It is an
 * acceptable workaround since the map has only one entry.
 */
struct {
	__u32 type;
	__u32 max_entries;
	__u32 *key;
	__u64 (*value)[2 * MAX_STACK_RAWTP];
} rawdata_map SEC(".maps") = {
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.max_entries = 1,
};

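/* Concretely, with MAX_STACK_RAWTP = 100:
 *   max_len    = 100 * sizeof(__u64)     =  800 bytes (what bpf_prog1 uses)
 *   value size = 2 * 100 * sizeof(__u64) = 1600 bytes (what is allocated)
 * so even the verifier's worst case, raw_data[0 ... 2 * max_len - 1],
 * stays inside the map value.
 */
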
SEC("tracepoint/raw_syscalls/sys_enter")
int bpf_prog1(void *ctx)
{
	int max_len, max_buildid_len, usize, ksize, total_size;
	struct stack_trace_t *data;
	void *raw_data;
	__u32 key = 0;

	data = bpf_map_lookup_elem(&stackdata_map, &key);
	if (!data)
		return 0;

	max_len = MAX_STACK_RAWTP * sizeof(__u64);
	max_buildid_len = MAX_STACK_RAWTP * sizeof(struct bpf_stack_build_id);
	/* The helper returns tgid << 32 | pid; truncating to int keeps pid. */
	data->pid = bpf_get_current_pid_tgid();
	/* bpf_get_stack() returns the number of bytes copied, or a negative
	 * error; recording the sizes lets user space validate each trace.
	 */
	data->kern_stack_size = bpf_get_stack(ctx, data->kern_stack,
					      max_len, 0);
	data->user_stack_size = bpf_get_stack(ctx, data->user_stack, max_len,
					      BPF_F_USER_STACK);
	data->user_stack_buildid_size = bpf_get_stack(
		ctx, data->user_stack_buildid, max_buildid_len,
		BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
	bpf_perf_event_output(ctx, &perfmap, 0, data, sizeof(*data));

	/* write both user and kernel stacks to the same buffer */
	raw_data = bpf_map_lookup_elem(&rawdata_map, &key);
	if (!raw_data)
		return 0;

	usize = bpf_get_stack(ctx, raw_data, max_len, BPF_F_USER_STACK);
	if (usize < 0)
		return 0;

	ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0);
	if (ksize < 0)
		return 0;

	total_size = usize + ksize;
	if (total_size > 0 && total_size <= max_len)
		bpf_perf_event_output(ctx, &perfmap, 0, raw_data, total_size);

	return 0;
}

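/* A minimal sketch of how user space might read stackdata_map back out
 * (hypothetical fd/variable names; bpf_num_possible_cpus() is from the
 * selftests' bpf_util.h, and a per-cpu map lookup returns one value per
 * possible CPU, so the buffer must hold all of them):
 *
 *   __u32 key = 0;
 *   int ncpus = bpf_num_possible_cpus();
 *   struct stack_trace_t *values = calloc(ncpus, sizeof(*values));
 *
 *   if (bpf_map_lookup_elem(stackdata_fd, &key, values) == 0)
 *       for (int i = 0; i < ncpus; i++)
 *           printf("cpu %d: pid %d\n", i, values[i].pid);
 */
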
char _license[] SEC("license") = "GPL";
__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */