// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2020 Facebook
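/*
 * BPF side of bpftool's "prog profile" command: fentry/fexit programs
 * that snapshot perf counters around each invocation of a target BPF
 * program and accumulate the per-metric deltas.
 */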
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

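/*
 * Local mirror of the kernel's struct bpf_perf_event_value. The ___local
 * suffix keeps it from clashing with the vmlinux.h definition, and
 * preserve_access_index lets libbpf CO-RE-relocate the field offsets
 * against the running kernel's layout.
 */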
struct bpf_perf_event_value___local {
	__u64 counter;
	__u64 enabled;
	__u64 running;
} __attribute__((preserve_access_index));

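/*
 * None of the maps below declare max_entries; the assumption is that the
 * loader (bpftool) sizes them before load, e.g. num_cpu * num_metric
 * entries for the events map.
 */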
/* map of perf event fds, num_cpu * num_metric entries */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(int));
} events SEC(".maps");

/* readings at fentry */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(struct bpf_perf_event_value___local));
} fentry_readings SEC(".maps");

/* accumulated readings */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(struct bpf_perf_event_value___local));
} accum_readings SEC(".maps");

/* sample counts, one per cpu */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(u64));
} counts SEC(".maps");

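/*
 * Tunables in .rodata: const volatile globals whose values the loader
 * fills in before the object is loaded, so the verifier sees them as
 * constants.
 */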
const volatile __u32 num_cpu = 1;
const volatile __u32 num_metric = 1;
#define MAX_NUM_METRICS 4

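/*
 * "XXX" is a placeholder section name; the real attach target (the
 * profiled program's function) is set by the loader at load time.
 */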
SEC("fentry/XXX")
int BPF_PROG(fentry_XXX)
{
	struct bpf_perf_event_value___local *ptrs[MAX_NUM_METRICS];
	u32 key = bpf_get_smp_processor_id();
	u32 i;

	/* do all map lookups before reading the counters, so the reads
	 * happen close together and measurement error stays small
	 */
	for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) {
		u32 flag = i;

		ptrs[i] = bpf_map_lookup_elem(&fentry_readings, &flag);
		if (!ptrs[i])
			return 0;
	}

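	/* events map layout: entry i * num_cpu + cpu holds the fd for
	 * metric i on this cpu, hence key += num_cpu per iteration
	 */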
	for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) {
		struct bpf_perf_event_value___local reading;
		int err;

		err = bpf_perf_event_read_value(&events, key, (void *)&reading,
						sizeof(reading));
		if (err)
			return 0;
		*(ptrs[i]) = reading;
		key += num_cpu;
	}

	return 0;
}

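/*
 * Accumulate the delta between the fentry and fexit readings for one
 * metric. The enabled/running times come along so userspace can scale
 * counters that were multiplexed on the PMU.
 */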
static inline void
fexit_update_maps(u32 id, struct bpf_perf_event_value___local *after)
{
	struct bpf_perf_event_value___local *before, diff;

	before = bpf_map_lookup_elem(&fentry_readings, &id);
	/* only account samples with a valid fentry_reading */
	if (before && before->counter) {
		struct bpf_perf_event_value___local *accum;

		diff.counter = after->counter - before->counter;
		diff.enabled = after->enabled - before->enabled;
		diff.running = after->running - before->running;

		accum = bpf_map_lookup_elem(&accum_readings, &id);
		if (accum) {
			accum->counter += diff.counter;
			accum->enabled += diff.enabled;
			accum->running += diff.running;
		}
	}
}

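/* fexit counterpart: read all metrics, bump this cpu's sample count,
 * then fold the deltas into accum_readings
 */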
SEC("fexit/XXX")
int BPF_PROG(fexit_XXX)
{
	struct bpf_perf_event_value___local readings[MAX_NUM_METRICS];
	u32 cpu = bpf_get_smp_processor_id();
	u32 i, zero = 0;
	int err;
	u64 *count;

	/* read all events before updating the maps, to keep the readings
	 * close together and reduce measurement error
	 */
	for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) {
		err = bpf_perf_event_read_value(&events, cpu + i * num_cpu,
						(void *)(readings + i),
						sizeof(*readings));
		if (err)
			return 0;
	}
	count = bpf_map_lookup_elem(&counts, &zero);
	if (count) {
		*count += 1;
		for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++)
			fexit_update_maps(i, &readings[i]);
	}
	return 0;
}

char LICENSE[] SEC("license") = "Dual BSD/GPL";