xref: /linux/tools/bpf/bpftool/skeleton/profiler.bpf.c (revision 95298d63c67673c654c08952672d016212b26054)
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include "profiler.h"
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

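/*
 * BPF side of "bpftool prog profile": a fentry/fexit pair is attached to
 * the profiled program, perf counters are read on entry and again on exit,
 * and the per-metric deltas are accumulated for user space to dump.
 */
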
/* map of perf event fds, num_cpu * num_metric entries */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(int));
} events SEC(".maps");

/* readings at fentry */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(struct bpf_perf_event_value));
} fentry_readings SEC(".maps");

/* accumulated readings */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(struct bpf_perf_event_value));
} accum_readings SEC(".maps");

/* sample counts, one per cpu */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(u64));
} counts SEC(".maps");

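/*
 * Set by user space before the skeleton is loaded; as const volatile
 * globals they live in .rodata, so the loaded values are visible to the
 * verifier.
 */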
const volatile __u32 num_cpu = 1;
const volatile __u32 num_metric = 1;
#define MAX_NUM_MATRICS 4

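/*
 * "XXX" in the section name is a placeholder; bpftool sets the real attach
 * target before loading. On entry, snapshot each metric's counter for this
 * CPU into fentry_readings. Metric i of CPU c sits at index c + i * num_cpu
 * in the events map, hence the key += num_cpu stride below.
 */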
40 SEC("fentry/XXX")
41 int BPF_PROG(fentry_XXX)
42 {
43 	struct bpf_perf_event_value *ptrs[MAX_NUM_MATRICS];
44 	u32 key = bpf_get_smp_processor_id();
45 	u32 i;
46 
47 	/* look up before reading, to reduce error */
48 	for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
49 		u32 flag = i;
50 
51 		ptrs[i] = bpf_map_lookup_elem(&fentry_readings, &flag);
52 		if (!ptrs[i])
53 			return 0;
54 	}
55 
56 	for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
57 		struct bpf_perf_event_value reading;
58 		int err;
59 
60 		err = bpf_perf_event_read_value(&events, key, &reading,
61 						sizeof(reading));
62 		if (err)
63 			return 0;
64 		*(ptrs[i]) = reading;
65 		key += num_cpu;
66 	}
67 
68 	return 0;
69 }
70 
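/*
 * Compute the delta between the fexit reading ("after") and the reading
 * taken at fentry, then fold it into the accumulated totals for metric id.
 */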
static inline void
fexit_update_maps(u32 id, struct bpf_perf_event_value *after)
{
	struct bpf_perf_event_value *before, diff;

	before = bpf_map_lookup_elem(&fentry_readings, &id);
	/* only account samples with a valid fentry_reading */
	if (before && before->counter) {
		struct bpf_perf_event_value *accum;

		diff.counter = after->counter - before->counter;
		diff.enabled = after->enabled - before->enabled;
		diff.running = after->running - before->running;

		accum = bpf_map_lookup_elem(&accum_readings, &id);
		if (accum) {
			accum->counter += diff.counter;
			accum->enabled += diff.enabled;
			accum->running += diff.running;
		}
	}
}

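/*
 * Mirror of fentry_XXX: read all metrics for this CPU first, then bump the
 * per-CPU sample count and accumulate the deltas.
 */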
94 SEC("fexit/XXX")
95 int BPF_PROG(fexit_XXX)
96 {
97 	struct bpf_perf_event_value readings[MAX_NUM_MATRICS];
98 	u32 cpu = bpf_get_smp_processor_id();
99 	u32 i, one = 1, zero = 0;
100 	int err;
101 	u64 *count;
102 
103 	/* read all events before updating the maps, to reduce error */
104 	for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
105 		err = bpf_perf_event_read_value(&events, cpu + i * num_cpu,
106 						readings + i, sizeof(*readings));
107 		if (err)
108 			return 0;
109 	}
110 	count = bpf_map_lookup_elem(&counts, &zero);
111 	if (count) {
112 		*count += 1;
113 		for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++)
114 			fexit_update_maps(i, &readings[i]);
115 	}
116 	return 0;
117 }
118 
119 char LICENSE[] SEC("license") = "GPL";
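
/*
 * Rough sketch of how user space might drive this skeleton, loosely based
 * on bpftool's "prog profile" flow; the names below (profile_tgt_fd,
 * profile_tgt_name, num_cpu, num_metric) are illustrative, not the exact
 * bpftool code:
 *
 *	struct profiler_bpf *obj = profiler_bpf__open();
 *
 *	obj->rodata->num_cpu = num_cpu;
 *	obj->rodata->num_metric = num_metric;
 *	bpf_map__resize(obj->maps.events, num_metric * num_cpu);
 *	bpf_map__resize(obj->maps.fentry_readings, num_metric);
 *	bpf_map__resize(obj->maps.accum_readings, num_metric);
 *
 *	bpf_program__set_attach_target(obj->progs.fentry_XXX,
 *				       profile_tgt_fd, profile_tgt_name);
 *	bpf_program__set_attach_target(obj->progs.fexit_XXX,
 *				       profile_tgt_fd, profile_tgt_name);
 *
 *	profiler_bpf__load(obj);
 *	(open perf events and store their fds in the events map here)
 *	profiler_bpf__attach(obj);
 */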