// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */
/* This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <errno.h>
#include <fcntl.h>
#include <bpf/libbpf.h>
#include <poll.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>

#include <bpf/bpf.h>

#include "main.h"

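/* Number of memory pages to allocate for each per-CPU ring buffer. */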
#define MMAP_PAGE_CNT	16

static volatile bool stop;

struct event_ring_info {
	int fd;
	int key;
	unsigned int cpu;
	void *mem;
};

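/* Layout of PERF_RECORD_SAMPLE records for PERF_SAMPLE_TIME | PERF_SAMPLE_RAW. */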
struct perf_event_sample {
	struct perf_event_header header;
	__u64 time;
	__u32 size;
	unsigned char data[];
};

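/* Layout of PERF_RECORD_LOST records reporting dropped samples. */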
struct perf_event_lost {
	struct perf_event_header header;
	__u64 id;
	__u64 lost;
};

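/* Signal handler: ask the polling loop below to terminate. */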
static void int_exit(int signo)
{
	fprintf(stderr, "Stopping...\n");
	stop = true;
}

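/* Private data handed to the print_bpf_output() callback. */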
struct event_pipe_ctx {
	bool all_cpus;
	int cpu;
	int idx;
};

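/*
 * Per-record callback invoked by perf_buffer__poll(): prints
 * PERF_RECORD_SAMPLE payloads and PERF_RECORD_LOST counters, either as
 * JSON objects or as plain text.
 */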
static enum bpf_perf_event_ret
print_bpf_output(void *private_data, int cpu, struct perf_event_header *event)
{
	struct perf_event_sample *e = container_of(event,
						   struct perf_event_sample,
						   header);
	struct perf_event_lost *lost = container_of(event,
						    struct perf_event_lost,
						    header);
	struct event_pipe_ctx *ctx = private_data;
	int idx = ctx->all_cpus ? cpu : ctx->idx;

	if (json_output) {
		jsonw_start_object(json_wtr);
		jsonw_name(json_wtr, "type");
		jsonw_uint(json_wtr, e->header.type);
		jsonw_name(json_wtr, "cpu");
		jsonw_uint(json_wtr, cpu);
		jsonw_name(json_wtr, "index");
		jsonw_uint(json_wtr, idx);
		if (e->header.type == PERF_RECORD_SAMPLE) {
			jsonw_name(json_wtr, "timestamp");
			jsonw_uint(json_wtr, e->time);
			jsonw_name(json_wtr, "data");
			print_data_json(e->data, e->size);
		} else if (e->header.type == PERF_RECORD_LOST) {
			jsonw_name(json_wtr, "lost");
			jsonw_start_object(json_wtr);
			jsonw_name(json_wtr, "id");
			jsonw_uint(json_wtr, lost->id);
			jsonw_name(json_wtr, "count");
			jsonw_uint(json_wtr, lost->lost);
			jsonw_end_object(json_wtr);
		}
		jsonw_end_object(json_wtr);
	} else {
		if (e->header.type == PERF_RECORD_SAMPLE) {
			printf("== @%lld.%09lld CPU: %d index: %d =====\n",
			       e->time / 1000000000ULL, e->time % 1000000000ULL,
			       cpu, idx);
			fprint_hex(stdout, e->data, e->size, " ");
			printf("\n");
		} else if (e->header.type == PERF_RECORD_LOST) {
			printf("lost %lld events\n", lost->lost);
		} else {
			printf("unknown event type=%d size=%d\n",
			       e->header.type, e->header.size);
		}
	}

	return LIBBPF_PERF_EVENT_CONT;
}

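/*
 * Handler for "bpftool map event_pipe": dump data pushed into a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map, for all CPUs or for a single
 * cpu/index pair.
 */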
int do_event_pipe(int argc, char **argv)
{
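	/* Software event carrying bpf_perf_event_output() data: sample on
	 * every event and wake the reader up immediately.
	 */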
	struct perf_event_attr perf_attr = {
		.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_TIME,
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_BPF_OUTPUT,
		.sample_period = 1,
		.wakeup_events = 1,
	};
	struct bpf_map_info map_info = {};
	LIBBPF_OPTS(perf_buffer_raw_opts, opts);
	struct event_pipe_ctx ctx = {
		.all_cpus = true,
		.cpu = -1,
		.idx = -1,
	};
	struct perf_buffer *pb;
	__u32 map_info_len;
	int err, map_fd;

	map_info_len = sizeof(map_info);
	map_fd = map_parse_fd_and_info(&argc, &argv, &map_info, &map_info_len);
	if (map_fd < 0)
		return -1;

	if (map_info.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
		p_err("map is not a perf event array");
		goto err_close_map;
	}

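	/* Optional "cpu CPU index INDEX" arguments select a single ring. */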
	while (argc) {
		if (argc < 2) {
			BAD_ARG();
			goto err_close_map;
		}

		if (is_prefix(*argv, "cpu")) {
			char *endptr;

			NEXT_ARG();
			ctx.cpu = strtoul(*argv, &endptr, 0);
			if (*endptr) {
				p_err("can't parse %s as CPU ID", *argv);
				goto err_close_map;
			}

			NEXT_ARG();
		} else if (is_prefix(*argv, "index")) {
			char *endptr;

			NEXT_ARG();
			ctx.idx = strtoul(*argv, &endptr, 0);
			if (*endptr) {
				p_err("can't parse %s as index", *argv);
				goto err_close_map;
			}

			NEXT_ARG();
		} else {
			BAD_ARG();
			goto err_close_map;
		}

		ctx.all_cpus = false;
	}

	if (!ctx.all_cpus) {
		if (ctx.idx == -1 || ctx.cpu == -1) {
			p_err("cpu and index must be specified together");
			goto err_close_map;
		}
	} else {
		ctx.cpu = 0;
		ctx.idx = 0;
	}

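	/* cpu_cnt == 0 lets libbpf open a ring for every online CPU;
	 * otherwise only the cpu/index pair selected above is used.
	 */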
	opts.cpu_cnt = ctx.all_cpus ? 0 : 1;
	opts.cpus = &ctx.cpu;
	opts.map_keys = &ctx.idx;
	pb = perf_buffer__new_raw(map_fd, MMAP_PAGE_CNT, &perf_attr,
				  print_bpf_output, &ctx, &opts);
	err = libbpf_get_error(pb);
	if (err) {
		p_err("failed to create perf buffer: %s (%d)",
		      strerror(-err), err);
		goto err_close_map;
	}

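	/* Exit the polling loop cleanly on interrupt, hangup or termination. */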
	signal(SIGINT, int_exit);
	signal(SIGHUP, int_exit);
	signal(SIGTERM, int_exit);

	if (json_output)
		jsonw_start_array(json_wtr);

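	/* Poll with a 200 ms timeout so the stop flag is rechecked regularly. */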
	while (!stop) {
		err = perf_buffer__poll(pb, 200);
		if (err < 0 && err != -EINTR) {
			p_err("perf buffer polling failed: %s (%d)",
			      strerror(-err), err);
			goto err_close_pb;
		}
	}

	if (json_output)
		jsonw_end_array(json_wtr);

	perf_buffer__free(pb);
	close(map_fd);

	return 0;

err_close_pb:
	perf_buffer__free(pb);
err_close_map:
	close(map_fd);
	return -1;
}