// SPDX-License-Identifier: GPL-2.0
/*
 * Ring-buffer memory mapping tests
 *
 * Copyright (c) 2024 Vincent Donnefort <vdonnefort@google.com>
 */
#include <fcntl.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#include <linux/trace_mmap.h>

#include <sys/mman.h>
#include <sys/ioctl.h>

#include "../user_events/user_events_selftests.h" /* share tracefs setup */
#include "../kselftest_harness.h"

#define TRACEFS_ROOT "/sys/kernel/tracing"

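/*
 * Write @value to the tracefs file at @path.
 * Returns 0 on success, a negative errno value otherwise.
 */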
static int __tracefs_write(const char *path, const char *value)
{
	int fd, ret;

	fd = open(path, O_WRONLY | O_TRUNC);
	if (fd < 0)
		return fd;

	ret = write(fd, value, strlen(value));

	close(fd);

	return ret == -1 ? -errno : 0;
}

static int __tracefs_write_int(const char *path, int value)
{
	char *str;
	int ret;

	if (asprintf(&str, "%d", value) < 0)
		return -1;

	ret = __tracefs_write(path, str);

	free(str);

	return ret;
}

#define tracefs_write_int(path, value) \
	ASSERT_EQ(__tracefs_write_int((path), (value)), 0)

#define tracefs_write(path, value) \
	ASSERT_EQ(__tracefs_write((path), (value)), 0)

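/* Stop tracing and bring tracefs back to a pristine default state. */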
static int tracefs_reset(void)
{
	if (__tracefs_write_int(TRACEFS_ROOT"/tracing_on", 0))
		return -1;
	if (__tracefs_write(TRACEFS_ROOT"/trace", ""))
		return -1;
	if (__tracefs_write(TRACEFS_ROOT"/set_event", ""))
		return -1;
	if (__tracefs_write(TRACEFS_ROOT"/current_tracer", "nop"))
		return -1;

	return 0;
}

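/*
 * A memory-mapped per-CPU ring-buffer: the mmapped meta page and the file
 * descriptor of the trace_pipe_raw file backing it.
 */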
struct tracefs_cpu_map_desc {
	struct trace_buffer_meta	*meta;
	int				cpu_fd;
};

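/*
 * Map the meta page of @cpu's ring-buffer, read-only, through its
 * trace_pipe_raw file. The meta page (struct trace_buffer_meta) sits at the
 * start of the mapping and is assumed here to fit in a single page; the data
 * sub-buffers can then be mapped at offset meta_page_size (see the data_mmap
 * test below). Illustrative sketch only, not exercised as-is by these tests:
 *
 *	data = mmap(NULL, meta->subbuf_size * meta->nr_subbufs, PROT_READ,
 *		    MAP_SHARED, cpu_fd, meta->meta_page_size);
 *	reader = data + meta->reader.id * meta->subbuf_size;
 */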
int tracefs_cpu_map(struct tracefs_cpu_map_desc *desc, int cpu)
{
	int page_size = getpagesize();
	char *cpu_path;
	void *map;

	if (asprintf(&cpu_path,
		     TRACEFS_ROOT"/per_cpu/cpu%d/trace_pipe_raw",
		     cpu) < 0)
		return -ENOMEM;

	desc->cpu_fd = open(cpu_path, O_RDONLY | O_NONBLOCK);
	free(cpu_path);
	if (desc->cpu_fd < 0)
		return -ENODEV;

	map = mmap(NULL, page_size, PROT_READ, MAP_SHARED, desc->cpu_fd, 0);
	if (map == MAP_FAILED) {
		int err = -errno;

		/* Don't leak the per-CPU fd when the mapping is refused. */
		close(desc->cpu_fd);
		return err;
	}

	desc->meta = (struct trace_buffer_meta *)map;

	return 0;
}

void tracefs_cpu_unmap(struct tracefs_cpu_map_desc *desc)
{
	munmap(desc->meta, desc->meta->meta_page_size);
	close(desc->cpu_fd);
}

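/*
 * The "map" fixture maps the ring-buffer of the CPU the test runs on.
 * The variants below re-run each test with 4kB and 8kB sub-buffer sizes.
 */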
FIXTURE(map) {
	struct tracefs_cpu_map_desc	map_desc;
	bool				umount;
};

FIXTURE_VARIANT(map) {
	int	subbuf_size;
};

FIXTURE_VARIANT_ADD(map, subbuf_size_4k) {
	.subbuf_size = 4,
};

FIXTURE_VARIANT_ADD(map, subbuf_size_8k) {
	.subbuf_size = 8,
};

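/*
 * Reset tracing, set the requested sub-buffer size, map the current CPU's
 * ring-buffer and pin the test thread to that CPU.
 */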
FIXTURE_SETUP(map)
{
	int cpu = sched_getcpu();
	cpu_set_t cpu_mask;
	bool fail, umount;
	char *message;

	if (getuid() != 0)
		SKIP(return, "Skipping: %s", "Please run the test as root");

	if (!tracefs_enabled(&message, &fail, &umount)) {
		if (fail) {
			TH_LOG("Tracefs setup failed: %s", message);
			ASSERT_FALSE(fail);
		}
		SKIP(return, "Skipping: %s", message);
	}

	self->umount = umount;

	ASSERT_GE(cpu, 0);

	ASSERT_EQ(tracefs_reset(), 0);

	tracefs_write_int(TRACEFS_ROOT"/buffer_subbuf_size_kb", variant->subbuf_size);

	ASSERT_EQ(tracefs_cpu_map(&self->map_desc, cpu), 0);

	/*
	 * Ensure the events we generate end up in the ring-buffer we just
	 * mapped.
	 */
	CPU_ZERO(&cpu_mask);
	CPU_SET(cpu, &cpu_mask);
	ASSERT_EQ(sched_setaffinity(0, sizeof(cpu_mask), &cpu_mask), 0);
}

FIXTURE_TEARDOWN(map)
{
	tracefs_reset();

	/* Unmap and close before the optional unmount so it can succeed. */
	tracefs_cpu_unmap(&self->map_desc);

	if (self->umount)
		tracefs_unmount();
}

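/*
 * Check the meta-page counters: the buffer starts empty; after enabling
 * tracing, writing 16 trace_marker events and fetching the reader page,
 * entries and read report those 16 events and the reader page has moved to
 * sub-buffer 1.
 */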
TEST_F(map, meta_page_check)
{
	struct tracefs_cpu_map_desc *desc = &self->map_desc;
	int cnt = 0;

	ASSERT_EQ(desc->meta->entries, 0);
	ASSERT_EQ(desc->meta->overrun, 0);
	ASSERT_EQ(desc->meta->read, 0);

	ASSERT_EQ(desc->meta->reader.id, 0);
	ASSERT_EQ(desc->meta->reader.read, 0);

	ASSERT_EQ(ioctl(desc->cpu_fd, TRACE_MMAP_IOCTL_GET_READER), 0);
	ASSERT_EQ(desc->meta->reader.id, 0);

	tracefs_write_int(TRACEFS_ROOT"/tracing_on", 1);
	for (int i = 0; i < 16; i++)
		tracefs_write_int(TRACEFS_ROOT"/trace_marker", i);
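	/*
	 * Run the checks below twice: the counters must be stable on a
	 * second GET_READER with no new events in between.
	 */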
again:
	ASSERT_EQ(ioctl(desc->cpu_fd, TRACE_MMAP_IOCTL_GET_READER), 0);

	ASSERT_EQ(desc->meta->entries, 16);
	ASSERT_EQ(desc->meta->overrun, 0);
	ASSERT_EQ(desc->meta->read, 16);

	ASSERT_EQ(desc->meta->reader.id, 1);

	if (!(cnt++))
		goto again;
}

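/*
 * Map the ring-buffer data pages at offset meta_page_size: mapping all the
 * sub-buffers (and all but one) must succeed, while a mapping running past
 * the end of the data pages must be rejected.
 */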
TEST_F(map, data_mmap)
{
	struct tracefs_cpu_map_desc *desc = &self->map_desc;
	unsigned long meta_len, data_len;
	void *data;

	meta_len = desc->meta->meta_page_size;
	data_len = desc->meta->subbuf_size * desc->meta->nr_subbufs;

	/* Map all the available subbufs */
	data = mmap(NULL, data_len, PROT_READ, MAP_SHARED,
		    desc->cpu_fd, meta_len);
	ASSERT_NE(data, MAP_FAILED);
	munmap(data, data_len);

	/* Map all the available subbufs - 1 */
	data_len -= desc->meta->subbuf_size;
	data = mmap(NULL, data_len, PROT_READ, MAP_SHARED,
		    desc->cpu_fd, meta_len);
	ASSERT_NE(data, MAP_FAILED);
	munmap(data, data_len);

	/* Overflow the available subbufs by 1 */
	meta_len += desc->meta->subbuf_size * 2;
	data = mmap(NULL, data_len, PROT_READ, MAP_SHARED,
		    desc->cpu_fd, meta_len);
	ASSERT_EQ(data, MAP_FAILED);
}

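/*
 * Snapshot and memory mapping are mutually exclusive: the tests below check
 * that each one is refused with -EBUSY while the other is in use.
 */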
FIXTURE(snapshot) {
	bool	umount;
};

FIXTURE_SETUP(snapshot)
{
	bool fail, umount;
	struct stat sb;
	char *message;

	if (getuid() != 0)
		SKIP(return, "Skipping: %s", "Please run the test as root");

	if (stat(TRACEFS_ROOT"/snapshot", &sb))
		SKIP(return, "Skipping: %s", "snapshot not available");

	if (!tracefs_enabled(&message, &fail, &umount)) {
		if (fail) {
			TH_LOG("Tracefs setup failed: %s", message);
			ASSERT_FALSE(fail);
		}
		SKIP(return, "Skipping: %s", message);
	}

	self->umount = umount;
}

FIXTURE_TEARDOWN(snapshot)
{
	__tracefs_write(TRACEFS_ROOT"/events/sched/sched_switch/trigger",
			"!snapshot");
	tracefs_reset();

	if (self->umount)
		tracefs_unmount();
}

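/* Mapping must fail while a snapshot trigger is set on the ring-buffer. */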
TEST_F(snapshot, excludes_map)
{
	struct tracefs_cpu_map_desc map_desc;
	int cpu = sched_getcpu();

	ASSERT_GE(cpu, 0);
	tracefs_write(TRACEFS_ROOT"/events/sched/sched_switch/trigger",
		      "snapshot");
	ASSERT_EQ(tracefs_cpu_map(&map_desc, cpu), -EBUSY);
}

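/* Snapshot, by trigger or by explicit write, must fail while mapped. */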
TEST_F(snapshot, excluded_by_map)
{
	struct tracefs_cpu_map_desc map_desc;
	int cpu = sched_getcpu();

	ASSERT_EQ(tracefs_cpu_map(&map_desc, cpu), 0);

	ASSERT_EQ(__tracefs_write(TRACEFS_ROOT"/events/sched/sched_switch/trigger",
				  "snapshot"), -EBUSY);
	ASSERT_EQ(__tracefs_write(TRACEFS_ROOT"/snapshot",
				  "1"), -EBUSY);
}

TEST_HARNESS_MAIN