xref: /linux/tools/testing/selftests/ring-buffer/map_test.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
// SPDX-License-Identifier: GPL-2.0
/*
 * Ring-buffer memory mapping tests
 *
 * Copyright (c) 2024 Vincent Donnefort <vdonnefort@google.com>
 */
#include <errno.h>
#include <fcntl.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <linux/trace_mmap.h>

#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/stat.h>

#include "../user_events/user_events_selftests.h" /* share tracefs setup */
#include "../kselftest_harness.h"

#define TRACEFS_ROOT "/sys/kernel/tracing"

static int __tracefs_write(const char *path, const char *value)
{
	int fd, ret;

	fd = open(path, O_WRONLY | O_TRUNC);
	if (fd < 0)
		return fd;

	ret = write(fd, value, strlen(value));

	close(fd);

	return ret == -1 ? -errno : 0;
}

static int __tracefs_write_int(const char *path, int value)
{
	char *str;
	int ret;

	if (asprintf(&str, "%d", value) < 0)
		return -1;

	ret = __tracefs_write(path, str);

	free(str);

	return ret;
}

#define tracefs_write_int(path, value) \
	ASSERT_EQ(__tracefs_write_int((path), (value)), 0)

#define tracefs_write(path, value) \
	ASSERT_EQ(__tracefs_write((path), (value)), 0)

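/*
 * Return tracing to a quiescent state: tracing off, trace buffer and
 * event set cleared, current tracer back to nop.
 */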
static int tracefs_reset(void)
{
	if (__tracefs_write_int(TRACEFS_ROOT"/tracing_on", 0))
		return -1;
	if (__tracefs_write(TRACEFS_ROOT"/trace", ""))
		return -1;
	if (__tracefs_write(TRACEFS_ROOT"/set_event", ""))
		return -1;
	if (__tracefs_write(TRACEFS_ROOT"/current_tracer", "nop"))
		return -1;

	return 0;
}

struct tracefs_cpu_map_desc {
	struct trace_buffer_meta	*meta;
	int				cpu_fd;
};

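/*
 * Open the per-CPU trace_pipe_raw file and mmap its meta-page, which
 * always lives at file offset 0. If the kernel reports a structure
 * larger than the initial one-page mapping, remap with the advertised
 * meta_page_size.
 */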
int tracefs_cpu_map(struct tracefs_cpu_map_desc *desc, int cpu)
{
	int page_size = getpagesize();
	char *cpu_path;
	void *map;

	if (asprintf(&cpu_path,
		     TRACEFS_ROOT"/per_cpu/cpu%d/trace_pipe_raw",
		     cpu) < 0)
		return -ENOMEM;

	desc->cpu_fd = open(cpu_path, O_RDONLY | O_NONBLOCK);
	free(cpu_path);
	if (desc->cpu_fd < 0)
		return -ENODEV;

again:
	map = mmap(NULL, page_size, PROT_READ, MAP_SHARED, desc->cpu_fd, 0);
	if (map == MAP_FAILED)
		return -errno;

	desc->meta = (struct trace_buffer_meta *)map;

	/* the meta-page is bigger than the original mapping */
	if (page_size < desc->meta->meta_struct_len) {
		int meta_page_size = desc->meta->meta_page_size;

		munmap(desc->meta, page_size);
		page_size = meta_page_size;
		goto again;
	}

	return 0;
}

void tracefs_cpu_unmap(struct tracefs_cpu_map_desc *desc)
{
	munmap(desc->meta, desc->meta->meta_page_size);
	close(desc->cpu_fd);
}

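/*
 * Illustrative sketch only, not used by the tests below: per the
 * ring-buffer mapping documentation, once the data pages are mapped
 * the current reader sub-buffer sits at
 * data + meta->reader.id * meta->subbuf_size. The helper name is ours,
 * not part of the kernel ABI.
 */
static inline void *tracefs_cpu_reader_subbuf(struct tracefs_cpu_map_desc *desc,
					      void *data)
{
	return data + desc->meta->reader.id * desc->meta->subbuf_size;
}
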
FIXTURE(map) {
	struct tracefs_cpu_map_desc	map_desc;
	bool				umount;
};

FIXTURE_VARIANT(map) {
	int	subbuf_size;
};

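/* Sub-buffer sizes are given in KiB, as written to buffer_subbuf_size_kb. */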
FIXTURE_VARIANT_ADD(map, subbuf_size_4k) {
	.subbuf_size = 4,
};

FIXTURE_VARIANT_ADD(map, subbuf_size_8k) {
	.subbuf_size = 8,
};

FIXTURE_SETUP(map)
{
	int cpu = sched_getcpu();
	cpu_set_t cpu_mask;
	bool fail, umount;
	char *message;

	if (getuid() != 0)
		SKIP(return, "Skipping: %s", "Please run the test as root");

	if (!tracefs_enabled(&message, &fail, &umount)) {
		if (fail) {
			TH_LOG("Tracefs setup failed: %s", message);
			ASSERT_FALSE(fail);
		}
		SKIP(return, "Skipping: %s", message);
	}

	self->umount = umount;

	ASSERT_GE(cpu, 0);

	ASSERT_EQ(tracefs_reset(), 0);

	tracefs_write_int(TRACEFS_ROOT"/buffer_subbuf_size_kb", variant->subbuf_size);

	ASSERT_EQ(tracefs_cpu_map(&self->map_desc, cpu), 0);

	/*
	 * Ensure generated events will be found on this very same ring-buffer.
	 */
	CPU_ZERO(&cpu_mask);
	CPU_SET(cpu, &cpu_mask);
	ASSERT_EQ(sched_setaffinity(0, sizeof(cpu_mask), &cpu_mask), 0);
}

FIXTURE_TEARDOWN(map)
{
	tracefs_reset();

	/* Unmap and close before a potential unmount, which would otherwise fail with -EBUSY. */
	tracefs_cpu_unmap(&self->map_desc);

	if (self->umount)
		tracefs_unmount();
}

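/*
 * Validate the meta-page counters while 16 trace_marker events are
 * produced and then consumed through TRACE_MMAP_IOCTL_GET_READER.
 */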
TEST_F(map, meta_page_check)
{
	struct tracefs_cpu_map_desc *desc = &self->map_desc;
	int cnt = 0;

	ASSERT_EQ(desc->meta->entries, 0);
	ASSERT_EQ(desc->meta->overrun, 0);
	ASSERT_EQ(desc->meta->read, 0);

	ASSERT_EQ(desc->meta->reader.id, 0);
	ASSERT_EQ(desc->meta->reader.read, 0);

	ASSERT_EQ(ioctl(desc->cpu_fd, TRACE_MMAP_IOCTL_GET_READER), 0);
	ASSERT_EQ(desc->meta->reader.id, 0);

	tracefs_write_int(TRACEFS_ROOT"/tracing_on", 1);
	for (int i = 0; i < 16; i++)
		tracefs_write_int(TRACEFS_ROOT"/trace_marker", i);
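
	/*
	 * Run the checks below twice: the second TRACE_MMAP_IOCTL_GET_READER
	 * must leave the counters untouched, since every event has already
	 * been consumed on the first pass.
	 */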
again:
	ASSERT_EQ(ioctl(desc->cpu_fd, TRACE_MMAP_IOCTL_GET_READER), 0);

	ASSERT_EQ(desc->meta->entries, 16);
	ASSERT_EQ(desc->meta->overrun, 0);
	ASSERT_EQ(desc->meta->read, 16);

	ASSERT_EQ(desc->meta->reader.id, 1);

	if (!(cnt++))
		goto again;
}

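/*
 * Exercise data-page mapping bounds: all sub-buffers can be mapped,
 * mappings reaching past the end of the buffer must fail, and any
 * meta-page padding must read as zeroes.
 */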
TEST_F(map, data_mmap)
{
	struct tracefs_cpu_map_desc *desc = &self->map_desc;
	unsigned long meta_len, data_len;
	void *data;

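	/* Data pages are mapped at file offsets right after the meta-page. */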
	meta_len = desc->meta->meta_page_size;
	data_len = desc->meta->subbuf_size * desc->meta->nr_subbufs;

	/* Map all the available subbufs */
	data = mmap(NULL, data_len, PROT_READ, MAP_SHARED,
		    desc->cpu_fd, meta_len);
	ASSERT_NE(data, MAP_FAILED);
	munmap(data, data_len);

	/* Map all the available subbufs - 1 */
	data_len -= desc->meta->subbuf_size;
	data = mmap(NULL, data_len, PROT_READ, MAP_SHARED,
		    desc->cpu_fd, meta_len);
	ASSERT_NE(data, MAP_FAILED);
	munmap(data, data_len);

	/*
	 * Overflow the available subbufs by 1: skipping two subbufs while
	 * still mapping nr_subbufs - 1 of them reaches one subbuf past the
	 * end of the buffer, so the mapping must be refused.
	 */
	meta_len += desc->meta->subbuf_size * 2;
	data = mmap(NULL, data_len, PROT_READ, MAP_SHARED,
		    desc->cpu_fd, meta_len);
	ASSERT_EQ(data, MAP_FAILED);

	/* Verify meta-page padding */
	if (desc->meta->meta_page_size > getpagesize()) {
		data_len = desc->meta->meta_page_size;
		data = mmap(NULL, data_len,
			    PROT_READ, MAP_SHARED, desc->cpu_fd, 0);
		ASSERT_NE(data, MAP_FAILED);

		for (int i = desc->meta->meta_struct_len;
		     i < desc->meta->meta_page_size; i += sizeof(int))
			ASSERT_EQ(*(int *)(data + i), 0);

		munmap(data, data_len);
	}
}

FIXTURE(snapshot) {
	bool	umount;
};

FIXTURE_SETUP(snapshot)
{
	bool fail, umount;
	struct stat sb;
	char *message;

	if (getuid() != 0)
		SKIP(return, "Skipping: %s", "Please run the test as root");

	if (stat(TRACEFS_ROOT"/snapshot", &sb))
		SKIP(return, "Skipping: %s", "snapshot not available");

	if (!tracefs_enabled(&message, &fail, &umount)) {
		if (fail) {
			TH_LOG("Tracefs setup failed: %s", message);
			ASSERT_FALSE(fail);
		}
		SKIP(return, "Skipping: %s", message);
	}

	self->umount = umount;
}

FIXTURE_TEARDOWN(snapshot)
{
	__tracefs_write(TRACEFS_ROOT"/events/sched/sched_switch/trigger",
			"!snapshot");
	tracefs_reset();

	if (self->umount)
		tracefs_unmount();
}

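/*
 * Snapshot and user-space mapping of a ring-buffer are mutually
 * exclusive: whichever comes first makes the other fail with -EBUSY.
 */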
TEST_F(snapshot, excludes_map)
{
	struct tracefs_cpu_map_desc map_desc;
	int cpu = sched_getcpu();

	ASSERT_GE(cpu, 0);
	tracefs_write(TRACEFS_ROOT"/events/sched/sched_switch/trigger",
		      "snapshot");
	ASSERT_EQ(tracefs_cpu_map(&map_desc, cpu), -EBUSY);
}

TEST_F(snapshot, excluded_by_map)
{
	struct tracefs_cpu_map_desc map_desc;
	int cpu = sched_getcpu();

	ASSERT_GE(cpu, 0);
	ASSERT_EQ(tracefs_cpu_map(&map_desc, cpu), 0);

	ASSERT_EQ(__tracefs_write(TRACEFS_ROOT"/events/sched/sched_switch/trigger",
				  "snapshot"), -EBUSY);
	ASSERT_EQ(__tracefs_write(TRACEFS_ROOT"/snapshot",
				  "1"), -EBUSY);

	/* Release the mapping so a potential teardown unmount can succeed. */
	tracefs_cpu_unmap(&map_desc);
}

TEST_HARNESS_MAIN