xref: /linux/tools/testing/selftests/x86/test_vsyscall.c (revision 7fc2cd2e4b398c57c9cf961cfea05eadbf34c05c)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 
3 #define _GNU_SOURCE
4 
5 #include <stdio.h>
6 #include <sys/time.h>
7 #include <time.h>
8 #include <stdlib.h>
9 #include <sys/syscall.h>
10 #include <unistd.h>
11 #include <dlfcn.h>
12 #include <string.h>
13 #include <inttypes.h>
14 #include <signal.h>
15 #include <sys/ucontext.h>
16 #include <errno.h>
17 #include <err.h>
18 #include <sched.h>
19 #include <stdbool.h>
20 #include <setjmp.h>
21 #include <sys/uio.h>
22 
23 #include "helpers.h"
24 #include "../kselftest.h"
25 
/*
 * Number of ksft results reported.  The x86_64 build additionally runs
 * the vsyscall-specific tests (map probe, read/exec probes,
 * process_vm_readv(), emulation check).
 */
#ifdef __x86_64__
#define TOTAL_TESTS 13
#else
#define TOTAL_TESTS 8
#endif

/* The fixed vsyscall addresses only exist on x86_64; elsewhere use 0. */
#ifdef __x86_64__
# define VSYS(x) (x)
#else
# define VSYS(x) 0
#endif

/* Fallback syscall numbers for libc headers too old to define SYS_getcpu. */
#ifndef SYS_getcpu
# ifdef __x86_64__
#  define SYS_getcpu 309
# else
#  define SYS_getcpu 318
# endif
#endif

/* max length of lines in /proc/self/maps - anything longer is skipped here */
#define MAPS_LINE_LEN 128
48 
/* vsyscalls and vDSO */
/* Permissions of the [vsyscall] mapping, filled in by init_vsys(). */
bool vsyscall_map_r = false, vsyscall_map_x = false;

/* gettimeofday(): fixed vsyscall entry point and vDSO-resolved pointer. */
typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz);
const gtod_t vgtod = (gtod_t)VSYS(0xffffffffff600000);
gtod_t vdso_gtod;

/* clock_gettime() as exported by the vDSO (set by init_vdso()). */
typedef int (*vgettime_t)(clockid_t, struct timespec *);
vgettime_t vdso_gettime;

/* time(): fixed vsyscall entry point and vDSO-resolved pointer. */
typedef long (*time_func_t)(time_t *t);
const time_func_t vtime = (time_func_t)VSYS(0xffffffffff600400);
time_func_t vdso_time;

/* getcpu(): fixed vsyscall entry point and vDSO-resolved pointer. */
typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
const getcpu_t vgetcpu = (getcpu_t)VSYS(0xffffffffff600800);
getcpu_t vdso_getcpu;
66 
67 static void init_vdso(void)
68 {
69 	void *vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
70 	if (!vdso)
71 		vdso = dlopen("linux-gate.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
72 	if (!vdso) {
73 		ksft_print_msg("[WARN] failed to find vDSO\n");
74 		return;
75 	}
76 
77 	vdso_gtod = (gtod_t)dlsym(vdso, "__vdso_gettimeofday");
78 	if (!vdso_gtod)
79 		ksft_print_msg("[WARN] failed to find gettimeofday in vDSO\n");
80 
81 	vdso_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
82 	if (!vdso_gettime)
83 		ksft_print_msg("[WARN] failed to find clock_gettime in vDSO\n");
84 
85 	vdso_time = (time_func_t)dlsym(vdso, "__vdso_time");
86 	if (!vdso_time)
87 		ksft_print_msg("[WARN] failed to find time in vDSO\n");
88 
89 	vdso_getcpu = (getcpu_t)dlsym(vdso, "__vdso_getcpu");
90 	if (!vdso_getcpu)
91 		ksft_print_msg("[WARN] failed to find getcpu in vDSO\n");
92 }
93 
94 /* syscalls */
/* Invoke gettimeofday(2) directly, bypassing both the vDSO and vsyscall page. */
static inline long sys_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	ret = syscall(SYS_gettimeofday, tv, tz);
	return ret;
}
99 
/* Invoke time(2) directly, bypassing both the vDSO and vsyscall page. */
static inline long sys_time(time_t *t)
{
	long ret;

	ret = syscall(SYS_time, t);
	return ret;
}
104 
/*
 * Invoke getcpu(2) directly, bypassing both the vDSO and vsyscall page.
 * @cpu/@node may be NULL; @cache is the obsolete tcache argument and is
 * ignored by modern kernels.
 */
static inline long sys_getcpu(unsigned *cpu, unsigned *node,
			      void *cache)
{
	/* Idiom fix only: '*' binds to the variable, not the type. */
	return syscall(SYS_getcpu, cpu, node, cache);
}
110 
/*
 * Return a - b in seconds as a double.  The microsecond fields are
 * differenced as ints so a negative usec delta is preserved.
 */
static double tv_diff(const struct timeval *a, const struct timeval *b)
{
	double sec_delta = (double)(a->tv_sec - b->tv_sec);
	double usec_delta = (double)((int)a->tv_usec - (int)b->tv_usec);

	return sec_delta + usec_delta * 1e-6;
}
116 
117 static void check_gtod(const struct timeval *tv_sys1,
118 		       const struct timeval *tv_sys2,
119 		       const struct timezone *tz_sys,
120 		       const char *which,
121 		       const struct timeval *tv_other,
122 		       const struct timezone *tz_other)
123 {
124 	double d1, d2;
125 
126 	if (tz_other && (tz_sys->tz_minuteswest != tz_other->tz_minuteswest ||
127 			 tz_sys->tz_dsttime != tz_other->tz_dsttime))
128 		ksft_print_msg("%s tz mismatch\n", which);
129 
130 	d1 = tv_diff(tv_other, tv_sys1);
131 	d2 = tv_diff(tv_sys2, tv_other);
132 
133 	ksft_print_msg("%s time offsets: %lf %lf\n", which, d1, d2);
134 
135 	ksft_test_result(!(d1 < 0 || d2 < 0), "%s gettimeofday()'s timeval\n", which);
136 }
137 
/*
 * Compare gettimeofday() as obtained from the vDSO and from the
 * vsyscall page against a pair of bracketing syscall readings.
 * Reports two ksft results (vDSO, vsyscall), each pass/fail/skip.
 */
static void test_gtod(void)
{
	struct timeval tv_sys1, tv_sys2, tv_vdso, tv_vsys;
	struct timezone tz_sys, tz_vdso, tz_vsys;
	long ret_vdso = -1;
	long ret_vsys = -1;

	ksft_print_msg("test gettimeofday()\n");

	/*
	 * Sandwich the vDSO and vsyscall calls between two syscall
	 * readings; the call order here is what lets check_gtod()
	 * verify monotonicity, so it must not be rearranged.
	 */
	if (sys_gtod(&tv_sys1, &tz_sys) != 0)
		ksft_exit_fail_msg("syscall gettimeofday: %s\n", strerror(errno));
	if (vdso_gtod)
		ret_vdso = vdso_gtod(&tv_vdso, &tz_vdso);
	if (vsyscall_map_x)
		ret_vsys = vgtod(&tv_vsys, &tz_vsys);
	/* tz_sys is overwritten here; only the second reading is kept. */
	if (sys_gtod(&tv_sys2, &tz_sys) != 0)
		ksft_exit_fail_msg("syscall gettimeofday: %s\n", strerror(errno));

	if (vdso_gtod) {
		if (ret_vdso == 0)
			check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vDSO", &tv_vdso, &tz_vdso);
		else
			ksft_test_result_fail("vDSO gettimeofday() failed: %ld\n", ret_vdso);
	} else {
		ksft_test_result_skip("vdso_gtod isn't set\n");
	}

	if (vsyscall_map_x) {
		if (ret_vsys == 0)
			check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vsyscall", &tv_vsys, &tz_vsys);
		else
			ksft_test_result_fail("vsys gettimeofday() failed: %ld\n", ret_vsys);
	} else {
		ksft_test_result_skip("vsyscall_map_x isn't set\n");
	}
}
174 
/*
 * Exercise time() via the vDSO and the vsyscall page, checking both
 * the return value and the value stored through the pointer argument
 * against a pair of bracketing sys_time() calls.  Reports two ksft
 * results (vDSO, vsyscall).
 */
static void test_time(void)
{
	long t_sys1, t_sys2, t_vdso = 0, t_vsys = 0;
	/* time() also stores via its argument; capture both channels. */
	long t2_sys1 = -1, t2_sys2 = -1, t2_vdso = -1, t2_vsys = -1;

	ksft_print_msg("test time()\n");
	/* Call order matters: syscall readings must bracket the others. */
	t_sys1 = sys_time(&t2_sys1);
	if (vdso_time)
		t_vdso = vdso_time(&t2_vdso);
	if (vsyscall_map_x)
		t_vsys = vtime(&t2_vsys);
	t_sys2 = sys_time(&t2_sys2);
	/* The reference syscalls must succeed and agree with their out-params. */
	if (t_sys1 < 0 || t_sys1 != t2_sys1 || t_sys2 < 0 || t_sys2 != t2_sys2) {
		ksft_print_msg("syscall failed (ret1:%ld output1:%ld ret2:%ld output2:%ld)\n",
			       t_sys1, t2_sys1, t_sys2, t2_sys2);
		/* Two results are still owed to the test plan; skip both. */
		ksft_test_result_skip("vdso_time\n");
		ksft_test_result_skip("vdso_time\n");
		return;
	}

	if (vdso_time) {
		if (t_vdso < 0 || t_vdso != t2_vdso)
			ksft_test_result_fail("vDSO failed (ret:%ld output:%ld)\n",
					      t_vdso, t2_vdso);
		/* The vDSO reading must fall within the syscall bracket. */
		else if (t_vdso < t_sys1 || t_vdso > t_sys2)
			ksft_test_result_fail("vDSO returned the wrong time (%ld %ld %ld)\n",
					      t_sys1, t_vdso, t_sys2);
		else
			ksft_test_result_pass("vDSO time() is okay\n");
	} else {
		ksft_test_result_skip("vdso_time isn't set\n");
	}

	if (vsyscall_map_x) {
		if (t_vsys < 0 || t_vsys != t2_vsys)
			ksft_test_result_fail("vsyscall failed (ret:%ld output:%ld)\n",
					      t_vsys, t2_vsys);
		/* The vsyscall reading must fall within the syscall bracket. */
		else if (t_vsys < t_sys1 || t_vsys > t_sys2)
			ksft_test_result_fail("vsyscall returned the wrong time (%ld %ld %ld)\n",
					      t_sys1, t_vsys, t_sys2);
		else
			ksft_test_result_pass("vsyscall time() is okay\n");
	} else {
		ksft_test_result_skip("vsyscall_map_x isn't set\n");
	}
}
221 
/*
 * Pin the process to @cpu, then compare getcpu() results from the
 * syscall, the vDSO, and the vsyscall page.  The NUMA node is not
 * known a priori, so the first successful source establishes the
 * reference node value.  Reports two ksft results (vDSO, vsyscall).
 */
static void test_getcpu(int cpu)
{
	unsigned int cpu_sys, cpu_vdso, cpu_vsys, node_sys, node_vdso, node_vsys;
	long ret_sys, ret_vdso = -1, ret_vsys = -1;
	unsigned int node = 0;
	bool have_node = false;
	cpu_set_t cpuset;

	ksft_print_msg("getcpu() on CPU %d\n", cpu);

	CPU_ZERO(&cpuset);
	CPU_SET(cpu, &cpuset);
	if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) {
		/* e.g. a single-CPU machine when cpu == 1: skip both results. */
		ksft_print_msg("failed to force CPU %d\n", cpu);
		ksft_test_result_skip("vdso_getcpu\n");
		ksft_test_result_skip("vsyscall_map_x\n");

		return;
	}

	ret_sys = sys_getcpu(&cpu_sys, &node_sys, 0);
	if (vdso_getcpu)
		ret_vdso = vdso_getcpu(&cpu_vdso, &node_vdso, 0);
	if (vsyscall_map_x)
		ret_vsys = vgetcpu(&cpu_vsys, &node_vsys, 0);

	if (ret_sys == 0) {
		/* A wrong CPU from the syscall itself is only logged. */
		if (cpu_sys != cpu)
			ksft_print_msg("syscall reported CPU %u but should be %d\n",
				       cpu_sys, cpu);

		/* Trust the syscall's node as the reference value. */
		have_node = true;
		node = node_sys;
	}

	if (vdso_getcpu) {
		if (ret_vdso) {
			ksft_test_result_fail("vDSO getcpu() failed\n");
		} else {
			/* No reference node yet: let the vDSO define it. */
			if (!have_node) {
				have_node = true;
				node = node_vdso;
			}

			if (cpu_vdso != cpu || node_vdso != node) {
				if (cpu_vdso != cpu)
					ksft_print_msg("vDSO reported CPU %u but should be %d\n",
						       cpu_vdso, cpu);
				if (node_vdso != node)
					ksft_print_msg("vDSO reported node %u but should be %u\n",
						       node_vdso, node);
				ksft_test_result_fail("Wrong values\n");
			} else {
				ksft_test_result_pass("vDSO reported correct CPU and node\n");
			}
		}
	} else {
		ksft_test_result_skip("vdso_getcpu isn't set\n");
	}

	if (vsyscall_map_x) {
		if (ret_vsys) {
			ksft_test_result_fail("vsyscall getcpu() failed\n");
		} else {
			/* Still no reference node: let the vsyscall define it. */
			if (!have_node) {
				have_node = true;
				node = node_vsys;
			}

			if (cpu_vsys != cpu || node_vsys != node) {
				if (cpu_vsys != cpu)
					ksft_print_msg("vsyscall reported CPU %u but should be %d\n",
						       cpu_vsys, cpu);
				if (node_vsys != node)
					ksft_print_msg("vsyscall reported node %u but should be %u\n",
						       node_vsys, node);
				ksft_test_result_fail("Wrong values\n");
			} else {
				ksft_test_result_pass("vsyscall reported correct CPU and node\n");
			}
		}
	} else {
		ksft_test_result_skip("vsyscall_map_x isn't set\n");
	}
}
307 
308 #ifdef __x86_64__
309 
/* Jump target and fault details recorded by the SIGSEGV handler below. */
static jmp_buf jmpbuf;
static volatile unsigned long segv_err, segv_trapno;
312 
313 static void sigsegv(int sig, siginfo_t *info, void *ctx_void)
314 {
315 	ucontext_t *ctx = (ucontext_t *)ctx_void;
316 
317 	segv_trapno = ctx->uc_mcontext.gregs[REG_TRAPNO];
318 	segv_err =  ctx->uc_mcontext.gregs[REG_ERR];
319 	siglongjmp(jmpbuf, 1);
320 }
321 
322 static void test_vsys_r(void)
323 {
324 	ksft_print_msg("Checking read access to the vsyscall page\n");
325 	bool can_read;
326 	if (sigsetjmp(jmpbuf, 1) == 0) {
327 		*(volatile int *)0xffffffffff600000;
328 		can_read = true;
329 	} else {
330 		can_read = false;
331 	}
332 
333 	if (can_read && !vsyscall_map_r)
334 		ksft_test_result_fail("We have read access, but we shouldn't\n");
335 	else if (!can_read && vsyscall_map_r)
336 		ksft_test_result_fail("We don't have read access, but we should\n");
337 	else if (can_read)
338 		ksft_test_result_pass("We have read access\n");
339 	else
340 		ksft_test_result_pass("We do not have read access (trap=%ld, error=0x%lx)\n",
341 				      segv_trapno, segv_err);
342 }
343 
344 static void test_vsys_x(void)
345 {
346 	if (vsyscall_map_x) {
347 		/* We already tested this adequately. */
348 		ksft_test_result_pass("vsyscall_map_x is true\n");
349 		return;
350 	}
351 
352 	ksft_print_msg("Make sure that vsyscalls really cause a fault\n");
353 
354 	bool can_exec;
355 	if (sigsetjmp(jmpbuf, 1) == 0) {
356 		vgtod(NULL, NULL);
357 		can_exec = true;
358 	} else {
359 		can_exec = false;
360 	}
361 
362 	if (can_exec)
363 		ksft_test_result_fail("Executing the vsyscall did not fault\n");
364 	/* #GP or #PF (with X86_PF_INSTR) */
365 	else if ((segv_trapno == 13) || ((segv_trapno == 14) && (segv_err & (1 << 4))))
366 		ksft_test_result_pass("Executing the vsyscall page failed (trap=%ld, error=0x%lx)\n",
367 				      segv_trapno, segv_err);
368 	else
369 		ksft_test_result_fail("Execution failed with the wrong error (trap=%ld, error=0x%lx)\n",
370 				      segv_trapno, segv_err);
371 }
372 
373 /*
374  * Debuggers expect ptrace() to be able to peek at the vsyscall page.
375  * Use process_vm_readv() as a proxy for ptrace() to test this.  We
376  * want it to work in the vsyscall=emulate case and to fail in the
377  * vsyscall=xonly case.
378  *
379  * It's worth noting that this ABI is a bit nutty.  write(2) can't
380  * read from the vsyscall page on any kernel version or mode.  The
381  * fact that ptrace() ever worked was a nice courtesy of old kernels,
382  * but the code to support it is fairly gross.
383  */
384 static void test_process_vm_readv(void)
385 {
386 	char buf[4096];
387 	struct iovec local, remote;
388 	int ret;
389 
390 	ksft_print_msg("process_vm_readv() from vsyscall page\n");
391 
392 	local.iov_base = buf;
393 	local.iov_len = 4096;
394 	remote.iov_base = (void *)0xffffffffff600000;
395 	remote.iov_len = 4096;
396 	ret = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
397 	if (ret != 4096) {
398 		/*
399 		 * We expect process_vm_readv() to work if and only if the
400 		 * vsyscall page is readable.
401 		 */
402 		ksft_test_result(!vsyscall_map_r,
403 				 "process_vm_readv() failed (ret = %d, errno = %d)\n", ret, errno);
404 		return;
405 	}
406 
407 	if (vsyscall_map_r)
408 		ksft_test_result(!memcmp(buf, remote.iov_base, sizeof(buf)), "Read data\n");
409 	else
410 		ksft_test_result_fail("process_rm_readv() succeeded, but it should have failed in this configuration\n");
411 }
412 
/*
 * Parse /proc/self/maps to find the [vsyscall] mapping and record its
 * read/execute permissions in vsyscall_map_r/vsyscall_map_x.  Also
 * sanity-checks that the mapping covers exactly the canonical page.
 * Reports one ksft result.
 */
static void init_vsys(void)
{
	int nerrs = 0;
	FILE *maps;
	char line[MAPS_LINE_LEN];
	bool found = false;

	maps = fopen("/proc/self/maps", "r");
	if (!maps) {
		/* Can't probe; assume the historical r-x layout and move on. */
		ksft_test_result_skip("Could not open /proc/self/maps -- assuming vsyscall is r-x\n");
		vsyscall_map_r = true;
		return;
	}

	while (fgets(line, MAPS_LINE_LEN, maps)) {
		char r, x;
		void *start, *end;
		char name[MAPS_LINE_LEN];

		/* sscanf() is safe here as strlen(name) >= strlen(line) */
		if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
			   &start, &end, &r, &x, name) != 5)
			continue;

		if (strcmp(name, "[vsyscall]"))
			continue;

		ksft_print_msg("vsyscall map: %s", line);

		/* The vsyscall page has a fixed address: ff600000..ff601000. */
		if (start != (void *)0xffffffffff600000 ||
		    end != (void *)0xffffffffff601000) {
			ksft_print_msg("address range is nonsense\n");
			nerrs++;
		}

		ksft_print_msg("vsyscall permissions are %c-%c\n", r, x);
		vsyscall_map_r = (r == 'r');
		vsyscall_map_x = (x == 'x');

		found = true;
		break;
	}

	fclose(maps);

	if (!found) {
		/* vsyscall=none: the page isn't mapped at all. */
		ksft_print_msg("no vsyscall map in /proc/self/maps\n");
		vsyscall_map_r = false;
		vsyscall_map_x = false;
	}

	ksft_test_result(!nerrs, "vsyscall map\n");
}
466 
467 static volatile sig_atomic_t num_vsyscall_traps;
468 
469 static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
470 {
471 	ucontext_t *ctx = (ucontext_t *)ctx_void;
472 	unsigned long ip = ctx->uc_mcontext.gregs[REG_RIP];
473 
474 	if (((ip ^ 0xffffffffff600000UL) & ~0xfffUL) == 0)
475 		num_vsyscall_traps++;
476 }
477 
/*
 * Single-step through a vsyscall with EFLAGS.TF set and count how
 * many traps land inside the vsyscall page (via sigtrap()).  An
 * emulated vsyscall traps at most once; native code there would trap
 * once per instruction.  Reports one ksft result.
 */
static void test_emulation(void)
{
	time_t tmp;
	bool is_native;

	if (!vsyscall_map_x) {
		ksft_test_result_skip("vsyscall_map_x isn't set\n");
		return;
	}

	ksft_print_msg("checking that vsyscalls are emulated\n");
	sethandler(SIGTRAP, sigtrap, 0);
	/* Turn on single-stepping just around the vsyscall invocation. */
	set_eflags(get_eflags() | X86_EFLAGS_TF);
	vtime(&tmp);
	set_eflags(get_eflags() & ~X86_EFLAGS_TF);

	/*
	 * If vsyscalls are emulated, we expect a single trap in the
	 * vsyscall page -- the call instruction will trap with RIP
	 * pointing to the entry point before emulation takes over.
	 * In native mode, we expect two traps, since whatever code
	 * the vsyscall page contains will be more than just a ret
	 * instruction.
	 */
	is_native = (num_vsyscall_traps > 1);

	ksft_test_result(!is_native, "vsyscalls are %s (%d instructions in vsyscall page)\n",
			 (is_native ? "native" : "emulated"), (int)num_vsyscall_traps);
}
507 #endif
508 
509 int main(int argc, char **argv)
510 {
511 	int total_tests = TOTAL_TESTS;
512 
513 	ksft_print_header();
514 	ksft_set_plan(total_tests);
515 
516 	init_vdso();
517 #ifdef __x86_64__
518 	init_vsys();
519 #endif
520 
521 	test_gtod();
522 	test_time();
523 	test_getcpu(0);
524 	test_getcpu(1);
525 
526 #ifdef __x86_64__
527 	sethandler(SIGSEGV, sigsegv, 0);
528 	test_vsys_r();
529 	test_vsys_x();
530 	test_process_vm_readv();
531 	test_emulation();
532 #endif
533 
534 	ksft_finished();
535 }
536