/* SPDX-License-Identifier: GPL-2.0 */

#define _GNU_SOURCE

#include <stdio.h>
#include <sys/time.h>
#include <time.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <dlfcn.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <sys/ucontext.h>
#include <errno.h>
#include <err.h>
#include <sched.h>
#include <stdbool.h>
#include <setjmp.h>
#include <sys/uio.h>

#include "helpers.h"
#include "../kselftest.h"

#ifdef __x86_64__
#define TOTAL_TESTS 13
#else
#define TOTAL_TESTS 8
#endif

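/*
 * The legacy vsyscall page exists only on x86_64.  On 32-bit builds,
 * VSYS() nulls out the fixed addresses so the calls are never taken.
 */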
#ifdef __x86_64__
# define VSYS(x) (x)
#else
# define VSYS(x) 0
#endif

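/*
 * Fall back to hard-coded syscall numbers if the libc headers are too
 * old to define SYS_getcpu (309 on x86_64, 318 on i386).
 */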
#ifndef SYS_getcpu
# ifdef __x86_64__
#  define SYS_getcpu 309
# else
#  define SYS_getcpu 318
# endif
#endif

/* max length of lines in /proc/self/maps - anything longer is skipped here */
#define MAPS_LINE_LEN 128

/* vsyscalls and vDSO */
bool vsyscall_map_r = false, vsyscall_map_x = false;

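/*
 * The vsyscall entry points sit at fixed addresses: gettimeofday() at
 * 0xffffffffff600000, time() at +0x400, and getcpu() at +0x800.
 */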
typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz);
const gtod_t vgtod = (gtod_t)VSYS(0xffffffffff600000);
gtod_t vdso_gtod;

typedef int (*vgettime_t)(clockid_t, struct timespec *);
vgettime_t vdso_gettime;

typedef long (*time_func_t)(time_t *t);
const time_func_t vtime = (time_func_t)VSYS(0xffffffffff600400);
time_func_t vdso_time;

typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
const getcpu_t vgetcpu = (getcpu_t)VSYS(0xffffffffff600800);
getcpu_t vdso_getcpu;

static void init_vdso(void)
{
	void *vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
	if (!vdso)
		vdso = dlopen("linux-gate.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
	if (!vdso) {
		ksft_print_msg("[WARN] failed to find vDSO\n");
		return;
	}

	vdso_gtod = (gtod_t)dlsym(vdso, "__vdso_gettimeofday");
	if (!vdso_gtod)
		ksft_print_msg("[WARN] failed to find gettimeofday in vDSO\n");

	vdso_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
	if (!vdso_gettime)
		ksft_print_msg("[WARN] failed to find clock_gettime in vDSO\n");

	vdso_time = (time_func_t)dlsym(vdso, "__vdso_time");
	if (!vdso_time)
		ksft_print_msg("[WARN] failed to find time in vDSO\n");

	vdso_getcpu = (getcpu_t)dlsym(vdso, "__vdso_getcpu");
	if (!vdso_getcpu)
		ksft_print_msg("[WARN] failed to find getcpu in vDSO\n");
}

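/*
 * Note: a dlopen()-free way to locate the vDSO is the auxiliary
 * vector, e.g. (sketch) getauxval(AT_SYSINFO_EHDR) from <sys/auxv.h>,
 * which returns the base address of the vDSO ELF image.
 */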
/* syscalls */
static inline long sys_gtod(struct timeval *tv, struct timezone *tz)
{
	return syscall(SYS_gettimeofday, tv, tz);
}

static inline long sys_time(time_t *t)
{
	return syscall(SYS_time, t);
}

static inline long sys_getcpu(unsigned *cpu, unsigned *node, void *cache)
{
	return syscall(SYS_getcpu, cpu, node, cache);
}

static double tv_diff(const struct timeval *a, const struct timeval *b)
{
	return (double)(a->tv_sec - b->tv_sec) +
	       (double)((int)a->tv_usec - (int)b->tv_usec) * 1e-6;
}

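/*
 * The vDSO/vsyscall timestamp is sampled between two syscall reads, so
 * it must fall within that bracket: a negative offset on either side
 * means the fast path returned a time outside the window.
 */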
static void check_gtod(const struct timeval *tv_sys1,
		       const struct timeval *tv_sys2,
		       const struct timezone *tz_sys,
		       const char *which,
		       const struct timeval *tv_other,
		       const struct timezone *tz_other)
{
	double d1, d2;

	if (tz_other && (tz_sys->tz_minuteswest != tz_other->tz_minuteswest ||
			 tz_sys->tz_dsttime != tz_other->tz_dsttime))
		ksft_print_msg("%s tz mismatch\n", which);

	d1 = tv_diff(tv_other, tv_sys1);
	d2 = tv_diff(tv_sys2, tv_other);

	ksft_print_msg("%s time offsets: %lf %lf\n", which, d1, d2);

	ksft_test_result(!(d1 < 0 || d2 < 0), "%s gettimeofday()'s timeval\n", which);
}

static void test_gtod(void)
{
	struct timeval tv_sys1, tv_sys2, tv_vdso, tv_vsys;
	struct timezone tz_sys, tz_vdso, tz_vsys;
	long ret_vdso = -1;
	long ret_vsys = -1;

	ksft_print_msg("test gettimeofday()\n");

	if (sys_gtod(&tv_sys1, &tz_sys) != 0)
		ksft_exit_fail_msg("syscall gettimeofday: %s\n", strerror(errno));
	if (vdso_gtod)
		ret_vdso = vdso_gtod(&tv_vdso, &tz_vdso);
	if (vsyscall_map_x)
		ret_vsys = vgtod(&tv_vsys, &tz_vsys);
	if (sys_gtod(&tv_sys2, &tz_sys) != 0)
		ksft_exit_fail_msg("syscall gettimeofday: %s\n", strerror(errno));

	if (vdso_gtod) {
		if (ret_vdso == 0)
			check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vDSO", &tv_vdso, &tz_vdso);
		else
			ksft_test_result_fail("vDSO gettimeofday() failed: %ld\n", ret_vdso);
	} else {
		ksft_test_result_skip("vdso_gtod isn't set\n");
	}

	if (vsyscall_map_x) {
		if (ret_vsys == 0)
			check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vsyscall", &tv_vsys, &tz_vsys);
		else
			ksft_test_result_fail("vsys gettimeofday() failed: %ld\n", ret_vsys);
	} else {
		ksft_test_result_skip("vsyscall_map_x isn't set\n");
	}
}

static void test_time(void)
{
	long t_sys1, t_sys2, t_vdso = 0, t_vsys = 0;
	long t2_sys1 = -1, t2_sys2 = -1, t2_vdso = -1, t2_vsys = -1;

	ksft_print_msg("test time()\n");
	t_sys1 = sys_time(&t2_sys1);
	if (vdso_time)
		t_vdso = vdso_time(&t2_vdso);
	if (vsyscall_map_x)
		t_vsys = vtime(&t2_vsys);
	t_sys2 = sys_time(&t2_sys2);
	if (t_sys1 < 0 || t_sys1 != t2_sys1 || t_sys2 < 0 || t_sys2 != t2_sys2) {
		ksft_print_msg("syscall failed (ret1:%ld output1:%ld ret2:%ld output2:%ld)\n",
			       t_sys1, t2_sys1, t_sys2, t2_sys2);
		ksft_test_result_skip("vDSO time()\n");
		ksft_test_result_skip("vsyscall time()\n");
		return;
	}

	if (vdso_time) {
		if (t_vdso < 0 || t_vdso != t2_vdso)
			ksft_test_result_fail("vDSO failed (ret:%ld output:%ld)\n",
					      t_vdso, t2_vdso);
		else if (t_vdso < t_sys1 || t_vdso > t_sys2)
			ksft_test_result_fail("vDSO returned the wrong time (%ld %ld %ld)\n",
					      t_sys1, t_vdso, t_sys2);
		else
			ksft_test_result_pass("vDSO time() is okay\n");
	} else {
		ksft_test_result_skip("vdso_time isn't set\n");
	}

	if (vsyscall_map_x) {
		if (t_vsys < 0 || t_vsys != t2_vsys)
			ksft_test_result_fail("vsyscall failed (ret:%ld output:%ld)\n",
					      t_vsys, t2_vsys);
		else if (t_vsys < t_sys1 || t_vsys > t_sys2)
			ksft_test_result_fail("vsyscall returned the wrong time (%ld %ld %ld)\n",
					      t_sys1, t_vsys, t_sys2);
		else
			ksft_test_result_pass("vsyscall time() is okay\n");
	} else {
		ksft_test_result_skip("vsyscall_map_x isn't set\n");
	}
}

static void test_getcpu(int cpu)
{
	unsigned int cpu_sys, cpu_vdso, cpu_vsys, node_sys, node_vdso, node_vsys;
	long ret_sys, ret_vdso = -1, ret_vsys = -1;
	unsigned int node = 0;
	bool have_node = false;
	cpu_set_t cpuset;

	ksft_print_msg("getcpu() on CPU %d\n", cpu);

	CPU_ZERO(&cpuset);
	CPU_SET(cpu, &cpuset);
	if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) {
		ksft_print_msg("failed to force CPU %d\n", cpu);
		ksft_test_result_skip("vdso_getcpu\n");
		ksft_test_result_skip("vsyscall_map_x\n");

		return;
	}

	ret_sys = sys_getcpu(&cpu_sys, &node_sys, 0);
	if (vdso_getcpu)
		ret_vdso = vdso_getcpu(&cpu_vdso, &node_vdso, 0);
	if (vsyscall_map_x)
		ret_vsys = vgetcpu(&cpu_vsys, &node_vsys, 0);

	if (ret_sys == 0) {
		if (cpu_sys != cpu)
			ksft_print_msg("syscall reported CPU %u but should be %d\n",
				       cpu_sys, cpu);

		have_node = true;
		node = node_sys;
	}

	if (vdso_getcpu) {
		if (ret_vdso) {
			ksft_test_result_fail("vDSO getcpu() failed\n");
		} else {
			if (!have_node) {
				have_node = true;
				node = node_vdso;
			}

			if (cpu_vdso != cpu || node_vdso != node) {
				if (cpu_vdso != cpu)
					ksft_print_msg("vDSO reported CPU %u but should be %d\n",
						       cpu_vdso, cpu);
				if (node_vdso != node)
					ksft_print_msg("vDSO reported node %u but should be %u\n",
						       node_vdso, node);
				ksft_test_result_fail("Wrong values\n");
			} else {
				ksft_test_result_pass("vDSO reported correct CPU and node\n");
			}
		}
	} else {
		ksft_test_result_skip("vdso_getcpu isn't set\n");
	}

	if (vsyscall_map_x) {
		if (ret_vsys) {
			ksft_test_result_fail("vsyscall getcpu() failed\n");
		} else {
			if (!have_node) {
				have_node = true;
				node = node_vsys;
			}

			if (cpu_vsys != cpu || node_vsys != node) {
				if (cpu_vsys != cpu)
					ksft_print_msg("vsyscall reported CPU %u but should be %d\n",
						       cpu_vsys, cpu);
				if (node_vsys != node)
					ksft_print_msg("vsyscall reported node %u but should be %u\n",
						       node_vsys, node);
				ksft_test_result_fail("Wrong values\n");
			} else {
				ksft_test_result_pass("vsyscall reported correct CPU and node\n");
			}
		}
	} else {
		ksft_test_result_skip("vsyscall_map_x isn't set\n");
	}
}

#ifdef __x86_64__

static jmp_buf jmpbuf;
static volatile unsigned long segv_err;

static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
		       int flags)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO | flags;
	sigemptyset(&sa.sa_mask);
	if (sigaction(sig, &sa, 0))
		ksft_exit_fail_msg("sigaction failed\n");
}

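/*
 * Record the page-fault error code from the machine context, then
 * longjmp back so the probing code can report what happened.
 */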
static void sigsegv(int sig, siginfo_t *info, void *ctx_void)
{
	ucontext_t *ctx = (ucontext_t *)ctx_void;

	segv_err = ctx->uc_mcontext.gregs[REG_ERR];
	siglongjmp(jmpbuf, 1);
}

static void test_vsys_r(void)
{
	ksft_print_msg("Checking read access to the vsyscall page\n");
	bool can_read;
	if (sigsetjmp(jmpbuf, 1) == 0) {
		*(volatile int *)0xffffffffff600000;
		can_read = true;
	} else {
		can_read = false;
	}

	if (can_read && !vsyscall_map_r)
		ksft_test_result_fail("We have read access, but we shouldn't\n");
	else if (!can_read && vsyscall_map_r)
		ksft_test_result_fail("We don't have read access, but we should\n");
	else if (can_read)
		ksft_test_result_pass("We have read access\n");
	else
		ksft_test_result_pass("We do not have read access: #PF(0x%lx)\n", segv_err);
}

static void test_vsys_x(void)
{
	if (vsyscall_map_x) {
		/* We already tested this adequately. */
		ksft_test_result_pass("vsyscall_map_x is true\n");
		return;
	}

	ksft_print_msg("Make sure that vsyscalls really page fault\n");

	bool can_exec;
	if (sigsetjmp(jmpbuf, 1) == 0) {
		vgtod(NULL, NULL);
		can_exec = true;
	} else {
		can_exec = false;
	}

	if (can_exec)
		ksft_test_result_fail("Executing the vsyscall did not page fault\n");
	else if (segv_err & (1 << 4)) /* X86_PF_INSTR: fault was an instruction fetch */
		ksft_test_result_pass("Executing the vsyscall page failed: #PF(0x%lx)\n",
				      segv_err);
	else
		ksft_test_result_fail("Execution failed with the wrong error: #PF(0x%lx)\n",
				      segv_err);
}

/*
 * Debuggers expect ptrace() to be able to peek at the vsyscall page.
 * Use process_vm_readv() as a proxy for ptrace() to test this.  We
 * want it to work in the vsyscall=emulate case and to fail in the
 * vsyscall=xonly case.
 *
 * It's worth noting that this ABI is a bit nutty.  write(2) can't
 * read from the vsyscall page on any kernel version or mode.  The
 * fact that ptrace() ever worked was a nice courtesy of old kernels,
 * but the code to support it is fairly gross.
 */
static void test_process_vm_readv(void)
{
	char buf[4096];
	struct iovec local, remote;
	int ret;

	ksft_print_msg("process_vm_readv() from vsyscall page\n");

	local.iov_base = buf;
	local.iov_len = 4096;
	remote.iov_base = (void *)0xffffffffff600000;
	remote.iov_len = 4096;
	ret = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
	if (ret != 4096) {
		/*
		 * We expect process_vm_readv() to work if and only if the
		 * vsyscall page is readable.
		 */
		ksft_test_result(!vsyscall_map_r,
				 "process_vm_readv() failed (ret = %d, errno = %d)\n", ret, errno);
		return;
	}

	if (vsyscall_map_r)
		ksft_test_result(!memcmp(buf, remote.iov_base, sizeof(buf)), "Read data\n");
	else
		ksft_test_result_fail("process_vm_readv() succeeded, but it should have failed in this configuration\n");
}

static void init_vsys(void)
{
	int nerrs = 0;
	FILE *maps;
	char line[MAPS_LINE_LEN];
	bool found = false;

	maps = fopen("/proc/self/maps", "r");
	if (!maps) {
		ksft_test_result_skip("Could not open /proc/self/maps -- assuming vsyscall is r-x\n");
		vsyscall_map_r = true;
		return;
	}

	while (fgets(line, MAPS_LINE_LEN, maps)) {
		char r, x;
		void *start, *end;
		char name[MAPS_LINE_LEN];

		/* sscanf() can't overflow here: name is as large as the whole line buffer */
		if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
			   &start, &end, &r, &x, name) != 5)
			continue;

		if (strcmp(name, "[vsyscall]"))
			continue;

		ksft_print_msg("vsyscall map: %s", line);

		if (start != (void *)0xffffffffff600000 ||
		    end != (void *)0xffffffffff601000) {
			ksft_print_msg("address range is nonsense\n");
			nerrs++;
		}

		ksft_print_msg("vsyscall permissions are %c-%c\n", r, x);
		vsyscall_map_r = (r == 'r');
		vsyscall_map_x = (x == 'x');

		found = true;
		break;
	}

	fclose(maps);

	if (!found) {
		ksft_print_msg("no vsyscall map in /proc/self/maps\n");
		vsyscall_map_r = false;
		vsyscall_map_x = false;
	}

	ksft_test_result(!nerrs, "vsyscall map\n");
}

static volatile sig_atomic_t num_vsyscall_traps;

static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
{
	ucontext_t *ctx = (ucontext_t *)ctx_void;
	unsigned long ip = ctx->uc_mcontext.gregs[REG_RIP];

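	/* Count only traps whose RIP lies within the 4 KiB vsyscall page. */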
	if (((ip ^ 0xffffffffff600000UL) & ~0xfffUL) == 0)
		num_vsyscall_traps++;
}

static void test_emulation(void)
{
	time_t tmp;
	bool is_native;

	if (!vsyscall_map_x) {
		ksft_test_result_skip("vsyscall_map_x isn't set\n");
		return;
	}

	ksft_print_msg("checking that vsyscalls are emulated\n");
	sethandler(SIGTRAP, sigtrap, 0);
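	/* Single-step: with TF set, the CPU delivers SIGTRAP after each instruction. */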
	set_eflags(get_eflags() | X86_EFLAGS_TF);
	vtime(&tmp);
	set_eflags(get_eflags() & ~X86_EFLAGS_TF);

	/*
	 * If vsyscalls are emulated, we expect a single trap in the
	 * vsyscall page -- the call instruction will trap with RIP
	 * pointing to the entry point before emulation takes over.
	 * In native mode, we expect two traps, since whatever code
	 * the vsyscall page contains will be more than just a ret
	 * instruction.
	 */
	is_native = (num_vsyscall_traps > 1);

	ksft_test_result(!is_native, "vsyscalls are %s (%d instructions in vsyscall page)\n",
			 (is_native ? "native" : "emulated"), (int)num_vsyscall_traps);
}
#endif

int main(int argc, char **argv)
{
	int total_tests = TOTAL_TESTS;

	ksft_print_header();
	ksft_set_plan(total_tests);

	init_vdso();
#ifdef __x86_64__
	init_vsys();
#endif

	test_gtod();
	test_time();
	test_getcpu(0);
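	/* On a single-CPU system, pinning to CPU 1 fails and those subtests are skipped. */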
	test_getcpu(1);

#ifdef __x86_64__
	sethandler(SIGSEGV, sigsegv, 0);
	test_vsys_r();
	test_vsys_x();
	test_process_vm_readv();
	test_emulation();
#endif

	ksft_finished();
}