// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012-2014 Andy Lutomirski <luto@amacapital.net>
 *
 * Based on the original implementation which is:
 *  Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright 2003 Andi Kleen, SuSE Labs.
 *
 *  Parts of the original code have been moved to arch/x86/vdso/vma.c
 *
 * This file implements vsyscall emulation.  vsyscalls are a legacy ABI:
 * Userspace can request certain kernel services by calling fixed
 * addresses.  This concept is problematic:
 *
 * - It interferes with ASLR.
 * - It's awkward to write code that lives in kernel addresses but is
 *   callable by userspace at fixed addresses.
 * - The whole concept is impossible for 32-bit compat userspace.
 * - UML cannot easily virtualize a vsyscall.
 *
 * As of mid-2014, I believe that there is no new userspace code that
 * will use a vsyscall if the vDSO is present.  I hope that there will
 * soon be no new userspace code that will ever use a vsyscall.
 *
 * The code in this file emulates vsyscalls when notified of a page
 * fault to a vsyscall address.
 */
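
/*
 * Illustrative sketch (added for this edit; not part of the original
 * file): a legacy binary reaches a vsyscall by calling a fixed,
 * 1024-byte-aligned slot in the vsyscall page, e.g. slot 0
 * (gettimeofday):
 *
 *	typedef int (*gtod_fn)(struct __kernel_old_timeval *tv,
 *			       struct timezone *tz);
 *	gtod_fn gtod = (gtod_fn)0xffffffffff600000UL;	// VSYSCALL_ADDR
 *	struct __kernel_old_timeval tv;
 *	gtod(&tv, NULL);	// traps; emulate_vsyscall() handles it
 *
 * Under vsyscall=emulate or vsyscall=xonly the call page-faults and
 * the fault handler hands control to emulate_vsyscall() below.
 */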

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>
#include <linux/mm_types.h>
#include <linux/syscalls.h>
#include <linux/ratelimit.h>

#include <asm/vsyscall.h>
#include <asm/unistd.h>
#include <asm/fixmap.h>
#include <asm/traps.h>
#include <asm/paravirt.h>

#define CREATE_TRACE_POINTS
#include "vsyscall_trace.h"

static enum { EMULATE, XONLY, NONE } vsyscall_mode __ro_after_init =
#ifdef CONFIG_LEGACY_VSYSCALL_NONE
	NONE;
#elif defined(CONFIG_LEGACY_VSYSCALL_XONLY)
	XONLY;
#else
	#error VSYSCALL config is broken
#endif

static int __init vsyscall_setup(char *str)
{
	if (str) {
		if (!strcmp("emulate", str))
			vsyscall_mode = EMULATE;
		else if (!strcmp("xonly", str))
			vsyscall_mode = XONLY;
		else if (!strcmp("none", str))
			vsyscall_mode = NONE;
		else
			return -EINVAL;

		return 0;
	}

	return -EINVAL;
}
early_param("vsyscall", vsyscall_setup);
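
/*
 * Usage example (added for this edit; not in the original file): the
 * mode above is selected on the kernel command line, e.g.:
 *
 *	vsyscall=emulate	(full read and execute emulation)
 *	vsyscall=xonly		(execute-only; reads of the page fault)
 *	vsyscall=none		(any access faults and raises SIGSEGV)
 */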

static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
			      const char *message)
{
	if (!show_unhandled_signals)
		return;

	printk_ratelimited("%s%s[%d] %s ip:%lx cs:%x sp:%lx ax:%lx si:%lx di:%lx\n",
			   level, current->comm, task_pid_nr(current),
			   message, regs->ip, regs->cs,
			   regs->sp, regs->ax, regs->si, regs->di);
}

static int addr_to_vsyscall_nr(unsigned long addr)
{
	int nr;

	if ((addr & ~0xC00UL) != VSYSCALL_ADDR)
		return -EINVAL;

	nr = (addr & 0xC00UL) >> 10;
	if (nr >= 3)
		return -EINVAL;

	return nr;
}
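
/*
 * Worked example (added for illustration; not in the original file):
 * for addr == 0xffffffffff600400, addr & ~0xC00UL == 0xffffffffff600000
 * == VSYSCALL_ADDR, and (addr & 0xC00UL) >> 10 == 1, i.e. vsyscall
 * nr 1 (time).  Addresses that are not 1024-byte aligned within the
 * page, and slot 3, yield -EINVAL.
 */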

static bool write_ok_or_segv(unsigned long ptr, size_t size)
{
	if (!access_ok((void __user *)ptr, size)) {
		struct thread_struct *thread = &current->thread;

		thread->error_code	= X86_PF_USER | X86_PF_WRITE;
		thread->cr2		= ptr;
		thread->trap_nr		= X86_TRAP_PF;

		force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)ptr);
		return false;
	} else {
		return true;
	}
}

bool emulate_vsyscall(unsigned long error_code,
		      struct pt_regs *regs, unsigned long address)
{
	unsigned long caller;
	int vsyscall_nr, syscall_nr, tmp;
	long ret;
	unsigned long orig_dx;

	/* Write faults or kernel-privilege faults never get fixed up. */
	if ((error_code & (X86_PF_WRITE | X86_PF_USER)) != X86_PF_USER)
		return false;

	if (!(error_code & X86_PF_INSTR)) {
		/* Failed vsyscall read */
		if (vsyscall_mode == EMULATE)
			return false;

		/*
		 * User code tried and failed to read the vsyscall page.
		 */
		warn_bad_vsyscall(KERN_INFO, regs, "vsyscall read attempt denied -- look up the vsyscall kernel parameter if you need a workaround");
		return false;
	}

	/*
	 * No point in checking CS -- the only way to get here is a user mode
	 * trap to a high address, which means that we're in 64-bit user code.
	 */

	WARN_ON_ONCE(address != regs->ip);

	if (vsyscall_mode == NONE) {
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall attempted with vsyscall=none");
		return false;
	}

	vsyscall_nr = addr_to_vsyscall_nr(address);

	trace_emulate_vsyscall(vsyscall_nr);

	if (vsyscall_nr < 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "misaligned vsyscall (exploit attempt or buggy program) -- look up the vsyscall kernel parameter if you need a workaround");
		goto sigsegv;
	}

	if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "vsyscall with bad stack (exploit attempt?)");
		goto sigsegv;
	}

	/*
	 * Check for access_ok violations and find the syscall nr.
	 *
	 * NULL is a valid user pointer (in the access_ok sense) on 32-bit and
	 * 64-bit, so we don't need to special-case it here.  For all the
	 * vsyscalls, NULL means "don't write anything" not "write it at
	 * address 0".
	 */
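	/*
	 * Note (added for clarity; not in the original file): by the
	 * x86-64 calling convention used here, regs->di and regs->si
	 * carry the first two user-supplied pointer arguments of each
	 * vsyscall, which is why they are the values checked below.
	 */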
	switch (vsyscall_nr) {
	case 0:
		if (!write_ok_or_segv(regs->di, sizeof(struct __kernel_old_timeval)) ||
		    !write_ok_or_segv(regs->si, sizeof(struct timezone))) {
			ret = -EFAULT;
			goto check_fault;
		}

		syscall_nr = __NR_gettimeofday;
		break;

	case 1:
		if (!write_ok_or_segv(regs->di, sizeof(__kernel_old_time_t))) {
			ret = -EFAULT;
			goto check_fault;
		}

		syscall_nr = __NR_time;
		break;

	case 2:
		if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
		    !write_ok_or_segv(regs->si, sizeof(unsigned))) {
			ret = -EFAULT;
			goto check_fault;
		}

		syscall_nr = __NR_getcpu;
		break;
	}

	/*
	 * Handle seccomp.  regs->ip must be the original value.
	 * See seccomp_send_sigsys and Documentation/userspace-api/seccomp_filter.rst.
	 *
	 * We could optimize the seccomp disabled case, but performance
	 * here doesn't matter.
	 */
	regs->orig_ax = syscall_nr;
	regs->ax = -ENOSYS;
	tmp = secure_computing();
	if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
		warn_bad_vsyscall(KERN_DEBUG, regs,
				  "seccomp tried to change syscall nr or ip");
		force_exit_sig(SIGSYS);
		return true;
	}
	regs->orig_ax = -1;
	if (tmp)
		goto do_ret;  /* skip requested */

	/*
	 * With a real vsyscall, page faults cause SIGSEGV.
	 */
	ret = -EFAULT;
	switch (vsyscall_nr) {
	case 0:
		/* this decodes regs->di and regs->si on its own */
		ret = __x64_sys_gettimeofday(regs);
		break;

	case 1:
		/* this decodes regs->di on its own */
		ret = __x64_sys_time(regs);
		break;

	case 2:
		/* while we could clobber regs->dx, we didn't in the past... */
		orig_dx = regs->dx;
		regs->dx = 0;
		/* this decodes regs->di, regs->si and regs->dx on its own */
		ret = __x64_sys_getcpu(regs);
		regs->dx = orig_dx;
		break;
	}

check_fault:
	if (ret == -EFAULT) {
		/* Bad news -- userspace fed a bad pointer to a vsyscall. */
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall fault (exploit attempt?)");
		goto sigsegv;
	}

	regs->ax = ret;

do_ret:
	/* Emulate a ret instruction. */
	regs->ip = caller;
	regs->sp += 8;
	return true;

sigsegv:
	force_sig(SIGSEGV);
	return true;
}

/*
 * A pseudo VMA to allow ptrace access for the vsyscall page.  This only
 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
 * not need special handling anymore:
 */
static const char *gate_vma_name(struct vm_area_struct *vma)
{
	return "[vsyscall]";
}
static const struct vm_operations_struct gate_vma_ops = {
	.name = gate_vma_name,
};
static struct vm_area_struct gate_vma __ro_after_init = {
	.vm_start	= VSYSCALL_ADDR,
	.vm_end		= VSYSCALL_ADDR + PAGE_SIZE,
	.vm_page_prot	= PAGE_READONLY_EXEC,
	.vm_flags	= VM_READ | VM_EXEC,
	.vm_ops		= &gate_vma_ops,
};
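
/*
 * Illustration (added for this edit; not part of the original file):
 * gate_vma_name() is what makes this page show up in /proc/<pid>/maps,
 * typically as:
 *
 *	ffffffffff600000-ffffffffff601000 --xp ... [vsyscall]
 *
 * ("r-xp" under vsyscall=emulate, where the VMA keeps VM_READ; "--xp"
 * under vsyscall=xonly, see map_vsyscall() below).
 */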

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
#ifdef CONFIG_COMPAT
	if (!mm || !test_bit(MM_CONTEXT_HAS_VSYSCALL, &mm->context.flags))
		return NULL;
#endif
	if (vsyscall_mode == NONE)
		return NULL;
	return &gate_vma;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(mm);

	if (!vma)
		return 0;

	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable mm, typically from interrupt
 * context. It is less reliable than using a task's mm and may give
 * false positives.
 */
int in_gate_area_no_mm(unsigned long addr)
{
	return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR;
}

/*
 * The VSYSCALL page is the only user-accessible page in the kernel address
 * range.  Normally, the kernel page tables can have _PAGE_USER clear, but
 * the tables covering VSYSCALL_ADDR need _PAGE_USER set if vsyscalls
 * are enabled.
 *
 * Some day we may create a "minimal" vsyscall mode in which we emulate
 * vsyscalls but leave the page not present.  If so, we skip calling
 * this.
 */
void __init set_vsyscall_pgtable_user_bits(pgd_t *root)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_pgd(root, VSYSCALL_ADDR);
	set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
	p4d = p4d_offset(pgd, VSYSCALL_ADDR);
#if CONFIG_PGTABLE_LEVELS >= 5
	set_p4d(p4d, __p4d(p4d_val(*p4d) | _PAGE_USER));
#endif
	pud = pud_offset(p4d, VSYSCALL_ADDR);
	set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));
	pmd = pmd_offset(pud, VSYSCALL_ADDR);
	set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER));
}

void __init map_vsyscall(void)
{
	extern char __vsyscall_page;
	unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);

	/*
	 * For full emulation, the page needs to exist for real.  In
	 * execute-only mode, there is no PTE at all backing the vsyscall
	 * page.
	 */
	if (vsyscall_mode == EMULATE) {
		__set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
			     PAGE_KERNEL_VVAR);
		set_vsyscall_pgtable_user_bits(swapper_pg_dir);
	}

	if (vsyscall_mode == XONLY)
		vm_flags_init(&gate_vma, VM_EXEC);

	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
		     (unsigned long)VSYSCALL_ADDR);
}