// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012-2014 Andy Lutomirski <luto@amacapital.net>
 *
 * Based on the original implementation which is:
 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright 2003 Andi Kleen, SuSE Labs.
 *
 * Parts of the original code have been moved to arch/x86/vdso/vma.c
 *
 * This file implements vsyscall emulation. vsyscalls are a legacy ABI:
 * Userspace can request certain kernel services by calling fixed
 * addresses. This concept is problematic:
 *
 * - It interferes with ASLR.
 * - It's awkward to write code that lives in kernel addresses but is
 *   callable by userspace at fixed addresses.
 * - The whole concept is impossible for 32-bit compat userspace.
 * - UML cannot easily virtualize a vsyscall.
 *
 * As of mid-2014, I believe that there is no new userspace code that
 * will use a vsyscall if the vDSO is present. I hope that there will
 * soon be no new userspace code that will ever use a vsyscall.
 *
 * The code in this file emulates vsyscalls when notified of a page
 * fault to a vsyscall address.
 */

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>
#include <linux/mm_types.h>
#include <linux/syscalls.h>
#include <linux/ratelimit.h>

#include <asm/vsyscall.h>
#include <asm/unistd.h>
#include <asm/fixmap.h>
#include <asm/traps.h>
#include <asm/paravirt.h>

#define CREATE_TRACE_POINTS
#include "vsyscall_trace.h"

static enum { EMULATE, NONE } vsyscall_mode =
#ifdef CONFIG_LEGACY_VSYSCALL_NONE
	NONE;
#else
	EMULATE;
#endif

static int __init vsyscall_setup(char *str)
{
	if (str) {
		if (!strcmp("emulate", str))
			vsyscall_mode = EMULATE;
		else if (!strcmp("none", str))
			vsyscall_mode = NONE;
		else
			return -EINVAL;

		return 0;
	}

	return -EINVAL;
}
early_param("vsyscall", vsyscall_setup);

static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
			      const char *message)
{
	if (!show_unhandled_signals)
		return;

	printk_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
			   level, current->comm, task_pid_nr(current),
			   message, regs->ip, regs->cs,
			   regs->sp, regs->ax, regs->si, regs->di);
}

static int addr_to_vsyscall_nr(unsigned long addr)
{
	int nr;

	if ((addr & ~0xC00UL) != VSYSCALL_ADDR)
		return -EINVAL;

	nr = (addr & 0xC00UL) >> 10;
	if (nr >= 3)
		return -EINVAL;

	return nr;
}
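
/*
 * Illustrative note (not from the original source): the three legacy
 * entry points are spaced 1024 bytes apart in the vsyscall page, so
 * with VSYSCALL_ADDR == 0xffffffffff600000 the mapping above is
 *
 *	0xffffffffff600000 -> 0 (gettimeofday)
 *	0xffffffffff600400 -> 1 (time)
 *	0xffffffffff600800 -> 2 (getcpu)
 *
 * 0xffffffffff600c00 passes the mask check but yields nr == 3, so it
 * is rejected, as is any address that fails the mask check.
 */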

static bool write_ok_or_segv(unsigned long ptr, size_t size)
{
	/*
	 * XXX: if access_ok, get_user, and put_user handled
	 * sig_on_uaccess_err, this could go away.
	 */

	if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) {
		siginfo_t info;
		struct thread_struct *thread = &current->thread;

		thread->error_code = 6;	/* user fault, no page, write */
		thread->cr2 = ptr;
		thread->trap_nr = X86_TRAP_PF;

		memset(&info, 0, sizeof(info));
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;
		info.si_addr = (void __user *)ptr;

		force_sig_info(SIGSEGV, &info, current);
		return false;
	} else {
		return true;
	}
}

bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
{
	struct task_struct *tsk;
	unsigned long caller;
	int vsyscall_nr, syscall_nr, tmp;
	int prev_sig_on_uaccess_err;
	long ret;

	/*
	 * No point in checking CS -- the only way to get here is a user mode
	 * trap to a high address, which means that we're in 64-bit user code.
	 */

	WARN_ON_ONCE(address != regs->ip);

	if (vsyscall_mode == NONE) {
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall attempted with vsyscall=none");
		return false;
	}

	vsyscall_nr = addr_to_vsyscall_nr(address);

	trace_emulate_vsyscall(vsyscall_nr);

	if (vsyscall_nr < 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "misaligned vsyscall (exploit attempt or buggy program) -- look up the vsyscall kernel parameter if you need a workaround");
		goto sigsegv;
	}

	if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "vsyscall with bad stack (exploit attempt?)");
		goto sigsegv;
	}

	tsk = current;

	/*
	 * Check for access_ok violations and find the syscall nr.
	 *
	 * NULL is a valid user pointer (in the access_ok sense) on 32-bit and
	 * 64-bit, so we don't need to special-case it here. For all the
	 * vsyscalls, NULL means "don't write anything" not "write it at
	 * address 0".
	 */
	switch (vsyscall_nr) {
	case 0:
		if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
		    !write_ok_or_segv(regs->si, sizeof(struct timezone))) {
			ret = -EFAULT;
			goto check_fault;
		}

		syscall_nr = __NR_gettimeofday;
		break;

	case 1:
		if (!write_ok_or_segv(regs->di, sizeof(time_t))) {
			ret = -EFAULT;
			goto check_fault;
		}

		syscall_nr = __NR_time;
		break;

	case 2:
		if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
		    !write_ok_or_segv(regs->si, sizeof(unsigned))) {
			ret = -EFAULT;
			goto check_fault;
		}

		syscall_nr = __NR_getcpu;
		break;
	}

	/*
	 * Handle seccomp. regs->ip must be the original value.
	 * See seccomp_send_sigsys and Documentation/prctl/seccomp_filter.txt.
	 *
	 * We could optimize the seccomp disabled case, but performance
	 * here doesn't matter.
	 */
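	/*
	 * Illustrative note (not from the original source): a seccomp
	 * filter observes this as an ordinary syscall -- the real nr in
	 * orig_ax, arguments in the usual registers, ip still inside the
	 * vsyscall page -- so verdicts like SECCOMP_RET_ERRNO, _TRAP and
	 * _KILL behave as they would for a real syscall instruction. The
	 * check below additionally kills the task if a tracer or filter
	 * tries to rewrite the syscall nr or ip.
	 */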
	regs->orig_ax = syscall_nr;
	regs->ax = -ENOSYS;
	tmp = secure_computing(NULL);
	if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
		warn_bad_vsyscall(KERN_DEBUG, regs,
				  "seccomp tried to change syscall nr or ip");
		do_exit(SIGSYS);
	}
	regs->orig_ax = -1;
	if (tmp)
		goto do_ret; /* skip requested */

	/*
	 * With a real vsyscall, page faults cause SIGSEGV. We want to
	 * preserve that behavior to make writing exploits harder.
	 */
	prev_sig_on_uaccess_err = current->thread.sig_on_uaccess_err;
	current->thread.sig_on_uaccess_err = 1;

	ret = -EFAULT;
	switch (vsyscall_nr) {
	case 0:
		ret = sys_gettimeofday(
			(struct timeval __user *)regs->di,
			(struct timezone __user *)regs->si);
		break;

	case 1:
		ret = sys_time((time_t __user *)regs->di);
		break;

	case 2:
		ret = sys_getcpu((unsigned __user *)regs->di,
				 (unsigned __user *)regs->si,
				 NULL);
		break;
	}

	current->thread.sig_on_uaccess_err = prev_sig_on_uaccess_err;

check_fault:
	if (ret == -EFAULT) {
		/* Bad news -- userspace fed a bad pointer to a vsyscall. */
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall fault (exploit attempt?)");

		/*
		 * If we failed to generate a signal for any reason,
		 * generate one here. (This should be impossible.)
		 */
		if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) &&
				 !sigismember(&tsk->pending.signal, SIGSEGV)))
			goto sigsegv;

		return true; /* Don't emulate the ret. */
	}

	regs->ax = ret;

do_ret:
	/* Emulate a ret instruction. */
	regs->ip = caller;
	regs->sp += 8;
	return true;

sigsegv:
	force_sig(SIGSEGV, current);
	return true;
}

/*
 * A pseudo VMA to allow ptrace access for the vsyscall page. This only
 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
 * not need special handling anymore:
 */
static const char *gate_vma_name(struct vm_area_struct *vma)
{
	return "[vsyscall]";
}
static const struct vm_operations_struct gate_vma_ops = {
	.name = gate_vma_name,
};
static struct vm_area_struct gate_vma = {
	.vm_start	= VSYSCALL_ADDR,
	.vm_end		= VSYSCALL_ADDR + PAGE_SIZE,
	.vm_page_prot	= PAGE_READONLY_EXEC,
	.vm_flags	= VM_READ | VM_EXEC,
	.vm_ops		= &gate_vma_ops,
};

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
#ifdef CONFIG_COMPAT
	if (!mm || mm->context.ia32_compat)
		return NULL;
#endif
	if (vsyscall_mode == NONE)
		return NULL;
	return &gate_vma;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(mm);

	if (!vma)
		return 0;

	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable mm, typically from interrupt
 * context. It is less reliable than using a task's mm and may give
 * false positives.
 */
int in_gate_area_no_mm(unsigned long addr)
{
	return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR;
}
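
/*
 * Illustrative note (not from the original source): with the pseudo VMA
 * above, a 64-bit task's /proc/<pid>/maps ends with something like
 *
 *	ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]
 *
 * which is what lets ptrace and core dumps see the page even though no
 * real VMA is ever inserted into the mm.
 */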

/*
 * The VSYSCALL page is the only user-accessible page in the kernel address
 * range. Normally, the kernel page tables can have _PAGE_USER clear, but
 * the tables covering VSYSCALL_ADDR need _PAGE_USER set if vsyscalls
 * are enabled.
 *
 * Some day we may create a "minimal" vsyscall mode in which we emulate
 * vsyscalls but leave the page not present. If so, we skip calling
 * this.
 */
void __init set_vsyscall_pgtable_user_bits(pgd_t *root)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_pgd(root, VSYSCALL_ADDR);
	set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
	p4d = p4d_offset(pgd, VSYSCALL_ADDR);
#if CONFIG_PGTABLE_LEVELS >= 5
	p4d->p4d |= _PAGE_USER;
#endif
	pud = pud_offset(p4d, VSYSCALL_ADDR);
	set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));
	pmd = pmd_offset(pud, VSYSCALL_ADDR);
	set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER));
}

void __init map_vsyscall(void)
{
	extern char __vsyscall_page;
	unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);

	if (vsyscall_mode != NONE) {
		__set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
			     PAGE_KERNEL_VVAR);
		set_vsyscall_pgtable_user_bits(swapper_pg_dir);
	}

	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
		     (unsigned long)VSYSCALL_ADDR);
}
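
/*
 * Illustrative note (not from the original source): the BUILD_BUG_ON
 * above pins the fixmap slot to the ABI-mandated address. If the fixmap
 * were ever rearranged so that __fix_to_virt(VSYSCALL_PAGE) stopped
 * matching VSYSCALL_ADDR, the build would fail instead of silently
 * moving a userspace-visible address.
 */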