// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012-2014 Andy Lutomirski <luto@amacapital.net>
 *
 * Based on the original implementation which is:
 *	Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 *	Copyright 2003 Andi Kleen, SuSE Labs.
 *
 * Parts of the original code have been moved to arch/x86/vdso/vma.c
 *
 * This file implements vsyscall emulation. vsyscalls are a legacy ABI:
 * Userspace can request certain kernel services by calling fixed
 * addresses. This concept is problematic:
 *
 * - It interferes with ASLR.
 * - It's awkward to write code that lives in kernel addresses but is
 *   callable by userspace at fixed addresses.
 * - The whole concept is impossible for 32-bit compat userspace.
 * - UML cannot easily virtualize a vsyscall.
 *
 * As of mid-2014, I believe that there is no new userspace code that
 * will use a vsyscall if the vDSO is present. I hope that there will
 * soon be no new userspace code that will ever use a vsyscall.
 *
 * The code in this file emulates vsyscalls when notified of a page
 * fault to a vsyscall address.
 */

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>
#include <linux/mm_types.h>
#include <linux/syscalls.h>
#include <linux/ratelimit.h>

#include <asm/vsyscall.h>
#include <asm/unistd.h>
#include <asm/fixmap.h>
#include <asm/traps.h>
#include <asm/paravirt.h>

#define CREATE_TRACE_POINTS
#include "vsyscall_trace.h"

static enum { EMULATE, NONE } vsyscall_mode =
#ifdef CONFIG_LEGACY_VSYSCALL_NONE
	NONE;
#else
	EMULATE;
#endif

static int __init vsyscall_setup(char *str)
{
	if (str) {
		if (!strcmp("emulate", str))
			vsyscall_mode = EMULATE;
		else if (!strcmp("none", str))
			vsyscall_mode = NONE;
		else
			return -EINVAL;

		return 0;
	}

	return -EINVAL;
}
early_param("vsyscall", vsyscall_setup);

static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
			      const char *message)
{
	if (!show_unhandled_signals)
		return;

	printk_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
			   level, current->comm, task_pid_nr(current),
			   message, regs->ip, regs->cs,
			   regs->sp, regs->ax, regs->si, regs->di);
}

static int addr_to_vsyscall_nr(unsigned long addr)
{
	int nr;

	if ((addr & ~0xC00UL) != VSYSCALL_ADDR)
		return -EINVAL;

	nr = (addr & 0xC00UL) >> 10;
	if (nr >= 3)
		return -EINVAL;

	return nr;
}
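
/*
 * A worked example of the decoding above (a sketch for illustration;
 * the constants follow from the 0xC00 mask, the 10-bit shift, and the
 * fixed VSYSCALL_ADDR base of 0xffffffffff600000):
 *
 *	0xffffffffff600000 -> 0  (gettimeofday)
 *	0xffffffffff600400 -> 1  (time)
 *	0xffffffffff600800 -> 2  (getcpu)
 *
 * Any other address in the page, e.g. 0xffffffffff600100 or the unused
 * 0xffffffffff600c00 slot (nr == 3), yields -EINVAL, which the caller
 * below turns into SIGSEGV.
 */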

static bool write_ok_or_segv(unsigned long ptr, size_t size)
{
	/*
	 * XXX: if access_ok, get_user, and put_user handled
	 * sig_on_uaccess_err, this could go away.
	 */

	if (!access_ok((void __user *)ptr, size)) {
		struct thread_struct *thread = &current->thread;

		thread->error_code = X86_PF_USER | X86_PF_WRITE;
		thread->cr2 = ptr;
		thread->trap_nr = X86_TRAP_PF;

		force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)ptr, current);
		return false;
	} else {
		return true;
	}
}

bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
{
	struct task_struct *tsk;
	unsigned long caller;
	int vsyscall_nr, syscall_nr, tmp;
	int prev_sig_on_uaccess_err;
	long ret;
	unsigned long orig_dx;

	/*
	 * No point in checking CS -- the only way to get here is a user mode
	 * trap to a high address, which means that we're in 64-bit user code.
	 */

	WARN_ON_ONCE(address != regs->ip);

	if (vsyscall_mode == NONE) {
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall attempted with vsyscall=none");
		return false;
	}

	vsyscall_nr = addr_to_vsyscall_nr(address);

	trace_emulate_vsyscall(vsyscall_nr);

	if (vsyscall_nr < 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "misaligned vsyscall (exploit attempt or buggy program) -- look up the vsyscall kernel parameter if you need a workaround");
		goto sigsegv;
	}

	if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "vsyscall with bad stack (exploit attempt?)");
		goto sigsegv;
	}

	tsk = current;

	/*
	 * Check for access_ok violations and find the syscall nr.
	 *
	 * NULL is a valid user pointer (in the access_ok sense) on 32-bit and
	 * 64-bit, so we don't need to special-case it here. For all the
	 * vsyscalls, NULL means "don't write anything" not "write it at
	 * address 0".
	 */
	switch (vsyscall_nr) {
	case 0:
		if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
		    !write_ok_or_segv(regs->si, sizeof(struct timezone))) {
			ret = -EFAULT;
			goto check_fault;
		}

		syscall_nr = __NR_gettimeofday;
		break;

	case 1:
		if (!write_ok_or_segv(regs->di, sizeof(time_t))) {
			ret = -EFAULT;
			goto check_fault;
		}

		syscall_nr = __NR_time;
		break;

	case 2:
		if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
		    !write_ok_or_segv(regs->si, sizeof(unsigned))) {
			ret = -EFAULT;
			goto check_fault;
		}

		syscall_nr = __NR_getcpu;
		break;
	}

	/*
	 * Handle seccomp. regs->ip must be the original value.
	 * See seccomp_send_sigsys and Documentation/userspace-api/seccomp_filter.rst.
	 *
	 * We could optimize the seccomp disabled case, but performance
	 * here doesn't matter.
	 */
	regs->orig_ax = syscall_nr;
	regs->ax = -ENOSYS;
	tmp = secure_computing(NULL);
	if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
		warn_bad_vsyscall(KERN_DEBUG, regs,
				  "seccomp tried to change syscall nr or ip");
		do_exit(SIGSYS);
	}
	regs->orig_ax = -1;
	if (tmp)
		goto do_ret; /* skip requested */

	/*
	 * With a real vsyscall, page faults cause SIGSEGV. We want to
	 * preserve that behavior to make writing exploits harder.
	 */
	prev_sig_on_uaccess_err = current->thread.sig_on_uaccess_err;
	current->thread.sig_on_uaccess_err = 1;

	ret = -EFAULT;
	switch (vsyscall_nr) {
	case 0:
		/* this decodes regs->di and regs->si on its own */
		ret = __x64_sys_gettimeofday(regs);
		break;

	case 1:
		/* this decodes regs->di on its own */
		ret = __x64_sys_time(regs);
		break;

	case 2:
		/* while we could clobber regs->dx, we didn't in the past... */
		orig_dx = regs->dx;
		regs->dx = 0;
		/* this decodes regs->di, regs->si and regs->dx on its own */
		ret = __x64_sys_getcpu(regs);
		regs->dx = orig_dx;
		break;
	}

	current->thread.sig_on_uaccess_err = prev_sig_on_uaccess_err;

check_fault:
	if (ret == -EFAULT) {
		/* Bad news -- userspace fed a bad pointer to a vsyscall. */
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall fault (exploit attempt?)");

		/*
		 * If we failed to generate a signal for any reason,
		 * generate one here. (This should be impossible.)
		 */
		if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) &&
				 !sigismember(&tsk->pending.signal, SIGSEGV)))
			goto sigsegv;

		return true; /* Don't emulate the ret. */
	}

	regs->ax = ret;

do_ret:
	/* Emulate a ret instruction. */
	regs->ip = caller;
	regs->sp += 8;
	return true;

sigsegv:
	force_sig(SIGSEGV, current);
	return true;
}
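
/*
 * For reference, the legacy calling convention emulated above: an old
 * (typically static) binary reaches this path with an ordinary call to
 * a fixed address. A sketch of such a caller (not part of this file;
 * vgtod is a made-up name for illustration):
 *
 *	struct timeval tv;
 *	long (*vgtod)(struct timeval *, struct timezone *) =
 *		(void *)0xffffffffff600000;	// gettimeofday slot
 *	vgtod(&tv, NULL);			// faults; emulated above
 *
 * In emulate mode the vsyscall page is mapped without execute
 * permission, so the call traps into the page fault handler, which
 * hands us regs->ip == the vsyscall address. On success we return with
 * regs->ax holding the result and the faked ret (a pop of the 8-byte
 * return address) already applied.
 */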

/*
 * A pseudo VMA to allow ptrace access for the vsyscall page. This only
 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
 * not need special handling anymore:
 */
static const char *gate_vma_name(struct vm_area_struct *vma)
{
	return "[vsyscall]";
}
static const struct vm_operations_struct gate_vma_ops = {
	.name = gate_vma_name,
};
static struct vm_area_struct gate_vma = {
	.vm_start	= VSYSCALL_ADDR,
	.vm_end		= VSYSCALL_ADDR + PAGE_SIZE,
	.vm_page_prot	= PAGE_READONLY_EXEC,
	.vm_flags	= VM_READ | VM_EXEC,
	.vm_ops		= &gate_vma_ops,
};

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
#ifdef CONFIG_COMPAT
	if (!mm || mm->context.ia32_compat)
		return NULL;
#endif
	if (vsyscall_mode == NONE)
		return NULL;
	return &gate_vma;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(mm);

	if (!vma)
		return 0;

	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable mm, typically from interrupt
 * context. It is less reliable than using a task's mm and may give
 * false positives.
 */
int in_gate_area_no_mm(unsigned long addr)
{
	return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR;
}

/*
 * The VSYSCALL page is the only user-accessible page in the kernel address
 * range. Normally, the kernel page tables can have _PAGE_USER clear, but
 * the tables covering VSYSCALL_ADDR need _PAGE_USER set if vsyscalls
 * are enabled.
 *
 * Some day we may create a "minimal" vsyscall mode in which we emulate
 * vsyscalls but leave the page not present. If so, we skip calling
 * this.
 */
void __init set_vsyscall_pgtable_user_bits(pgd_t *root)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_pgd(root, VSYSCALL_ADDR);
	set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
	p4d = p4d_offset(pgd, VSYSCALL_ADDR);
#if CONFIG_PGTABLE_LEVELS >= 5
	set_p4d(p4d, __p4d(p4d_val(*p4d) | _PAGE_USER));
#endif
	pud = pud_offset(p4d, VSYSCALL_ADDR);
	set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));
	pmd = pmd_offset(pud, VSYSCALL_ADDR);
	set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER));
}

void __init map_vsyscall(void)
{
	extern char __vsyscall_page;
	unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);

	if (vsyscall_mode != NONE) {
		__set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
			     PAGE_KERNEL_VVAR);
		set_vsyscall_pgtable_user_bits(swapper_pg_dir);
	}

	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
		     (unsigned long)VSYSCALL_ADDR);
}
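
/*
 * A quick way to observe the effect of map_vsyscall() from userspace
 * (a sketch; the exact formatting of the maps output may vary by
 * kernel version):
 *
 *	$ grep vsyscall /proc/self/maps
 *	ffffffffff600000-ffffffffff601000 r-xp ... [vsyscall]
 *
 * The line comes from the gate VMA above. When booted with
 * vsyscall=none, get_gate_vma() returns NULL and the entry disappears.
 */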