/*-
 * Copyright (c) 2003 Peter Wemm
 * All rights reserved.
 *
 * Copyright (c) 2021 The FreeBSD Foundation
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/asmacros.h>
#include <sys/syscall.h>

#include "ia32_assym.h"		/* IA32_SIGF_* / IA32_UC_* frame offsets */

	.text
/*
 * Signal trampoline, mapped as vdso into shared page, or copied to
 * top of user stack for old binaries.
 *
 * On entry the kernel has built a signal frame on the user stack:
 * the handler address is at IA32_SIGF_HANDLER(%esp) and the saved
 * machine context (ucontext) at IA32_SIGF_UC(%esp).  The trampoline
 * calls the handler and then invokes sigreturn(2) via int $0x80 to
 * restore the interrupted context; on success sigreturn does not
 * return here.
 */
	ALIGN_TEXT
	.globl	__vdso_ia32_sigcode
__vdso_ia32_sigcode:
	/*
	 * DWARF CFI annotations let unwinders (debuggers, profilers)
	 * step across the signal frame: the CFA is the kernel-built
	 * frame at %esp, and each interrupted register lives at a
	 * fixed offset inside the ucontext embedded in that frame.
	 */
	.cfi_startproc
	.cfi_signal_frame
	.cfi_def_cfa	%esp, 0
#if 0
	/*
	 * Segment register annotations are disabled -- NOTE(review):
	 * presumably because common unwinders do not handle segment
	 * register restore; confirm before enabling.
	 */
	.cfi_offset	%gs, IA32_SIGF_UC + IA32_UC_GS
	.cfi_offset	%fs, IA32_SIGF_UC + IA32_UC_FS
	.cfi_offset	%es, IA32_SIGF_UC + IA32_UC_ES
	.cfi_offset	%ds, IA32_SIGF_UC + IA32_UC_DS
#endif
	.cfi_offset	%edi, IA32_SIGF_UC + IA32_UC_EDI
	.cfi_offset	%esi, IA32_SIGF_UC + IA32_UC_ESI
	.cfi_offset	%ebp, IA32_SIGF_UC + IA32_UC_EBP
	.cfi_offset	%ebx, IA32_SIGF_UC + IA32_UC_EBX
	.cfi_offset	%edx, IA32_SIGF_UC + IA32_UC_EDX
	.cfi_offset	%ecx, IA32_SIGF_UC + IA32_UC_ECX
	.cfi_offset	%eax, IA32_SIGF_UC + IA32_UC_EAX
	.cfi_offset	%eip, IA32_SIGF_UC + IA32_UC_EIP
#if 0
	.cfi_offset	%cs, IA32_SIGF_UC + IA32_UC_CS
	.cfi_offset	%flags, IA32_SIGF_UC + IA32_UC_EFLAGS
#endif
	.cfi_offset	%esp, IA32_SIGF_UC + IA32_UC_ESP
#if 0
	.cfi_offset	%ss, IA32_SIGF_UC + IA32_UC_SS
	/* 93/94 are the DWARF register numbers for %fs.base/%gs.base */
	.cfi_offset	93 /* %fs.base */, IA32_SIGF_UC + IA32_UC_FSBASE
	.cfi_offset	94 /* %gs.base */, IA32_SIGF_UC + IA32_UC_GSBASE
#endif
	calll	*IA32_SIGF_HANDLER(%esp) /* call the signal handler */
	leal	IA32_SIGF_UC(%esp),%eax	/* get ucontext */
	pushl	%eax			/* ucontext is sigreturn's argument */
	.cfi_def_cfa	%esp, 4
	movl	$SYS_sigreturn,%eax
	pushl	%eax			/* junk to fake return addr. */
	.cfi_def_cfa	%esp, 8
	int	$0x80			/* enter kernel with args */
					/* on stack */
1:
	jmp	1b			/* sigreturn should not return; spin */
	.cfi_endproc

/*
 * Trampoline for FreeBSD 4.x binaries: same shape as above, but the
 * ucontext lives at the 4.x frame offset and the 4.x sigreturn
 * syscall number is used.  Compat-only; no CFI annotations here.
 */
	ALIGN_TEXT
	.globl	__vdso_freebsd4_ia32_sigcode
__vdso_freebsd4_ia32_sigcode:
#ifdef COMPAT_FREEBSD4
	calll	*IA32_SIGF_HANDLER(%esp) /* call the signal handler */
	leal	IA32_SIGF_UC4(%esp),%eax /* get ucontext */
	pushl	%eax
	movl	$344,%eax		/* 4.x SYS_sigreturn */
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
					/* on stack */
1:
	jmp	1b
#else
	ud2				/* trap: compat support not compiled in */
#endif

/*
 * Old-style (COMPAT_43) trampoline: the handler is passed the
 * sigcontext directly and the historic 3.x sigreturn syscall
 * number is used.
 */
	ALIGN_TEXT
	.globl	__vdso_ia32_osigcode
__vdso_ia32_osigcode:
#ifdef COMPAT_43
	calll	*IA32_SIGF_HANDLER(%esp) /* call signal handler */
	leal	IA32_SIGF_SC(%esp),%eax	/* get sigcontext */
	pushl	%eax
	movl	$103,%eax		/* 3.x SYS_sigreturn */
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
1:
	jmp	1b
#else
	ud2				/* trap: compat support not compiled in */
#endif

/*
 * Our lcall $7,$0 handler remains in user mode (ring 3), since lcalls
 * don't change the interrupt mask, so if this one went directly to the
 * kernel then there would be a window with interrupts enabled in kernel
 * mode, and all interrupt handlers would have to be almost as complicated
 * as the NMI handler to support this.
 *
 * Instead, convert the lcall to an int0x80 call.  The kernel does most
 * of the conversion by popping the lcall return values off the user
 * stack and returning to them instead of to here, except when the
 * conversion itself fails.  Adjusting the stack here is impossible for
 * vfork() and harder for other syscalls.
 */
	ALIGN_TEXT
	.globl	__vdso_lcall_tramp
__vdso_lcall_tramp:
#ifdef COMPAT_43
	int	$0x80
1:	jmp	1b			/* reached only if the conversion failed; spin */
#else
	ud2				/* trap: compat support not compiled in */
#endif
	.p2align 1			/* pad end of trampoline area to an even boundary */