xref: /freebsd/sys/amd64/ia32/ia32_sigtramp.S (revision 5956d97f4b3204318ceb6aa9c77bd0bc6ea87a41)
/*-
 * Copyright (c) 2003 Peter Wemm
 * All rights reserved.
 *
 * Copyright (c) 2021 The FreeBSD Foundation
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <machine/asmacros.h>
#include <sys/syscall.h>

#include "ia32_assym.h"

	.text
/*
 * Signal trampoline, mapped as a vdso into the shared page, or copied
 * to the top of the user stack for old binaries.
 */
	ALIGN_TEXT
	.globl	__vdso_ia32_sigcode
__vdso_ia32_sigcode:
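	/*
	 * The CFI directives below describe the signal frame to stack
	 * unwinders: on entry %esp points at the sigframe (CFA offset 0),
	 * and each annotated register is saved in the ucontext embedded
	 * in the frame at offset IA32_SIGF_UC.
	 */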
	.cfi_startproc
	.cfi_signal_frame
	.cfi_def_cfa	%esp, 0
#if 0
	.cfi_offset	%gs,  IA32_SIGF_UC + IA32_UC_GS
	.cfi_offset	%fs,  IA32_SIGF_UC + IA32_UC_FS
	.cfi_offset	%es,  IA32_SIGF_UC + IA32_UC_ES
	.cfi_offset	%ds,  IA32_SIGF_UC + IA32_UC_DS
#endif
	.cfi_offset	%edi, IA32_SIGF_UC + IA32_UC_EDI
	.cfi_offset	%esi, IA32_SIGF_UC + IA32_UC_ESI
	.cfi_offset	%ebp, IA32_SIGF_UC + IA32_UC_EBP
	.cfi_offset	%ebx, IA32_SIGF_UC + IA32_UC_EBX
	.cfi_offset	%edx, IA32_SIGF_UC + IA32_UC_EDX
	.cfi_offset	%ecx, IA32_SIGF_UC + IA32_UC_ECX
	.cfi_offset	%eax, IA32_SIGF_UC + IA32_UC_EAX
	.cfi_offset	%eip, IA32_SIGF_UC + IA32_UC_EIP
#if 0
	.cfi_offset	%cs,  IA32_SIGF_UC + IA32_UC_CS
	.cfi_offset	%flags, IA32_SIGF_UC + IA32_UC_EFLAGS
#endif
	.cfi_offset	%esp, IA32_SIGF_UC + IA32_UC_ESP
#if 0
	.cfi_offset	%ss,  IA32_SIGF_UC + IA32_UC_SS
	.cfi_offset	93 /* %fs.base */, IA32_SIGF_UC + IA32_UC_FSBASE
	.cfi_offset	94 /* %gs.base */, IA32_SIGF_UC + IA32_UC_GSBASE
#endif
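	/*
	 * Invoke the handler whose address the kernel stored at
	 * IA32_SIGF_HANDLER in the sigframe; the kernel also placed the
	 * handler's arguments on the stack before transferring control here.
	 */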
	calll	*IA32_SIGF_HANDLER(%esp)
	leal	IA32_SIGF_UC(%esp),%eax	/* get ucontext */
	pushl	%eax
	.cfi_def_cfa	%esp, 4
	movl	$SYS_sigreturn,%eax
	pushl	%eax			/* junk to fake return addr. */
	.cfi_def_cfa	%esp, 8
	int	$0x80			/* enter kernel with args */
					/* on stack */
1:
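	/* sigreturn(2) does not return on success; spin if it somehow fails */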
	jmp	1b
	.cfi_endproc

#ifdef COMPAT_FREEBSD4
	ALIGN_TEXT
	.globl	__vdso_freebsd4_ia32_sigcode
__vdso_freebsd4_ia32_sigcode:
	calll	*IA32_SIGF_HANDLER(%esp)
	leal	IA32_SIGF_UC4(%esp),%eax /* get ucontext */
	pushl	%eax
	movl	$344,%eax		/* 4.x SYS_sigreturn */
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
					/* on stack */
1:
	jmp	1b
#endif

#ifdef COMPAT_43
	ALIGN_TEXT
	.globl	__vdso_ia32_osigcode
__vdso_ia32_osigcode:
	calll	*IA32_SIGF_HANDLER(%esp) /* call signal handler */
	leal	IA32_SIGF_SC(%esp),%eax	/* get sigcontext */
	pushl	%eax
	movl	$103,%eax		/* 3.x SYS_sigreturn */
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
1:
	jmp	1b

/*
 * Our lcall $7,$0 handler remains in user mode (ring 3), since lcalls
 * don't change the interrupt mask, so if this one went directly to the
 * kernel then there would be a window with interrupts enabled in kernel
 * mode, and all interrupt handlers would have to be almost as complicated
 * as the NMI handler to support this.
 *
 * Instead, convert the lcall to an int $0x80 call.  The kernel does most
 * of the conversion by popping the lcall return values off the user
 * stack and returning to them instead of to here, except when the
 * conversion itself fails.  Adjusting the stack here is impossible for
 * vfork() and harder for other syscalls.
 */
	ALIGN_TEXT
	.globl	__vdso_lcall_tramp
__vdso_lcall_tramp:
	int	$0x80
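	/*
	 * Normally the kernel returns to the lcall's caller rather than to
	 * this point; the loop below is reached only if the conversion failed.
	 */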
1:	jmp	1b
#endif
	.p2align 1