/*-
 * Copyright (c) 2003 Peter Wemm
 * All rights reserved.
 *
 * Copyright (c) 2021 The FreeBSD Foundation
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <machine/asmacros.h>
#include <sys/syscall.h>

#include "ia32_assym.h"

	.text
/*
 * Signal trampoline, mapped as a vdso into the shared page, or copied
 * to the top of the user stack for old binaries.
 */
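/*
 * The kernel builds a struct ia32_sigframe on the user stack before
 * transferring control here; IA32_SIGF_HANDLER and IA32_SIGF_UC (from
 * ia32_assym.h above) are the offsets of the handler address and of the
 * saved ucontext within that frame, relative to %esp on entry.
 */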
	ALIGN_TEXT
	.globl	__vdso_ia32_sigcode
__vdso_ia32_sigcode:
	.cfi_startproc
	.cfi_signal_frame
	.cfi_def_cfa	%esp, 0
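	/*
	 * Unwind annotations: the interrupted register values live in the
	 * ucontext saved inside the frame, so each is described as an
	 * offset from the CFA (%esp at the first instruction).
	 */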
#if 0
	.cfi_offset	%gs,  IA32_SIGF_UC + IA32_UC_GS
	.cfi_offset	%fs,  IA32_SIGF_UC + IA32_UC_FS
	.cfi_offset	%es,  IA32_SIGF_UC + IA32_UC_ES
	.cfi_offset	%ds,  IA32_SIGF_UC + IA32_UC_DS
#endif
	.cfi_offset	%edi, IA32_SIGF_UC + IA32_UC_EDI
	.cfi_offset	%esi, IA32_SIGF_UC + IA32_UC_ESI
	.cfi_offset	%ebp, IA32_SIGF_UC + IA32_UC_EBP
	.cfi_offset	%ebx, IA32_SIGF_UC + IA32_UC_EBX
	.cfi_offset	%edx, IA32_SIGF_UC + IA32_UC_EDX
	.cfi_offset	%ecx, IA32_SIGF_UC + IA32_UC_ECX
	.cfi_offset	%eax, IA32_SIGF_UC + IA32_UC_EAX
	.cfi_offset	%eip, IA32_SIGF_UC + IA32_UC_EIP
#if 0
	.cfi_offset	%cs,  IA32_SIGF_UC + IA32_UC_CS
	.cfi_offset	%flags, IA32_SIGF_UC + IA32_UC_EFLAGS
#endif
	.cfi_offset	%esp, IA32_SIGF_UC + IA32_UC_ESP
#if 0
	.cfi_offset	%ss,  IA32_SIGF_UC + IA32_UC_SS
	.cfi_offset	93 /* %fs.base */, IA32_SIGF_UC + IA32_UC_FSBASE
	.cfi_offset	94 /* %gs.base */, IA32_SIGF_UC + IA32_UC_GSBASE
#endif
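	/*
	 * The handler's arguments (signal number, siginfo pointer and
	 * ucontext pointer) are already on the stack, so the handler can
	 * be called directly.
	 */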
	calll	*IA32_SIGF_HANDLER(%esp)
	leal	IA32_SIGF_UC(%esp),%eax	/* get ucontext */
	pushl	%eax
	.cfi_def_cfa	%esp, 4
	movl	$SYS_sigreturn,%eax
	pushl	%eax			/* junk to fake return addr. */
	.cfi_def_cfa	%esp, 8
	int	$0x80			/* enter kernel with args */
					/* on stack */
1:
	jmp	1b			/* sigreturn(2) does not return here */
	.cfi_endproc

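/*
 * FreeBSD 4.x-compatible trampoline: the same flow as above, but the
 * frame carries the old 4.x ucontext layout and the old sigreturn(2)
 * syscall number is used to reenter the kernel.
 */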
	ALIGN_TEXT
	.globl	__vdso_freebsd4_ia32_sigcode
__vdso_freebsd4_ia32_sigcode:
#ifdef COMPAT_FREEBSD4
	calll	*IA32_SIGF_HANDLER(%esp)
	leal	IA32_SIGF_UC4(%esp),%eax /* get ucontext */
	pushl	%eax
	movl	$344,%eax		/* 4.x SYS_sigreturn */
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
					/* on stack */
1:
	jmp	1b
#else
	ud2
#endif

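/*
 * 4.3BSD-style (COMPAT_43) trampoline: the handler receives the old
 * struct sigcontext rather than a ucontext, and the old sigreturn(2)
 * syscall number is used.
 */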
	ALIGN_TEXT
	.globl	__vdso_ia32_osigcode
__vdso_ia32_osigcode:
#ifdef COMPAT_43
	calll	*IA32_SIGF_HANDLER(%esp) /* call signal handler */
	leal	IA32_SIGF_SC(%esp),%eax	/* get sigcontext */
	pushl	%eax
	movl	$103,%eax		/* 3.x SYS_sigreturn */
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
1:
	jmp	1b
#else
	ud2
#endif

/*
 * Our lcall $7,$0 handler remains in user mode (ring 3), since lcalls
 * don't change the interrupt mask, so if this one went directly to the
 * kernel then there would be a window with interrupts enabled in kernel
 * mode, and all interrupt handlers would have to be almost as complicated
 * as the NMI handler to support this.
 *
 * Instead, convert the lcall into an int $0x80 call.  The kernel does most
 * of the conversion by popping the lcall return values off the user
 * stack and returning to them instead of to here, except when the
 * conversion itself fails.  Adjusting the stack here is impossible for
 * vfork() and harder for other syscalls.
 */
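/*
 * For reference, a syscall stub in an old binary looks roughly like the
 * following sketch (not part of the trampoline itself):
 *
 *	movl	$SYS_getpid,%eax	# syscall number, same as for int $0x80
 *	lcall	$7,$0			# enters the trampoline below via the call gate
 *
 * The far return address pushed by the lcall is what the kernel pops off
 * the user stack when the conversion described above succeeds.
 */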
	ALIGN_TEXT
	.globl	__vdso_lcall_tramp
__vdso_lcall_tramp:
#ifdef COMPAT_43
	int	$0x80			/* %eax still holds the caller's syscall number */
1:	jmp	1b
#else
	ud2
#endif
	.p2align 1