xref: /freebsd/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_x86_64.S (revision 6be3386466ab79a84b48429ae66244f21526d3df)
//===-- xray_trampoline_x86.s -----------------------------------*- ASM -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// This implements the X86-specific assembly for the trampolines.
//
//===----------------------------------------------------------------------===//

#include "../builtins/assembly.h"
#include "../sanitizer_common/sanitizer_asm.h"

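// SAVE_REGISTERS spills the flags (via pushfq) and %rbp, %rax, %rcx, %rdx,
// %rsi, %rdi, %r8-%r15, and %xmm0-%xmm7 into a 240-byte stack frame;
// RESTORE_REGISTERS reloads them, so a call into the handler leaves the
// instrumented function's register state untouched.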
.macro SAVE_REGISTERS
	pushfq
	subq $240, %rsp
	CFI_DEF_CFA_OFFSET(248)
	movq	%rbp, 232(%rsp)
	movupd	%xmm0, 216(%rsp)
	movupd	%xmm1, 200(%rsp)
	movupd	%xmm2, 184(%rsp)
	movupd	%xmm3, 168(%rsp)
	movupd	%xmm4, 152(%rsp)
	movupd	%xmm5, 136(%rsp)
	movupd	%xmm6, 120(%rsp)
	movupd	%xmm7, 104(%rsp)
	movq	%rdi, 96(%rsp)
	movq	%rax, 88(%rsp)
	movq	%rdx, 80(%rsp)
	movq	%rsi, 72(%rsp)
	movq	%rcx, 64(%rsp)
	movq	%r8, 56(%rsp)
	movq	%r9, 48(%rsp)
	movq	%r10, 40(%rsp)
	movq	%r11, 32(%rsp)
	movq	%r12, 24(%rsp)
	movq	%r13, 16(%rsp)
	movq	%r14, 8(%rsp)
	movq	%r15, 0(%rsp)
.endm

.macro RESTORE_REGISTERS
	movq	232(%rsp), %rbp
	movupd	216(%rsp), %xmm0
	movupd	200(%rsp), %xmm1
	movupd	184(%rsp), %xmm2
	movupd	168(%rsp), %xmm3
	movupd	152(%rsp), %xmm4
	movupd	136(%rsp), %xmm5
	movupd	120(%rsp), %xmm6
	movupd	104(%rsp), %xmm7
	movq	96(%rsp), %rdi
	movq	88(%rsp), %rax
	movq	80(%rsp), %rdx
	movq	72(%rsp), %rsi
	movq	64(%rsp), %rcx
	movq	56(%rsp), %r8
	movq	48(%rsp), %r9
	movq	40(%rsp), %r10
	movq	32(%rsp), %r11
	movq	24(%rsp), %r12
	movq	16(%rsp), %r13
	movq	8(%rsp), %r14
	movq	0(%rsp), %r15
	addq	$240, %rsp
	popfq
	CFI_DEF_CFA_OFFSET(8)
.endm

.macro ALIGNED_CALL_RAX
	// Call the logging handler after aligning the stack to a 16-byte boundary.
	// The approach taken here uses additional stack space to stash the stack
	// pointer twice before aligning it down to a 16-byte boundary. Whether the
	// incoming stack pointer was 16-byte aligned or only 8-byte aligned, one of
	// the stashed copies always ends up at 8(%rsp) after the call, so the
	// original stack pointer can be restored from there.
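	//
	// A minimal sketch of the two cases, writing S for the %rsp value on entry
	// to this macro (S is always 8-byte aligned here):
	//   pushq %rsp          -> %rsp = S-8,  mem[S-8]  = S
	//   pushq (%rsp)        -> %rsp = S-16, mem[S-16] = S
	//   andq  $-0x10, %rsp  -> %rsp = S-16 if S is 16-byte aligned, else S-24;
	//                          either way 8(%rsp) holds a stashed copy of S,
	//                          which the final movq reloads into %rsp.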
	pushq %rsp
	pushq (%rsp)
	andq $-0x10, %rsp
	callq *%rax
	movq 8(%rsp), %rsp
.endm

	.text
#if !defined(__APPLE__)
	.section .text
	.file "xray_trampoline_x86.S"
#else
	.section __TEXT,__text
#endif

//===----------------------------------------------------------------------===//

	.globl ASM_SYMBOL(__xray_FunctionEntry)
	ASM_HIDDEN(__xray_FunctionEntry)
	.align 16, 0x90
	ASM_TYPE_FUNCTION(__xray_FunctionEntry)
# LLVM-MCA-BEGIN __xray_FunctionEntry
ASM_SYMBOL(__xray_FunctionEntry):
	CFI_STARTPROC
	SAVE_REGISTERS

	// This load has to be atomic; it's concurrent with __xray_patch().
	// On x86/amd64, a simple (type-aligned) MOV instruction is enough.
	movq	ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
	testq	%rax, %rax
	je	.Ltmp0

	// The patched function prologue puts its xray_instr_map index into %r10d.
	movl	%r10d, %edi
	xor	%esi, %esi
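	// %rax holds the patched-function handler (normally installed through
	// __xray_set_handler()); it is invoked as handler(FuncId, Type), i.e. with
	// the function id in %edi and XRayEntryType::ENTRY (0) in %esi.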
	ALIGNED_CALL_RAX

.Ltmp0:
	RESTORE_REGISTERS
	retq
# LLVM-MCA-END
	ASM_SIZE(__xray_FunctionEntry)
	CFI_ENDPROC

//===----------------------------------------------------------------------===//

	.globl ASM_SYMBOL(__xray_FunctionExit)
	ASM_HIDDEN(__xray_FunctionExit)
	.align 16, 0x90
	ASM_TYPE_FUNCTION(__xray_FunctionExit)
# LLVM-MCA-BEGIN __xray_FunctionExit
ASM_SYMBOL(__xray_FunctionExit):
	CFI_STARTPROC
	// Save the important registers first. Since this function is only ever
	// jumped into, we only preserve the return registers (%rax, %rdx, %xmm0,
	// %xmm1) and %rbp.
	subq	$56, %rsp
	CFI_DEF_CFA_OFFSET(64)
	movq	%rbp, 48(%rsp)
	movupd	%xmm0, 32(%rsp)
	movupd	%xmm1, 16(%rsp)
	movq	%rax, 8(%rsp)
	movq	%rdx, 0(%rsp)
	movq	ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
	testq	%rax, %rax
	je	.Ltmp2

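	// Function id in %edi, XRayEntryType::EXIT (1) in %esi.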
	movl	%r10d, %edi
	movl	$1, %esi
	ALIGNED_CALL_RAX

.Ltmp2:
	// Restore the important registers.
	movq	48(%rsp), %rbp
	movupd	32(%rsp), %xmm0
	movupd	16(%rsp), %xmm1
	movq	8(%rsp), %rax
	movq	0(%rsp), %rdx
	addq	$56, %rsp
	CFI_DEF_CFA_OFFSET(8)
	retq
# LLVM-MCA-END
	ASM_SIZE(__xray_FunctionExit)
	CFI_ENDPROC

//===----------------------------------------------------------------------===//

	.globl ASM_SYMBOL(__xray_FunctionTailExit)
	ASM_HIDDEN(__xray_FunctionTailExit)
	.align 16, 0x90
	ASM_TYPE_FUNCTION(__xray_FunctionTailExit)
# LLVM-MCA-BEGIN __xray_FunctionTailExit
ASM_SYMBOL(__xray_FunctionTailExit):
	CFI_STARTPROC
	SAVE_REGISTERS

	movq	ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
	testq	%rax, %rax
	je	.Ltmp4

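	// Function id in %edi, XRayEntryType::TAIL (2) in %esi.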
	movl	%r10d, %edi
	movl	$2, %esi

	ALIGNED_CALL_RAX

.Ltmp4:
	RESTORE_REGISTERS
	retq
# LLVM-MCA-END
	ASM_SIZE(__xray_FunctionTailExit)
	CFI_ENDPROC

//===----------------------------------------------------------------------===//

	.globl ASM_SYMBOL(__xray_ArgLoggerEntry)
	ASM_HIDDEN(__xray_ArgLoggerEntry)
	.align 16, 0x90
	ASM_TYPE_FUNCTION(__xray_ArgLoggerEntry)
# LLVM-MCA-BEGIN __xray_ArgLoggerEntry
ASM_SYMBOL(__xray_ArgLoggerEntry):
	CFI_STARTPROC
	SAVE_REGISTERS

	// Again, these function pointer loads must be atomic; MOV is fine.
	movq	ASM_SYMBOL(_ZN6__xray13XRayArgLoggerE)(%rip), %rax
	testq	%rax, %rax
	jne	.Larg1entryLog

	// If the arg1 logging handler is not set, defer to the no-argument logging
	// handler.
	movq	ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
	testq	%rax, %rax
	je	.Larg1entryFail

.Larg1entryLog:

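	// Whatever handler %rax now points at is invoked as
	// handler(FuncId, XRayEntryType, Arg1) (the arg1 handler is normally
	// installed through __xray_set_handler_arg1()), so shuffle the registers
	// into that order before the call.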
	// First argument will become the third
	movq	%rdi, %rdx

	// XRayEntryType::LOG_ARGS_ENTRY into the second
	mov	$0x3, %esi

	// 32-bit function ID becomes the first
	movl	%r10d, %edi
	ALIGNED_CALL_RAX

.Larg1entryFail:
	RESTORE_REGISTERS
	retq
# LLVM-MCA-END
	ASM_SIZE(__xray_ArgLoggerEntry)
	CFI_ENDPROC

//===----------------------------------------------------------------------===//

	.global ASM_SYMBOL(__xray_CustomEvent)
	ASM_HIDDEN(__xray_CustomEvent)
	.align 16, 0x90
	ASM_TYPE_FUNCTION(__xray_CustomEvent)
# LLVM-MCA-BEGIN __xray_CustomEvent
ASM_SYMBOL(__xray_CustomEvent):
	CFI_STARTPROC
	SAVE_REGISTERS

	// We take two arguments to this trampoline, which should be in rdi and rsi
	// already.
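	// These are the custom event object pointer and its size; they are passed
	// through unchanged to the handler (normally installed via
	// __xray_set_customevent_handler()).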
	movq	ASM_SYMBOL(_ZN6__xray22XRayPatchedCustomEventE)(%rip), %rax
	testq	%rax, %rax
	je	.LcustomEventCleanup

	ALIGNED_CALL_RAX

.LcustomEventCleanup:
	RESTORE_REGISTERS
	retq
# LLVM-MCA-END
	ASM_SIZE(__xray_CustomEvent)
	CFI_ENDPROC

//===----------------------------------------------------------------------===//

	.global ASM_SYMBOL(__xray_TypedEvent)
	ASM_HIDDEN(__xray_TypedEvent)
	.align 16, 0x90
	ASM_TYPE_FUNCTION(__xray_TypedEvent)
# LLVM-MCA-BEGIN __xray_TypedEvent
ASM_SYMBOL(__xray_TypedEvent):
	CFI_STARTPROC
	SAVE_REGISTERS

	// We pass three arguments to this trampoline, which should be in rdi, rsi
	// and rdx without our intervention.
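	// These are the event type id, a pointer to the event payload, and the
	// payload size; they are forwarded unchanged to the handler (normally
	// installed via __xray_set_typedevent_handler()).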
	movq	ASM_SYMBOL(_ZN6__xray21XRayPatchedTypedEventE)(%rip), %rax
	testq	%rax, %rax
	je	.LtypedEventCleanup

	ALIGNED_CALL_RAX

.LtypedEventCleanup:
	RESTORE_REGISTERS
	retq
# LLVM-MCA-END
	ASM_SIZE(__xray_TypedEvent)
	CFI_ENDPROC

//===----------------------------------------------------------------------===//

NO_EXEC_STACK_DIRECTIVE