/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Portions Copyright 2008 John Birrell <jb@freebsd.org>
 *
 * $FreeBSD$
 *
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#define _ASM

#include <machine/asmacros.h>
#include <sys/cpuvar_defs.h>
#include <sys/dtrace.h>

#include "assym.s"

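/*
 * INTR_POP unwinds the trap frame built by the kernel's low-level trap
 * entry code: it reloads every general-purpose register from its TF_*
 * slot and, if the trap came from user mode (non-zero RPL in the saved
 * %cs), disables interrupts and executes swapgs before returning.  It
 * then pops the stack up to TF_RIP, leaving %rsp pointing at the
 * hardware iretq frame (RIP, CS, RFLAGS, RSP, SS).
 */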
#define INTR_POP				\
	MEXITCOUNT;				\
	movq	TF_RDI(%rsp),%rdi;		\
	movq	TF_RSI(%rsp),%rsi;		\
	movq	TF_RDX(%rsp),%rdx;		\
	movq	TF_RCX(%rsp),%rcx;		\
	movq	TF_R8(%rsp),%r8;		\
	movq	TF_R9(%rsp),%r9;		\
	movq	TF_RAX(%rsp),%rax;		\
	movq	TF_RBX(%rsp),%rbx;		\
	movq	TF_RBP(%rsp),%rbp;		\
	movq	TF_R10(%rsp),%r10;		\
	movq	TF_R11(%rsp),%r11;		\
	movq	TF_R12(%rsp),%r12;		\
	movq	TF_R13(%rsp),%r13;		\
	movq	TF_R14(%rsp),%r14;		\
	movq	TF_R15(%rsp),%r15;		\
	testb	$SEL_RPL_MASK,TF_CS(%rsp);	\
	jz	1f;				\
	cli;					\
	swapgs;					\
1:	addq	$TF_RIP,%rsp;

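/*
 * dtrace_invop_start is reached from the kernel's breakpoint (#BP) trap
 * handler, through dtrace_invop_jump_addr (installed by
 * dtrace_invop_init() below), when the breakpoint may have been planted
 * by a DTrace provider such as fbt.  It asks dtrace_invop() which
 * instruction was overwritten and emulates it here (push/leave/nop/ret);
 * anything else is handed back to the normal trap handler through
 * dtrace_invop_calltrap_addr.
 */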
	ENTRY(dtrace_invop_start)

	/*
	 * A #BP trap leaves %rip pointing at the instruction following the
	 * breakpoint.  Decrement it so that it identifies the int3 (0xcc)
	 * byte we substituted for the original instruction.
	 */
	movq	TF_RIP(%rsp), %rdi
	decq	%rdi
	movq	%rsp, %rsi
	movq	TF_RAX(%rsp), %rdx
	call	dtrace_invop
	ALTENTRY(dtrace_invop_callsite)
	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
	je	bp_push
	cmpl	$DTRACE_INVOP_LEAVE, %eax
	je	bp_leave
	cmpl	$DTRACE_INVOP_NOP, %eax
	je	bp_nop
	cmpl	$DTRACE_INVOP_RET, %eax
	je	bp_ret

	/* When all else fails handle the trap in the usual way. */
	jmpq	*dtrace_invop_calltrap_addr

bp_push:
	/*
	 * We must emulate a "pushq %rbp".  To do this, we pull the stack
	 * down 8 bytes, and then store the base pointer.
	 */
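	/*
	 * INTR_POP below leaves %rsp pointing at the iretq frame: RIP,
	 * CS, RFLAGS, RSP and SS at offsets 0, 8, 16, 24 and 32.  The
	 * code that follows rebuilds that frame 16 bytes lower on the
	 * interrupt stack, with the saved RSP decremented by 8 and %rbp
	 * written to the slot that creates on the interrupted stack --
	 * the net effect of a "pushq %rbp" at the probe site.
	 */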
	INTR_POP
	subq	$16, %rsp		/* make room for %rbp */
	pushq	%rax			/* push temp */
	movq	24(%rsp), %rax		/* load calling RIP */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	movq	32(%rsp), %rax		/* load calling CS */
	movq	%rax, 16(%rsp)		/* store calling CS */
	movq	40(%rsp), %rax		/* load calling RFLAGS */
	movq	%rax, 24(%rsp)		/* store calling RFLAGS */
	movq	48(%rsp), %rax		/* load calling RSP */
	subq	$8, %rax		/* make room for %rbp */
	movq	%rax, 32(%rsp)		/* store calling RSP */
	movq	56(%rsp), %rax		/* load calling SS */
	movq	%rax, 40(%rsp)		/* store calling SS */
	movq	32(%rsp), %rax		/* reload calling RSP */
	movq	%rbp, (%rax)		/* store %rbp there */
	popq	%rax			/* pop off temp */
	iretq				/* return from interrupt */
	/*NOTREACHED*/

bp_leave:
	/*
	 * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
	 * followed by a "popq %rbp".  This is quite a bit simpler on amd64
	 * than it is on i386 -- we can exploit the fact that the %rsp is
	 * explicitly saved to effect the pop without having to reshuffle
	 * the other data pushed for the trap.
	 */
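	/*
	 * Once INTR_POP has run and %rax has been pushed as a temporary,
	 * the saved RSP slot of the iretq frame sits at 32(%rsp).
	 * Loading (%rbp) yields the caller's frame pointer (the new
	 * %rbp), and %rbp + 8 is the stack pointer that a "popq %rbp"
	 * would leave behind, which is what gets written into that slot
	 * for iretq to restore.
	 */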
	INTR_POP
	pushq	%rax			/* push temp */
	movq	8(%rsp), %rax		/* load calling RIP */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	movq	(%rbp), %rax		/* get new %rbp */
	addq	$8, %rbp		/* adjust new %rsp */
	movq	%rbp, 32(%rsp)		/* store new %rsp */
	movq	%rax, %rbp		/* set new %rbp */
	popq	%rax			/* pop off temp */
	iretq				/* return from interrupt */
	/*NOTREACHED*/

bp_nop:
	/* We must emulate a "nop". */
	INTR_POP
	iretq
	/*NOTREACHED*/

bp_ret:
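	/*
	 * We must emulate a "ret": fetch the return address from the top
	 * of the interrupted stack (the saved RSP in the iretq frame),
	 * make it the RIP that iretq will restore, and bump the saved
	 * RSP by 8 to pop it.
	 */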
	INTR_POP
	pushq	%rax			/* push temp */
	movq	32(%rsp), %rax		/* load %rsp */
	movq	(%rax), %rax		/* load calling RIP */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	addq	$8, 32(%rsp)		/* adjust new %rsp */
	popq	%rax			/* pop off temp */
	iretq				/* return from interrupt */
	/*NOTREACHED*/

	END(dtrace_invop_start)

/*
void dtrace_invop_init(void)
*/
	ENTRY(dtrace_invop_init)
	movq	$dtrace_invop_start, dtrace_invop_jump_addr(%rip)
	ret
	END(dtrace_invop_init)

/*
void dtrace_invop_uninit(void)
*/
	ENTRY(dtrace_invop_uninit)
	movq	$0, dtrace_invop_jump_addr(%rip)
	ret
	END(dtrace_invop_uninit)

/*
greg_t dtrace_getfp(void)
*/
	ENTRY(dtrace_getfp)
	movq	%rbp, %rax
	ret
	END(dtrace_getfp)

/*
uint32_t
dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new)
*/
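/*
 * Compare-and-swap: "lock cmpxchgl" stores new (%edx) into *target only
 * if *target still equals cmp (loaded into %eax); either way %eax ends
 * up holding the previous value of *target, which is the return value.
 * dtrace_casptr below is the 64-bit analogue.
 */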
	ENTRY(dtrace_cas32)
	movl	%esi, %eax
	lock
	cmpxchgl %edx, (%rdi)
	ret
	END(dtrace_cas32)

/*
void *
dtrace_casptr(void *target, void *cmp, void *new)
*/
	ENTRY(dtrace_casptr)
	movq	%rsi, %rax
	lock
	cmpxchgq %rdx, (%rdi)
	ret
	END(dtrace_casptr)

/*
uintptr_t
dtrace_caller(int aframes)
*/
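/*
 * There is no cheap way to recover the probe's caller from here, so
 * this always returns -1; the machine-independent DTrace code treats
 * that as "unknown" and falls back to deriving the caller from a stack
 * walk instead.
 */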
	ENTRY(dtrace_caller)
	movq	$-1, %rax
	ret
	END(dtrace_caller)

/*
void
dtrace_copy(uintptr_t src, uintptr_t dest, size_t size)
*/
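/*
 * A straight byte copy: "repz/smovb" is the historical Solaris spelling
 * of "rep; movsb".  No fault checking is done here; probe-context
 * callers are expected to run with CPU_DTRACE_NOFAULT set, so a bad
 * address is reported through the per-CPU DTrace fault flags rather
 * than causing a panic.
 */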
	ENTRY(dtrace_copy)
	pushq	%rbp
	movq	%rsp, %rbp

	xchgq	%rdi, %rsi		/* make %rsi source, %rdi dest */
	movq	%rdx, %rcx		/* load count */
	repz				/* repeat for count ... */
	smovb				/*   move from %ds:rsi to %es:rdi */
	leave
	ret
	END(dtrace_copy)

/*
void
dtrace_copystr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
*/
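/*
 * Copy bytes from uaddr to kaddr until a NUL has been copied or size
 * bytes have been moved.  Whenever the remaining count is a multiple of
 * 4096, *flags is checked for CPU_DTRACE_BADADDR so that a fault taken
 * during the copy terminates it promptly instead of being noticed only
 * after the full size has been walked.
 */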
	ENTRY(dtrace_copystr)
	pushq	%rbp
	movq	%rsp, %rbp

0:
	movb	(%rdi), %al		/* load from source */
	movb	%al, (%rsi)		/* store to destination */
	addq	$1, %rdi		/* increment source pointer */
	addq	$1, %rsi		/* increment destination pointer */
	subq	$1, %rdx		/* decrement remaining count */
	cmpb	$0, %al
	je	2f
	testq	$0xfff, %rdx		/* test if count is 4k-aligned */
	jnz	1f			/* if not, continue with copying */
	testq	$CPU_DTRACE_BADADDR, (%rcx) /* load and test dtrace flags */
	jnz	2f
1:
	cmpq	$0, %rdx
	jne	0b
2:
	leave
	ret

	END(dtrace_copystr)

/*
uintptr_t
dtrace_fulword(void *addr)
*/
	ENTRY(dtrace_fulword)
	movq	(%rdi), %rax
	ret
	END(dtrace_fulword)

/*
uint8_t
dtrace_fuword8_nocheck(void *addr)
*/
	ENTRY(dtrace_fuword8_nocheck)
	xorq	%rax, %rax
	movb	(%rdi), %al
	ret
	END(dtrace_fuword8_nocheck)

/*
uint16_t
dtrace_fuword16_nocheck(void *addr)
*/
	ENTRY(dtrace_fuword16_nocheck)
	xorq	%rax, %rax
	movw	(%rdi), %ax
	ret
	END(dtrace_fuword16_nocheck)

/*
uint32_t
dtrace_fuword32_nocheck(void *addr)
*/
	ENTRY(dtrace_fuword32_nocheck)
	xorq	%rax, %rax
	movl	(%rdi), %eax
	ret
	END(dtrace_fuword32_nocheck)

/*
uint64_t
dtrace_fuword64_nocheck(void *addr)
*/
	ENTRY(dtrace_fuword64_nocheck)
	movq	(%rdi), %rax
	ret
	END(dtrace_fuword64_nocheck)

/*
void
dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which,
    int fault, int fltoffs, uintptr_t illval)
*/
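/*
 * Fire the dtrace:::ERROR probe.  The six incoming arguments are
 * shifted up one register so that dtrace_probeid_error becomes the
 * first argument to dtrace_probe(); illval is spilled to the stack
 * slot where a seventh integer argument would go under the amd64
 * calling convention.
 */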
	ENTRY(dtrace_probe_error)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$0x8, %rsp
	movq	%r9, (%rsp)
	movq	%r8, %r9
	movq	%rcx, %r8
	movq	%rdx, %rcx
	movq	%rsi, %rdx
	movq	%rdi, %rsi
	movl	dtrace_probeid_error(%rip), %edi
	call	dtrace_probe
	addq	$0x8, %rsp
	leave
	ret
	END(dtrace_probe_error)

/*
void
dtrace_membar_producer(void)
*/
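/*
 * On amd64 the memory model already orders stores with stores and
 * loads with loads, so neither membar needs a fence instruction; being
 * an out-of-line function call is enough to keep the compiler from
 * reordering around it.
 */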
	ENTRY(dtrace_membar_producer)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	END(dtrace_membar_producer)

/*
void
dtrace_membar_consumer(void)
*/
	ENTRY(dtrace_membar_consumer)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	END(dtrace_membar_consumer)

/*
dtrace_icookie_t
dtrace_interrupt_disable(void)
*/
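/*
 * The cookie is simply the saved %rflags: pushfq/popq captures it
 * before cli clears the interrupt flag, and dtrace_interrupt_enable()
 * below pushes the cookie back so that popfq restores whatever
 * interrupt state the caller had.
 */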
	ENTRY(dtrace_interrupt_disable)
	pushfq
	popq	%rax
	cli
	ret
	END(dtrace_interrupt_disable)

/*
void
dtrace_interrupt_enable(dtrace_icookie_t cookie)
*/
	ENTRY(dtrace_interrupt_enable)
	pushq	%rdi
	popfq
	ret
	END(dtrace_interrupt_enable)