/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef	_SYS_MACHPRIVREGS_H
#define	_SYS_MACHPRIVREGS_H

#include <sys/hypervisor.h>

/*
 * Platform dependent instruction sequences for manipulating
 * privileged state
 */

#ifdef __cplusplus
extern "C" {
#endif

/*
 * CLI and STI are quite complex to virtualize!
 */
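/*
 * Under Xen, guest "interrupts" are event-channel upcalls; rather than
 * toggling the hardware interrupt flag, the guest masks and unmasks
 * delivery through the per-vcpu evtchn_upcall_mask byte in the shared
 * vcpu_info structure.  The CLI and STI macros below manipulate that
 * mask, and STI must also pick up any event that became pending while
 * the mask was set.
 */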

#if defined(__amd64)

#define	CURVCPU(r)					\
	movq	%gs:CPU_VCPU_INFO, r

#define	CURTHREAD(r)					\
	movq	%gs:CPU_THREAD, r

#elif defined(__i386)

#define	CURVCPU(r)					\
	movl	%gs:CPU_VCPU_INFO, r

#define	CURTHREAD(r)					\
	movl	%gs:CPU_THREAD, r

#endif	/* __i386 */

#define	XEN_TEST_EVENT_PENDING(r)			\
	testb	$0xff, VCPU_INFO_EVTCHN_UPCALL_PENDING(r)

#define	XEN_SET_UPCALL_MASK(r)				\
	movb	$1, VCPU_INFO_EVTCHN_UPCALL_MASK(r)

#define	XEN_GET_UPCALL_MASK(r, mask)			\
	movb	VCPU_INFO_EVTCHN_UPCALL_MASK(r), mask

#define	XEN_TEST_UPCALL_MASK(r)				\
	testb	$1, VCPU_INFO_EVTCHN_UPCALL_MASK(r)

#define	XEN_CLEAR_UPCALL_MASK(r)			\
	ASSERT_UPCALL_MASK_IS_SET;			\
	movb	$0, VCPU_INFO_EVTCHN_UPCALL_MASK(r)

#ifdef DEBUG

/*
 * Much logic depends on the upcall mask being set at
 * various points in the code; use this macro to validate.
 *
 * Need to use CURVCPU(r) to establish the vcpu pointer.
 */
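/*
 * In addition to the check, the DEBUG versions record the address of
 * the most recent assertion site (into the laststi array) and of the
 * most recent CLI (via SAVE_CLI_LOCATION, into the lastcli array),
 * indexed by CPU_ID, which makes it easy to find the last STI/CLI
 * location for a given CPU when debugging a masking problem.
 */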
#if defined(__amd64)

#define	ASSERT_UPCALL_MASK_IS_SET			\
	pushq	%r11;					\
	CURVCPU(%r11);					\
	XEN_TEST_UPCALL_MASK(%r11);			\
	jne	6f;					\
	cmpl	$0, stistipanic(%rip);			\
	jle	6f;					\
	movl	$-1, stistipanic(%rip);			\
	movq	stistimsg(%rip), %rdi;			\
	xorl	%eax, %eax;				\
	call	panic;					\
6:	pushq	%rax;					\
	pushq	%rbx;					\
	movl	%gs:CPU_ID, %eax;			\
	leaq	.+0(%rip), %r11;			\
	leaq	laststi(%rip), %rbx;			\
	movq	%r11, (%rbx, %rax, 8);			\
	popq	%rbx;					\
	popq	%rax;					\
	popq	%r11

#define	SAVE_CLI_LOCATION				\
	pushq	%rax;					\
	pushq	%rbx;					\
	pushq	%rcx;					\
	movl	%gs:CPU_ID, %eax;			\
	leaq	.+0(%rip), %rcx;			\
	leaq	lastcli, %rbx;				\
	movq	%rcx, (%rbx, %rax, 8);			\
	popq	%rcx;					\
	popq	%rbx;					\
	popq	%rax

#elif defined(__i386)

#define	ASSERT_UPCALL_MASK_IS_SET			\
	pushl	%ecx;					\
	CURVCPU(%ecx);					\
	XEN_TEST_UPCALL_MASK(%ecx);			\
	jne	6f;					\
	cmpl	$0, stistipanic;			\
	jle	6f;					\
	movl	$-1, stistipanic;			\
	movl	stistimsg, %ecx;			\
	pushl	%ecx;					\
	call	panic;					\
6:	pushl	%eax;					\
	pushl	%ebx;					\
	movl	%gs:CPU_ID, %eax;			\
	leal	.+0, %ecx;				\
	leal	laststi, %ebx;				\
	movl	%ecx, (%ebx, %eax, 4);			\
	popl	%ebx;					\
	popl	%eax;					\
	popl	%ecx

#define	SAVE_CLI_LOCATION				\
	pushl	%eax;					\
	pushl	%ebx;					\
	pushl	%ecx;					\
	movl	%gs:CPU_ID, %eax;			\
	leal	.+0, %ecx;				\
	leal	lastcli, %ebx;				\
	movl	%ecx, (%ebx, %eax, 4);			\
	popl	%ecx;					\
	popl	%ebx;					\
	popl	%eax

#endif	/* __i386 */

#else	/* DEBUG */

#define	ASSERT_UPCALL_MASK_IS_SET	/* empty */
#define	SAVE_CLI_LOCATION		/* empty */

#endif	/* DEBUG */

#define	KPREEMPT_DISABLE(t)				\
	addb	$1, T_PREEMPT(t)

#define	KPREEMPT_ENABLE_NOKP(t)				\
	subb	$1, T_PREEMPT(t)

#define	CLI(r)						\
	CURTHREAD(r);					\
	KPREEMPT_DISABLE(r);				\
	CURVCPU(r);					\
	XEN_SET_UPCALL_MASK(r);				\
	SAVE_CLI_LOCATION;				\
	CURTHREAD(r);					\
	KPREEMPT_ENABLE_NOKP(r)

#define	CLIRET(r, ret)					\
	CURTHREAD(r);					\
	KPREEMPT_DISABLE(r);				\
	CURVCPU(r);					\
	XEN_GET_UPCALL_MASK(r, ret);			\
	XEN_SET_UPCALL_MASK(r);				\
	SAVE_CLI_LOCATION;				\
	CURTHREAD(r);					\
	KPREEMPT_ENABLE_NOKP(r)

/*
 * We rely on the fact that HYPERVISOR_block clears the upcall mask
 * for us and then delivers an upcall if an event is pending.  That
 * gets us a callback on this cpu without the danger of being
 * preempted and migrating to another cpu between enabling upcalls
 * and the delivery of the callback.
 */
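/*
 * Note on the fast path below: evtchn_upcall_pending and
 * evtchn_upcall_mask are adjacent bytes in vcpu_info, so the 16-bit
 * cmpxchg compares and updates both at once.  If the mask is set and
 * nothing is pending (the expected $0x100), both bytes are cleared
 * atomically and we are done; otherwise an event is already pending
 * and we fall back to the SCHEDOP_block hypercall to have it
 * delivered.
 */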
#if defined(__amd64)

#define	STI_CLOBBER		/* clobbers %rax, %rdi, %r11 */		\
	CURVCPU(%r11);							\
	ASSERT_UPCALL_MASK_IS_SET;					\
	movw	$0x100, %ax;	/* assume mask set, pending clear */	\
	movw	$0, %di;	/* clear mask and pending */		\
	lock;								\
	cmpxchgw %di, VCPU_INFO_EVTCHN_UPCALL_PENDING(%r11);		\
	jz	7f;		/* xchg worked, we're done */		\
	movl	$__HYPERVISOR_sched_op, %eax; /* have pending upcall */	\
	movl	$SCHEDOP_block, %edi;					\
	pushq	%rsi;	/* hypercall clobbers C param regs plus r10 */	\
	pushq	%rcx;							\
	pushq	%rdx;							\
	pushq	%r8;							\
	pushq	%r9;							\
	pushq	%r10;							\
	TRAP_INSTR;	/* clear upcall mask, force upcall */		\
	popq	%r10;							\
	popq	%r9;							\
	popq	%r8;							\
	popq	%rdx;							\
	popq	%rcx;							\
	popq	%rsi;							\
7:

#define	STI								\
	pushq	%r11;							\
	pushq	%rdi;							\
	pushq	%rax;							\
	STI_CLOBBER;	/* clobbers %r11, %rax, %rdi */			\
	popq	%rax;							\
	popq	%rdi;							\
	popq	%r11

#elif defined(__i386)

#define	STI_CLOBBER		/* clobbers %eax, %ebx, %ecx */		\
	CURVCPU(%ecx);							\
	ASSERT_UPCALL_MASK_IS_SET;					\
	movw	$0x100, %ax;	/* assume mask set, pending clear */	\
	movw	$0, %bx;	/* clear mask and pending */		\
	lock;								\
	cmpxchgw %bx, VCPU_INFO_EVTCHN_UPCALL_PENDING(%ecx);		\
	jz	7f;		/* xchg worked, we're done */		\
	movl	$__HYPERVISOR_sched_op, %eax; /* have pending upcall */	\
	movl	$SCHEDOP_block, %ebx;					\
	TRAP_INSTR;		/* clear upcall mask, force upcall */	\
7:

#define	STI						\
	pushl	%eax;					\
	pushl	%ebx;					\
	pushl	%ecx;					\
	STI_CLOBBER;	/* clobbers %eax, %ebx, %ecx */	\
	popl	%ecx;					\
	popl	%ebx;					\
	popl	%eax

#endif	/* __i386 */

/*
 * Map the PS_IE bit to the hypervisor's event mask bit
 * To -set- the event mask, we have to do a CLI
 * To -clear- the event mask, we have to do a STI
 * (with all the accompanying pre-emption and callbacks, ick)
 *
 * And vice versa.
 */
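/*
 * Note that the two have opposite senses: PS_IE set in the saved
 * flags corresponds to the upcall mask being clear (events
 * deliverable), and vice versa.
 */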

#if defined(__amd64)

#define	IE_TO_EVENT_MASK(rtmp, rfl)		\
	testq	$PS_IE, rfl;			\
	jnz	4f;				\
	CLI(rtmp);				\
	jmp	5f;				\
4:	STI;					\
5:

#define	EVENT_MASK_TO_IE(rtmp, rfl)		\
	andq	$_BITNOT(PS_IE), rfl;		\
	CURVCPU(rtmp);				\
	XEN_TEST_UPCALL_MASK(rtmp);		\
	jnz	1f;				\
	orq	$PS_IE, rfl;			\
1:

#elif defined(__i386)

#define	IE_TO_EVENT_MASK(rtmp, rfl)		\
	testl	$PS_IE, rfl;			\
	jnz	4f;				\
	CLI(rtmp);				\
	jmp	5f;				\
4:	STI;					\
5:

#define	EVENT_MASK_TO_IE(rtmp, rfl)		\
	andl	$_BITNOT(PS_IE), rfl;		\
	CURVCPU(rtmp);				\
	XEN_TEST_UPCALL_MASK(rtmp);		\
	jnz	1f;				\
	orl	$PS_IE, rfl;			\
1:

#endif	/* __i386 */

/*
 * Used to re-enable interrupts in the body of exception handlers
 */

#if defined(__amd64)

#define	ENABLE_INTR_FLAGS		\
	pushq	$F_ON;			\
	popfq;				\
	STI

#elif defined(__i386)

#define	ENABLE_INTR_FLAGS		\
	pushl	$F_ON;			\
	popfl;				\
	STI

#endif	/* __i386 */

/*
 * Virtualize IRET and SYSRET
 */

#if defined(__amd64)

#if defined(DEBUG)

/*
 * Die nastily with a #ud trap if we are about to switch to user
 * mode in HYPERVISOR_IRET and RUPDATE_PENDING is set.
 */
#define	__ASSERT_NO_RUPDATE_PENDING			\
	pushq	%r15;					\
	cmpw	$KCS_SEL, 0x10(%rsp);			\
	je	1f;					\
	movq	%gs:CPU_THREAD, %r15;			\
	movq	T_LWP(%r15), %r15;			\
	testb	$0x1, PCB_RUPDATE(%r15);		\
	je	1f;					\
	ud2;						\
1:	popq	%r15

#else	/* DEBUG */

#define	__ASSERT_NO_RUPDATE_PENDING

#endif	/* DEBUG */

/*
 * Switching from guest kernel to user mode.
 * flag == VGCF_IN_SYSCALL => return via sysret
 * flag == 0 => return via iretq
 *
 * See definition in public/arch-x86_64.h. Stack going in must be:
 * rax, r11, rcx, flags, rip, cs, rflags, rsp, ss.
 */
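/*
 * The pushes of flag, %rcx, %r11 and %rax below complete that layout:
 * the trap frame already holds rip, cs, rflags, rsp and ss, and the
 * hypervisor consumes the resulting struct iret_context from the
 * stack when the iret hypercall is made.
 */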
#define	HYPERVISOR_IRET(flag)			\
	__ASSERT_NO_RUPDATE_PENDING;		\
	pushq	$flag;				\
	pushq	%rcx;				\
	pushq	%r11;				\
	pushq	%rax;				\
	movl	$__HYPERVISOR_iret, %eax;	\
	syscall;				\
	ud2	/* die nastily if we return! */

#define	IRET	HYPERVISOR_IRET(0)

/*
 * XXPV: Normally we would expect to use sysret to return from kernel to
 *	 user mode when using the syscall instruction.  The iret hypercall
 *	 does support both iret and sysret semantics, but using the sysret
 *	 style would require us to use the hypervisor's private descriptors,
 *	 which obey the segment selector ordering imposed by the syscall
 *	 instruction.  With iret we can use whatever %cs value we choose.
 *	 We should fix this to use sysret one day.
 */
#define	SYSRETQ	HYPERVISOR_IRET(0)
#define	SYSRETL	ud2		/* 32-bit syscall/sysret not supported */
#define	SWAPGS	/* empty - handled in hypervisor */

#elif defined(__i386)

/*
 * Switching from guest kernel to user mode.
 * See definition in public/arch-x86_32.h. Stack going in must be:
 * eax, flags, eip, cs, eflags, esp, ss.
 */
#define	HYPERVISOR_IRET				\
	pushl	%eax;				\
	movl	$__HYPERVISOR_iret, %eax;	\
	int	$0x82;				\
	ud2	/* die nastily if we return! */

#define	IRET	HYPERVISOR_IRET
#define	SYSRET	ud2		/* 32-bit syscall/sysret not supported */

#endif	/* __i386 */


/*
 * Xen 3.x wedges the current value of upcall_mask into an unused byte of
 * the saved %cs on the stack at the time of passing through a trap or
 * interrupt gate.  Since Xen also updates PS_IE in %[e,r]flags, we always
 * mask off the saved upcall mask so the kernel and/or tools like debuggers
 * will not be confused about bits set in reserved portions of the %cs slot.
 *
 * See xen/include/public/arch-x86_[32,64].h:cpu_user_regs_t for details.
 */
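/*
 * The offsets below (REGOFF_CS + 4 on amd64, + 2 on i386) address the
 * saved_upcall_mask byte that Xen stores in the %cs slot of
 * cpu_user_regs; CLEAN_CS simply zeroes that byte.
 */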
#if defined(__amd64)

#define	CLEAN_CS	movb	$0, REGOFF_CS+4(%rsp)

#elif defined(__i386)

#define	CLEAN_CS	movb	$0, REGOFF_CS+2(%esp)

#endif	/* __i386 */

/*
 * All exceptions for amd64 have %r11 and %rcx on the stack.
 * Just pop them back into their appropriate registers and let
 * them get saved as if running native.
 */
#if defined(__amd64)

#define	XPV_TRAP_POP	\
	popq	%rcx;	\
	popq	%r11

#define	XPV_TRAP_PUSH	\
	pushq	%r11;	\
	pushq	%rcx

#endif	/* __amd64 */


/*
 * Macros for saving the original segment registers and restoring them
 * for fast traps.
 */
#if defined(__amd64)

/*
 * Smaller versions of INTR_PUSH and INTR_POP for fast traps.
 * The following registers have been pushed onto the stack by
 * hardware at this point:
 *
 *	greg_t	r_rip;
 *	greg_t	r_cs;
 *	greg_t	r_rfl;
 *	greg_t	r_rsp;
 *	greg_t	r_ss;
 *
 * This handler is executed both by 32-bit and 64-bit applications.
 * 64-bit applications allow us to treat the set (%rdi, %rsi, %rdx,
 * %rcx, %r8, %r9, %r10, %r11, %rax) as volatile across function calls.
 * However, 32-bit applications only expect (%eax, %edx, %ecx) to be volatile
 * across a function call -- in particular, %esi and %edi MUST be saved!
 *
 * We could do this differently by making a FAST_INTR_PUSH32 for 32-bit
 * programs, and FAST_INTR_PUSH for 64-bit programs, but it doesn't seem
 * particularly worth it.
 */
#define	FAST_INTR_PUSH			\
	INTGATE_INIT_KERNEL_FLAGS;	\
	popq	%rcx;			\
	popq	%r11;			\
	subq	$REGOFF_RIP, %rsp;	\
	movq	%rsi, REGOFF_RSI(%rsp);	\
	movq	%rdi, REGOFF_RDI(%rsp);	\
	CLEAN_CS

#define	FAST_INTR_POP			\
	movq	REGOFF_RSI(%rsp), %rsi;	\
	movq	REGOFF_RDI(%rsp), %rdi;	\
	addq	$REGOFF_RIP, %rsp

#define	FAST_INTR_RETURN		\
	ASSERT_UPCALL_MASK_IS_SET;	\
	HYPERVISOR_IRET(0)

#elif defined(__i386)

#define	FAST_INTR_PUSH			\
	cld;				\
	__SEGREGS_PUSH			\
	__SEGREGS_LOAD_KERNEL

#define	FAST_INTR_POP			\
	__SEGREGS_POP

#define	FAST_INTR_RETURN		\
	IRET

#endif	/* __i386 */

/*
 * Handling the CR0.TS bit for floating point state.
 *
 * When the TS bit is *set*, attempts to touch the floating
 * point hardware will result in a #nm trap.
 */
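/*
 * A paravirtualized guest does not get to manipulate %cr0 directly,
 * so both STTS and CLTS are implemented with the fpu_taskswitch
 * hypercall: an argument of 1 sets TS, an argument of 0 clears it.
 */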
#if defined(__amd64)

#define	STTS(rtmp)				\
	pushq	%rdi;				\
	movl	$1, %edi;			\
	call	HYPERVISOR_fpu_taskswitch;	\
	popq	%rdi

#define	CLTS					\
	pushq	%rdi;				\
	xorl	%edi, %edi;			\
	call	HYPERVISOR_fpu_taskswitch;	\
	popq	%rdi

#elif defined(__i386)

#define	STTS(r)					\
	pushl	$1;				\
	call	HYPERVISOR_fpu_taskswitch;	\
	addl	$4, %esp

#define	CLTS					\
	pushl	$0;				\
	call	HYPERVISOR_fpu_taskswitch;	\
	addl	$4, %esp

#endif	/* __i386 */

#ifdef __cplusplus
}
#endif

#endif	/* _SYS_MACHPRIVREGS_H */