/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef _AMD64_SYS_PRIVREGS_H
#define	_AMD64_SYS_PRIVREGS_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * This file describes the cpu's privileged register set, and
 * how the machine state is saved on the stack when a trap occurs.
 */

#if !defined(__amd64)
#error	"non-amd64 code depends on amd64 privileged header!"
#endif

#ifndef	_ASM

/*
 * This is NOT the structure to use for general purpose debugging;
 * see /proc for that.  This is NOT the structure to use to decode
 * the ucontext or grovel about in a core file; see <sys/regset.h>.
 */

struct regs {
	/*
	 * Extra frame for mdb to follow through high level interrupts and
	 * system traps.  Set these to 0 to terminate the stack trace.
	 */
	greg_t	r_savfp;	/* a copy of %rbp */
	greg_t	r_savpc;	/* a copy of %rip */

	greg_t	r_rdi;		/* 1st arg to function */
	greg_t	r_rsi;		/* 2nd arg to function */
	greg_t	r_rdx;		/* 3rd arg to function, 2nd return register */
	greg_t	r_rcx;		/* 4th arg to function */

	greg_t	r_r8;		/* 5th arg to function */
	greg_t	r_r9;		/* 6th arg to function */
	greg_t	r_rax;		/* 1st return register, # SSE registers */
	greg_t	r_rbx;		/* callee-saved, optional base pointer */

	greg_t	r_rbp;		/* callee-saved, optional frame pointer */
	greg_t	r_r10;		/* temporary register, static chain pointer */
	greg_t	r_r11;		/* temporary register */
	greg_t	r_r12;		/* callee-saved */

	greg_t	r_r13;		/* callee-saved */
	greg_t	r_r14;		/* callee-saved */
	greg_t	r_r15;		/* callee-saved */

	greg_t	r_fsbase;
	greg_t	r_gsbase;
	greg_t	r_ds;
	greg_t	r_es;
	greg_t	r_fs;		/* %fs is *never* used by the kernel */
	greg_t	r_gs;

	greg_t	r_trapno;

	/*
	 * (the rest of these are defined by the hardware)
	 */
	greg_t	r_err;
	greg_t	r_rip;
	greg_t	r_cs;
	greg_t	r_rfl;
	greg_t	r_rsp;
	greg_t	r_ss;
};

#define	r_r0	r_rax	/* r0 for portability */
#define	r_r1	r_rdx	/* r1 for portability */
#define	r_fp	r_rbp	/* kernel frame pointer */
#define	r_sp	r_rsp	/* user stack pointer */
#define	r_pc	r_rip	/* user's instruction pointer */
#define	r_ps	r_rfl	/* user's RFLAGS */

#ifdef _KERNEL
#define	lwptoregs(lwp)	((struct regs *)((lwp)->lwp_regs))
#endif	/* _KERNEL */
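
/*
 * Illustrative only (not part of this header's contract): kernel code
 * that has an lwp pointer typically reaches the saved trap-time state
 * through lwptoregs(), e.g.
 *
 *	struct regs *rp = lwptoregs(lwp);
 *	if (USERMODE(rp->r_cs))
 *		... examine rp->r_pc, rp->r_sp, ...
 *
 * The USERMODE() check above is only a sketch of a typical test and is
 * not defined in this file.
 */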

#else	/* !_ASM */

#define	TRAPERR_PUSH(err, trapno)	\
	pushq	$err;			\
	pushq	$trapno
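
/*
 * Note that pushing err before trapno leaves r_trapno at the lower
 * address, so the two words land in the same order as the r_trapno
 * and r_err members of struct regs above.
 */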

/*
 * Create a struct regs on the stack suitable for an
 * interrupt trap.
 *
 * The swapgs instruction (done by the INTR_PUSH/TRAP_PUSH wrappers
 * below) is conditionalized to make sure that interrupts taken in
 * kernel mode don't cause us to switch back to the user's gsbase!
 *
 * Assumes that the trap handler has already pushed an
 * appropriate r_err and r_trapno.
 */
#define	__SAVE_REGS				\
	movq	%r15, REGOFF_R15(%rsp);		\
	movq	%r14, REGOFF_R14(%rsp);		\
	movq	%r13, REGOFF_R13(%rsp);		\
	movq	%r12, REGOFF_R12(%rsp);		\
	movq	%r11, REGOFF_R11(%rsp);		\
	movq	%r10, REGOFF_R10(%rsp);		\
	movq	%rbp, REGOFF_RBP(%rsp);		\
	movq	%rbx, REGOFF_RBX(%rsp);		\
	movq	%rax, REGOFF_RAX(%rsp);		\
	movq	%r9, REGOFF_R9(%rsp);		\
	movq	%r8, REGOFF_R8(%rsp);		\
	movq	%rcx, REGOFF_RCX(%rsp);		\
	movq	%rdx, REGOFF_RDX(%rsp);		\
	movq	%rsi, REGOFF_RSI(%rsp);		\
	movq	%rdi, REGOFF_RDI(%rsp);		\
	movq	%rbp, REGOFF_SAVFP(%rsp);	\
	movq	REGOFF_RIP(%rsp), %rcx;		\
	movq	%rcx, REGOFF_SAVPC(%rsp);	\
	xorl	%ecx, %ecx;			\
	movw	%gs, %cx;			\
	movq	%rcx, REGOFF_GS(%rsp);		\
	movw	%fs, %cx;			\
	movq	%rcx, REGOFF_FS(%rsp);		\
	movw	%es, %cx;			\
	movq	%rcx, REGOFF_ES(%rsp);		\
	movw	%ds, %cx;			\
	movq	%rcx, REGOFF_DS(%rsp);		\
	movl	$MSR_AMD_FSBASE, %ecx;		\
	rdmsr;					\
	movl	%eax, REGOFF_FSBASE(%rsp);	\
	movl	%edx, REGOFF_FSBASE+4(%rsp);	\
	movl	$MSR_AMD_GSBASE, %ecx;		\
	rdmsr;					\
	movl	%eax, REGOFF_GSBASE(%rsp);	\
	movl	%edx, REGOFF_GSBASE+4(%rsp)
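
/*
 * Note: rdmsr returns the 64-bit MSR value split across %edx:%eax,
 * which is why __SAVE_REGS stores the FSBASE and GSBASE values as two
 * 32-bit halves.  %rcx is safe to use as scratch here because it has
 * already been saved to REGOFF_RCX above.
 */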

/*
 * Push register state onto the stack. If we've
 * interrupted userland, do a swapgs as well.
 */

#define	INTR_PUSH				\
	subq	$REGOFF_TRAPNO, %rsp;		\
	__SAVE_REGS;				\
	cmpw	$KCS_SEL, REGOFF_CS(%rsp);	\
	je	6f;				\
	movq	$0, REGOFF_SAVFP(%rsp);		\
	swapgs;					\
6:

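/*
 * TRAP_PUSH is currently identical to INTR_PUSH: if we came from user
 * mode, zero r_savfp (so kernel stack traces stop at the user boundary)
 * and swapgs over to the kernel's gsbase; if we came from kernel mode,
 * leave both alone.  The two names presumably exist so that the trap
 * and interrupt entry paths could diverge later.
 */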
#define	TRAP_PUSH				\
	subq	$REGOFF_TRAPNO, %rsp;		\
	__SAVE_REGS;				\
	cmpw	$KCS_SEL, REGOFF_CS(%rsp);	\
	je	6f;				\
	movq	$0, REGOFF_SAVFP(%rsp);		\
	swapgs;					\
6:

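/*
 * DFTRAP_PUSH (apparently for the double-fault path, going by the name)
 * builds the same frame but skips the swapgs/r_savfp handling entirely.
 */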
#define	DFTRAP_PUSH				\
	subq	$REGOFF_TRAPNO, %rsp;		\
	__SAVE_REGS

#define	__RESTORE_REGS			\
	movq	REGOFF_RDI(%rsp),	%rdi;	\
	movq	REGOFF_RSI(%rsp),	%rsi;	\
	movq	REGOFF_RDX(%rsp),	%rdx;	\
	movq	REGOFF_RCX(%rsp),	%rcx;	\
	movq	REGOFF_R8(%rsp),	%r8;	\
	movq	REGOFF_R9(%rsp),	%r9;	\
	movq	REGOFF_RAX(%rsp),	%rax;	\
	movq	REGOFF_RBX(%rsp),	%rbx;	\
	movq	REGOFF_RBP(%rsp),	%rbp;	\
	movq	REGOFF_R10(%rsp),	%r10;	\
	movq	REGOFF_R11(%rsp),	%r11;	\
	movq	REGOFF_R12(%rsp),	%r12;	\
	movq	REGOFF_R13(%rsp),	%r13;	\
	movq	REGOFF_R14(%rsp),	%r14;	\
	movq	REGOFF_R15(%rsp),	%r15

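/*
 * Restore register state and pop the stack back down to the hardware
 * iretq frame.  The sys_lcall32 comparison treats a return to the very
 * first instruction of the lcall-style 32-bit system call entry point
 * like a return to user mode for swapgs purposes; apparently the gsbase
 * at that point is still the user's, since that entry code has not yet
 * switched it.  Note that the cmpq result survives __RESTORE_REGS
 * because the latter consists only of mov instructions, which do not
 * modify the flags.
 */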
#define	INTR_POP			\
	leaq	sys_lcall32(%rip), %r11;\
	cmpq	%r11, REGOFF_RIP(%rsp);	\
	__RESTORE_REGS;			\
	je	5f;			\
	cmpw	$KCS_SEL, REGOFF_CS(%rsp);\
	je	8f;			\
5:	swapgs;				\
8:	addq	$REGOFF_RIP, %rsp

#define	USER_POP			\
	__RESTORE_REGS;			\
	swapgs;				\
	addq	$REGOFF_RIP, %rsp	/* Adjust %rsp to prepare for iretq */

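/*
 * USER32_POP restores only the registers that a 32-bit process can
 * see.  The movl loads zero-extend into the full 64-bit registers,
 * and %r8-%r15 are simply not restored; neither matters to a 32-bit
 * application.
 */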
#define	USER32_POP			\
	movl	REGOFF_RDI(%rsp), %edi;	\
	movl	REGOFF_RSI(%rsp), %esi;	\
	movl	REGOFF_RDX(%rsp), %edx;	\
	movl	REGOFF_RCX(%rsp), %ecx;	\
	movl	REGOFF_RAX(%rsp), %eax;	\
	movl	REGOFF_RBX(%rsp), %ebx;	\
	movl	REGOFF_RBP(%rsp), %ebp;	\
	swapgs;				\
	addq	$REGOFF_RIP, %rsp	/* Adjust %rsp to prepare for iretq */

/*
 * Smaller versions of INTR_PUSH and INTR_POP for fast traps.
 * The following registers have been pushed onto the stack by
 * hardware at this point:
 *
 *	greg_t	r_rip;
 *	greg_t	r_cs;
 *	greg_t	r_rfl;
 *	greg_t	r_rsp;
 *	greg_t	r_ss;
 *
 * This handler is executed by both 32-bit and 64-bit applications.
 * 64-bit applications allow us to treat the set (%rdi, %rsi, %rdx,
 * %rcx, %r8, %r9, %r10, %r11, %rax) as volatile across function calls.
 * However, 32-bit applications only expect (%eax, %edx, %ecx) to be
 * volatile across a function call -- in particular, %esi and %edi MUST
 * be saved!
 *
 * We could do this differently by making a FAST_INTR_PUSH32 for 32-bit
 * programs, and FAST_INTR_PUSH for 64-bit programs, but it doesn't seem
 * particularly worth it.
 */
#define	FAST_INTR_PUSH			\
	subq	$REGOFF_RIP, %rsp;	\
	movq	%rsi, REGOFF_RSI(%rsp);	\
	movq	%rdi, REGOFF_RDI(%rsp);	\
	swapgs

#define	FAST_INTR_POP			\
	swapgs;				\
	movq	REGOFF_RSI(%rsp), %rsi;	\
	movq	REGOFF_RDI(%rsp), %rdi;	\
	addq	$REGOFF_RIP, %rsp

#define	DISABLE_INTR_FLAGS		\
	pushq	$F_OFF;			\
	popfq

#define	ENABLE_INTR_FLAGS		\
	pushq	$F_ON;			\
	popfq
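
/*
 * F_OFF and F_ON are whole RFLAGS images used to disable and enable
 * interrupts; note that popfq rewrites every writable flag bit, not
 * just the interrupt flag.
 */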

#endif	/* !_ASM */

#include <sys/controlregs.h>

#if defined(_KERNEL) && !defined(_ASM)
#if !defined(__lint) && defined(__GNUC__)

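/*
 * %cr8 is the amd64 task priority register (TPR).  With GNU C these
 * accessors are provided as inlines; for other compilers only the
 * extern declarations below are visible, and the functions are
 * presumably defined out of line elsewhere in the kernel.
 */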
extern __inline__ ulong_t getcr8(void)
{
	uint64_t value;

	__asm__ __volatile__(
		"movq %%cr8, %0"
		: "=r" (value));
	return (value);
}

extern __inline__ void setcr8(ulong_t value)
{
	__asm__ __volatile__(
		"movq %0, %%cr8"
		: /* no output */
		: "r" (value));
}

#else

extern ulong_t getcr8(void);
extern void setcr8(ulong_t);

#endif	/* !defined(__lint) && defined(__GNUC__) */
#endif	/* _KERNEL && !_ASM */

/* Control register layout for panic dump */

#define	CREGSZ		0x68
#define	CREG_GDT	0
#define	CREG_IDT	0x10
#define	CREG_LDT	0x20
#define	CREG_TASKR	0x28
#define	CREG_CR0	0x30
#define	CREG_CR2	0x38
#define	CREG_CR3	0x40
#define	CREG_CR4	0x48
#define	CREG_CR8	0x50
#define	CREG_KGSBASE	0x58
#define	CREG_EFER	0x60
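
/*
 * These byte offsets correspond to the members of struct cregs below
 * (the GDT and IDT slots are 16 bytes wide, presumably to hold the
 * 10-byte sgdt/sidt pseudo-descriptors); CREGSZ (0x68) is the total
 * size of the structure.
 */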

#if !defined(_ASM)

typedef	uint64_t	creg64_t;
typedef	upad128_t	creg128_t;

struct cregs {
	creg128_t	cr_gdt;
	creg128_t	cr_idt;
	creg64_t	cr_ldt;
	creg64_t	cr_task;
	creg64_t	cr_cr0;
	creg64_t	cr_cr2;
	creg64_t	cr_cr3;
	creg64_t	cr_cr4;
	creg64_t	cr_cr8;
	creg64_t	cr_kgsbase;
	creg64_t	cr_efer;
};

#if defined(_KERNEL)
extern void getcregs(struct cregs *);
#endif	/* _KERNEL */
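
/*
 * Illustrative only: getcregs() fills in a caller-supplied snapshot,
 * e.g.
 *
 *	struct cregs creg;
 *	getcregs(&creg);
 *	... examine creg.cr_cr3, creg.cr_efer, ...
 *
 * which is presumably how the panic dump path captures the CREG_*
 * layout described above.
 */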

#endif	/* _ASM */

#ifdef	__cplusplus
}
#endif

#endif	/* _AMD64_SYS_PRIVREGS_H */