/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2019 Joyent, Inc.
 */

#ifndef	_AMD64_SYS_PRIVREGS_H
#define	_AMD64_SYS_PRIVREGS_H

#include <sys/ccompile.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * This file describes the cpu's privileged register set, and
 * how the machine state is saved on the stack when a trap occurs.
 */

#if !defined(__amd64)
#error	"non-amd64 code depends on amd64 privileged header!"
#endif

#ifndef _ASM

/*
 * This is NOT the structure to use for general purpose debugging;
 * see /proc for that.  This is NOT the structure to use to decode
 * the ucontext or grovel about in a core file; see <sys/regset.h>.
 */

struct regs {
	/*
	 * Extra frame for mdb to follow through high level interrupts and
	 * system traps.  Set them to 0 to terminate stacktrace.
	 */
	greg_t	r_savfp;	/* a copy of %rbp */
	greg_t	r_savpc;	/* a copy of %rip */

	greg_t	r_rdi;		/* 1st arg to function */
	greg_t	r_rsi;		/* 2nd arg to function */
	greg_t	r_rdx;		/* 3rd arg to function, 2nd return register */
	greg_t	r_rcx;		/* 4th arg to function */

	greg_t	r_r8;		/* 5th arg to function */
	greg_t	r_r9;		/* 6th arg to function */
	greg_t	r_rax;		/* 1st return register, # SSE registers */
	greg_t	r_rbx;		/* callee-saved, optional base pointer */

	greg_t	r_rbp;		/* callee-saved, optional frame pointer */
	greg_t	r_r10;		/* temporary register, static chain pointer */
	greg_t	r_r11;		/* temporary register */
	greg_t	r_r12;		/* callee-saved */

	greg_t	r_r13;		/* callee-saved */
	greg_t	r_r14;		/* callee-saved */
	greg_t	r_r15;		/* callee-saved */

	/*
	 * fsbase and gsbase are sampled on every exception in DEBUG kernels
	 * only.  They remain in the non-DEBUG kernel to avoid any flag days.
	 */
	greg_t	__r_fsbase;	/* no longer used in non-DEBUG builds */
	greg_t	__r_gsbase;	/* no longer used in non-DEBUG builds */
	greg_t	r_ds;
	greg_t	r_es;
	greg_t	r_fs;		/* %fs is *never* used by the kernel */
	greg_t	r_gs;

	greg_t	r_trapno;

	/*
	 * (the rest of these are defined by the hardware)
	 */
	greg_t	r_err;
	greg_t	r_rip;
	greg_t	r_cs;
	greg_t	r_rfl;
	greg_t	r_rsp;
	greg_t	r_ss;
};

#define	r_r0	r_rax	/* r0 for portability */
#define	r_r1	r_rdx	/* r1 for portability */
#define	r_fp	r_rbp	/* kernel frame pointer */
#define	r_sp	r_rsp	/* user stack pointer */
#define	r_pc	r_rip	/* user's instruction pointer */
#define	r_ps	r_rfl	/* user's RFLAGS */

#ifdef _KERNEL
#define	lwptoregs(lwp)	((struct regs *)((lwp)->lwp_regs))
#endif	/* _KERNEL */
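
/*
 * Example (a sketch only, not part of the interface defined here):
 * kernel code that already holds an lwp can reach the saved trap state
 * through lwptoregs() and the portability aliases above, e.g.
 *
 *	struct regs *rp = lwptoregs(ttolwp(curthread));
 *	uintptr_t pc = rp->r_pc;	saved %rip at trap time
 *	uintptr_t sp = rp->r_sp;	saved user %rsp
 *
 * (ttolwp() and curthread are assumed from <sys/thread.h>.)  Debuggers
 * and core-file consumers should use /proc and <sys/regset.h> instead,
 * as noted above.
 */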

#else	/* !_ASM */

#if defined(_MACHDEP)

#include <sys/machprivregs.h>
#include <sys/pcb.h>

/*
 * We can not safely sample {fs,gs}base on the hypervisor.  The rdmsr
 * instruction triggers a #gp fault which is emulated in the hypervisor
 * on behalf of the guest.  This is normally ok but if the guest is in
 * the special failsafe handler it must not fault again or the hypervisor
 * will kill the domain.  We could use something different than INTR_PUSH
 * in xen_failsafe_callback but for now we will not sample them.
 */
#if defined(DEBUG) && !defined(__xpv)
#define	__SAVE_BASES				\
	movl	$MSR_AMD_FSBASE, %ecx;		\
	rdmsr;					\
	movl	%eax, REGOFF_FSBASE(%rsp);	\
	movl	%edx, REGOFF_FSBASE+4(%rsp);	\
	movl	$MSR_AMD_GSBASE, %ecx;		\
	rdmsr;					\
	movl	%eax, REGOFF_GSBASE(%rsp);	\
	movl	%edx, REGOFF_GSBASE+4(%rsp)
#else
#define	__SAVE_BASES
#endif
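
/*
 * A note on the stores above: rdmsr returns the 64-bit MSR contents
 * split across %edx:%eax (high and low 32 bits respectively), so
 * __SAVE_BASES saves each base as two 32-bit halves; the value that
 * lands at REGOFF_FSBASE (and likewise at REGOFF_GSBASE) is effectively
 *
 *	((uint64_t)%edx << 32) | %eax
 */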

/*
 * Create a struct regs on the stack suitable for an
 * interrupt trap.
 *
 * Assumes that the trap handler has already pushed an
 * appropriate r_err and r_trapno
 */
#define	__SAVE_REGS				\
	movq	%r15, REGOFF_R15(%rsp);		\
	movq	%r14, REGOFF_R14(%rsp);		\
	movq	%r13, REGOFF_R13(%rsp);		\
	movq	%r12, REGOFF_R12(%rsp);		\
	movq	%r11, REGOFF_R11(%rsp);		\
	movq	%r10, REGOFF_R10(%rsp);		\
	movq	%rbp, REGOFF_RBP(%rsp);		\
	movq	%rbx, REGOFF_RBX(%rsp);		\
	movq	%rax, REGOFF_RAX(%rsp);		\
	movq	%r9, REGOFF_R9(%rsp);		\
	movq	%r8, REGOFF_R8(%rsp);		\
	movq	%rcx, REGOFF_RCX(%rsp);		\
	movq	%rdx, REGOFF_RDX(%rsp);		\
	movq	%rsi, REGOFF_RSI(%rsp);		\
	movq	%rdi, REGOFF_RDI(%rsp);		\
	movq	%rbp, REGOFF_SAVFP(%rsp);	\
	movq	REGOFF_RIP(%rsp), %rcx;		\
	movq	%rcx, REGOFF_SAVPC(%rsp);	\
	xorl	%ecx, %ecx;			\
	movw	%gs, %cx;			\
	movq	%rcx, REGOFF_GS(%rsp);		\
	movw	%fs, %cx;			\
	movq	%rcx, REGOFF_FS(%rsp);		\
	movw	%es, %cx;			\
	movq	%rcx, REGOFF_ES(%rsp);		\
	movw	%ds, %cx;			\
	movq	%rcx, REGOFF_DS(%rsp);		\
	__SAVE_BASES
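
/*
 * The REGOFF_* constants used here are generated (via the assym
 * mechanism) to match the member offsets of struct regs, so the frame
 * built by __SAVE_REGS is exactly a struct regs sitting on the stack.
 * r_savfp and r_savpc are seeded from %rbp and the hardware-pushed
 * %rip so that mdb can follow the stack across the trap frame, as
 * described for struct regs above.
 */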

#define	__RESTORE_REGS				\
	movq	REGOFF_RDI(%rsp), %rdi;		\
	movq	REGOFF_RSI(%rsp), %rsi;		\
	movq	REGOFF_RDX(%rsp), %rdx;		\
	movq	REGOFF_RCX(%rsp), %rcx;		\
	movq	REGOFF_R8(%rsp), %r8;		\
	movq	REGOFF_R9(%rsp), %r9;		\
	movq	REGOFF_RAX(%rsp), %rax;		\
	movq	REGOFF_RBX(%rsp), %rbx;		\
	movq	REGOFF_RBP(%rsp), %rbp;		\
	movq	REGOFF_R10(%rsp), %r10;		\
	movq	REGOFF_R11(%rsp), %r11;		\
	movq	REGOFF_R12(%rsp), %r12;		\
	movq	REGOFF_R13(%rsp), %r13;		\
	movq	REGOFF_R14(%rsp), %r14;		\
	movq	REGOFF_R15(%rsp), %r15

/*
 * Push register state onto the stack.  If we've
 * interrupted userland, do a swapgs as well.
 */
#define	INTR_PUSH				\
	subq	$REGOFF_TRAPNO, %rsp;		\
	__SAVE_REGS;				\
	cmpw	$KCS_SEL, REGOFF_CS(%rsp);	\
	je	6f;				\
	movq	$0, REGOFF_SAVFP(%rsp);		\
	SWAPGS;					\
6:	lfence;		/* swapgs mitigation */	\
	CLEAN_CS
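
/*
 * Illustrative shape of a trap entry point built on these macros (a
 * simplified sketch with placeholder names; the real handlers live in
 * the trap and locore assembly and do considerably more):
 *
 *	handler:
 *		pushq	$0		dummy error code, when the
 *					hardware did not push one
 *		pushq	$trapno		trap number (r_trapno)
 *		INTR_PUSH		build a struct regs; swapgs if
 *					we came in from userland
 *		...			hand the regs pointer to C code
 *		INTR_POP		tear the frame back down
 *		iretq			return from the trap
 *
 * KCS_SEL is the kernel %cs selector; comparing the saved REGOFF_CS
 * against it is how INTR_PUSH and INTR_POP decide whether a swapgs
 * is needed.
 */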

#define	INTR_POP				\
	leaq	sys_lcall32(%rip), %r11;	\
	cmpq	%r11, REGOFF_RIP(%rsp);		\
	__RESTORE_REGS;				\
	je	5f;				\
	cmpw	$KCS_SEL, REGOFF_CS(%rsp);	\
	je	8f;				\
5:	SWAPGS;					\
8:	lfence;		/* swapgs mitigation */	\
	addq	$REGOFF_RIP, %rsp

/*
 * No need for swapgs mitigation: it's unconditional, and we're heading
 * back to userspace.
 */
#define	USER_POP				\
	__RESTORE_REGS;				\
	SWAPGS;					\
	addq	$REGOFF_RIP, %rsp	/* Adjust %rsp to prepare for iretq */

#define	USER32_POP				\
	movl	REGOFF_RDI(%rsp), %edi;		\
	movl	REGOFF_RSI(%rsp), %esi;		\
	movl	REGOFF_RDX(%rsp), %edx;		\
	movl	REGOFF_RCX(%rsp), %ecx;		\
	movl	REGOFF_RAX(%rsp), %eax;		\
	movl	REGOFF_RBX(%rsp), %ebx;		\
	movl	REGOFF_RBP(%rsp), %ebp;		\
	SWAPGS;					\
	addq	$REGOFF_RIP, %rsp	/* Adjust %rsp to prepare for iretq */
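
/*
 * USER32_POP reloads only the registers a 32-bit process can name
 * (%edi through %ebp above); the 64-bit-only registers saved in the
 * frame are simply not restored on this path.
 */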

#define	DFTRAP_PUSH				\
	subq	$REGOFF_TRAPNO, %rsp;		\
	__SAVE_REGS

#endif	/* _MACHDEP */

/*
 * Used to set rflags to known values at the head of an
 * interrupt gate handler, i.e. interrupts are -already- disabled.
 */
#define	INTGATE_INIT_KERNEL_FLAGS	\
	pushq	$F_OFF;			\
	popfq

#endif	/* !_ASM */

#include <sys/controlregs.h>

#if defined(_KERNEL) && !defined(_ASM)
#if !defined(__lint) && defined(__GNUC__)

extern __GNU_INLINE ulong_t
getcr8(void)
{
	uint64_t value;

	__asm__ __volatile__(
	    "movq %%cr8, %0"
	    : "=r" (value));
	return (value);
}

extern __GNU_INLINE void
setcr8(ulong_t value)
{
	__asm__ __volatile__(
	    "movq %0, %%cr8"
	    : /* no output */
	    : "r" (value));
}

#else

extern ulong_t getcr8(void);
extern void setcr8(ulong_t);

#endif	/* !defined(__lint) && defined(__GNUC__) */
#endif	/* _KERNEL && !_ASM */
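
/*
 * %cr8 is the amd64 task priority register (TPR): writing a value n
 * inhibits interrupts whose priority class is less than or equal to n.
 * A usage sketch only, not a prescription for any particular caller:
 *
 *	ulong_t saved = getcr8();
 *	setcr8(newpri);		raise the local interrupt priority
 *	...
 *	setcr8(saved);		restore the previous priority
 */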

/* Control register layout for panic dump */

#define	CREGSZ		0x68
#define	CREG_GDT	0
#define	CREG_IDT	0x10
#define	CREG_LDT	0x20
#define	CREG_TASKR	0x28
#define	CREG_CR0	0x30
#define	CREG_CR2	0x38
#define	CREG_CR3	0x40
#define	CREG_CR4	0x48
#define	CREG_CR8	0x50
#define	CREG_KGSBASE	0x58
#define	CREG_EFER	0x60
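
/*
 * These offsets correspond, member for member, to struct cregs below:
 * the GDT and IDT pseudo-descriptors are kept as padded 128-bit fields
 * (hence CREG_IDT at 0x10 and CREG_LDT at 0x20), every remaining field
 * is 8 bytes, and CREGSZ (0x68) is the total size of the structure.
 */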

#if !defined(_ASM) && defined(_INT64_TYPE)

typedef uint64_t	creg64_t;
typedef upad128_t	creg128_t;

struct cregs {
	creg128_t	cr_gdt;
	creg128_t	cr_idt;
	creg64_t	cr_ldt;
	creg64_t	cr_task;
	creg64_t	cr_cr0;
	creg64_t	cr_cr2;
	creg64_t	cr_cr3;
	creg64_t	cr_cr4;
	creg64_t	cr_cr8;
	creg64_t	cr_kgsbase;
	creg64_t	cr_efer;
};

#if defined(_KERNEL)
extern void getcregs(struct cregs *);
#endif	/* _KERNEL */

#endif	/* !_ASM && _INT64_TYPE */

#ifdef __cplusplus
}
#endif

#endif	/* !_AMD64_SYS_PRIVREGS_H */