1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1990 William Jolitz.
5 * Copyright (c) 1991 The Regents of the University of California.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/bus.h>
36 #include <sys/domainset.h>
37 #include <sys/kernel.h>
38 #include <sys/lock.h>
39 #include <sys/malloc.h>
40 #include <sys/module.h>
41 #include <sys/mutex.h>
43 #include <sys/proc.h>
44 #include <sys/sysctl.h>
45 #include <sys/sysent.h>
46 #include <sys/tslog.h>
47 #include <machine/bus.h>
48 #include <sys/rman.h>
49 #include <sys/signalvar.h>
50 #include <vm/uma.h>
51
52 #include <machine/cputypes.h>
53 #include <machine/frame.h>
54 #include <machine/intr_machdep.h>
55 #include <machine/md_var.h>
56 #include <machine/pcb.h>
57 #include <machine/psl.h>
58 #include <machine/resource.h>
59 #include <machine/specialreg.h>
60 #include <machine/segments.h>
61 #include <machine/ucontext.h>
62 #include <x86/ifunc.h>
63
64 /*
65 * Floating point support.
66 */
67
68 #define fldcw(cw) __asm __volatile("fldcw %0" : : "m" (cw))
69 #define fnclex() __asm __volatile("fnclex")
70 #define fninit() __asm __volatile("fninit")
71 #define fnstcw(addr) __asm __volatile("fnstcw %0" : "=m" (*(addr)))
72 #define fnstsw(addr) __asm __volatile("fnstsw %0" : "=am" (*(addr)))
73 #define fxrstor(addr) __asm __volatile("fxrstor %0" : : "m" (*(addr)))
74 #define fxsave(addr) __asm __volatile("fxsave %0" : "=m" (*(addr)))
75 #define ldmxcsr(csr) __asm __volatile("ldmxcsr %0" : : "m" (csr))
76 #define stmxcsr(addr) __asm __volatile("stmxcsr %0" : "=m" (*(addr)))
77
78 static __inline void
79 xrstor32(char *addr, uint64_t mask)
80 {
81 uint32_t low, hi;
82
83 low = mask;
84 hi = mask >> 32;
85 __asm __volatile("xrstor %0" : : "m" (*addr), "a" (low), "d" (hi));
86 }
87
88 static __inline void
89 xrstor64(char *addr, uint64_t mask)
90 {
91 uint32_t low, hi;
92
93 low = mask;
94 hi = mask >> 32;
95 __asm __volatile("xrstor64 %0" : : "m" (*addr), "a" (low), "d" (hi));
96 }
97
98 static __inline void
99 xsave32(char *addr, uint64_t mask)
100 {
101 uint32_t low, hi;
102
103 low = mask;
104 hi = mask >> 32;
105 __asm __volatile("xsave %0" : "=m" (*addr) : "a" (low), "d" (hi) :
106 "memory");
107 }
108
109 static __inline void
110 xsave64(char *addr, uint64_t mask)
111 {
112 uint32_t low, hi;
113
114 low = mask;
115 hi = mask >> 32;
116 __asm __volatile("xsave64 %0" : "=m" (*addr) : "a" (low), "d" (hi) :
117 "memory");
118 }
119
120 static __inline void
121 xsaveopt32(char *addr, uint64_t mask)
122 {
123 uint32_t low, hi;
124
125 low = mask;
126 hi = mask >> 32;
127 __asm __volatile("xsaveopt %0" : "=m" (*addr) : "a" (low), "d" (hi) :
128 "memory");
129 }
130
131 static __inline void
132 xsaveopt64(char *addr, uint64_t mask)
133 {
134 uint32_t low, hi;
135
136 low = mask;
137 hi = mask >> 32;
138 __asm __volatile("xsaveopt64 %0" : "=m" (*addr) : "a" (low), "d" (hi) :
139 "memory");
140 }
141
142 CTASSERT(sizeof(struct savefpu) == 512);
143 CTASSERT(sizeof(struct xstate_hdr) == 64);
144 CTASSERT(sizeof(struct savefpu_ymm) == 832);
145
146 /*
147 * This requirement is to make it easier for asm code to calculate
148 * the offset of the FPU save area from the pcb address. The FPU
149 * save area must be 64-byte aligned.
150 */
151 CTASSERT(sizeof(struct pcb) % XSAVE_AREA_ALIGN == 0);
152
153 /*
154 * Ensure the copy of XCR0 saved in a core is contained in the padding
155 * area.
156 */
157 CTASSERT(X86_XSTATE_XCR0_OFFSET >= offsetof(struct savefpu, sv_pad) &&
158 X86_XSTATE_XCR0_OFFSET + sizeof(uint64_t) <= sizeof(struct savefpu));
159
160 static void fpu_clean_state(void);
161
162 SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD,
163 SYSCTL_NULL_INT_PTR, 1, "Floating point instructions executed in hardware");
164
165 int use_xsave; /* non-static for cpu_switch.S */
166 uint64_t xsave_mask; /* the same */
167 static uint64_t xsave_extensions;
168 static uma_zone_t fpu_save_area_zone;
169 static struct savefpu *fpu_initialstate;
170
171 static struct xsave_area_elm_descr {
172 u_int offset;
173 u_int size;
174 u_int flags;
175 } *xsave_area_desc;
176
177 static void
178 fpusave_xsaveopt64(void *addr)
179 {
180 xsaveopt64((char *)addr, xsave_mask);
181 }
182
183 static void
184 fpusave_xsaveopt3264(void *addr)
185 {
186 if (SV_CURPROC_FLAG(SV_ILP32))
187 xsaveopt32((char *)addr, xsave_mask);
188 else
189 xsaveopt64((char *)addr, xsave_mask);
190 }
191
192 static void
193 fpusave_xsave64(void *addr)
194 {
195 xsave64((char *)addr, xsave_mask);
196 }
197
198 static void
199 fpusave_xsave3264(void *addr)
200 {
201 if (SV_CURPROC_FLAG(SV_ILP32))
202 xsave32((char *)addr, xsave_mask);
203 else
204 xsave64((char *)addr, xsave_mask);
205 }
206
207 static void
208 fpurestore_xrstor64(void *addr)
209 {
210 xrstor64((char *)addr, xsave_mask);
211 }
212
213 static void
214 fpurestore_xrstor3264(void *addr)
215 {
216 if (SV_CURPROC_FLAG(SV_ILP32))
217 xrstor32((char *)addr, xsave_mask);
218 else
219 xrstor64((char *)addr, xsave_mask);
220 }
221
222 static void
223 fpusave_fxsave(void *addr)
224 {
225
226 fxsave((char *)addr);
227 }
228
229 static void
230 fpurestore_fxrstor(void *addr)
231 {
232
233 fxrstor((char *)addr);
234 }
235
236 DEFINE_IFUNC(, void, fpusave, (void *))
237 {
238 u_int cp[4];
239
240 if (!use_xsave)
241 return (fpusave_fxsave);
242 cpuid_count(0xd, 0x1, cp);
243 if ((cp[0] & CPUID_EXTSTATE_XSAVEOPT) != 0) {
244 return ((cpu_stdext_feature & CPUID_STDEXT_NFPUSG) != 0 ?
245 fpusave_xsaveopt64 : fpusave_xsaveopt3264);
246 }
247 return ((cpu_stdext_feature & CPUID_STDEXT_NFPUSG) != 0 ?
248 fpusave_xsave64 : fpusave_xsave3264);
249 }
250
251 DEFINE_IFUNC(, void, fpurestore, (void *))
252 {
253 if (!use_xsave)
254 return (fpurestore_fxrstor);
255 return ((cpu_stdext_feature & CPUID_STDEXT_NFPUSG) != 0 ?
256 fpurestore_xrstor64 : fpurestore_xrstor3264);
257 }
258
259 void
260 fpususpend(void *addr)
261 {
262 u_long cr0;
263
264 cr0 = rcr0();
265 fpu_enable();
266 fpusave(addr);
267 load_cr0(cr0);
268 }
269
270 void
271 fpuresume(void *addr)
272 {
273 u_long cr0;
274
275 cr0 = rcr0();
276 fpu_enable();
277 fninit();
278 if (use_xsave)
279 load_xcr(XCR0, xsave_mask);
280 fpurestore(addr);
281 load_cr0(cr0);
282 }
283
284 /*
285 * Enable XSAVE if supported and allowed by user.
286 * Calculate the xsave_mask.
287 */
288 static void
289 fpuinit_bsp1(void)
290 {
291 u_int cp[4];
292 uint64_t xsave_mask_user;
293 bool old_wp;
294
295 if (!use_xsave)
296 return;
297 cpuid_count(0xd, 0x0, cp);
298 xsave_mask = XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
299 if ((cp[0] & xsave_mask) != xsave_mask)
300 panic("CPU0 does not support X87 or SSE: %x", cp[0]);
301 xsave_mask = ((uint64_t)cp[3] << 32) | cp[0];
302 xsave_mask_user = xsave_mask;
303 TUNABLE_ULONG_FETCH("hw.xsave_mask", &xsave_mask_user);
304 xsave_mask_user |= XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
305 xsave_mask &= xsave_mask_user;
306 if ((xsave_mask & XFEATURE_AVX512) != XFEATURE_AVX512)
307 xsave_mask &= ~XFEATURE_AVX512;
308 if ((xsave_mask & XFEATURE_MPX) != XFEATURE_MPX)
309 xsave_mask &= ~XFEATURE_MPX;
310
311 cpuid_count(0xd, 0x1, cp);
312 if ((cp[0] & CPUID_EXTSTATE_XSAVEOPT) != 0) {
313 /*
314 * Patch the XSAVE instruction in the cpu_switch code
315 * to XSAVEOPT. We assume that the XSAVE encoding uses a
316 * REX prefix, and set bit 4 of the ModR/M byte.
317 *
318 * It seems that some BIOSes give control to the OS
319 * with CR0.WP already set, making the kernel text
320 * read-only before cpu_startup().
321 */
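/*
 * Illustrative encoding sketch (the actual operand register used in
 * cpu_switch.S is not assumed here): with a REX prefix the bytes are
 * REX, 0x0f, 0xae, ModR/M, where XSAVE is /4 (reg field 100b) and
 * XSAVEOPT is /6 (reg field 110b).  OR-ing 0x10 into byte 3 flips
 * bit 1 of the reg field, e.g. ModR/M 0x27 for a hypothetical
 * "xsave (%rdi)" becomes 0x37, i.e. "xsaveopt (%rdi)".
 */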
322 old_wp = disable_wp();
323 ctx_switch_xsave32[3] |= 0x10;
324 ctx_switch_xsave[3] |= 0x10;
325 restore_wp(old_wp);
326 }
327 }
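/*
 * Usage sketch: the hw.xsave_mask loader tunable fetched above lets the
 * administrator trim the enabled state components, e.g. in loader.conf:
 *
 *	hw.xsave_mask="0x7"
 *
 * would restrict XCR0 to x87 | SSE | AVX (assuming the CPU offers them).
 * As the code above shows, x87 and SSE are always forced back on, and
 * partially selected AVX-512 or MPX component groups are dropped whole.
 */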
328
329 /*
330 * Calculate the fpu save area size.
331 */
332 static void
333 fpuinit_bsp2(void)
334 {
335 u_int cp[4];
336
337 if (use_xsave) {
338 cpuid_count(0xd, 0x0, cp);
339 cpu_max_ext_state_size = cp[1];
340
341 /*
342 * Reload the cpu_feature2, since we enabled OSXSAVE.
343 */
344 do_cpuid(1, cp);
345 cpu_feature2 = cp[2];
346 } else
347 cpu_max_ext_state_size = sizeof(struct savefpu);
348 }
349
350 /*
351 * Initialize the floating point unit.
352 */
353 void
354 fpuinit(void)
355 {
356 register_t saveintr;
357 uint64_t cr4;
358 u_int mxcsr;
359 u_short control;
360
361 TSENTER();
362 if (IS_BSP())
363 fpuinit_bsp1();
364
365 if (use_xsave) {
366 cr4 = rcr4();
367
368 /*
369 * Revert enablement of PKRU if user disabled its
370 * saving on context switches by clearing the bit in
371 * the xsave mask. Also redundantly clear the bit in
372 * cpu_stdext_feature2 to prevent pmap from ever
373 * trying to set the page table bits.
374 */
375 if ((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0 &&
376 (xsave_mask & XFEATURE_ENABLED_PKRU) == 0) {
377 cr4 &= ~CR4_PKE;
378 cpu_stdext_feature2 &= ~CPUID_STDEXT2_PKU;
379 }
380
381 load_cr4(cr4 | CR4_XSAVE);
382 load_xcr(XCR0, xsave_mask);
383 }
384
385 /*
386 * XCR0 shall be set up before the CPU can report the save area size.
387 */
388 if (IS_BSP())
389 fpuinit_bsp2();
390
391 /*
392 * It is too early for critical_enter() to work on AP.
393 */
394 saveintr = intr_disable();
395 fpu_enable();
396 fninit();
397 control = __INITIAL_FPUCW__;
398 fldcw(control);
399 mxcsr = __INITIAL_MXCSR__;
400 ldmxcsr(mxcsr);
401 fpu_disable();
402 intr_restore(saveintr);
403 TSEXIT();
404 }
405
406 /*
407 * On the boot CPU we generate a clean state that is used to
408 * initialize the floating point unit when it is first used by a
409 * process.
410 */
411 static void
412 fpuinitstate(void *arg __unused)
413 {
414 uint64_t *xstate_bv;
415 register_t saveintr;
416 int cp[4], i, max_ext_n;
417
418 /* Do potentially blocking operations before disabling interrupts. */
419 fpu_save_area_zone = uma_zcreate("FPU_save_area",
420 cpu_max_ext_state_size, NULL, NULL, NULL, NULL,
421 XSAVE_AREA_ALIGN - 1, 0);
422 fpu_initialstate = uma_zalloc(fpu_save_area_zone, M_WAITOK | M_ZERO);
423 if (use_xsave) {
424 max_ext_n = flsl(xsave_mask);
425 xsave_area_desc = malloc(max_ext_n * sizeof(struct
426 xsave_area_elm_descr), M_DEVBUF, M_WAITOK | M_ZERO);
427 }
428
429 cpu_thread_alloc(&thread0);
430
431 saveintr = intr_disable();
432 fpu_enable();
433
434 fpusave_fxsave(fpu_initialstate);
435 if (fpu_initialstate->sv_env.en_mxcsr_mask)
436 cpu_mxcsr_mask = fpu_initialstate->sv_env.en_mxcsr_mask;
437 else
438 cpu_mxcsr_mask = 0xFFBF;
439
440 /*
441 * The fninit instruction does not modify the XMM or x87
442 * (MM/ST) registers, so the fpusave call above dumped whatever
443 * garbage those registers held after reset into the saved
444 * initial state.  Clear the XMM and x87 register file images
445 * to make the startup program state and signal handler XMM/x87
446 * register content predictable.
447 */
448 bzero(fpu_initialstate->sv_fp, sizeof(fpu_initialstate->sv_fp));
449 bzero(fpu_initialstate->sv_xmm, sizeof(fpu_initialstate->sv_xmm));
450
451 /*
452 * Create a table describing the layout of the CPU Extended
453 * Save Area. See Intel SDM rev. 075 Vol. 1 13.4.1 "Legacy
454 * Region of an XSAVE Area" for the source of offsets/sizes.
455 */
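/*
 * For orientation (typical values, not guaranteed by this code): in the
 * standard format the legacy region spans bytes 0-511 (x87 in 0-159,
 * XMM/MXCSR in 160-415, matching the table built below), the XSAVE
 * header spans bytes 512-575, and CPUID leaf 0xd reports the remaining
 * per-component offsets and sizes, e.g. the AVX YMM_Hi128 component is
 * commonly at offset 576 with size 256.
 */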
456 if (use_xsave) {
457 cpuid_count(0xd, 1, cp);
458 xsave_extensions = cp[0];
459
460 xstate_bv = (uint64_t *)((char *)(fpu_initialstate + 1) +
461 offsetof(struct xstate_hdr, xstate_bv));
462 *xstate_bv = XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
463
464 /* x87 state */
465 xsave_area_desc[0].offset = 0;
466 xsave_area_desc[0].size = 160;
467 /* XMM */
468 xsave_area_desc[1].offset = 160;
469 xsave_area_desc[1].size = 416 - 160;
470
471 for (i = 2; i < max_ext_n; i++) {
472 cpuid_count(0xd, i, cp);
473 xsave_area_desc[i].size = cp[0];
474 xsave_area_desc[i].offset = cp[1];
475 xsave_area_desc[i].flags = cp[2];
476 }
477 }
478
479 fpu_disable();
480 intr_restore(saveintr);
481 }
482 /* EFIRT needs this to be initialized before we can enter our EFI environment */
483 SYSINIT(fpuinitstate, SI_SUB_CPU, SI_ORDER_ANY, fpuinitstate, NULL);
484
485 /*
486 * Free coprocessor (if we have it).
487 */
488 void
489 fpuexit(struct thread *td)
490 {
491
492 critical_enter();
493 if (curthread == PCPU_GET(fpcurthread)) {
494 fpu_enable();
495 fpusave(curpcb->pcb_save);
496 fpu_disable();
497 PCPU_SET(fpcurthread, NULL);
498 }
499 critical_exit();
500 }
501
502 int
503 fpuformat(void)
504 {
505
506 return (_MC_FPFMT_XMM);
507 }
508
509 /*
510 * The following mechanism is used to ensure that the FPE_... value
511 * that is passed as a trapcode to the signal handler of the user
512 * process does not have more than one bit set.
513 *
514 * Multiple bits may be set if the user process modifies the control
515 * word while a status word bit is already set. While this is a sign
516 * of bad coding, we have no choice but to narrow them down to one
517 * bit, since we must not send a trapcode that is not exactly one of
518 * the FPE_ macros.
519 *
520 * The mechanism has a static table with 128 entries. Each combination
521 * of the 7 FPU status word exception bits directly translates to a
522 * position in this table, where a single FPE_... value is stored.
523 * This FPE_... value stored there is considered the "most important"
524 * of the exception bits and will be sent as the signal code. The
525 * precedence of the bits is based upon Intel Document "Numerical
526 * Applications", Chapter "Special Computational Situations".
527 *
528 * The macro to choose one of these values does these steps: 1) Throw
529 * away status word bits that cannot be masked. 2) Throw away the bits
530 * currently masked in the control word, assuming the user isn't
531 * interested in them anymore. 3) Reinsert status word bit 7 (stack
532 * fault) if it is set, which cannot be masked but must be preserved.
533 * 4) Use the remaining bits to point into the trapcode table.
534 *
535 * The 6 maskable bits in order of their preference, as stated in the
536 * above referenced Intel manual:
537 * 1 Invalid operation (FP_X_INV)
538 * 1a Stack underflow
539 * 1b Stack overflow
540 * 1c Operand of unsupported format
541 * 1d SNaN operand.
542 * 2 QNaN operand (not an exception, irrelevant here)
543 * 3 Any other invalid-operation not mentioned above or zero divide
544 * (FP_X_INV, FP_X_DZ)
545 * 4 Denormal operand (FP_X_DNML)
546 * 5 Numeric over/underflow (FP_X_OFL, FP_X_UFL)
547 * 6 Inexact result (FP_X_IMP)
548 */
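/*
 * Worked example (illustrative values): starting from the default control
 * word 0x037f, a program that unmasks only zero-divide has CW = 0x037b, so
 * (~control & 0x3f) = 0x04 and the index mask is 0x44.  A later divide by
 * zero sets the ZE status bit; status & 0x44 = 0x04 and fpetable[4] yields
 * FPE_FLTDIV, which becomes the SIGFPE si_code.
 */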
549 static char fpetable[128] = {
550 0,
551 FPE_FLTINV, /* 1 - INV */
552 FPE_FLTUND, /* 2 - DNML */
553 FPE_FLTINV, /* 3 - INV | DNML */
554 FPE_FLTDIV, /* 4 - DZ */
555 FPE_FLTINV, /* 5 - INV | DZ */
556 FPE_FLTDIV, /* 6 - DNML | DZ */
557 FPE_FLTINV, /* 7 - INV | DNML | DZ */
558 FPE_FLTOVF, /* 8 - OFL */
559 FPE_FLTINV, /* 9 - INV | OFL */
560 FPE_FLTUND, /* A - DNML | OFL */
561 FPE_FLTINV, /* B - INV | DNML | OFL */
562 FPE_FLTDIV, /* C - DZ | OFL */
563 FPE_FLTINV, /* D - INV | DZ | OFL */
564 FPE_FLTDIV, /* E - DNML | DZ | OFL */
565 FPE_FLTINV, /* F - INV | DNML | DZ | OFL */
566 FPE_FLTUND, /* 10 - UFL */
567 FPE_FLTINV, /* 11 - INV | UFL */
568 FPE_FLTUND, /* 12 - DNML | UFL */
569 FPE_FLTINV, /* 13 - INV | DNML | UFL */
570 FPE_FLTDIV, /* 14 - DZ | UFL */
571 FPE_FLTINV, /* 15 - INV | DZ | UFL */
572 FPE_FLTDIV, /* 16 - DNML | DZ | UFL */
573 FPE_FLTINV, /* 17 - INV | DNML | DZ | UFL */
574 FPE_FLTOVF, /* 18 - OFL | UFL */
575 FPE_FLTINV, /* 19 - INV | OFL | UFL */
576 FPE_FLTUND, /* 1A - DNML | OFL | UFL */
577 FPE_FLTINV, /* 1B - INV | DNML | OFL | UFL */
578 FPE_FLTDIV, /* 1C - DZ | OFL | UFL */
579 FPE_FLTINV, /* 1D - INV | DZ | OFL | UFL */
580 FPE_FLTDIV, /* 1E - DNML | DZ | OFL | UFL */
581 FPE_FLTINV, /* 1F - INV | DNML | DZ | OFL | UFL */
582 FPE_FLTRES, /* 20 - IMP */
583 FPE_FLTINV, /* 21 - INV | IMP */
584 FPE_FLTUND, /* 22 - DNML | IMP */
585 FPE_FLTINV, /* 23 - INV | DNML | IMP */
586 FPE_FLTDIV, /* 24 - DZ | IMP */
587 FPE_FLTINV, /* 25 - INV | DZ | IMP */
588 FPE_FLTDIV, /* 26 - DNML | DZ | IMP */
589 FPE_FLTINV, /* 27 - INV | DNML | DZ | IMP */
590 FPE_FLTOVF, /* 28 - OFL | IMP */
591 FPE_FLTINV, /* 29 - INV | OFL | IMP */
592 FPE_FLTUND, /* 2A - DNML | OFL | IMP */
593 FPE_FLTINV, /* 2B - INV | DNML | OFL | IMP */
594 FPE_FLTDIV, /* 2C - DZ | OFL | IMP */
595 FPE_FLTINV, /* 2D - INV | DZ | OFL | IMP */
596 FPE_FLTDIV, /* 2E - DNML | DZ | OFL | IMP */
597 FPE_FLTINV, /* 2F - INV | DNML | DZ | OFL | IMP */
598 FPE_FLTUND, /* 30 - UFL | IMP */
599 FPE_FLTINV, /* 31 - INV | UFL | IMP */
600 FPE_FLTUND, /* 32 - DNML | UFL | IMP */
601 FPE_FLTINV, /* 33 - INV | DNML | UFL | IMP */
602 FPE_FLTDIV, /* 34 - DZ | UFL | IMP */
603 FPE_FLTINV, /* 35 - INV | DZ | UFL | IMP */
604 FPE_FLTDIV, /* 36 - DNML | DZ | UFL | IMP */
605 FPE_FLTINV, /* 37 - INV | DNML | DZ | UFL | IMP */
606 FPE_FLTOVF, /* 38 - OFL | UFL | IMP */
607 FPE_FLTINV, /* 39 - INV | OFL | UFL | IMP */
608 FPE_FLTUND, /* 3A - DNML | OFL | UFL | IMP */
609 FPE_FLTINV, /* 3B - INV | DNML | OFL | UFL | IMP */
610 FPE_FLTDIV, /* 3C - DZ | OFL | UFL | IMP */
611 FPE_FLTINV, /* 3D - INV | DZ | OFL | UFL | IMP */
612 FPE_FLTDIV, /* 3E - DNML | DZ | OFL | UFL | IMP */
613 FPE_FLTINV, /* 3F - INV | DNML | DZ | OFL | UFL | IMP */
614 FPE_FLTSUB, /* 40 - STK */
615 FPE_FLTSUB, /* 41 - INV | STK */
616 FPE_FLTUND, /* 42 - DNML | STK */
617 FPE_FLTSUB, /* 43 - INV | DNML | STK */
618 FPE_FLTDIV, /* 44 - DZ | STK */
619 FPE_FLTSUB, /* 45 - INV | DZ | STK */
620 FPE_FLTDIV, /* 46 - DNML | DZ | STK */
621 FPE_FLTSUB, /* 47 - INV | DNML | DZ | STK */
622 FPE_FLTOVF, /* 48 - OFL | STK */
623 FPE_FLTSUB, /* 49 - INV | OFL | STK */
624 FPE_FLTUND, /* 4A - DNML | OFL | STK */
625 FPE_FLTSUB, /* 4B - INV | DNML | OFL | STK */
626 FPE_FLTDIV, /* 4C - DZ | OFL | STK */
627 FPE_FLTSUB, /* 4D - INV | DZ | OFL | STK */
628 FPE_FLTDIV, /* 4E - DNML | DZ | OFL | STK */
629 FPE_FLTSUB, /* 4F - INV | DNML | DZ | OFL | STK */
630 FPE_FLTUND, /* 50 - UFL | STK */
631 FPE_FLTSUB, /* 51 - INV | UFL | STK */
632 FPE_FLTUND, /* 52 - DNML | UFL | STK */
633 FPE_FLTSUB, /* 53 - INV | DNML | UFL | STK */
634 FPE_FLTDIV, /* 54 - DZ | UFL | STK */
635 FPE_FLTSUB, /* 55 - INV | DZ | UFL | STK */
636 FPE_FLTDIV, /* 56 - DNML | DZ | UFL | STK */
637 FPE_FLTSUB, /* 57 - INV | DNML | DZ | UFL | STK */
638 FPE_FLTOVF, /* 58 - OFL | UFL | STK */
639 FPE_FLTSUB, /* 59 - INV | OFL | UFL | STK */
640 FPE_FLTUND, /* 5A - DNML | OFL | UFL | STK */
641 FPE_FLTSUB, /* 5B - INV | DNML | OFL | UFL | STK */
642 FPE_FLTDIV, /* 5C - DZ | OFL | UFL | STK */
643 FPE_FLTSUB, /* 5D - INV | DZ | OFL | UFL | STK */
644 FPE_FLTDIV, /* 5E - DNML | DZ | OFL | UFL | STK */
645 FPE_FLTSUB, /* 5F - INV | DNML | DZ | OFL | UFL | STK */
646 FPE_FLTRES, /* 60 - IMP | STK */
647 FPE_FLTSUB, /* 61 - INV | IMP | STK */
648 FPE_FLTUND, /* 62 - DNML | IMP | STK */
649 FPE_FLTSUB, /* 63 - INV | DNML | IMP | STK */
650 FPE_FLTDIV, /* 64 - DZ | IMP | STK */
651 FPE_FLTSUB, /* 65 - INV | DZ | IMP | STK */
652 FPE_FLTDIV, /* 66 - DNML | DZ | IMP | STK */
653 FPE_FLTSUB, /* 67 - INV | DNML | DZ | IMP | STK */
654 FPE_FLTOVF, /* 68 - OFL | IMP | STK */
655 FPE_FLTSUB, /* 69 - INV | OFL | IMP | STK */
656 FPE_FLTUND, /* 6A - DNML | OFL | IMP | STK */
657 FPE_FLTSUB, /* 6B - INV | DNML | OFL | IMP | STK */
658 FPE_FLTDIV, /* 6C - DZ | OFL | IMP | STK */
659 FPE_FLTSUB, /* 6D - INV | DZ | OFL | IMP | STK */
660 FPE_FLTDIV, /* 6E - DNML | DZ | OFL | IMP | STK */
661 FPE_FLTSUB, /* 6F - INV | DNML | DZ | OFL | IMP | STK */
662 FPE_FLTUND, /* 70 - UFL | IMP | STK */
663 FPE_FLTSUB, /* 71 - INV | UFL | IMP | STK */
664 FPE_FLTUND, /* 72 - DNML | UFL | IMP | STK */
665 FPE_FLTSUB, /* 73 - INV | DNML | UFL | IMP | STK */
666 FPE_FLTDIV, /* 74 - DZ | UFL | IMP | STK */
667 FPE_FLTSUB, /* 75 - INV | DZ | UFL | IMP | STK */
668 FPE_FLTDIV, /* 76 - DNML | DZ | UFL | IMP | STK */
669 FPE_FLTSUB, /* 77 - INV | DNML | DZ | UFL | IMP | STK */
670 FPE_FLTOVF, /* 78 - OFL | UFL | IMP | STK */
671 FPE_FLTSUB, /* 79 - INV | OFL | UFL | IMP | STK */
672 FPE_FLTUND, /* 7A - DNML | OFL | UFL | IMP | STK */
673 FPE_FLTSUB, /* 7B - INV | DNML | OFL | UFL | IMP | STK */
674 FPE_FLTDIV, /* 7C - DZ | OFL | UFL | IMP | STK */
675 FPE_FLTSUB, /* 7D - INV | DZ | OFL | UFL | IMP | STK */
676 FPE_FLTDIV, /* 7E - DNML | DZ | OFL | UFL | IMP | STK */
677 FPE_FLTSUB, /* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */
678 };
679
680 /*
681 * Read the FP status and control words, then generate si_code value
682 * for SIGFPE. The error code chosen will be one of the
683 * FPE_... macros. It will be sent as the second argument to old
684 * BSD-style signal handlers and as "siginfo_t->si_code" (second
685 * argument) to SA_SIGINFO signal handlers.
686 *
687 * Some time ago, we cleared the x87 exceptions with FNCLEX there.
688 * Clearing exceptions was necessary mainly to avoid IRQ13 bugs. The
689 * usermode code which understands the FPU hardware enough to enable
690 * the exceptions can also handle clearing the exception state in the
691 * handler. The only consequence of not clearing the exception is the
692 * rethrow of the SIGFPE on return from the signal handler and
693 * reexecution of the corresponding instruction.
694 *
695 * For XMM traps, the exceptions were never cleared.
696 */
697 int
698 fputrap_x87(void)
699 {
700 struct savefpu *pcb_save;
701 u_short control, status;
702
703 critical_enter();
704
705 /*
706 * Interrupt handling (for another interrupt) may have pushed the
707 * state to memory. Fetch the relevant parts of the state from
708 * wherever they are.
709 */
710 if (PCPU_GET(fpcurthread) != curthread) {
711 pcb_save = curpcb->pcb_save;
712 control = pcb_save->sv_env.en_cw;
713 status = pcb_save->sv_env.en_sw;
714 } else {
715 fnstcw(&control);
716 fnstsw(&status);
717 }
718
719 critical_exit();
720 return (fpetable[status & ((~control & 0x3f) | 0x40)]);
721 }
722
723 int
724 fputrap_sse(void)
725 {
726 u_int mxcsr;
727
728 critical_enter();
729 if (PCPU_GET(fpcurthread) != curthread)
730 mxcsr = curpcb->pcb_save->sv_env.en_mxcsr;
731 else
732 stmxcsr(&mxcsr);
733 critical_exit();
734 return (fpetable[(mxcsr & (~mxcsr >> 7)) & 0x3f]);
735 }
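/*
 * Worked example (illustrative values): from the default MXCSR 0x1f80,
 * clearing only the zero-divide mask (bit 9) gives 0x1d80.  A later divide
 * by zero sets the ZE flag (bit 2), so mxcsr = 0x1d84; (~mxcsr >> 7) & 0x3f
 * = 0x04 selects the unmasked exception bits, mxcsr & 0x04 = 0x04, and
 * fpetable[4] again maps to FPE_FLTDIV.
 */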
736
737 static void
738 restore_fpu_curthread(struct thread *td)
739 {
740 struct pcb *pcb;
741
742 /*
743 * Record new context early in case frstor causes a trap.
744 */
745 PCPU_SET(fpcurthread, td);
746
747 fpu_enable();
748 fpu_clean_state();
749 pcb = td->td_pcb;
750
751 if ((pcb->pcb_flags & PCB_FPUINITDONE) == 0) {
752 /*
753 * This is the first time this thread has used the FPU or
754 * the PCB doesn't contain a clean FPU state. Explicitly
755 * load an initial state.
756 *
757 * We prefer to restore the state from the actual save
758 * area in PCB instead of directly loading from
759 * fpu_initialstate, to ignite the XSAVEOPT
760 * tracking engine.
761 */
762 bcopy(fpu_initialstate, pcb->pcb_save,
763 cpu_max_ext_state_size);
764 fpurestore(pcb->pcb_save);
765 if (pcb->pcb_initial_fpucw != __INITIAL_FPUCW__)
766 fldcw(pcb->pcb_initial_fpucw);
767 if (PCB_USER_FPU(pcb))
768 set_pcb_flags(pcb, PCB_FPUINITDONE |
769 PCB_USERFPUINITDONE);
770 else
771 set_pcb_flags(pcb, PCB_FPUINITDONE);
772 } else
773 fpurestore(pcb->pcb_save);
774 }
775
776 /*
777 * Device Not Available (DNA, #NM) exception handler.
778 *
779 * It would be better to switch FP context here (if curthread !=
780 * fpcurthread) and not necessarily for every context switch, but it
781 * is too hard to access foreign pcb's.
782 */
783 void
784 fpudna(void)
785 {
786 struct thread *td;
787
788 td = curthread;
789 /*
790 * This handler is entered with interrupts enabled, so context
791 * switches may occur before critical_enter() is executed. If
792 * a context switch occurs, then when we regain control, our
793 * state will have been completely restored. The CPU may
794 * change underneath us, but the only part of our context that
795 * lives in the CPU is CR0.TS and that will be "restored" by
796 * setting it on the new CPU.
797 */
798 critical_enter();
799
800 KASSERT((curpcb->pcb_flags & PCB_FPUNOSAVE) == 0,
801 ("fpudna while in fpu_kern_enter(FPU_KERN_NOCTX)"));
802 if (__predict_false(PCPU_GET(fpcurthread) == td)) {
803 /*
804 * Some virtual machines seem to set %cr0.TS at
805 * arbitrary moments. Silently clear the TS bit
806 * regardless of the eager/lazy FPU context switch
807 * mode.
808 */
809 fpu_enable();
810 } else {
811 if (__predict_false(PCPU_GET(fpcurthread) != NULL)) {
812 panic(
813 "fpudna: fpcurthread = %p (%d), curthread = %p (%d)\n",
814 PCPU_GET(fpcurthread),
815 PCPU_GET(fpcurthread)->td_tid, td, td->td_tid);
816 }
817 restore_fpu_curthread(td);
818 }
819 critical_exit();
820 }
821
822 void fpu_activate_sw(struct thread *td); /* Called from the context switch */
823 void
824 fpu_activate_sw(struct thread *td)
825 {
826
827 if ((td->td_pflags & TDP_KTHREAD) != 0 || !PCB_USER_FPU(td->td_pcb)) {
828 PCPU_SET(fpcurthread, NULL);
829 fpu_disable();
830 } else if (PCPU_GET(fpcurthread) != td) {
831 restore_fpu_curthread(td);
832 }
833 }
834
835 void
836 fpudrop(void)
837 {
838 struct thread *td;
839
840 td = PCPU_GET(fpcurthread);
841 KASSERT(td == curthread, ("fpudrop: fpcurthread != curthread"));
842 CRITICAL_ASSERT(td);
843 PCPU_SET(fpcurthread, NULL);
844 clear_pcb_flags(td->td_pcb, PCB_FPUINITDONE);
845 fpu_disable();
846 }
847
848 /*
849 * Get the user state of the FPU into pcb->pcb_user_save without
850 * dropping ownership (if possible). It returns the FPU ownership
851 * status.
852 */
853 int
854 fpugetregs(struct thread *td)
855 {
856 struct pcb *pcb;
857 uint64_t *xstate_bv, bit;
858 char *sa;
859 struct savefpu *s;
860 uint32_t mxcsr, mxcsr_mask;
861 int max_ext_n, i, owned;
862 bool do_mxcsr;
863
864 pcb = td->td_pcb;
865 critical_enter();
866 if ((pcb->pcb_flags & PCB_USERFPUINITDONE) == 0) {
867 bcopy(fpu_initialstate, get_pcb_user_save_pcb(pcb),
868 cpu_max_ext_state_size);
869 get_pcb_user_save_pcb(pcb)->sv_env.en_cw =
870 pcb->pcb_initial_fpucw;
871 fpuuserinited(td);
872 critical_exit();
873 return (_MC_FPOWNED_PCB);
874 }
875 if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
876 fpusave(get_pcb_user_save_pcb(pcb));
877 owned = _MC_FPOWNED_FPU;
878 } else {
879 owned = _MC_FPOWNED_PCB;
880 }
881 if (use_xsave) {
882 /*
883 * Handle partially saved state.
884 */
885 sa = (char *)get_pcb_user_save_pcb(pcb);
886 xstate_bv = (uint64_t *)(sa + sizeof(struct savefpu) +
887 offsetof(struct xstate_hdr, xstate_bv));
888 max_ext_n = flsl(xsave_mask);
889 for (i = 0; i < max_ext_n; i++) {
890 bit = 1ULL << i;
891 if ((xsave_mask & bit) == 0 || (*xstate_bv & bit) != 0)
892 continue;
893 do_mxcsr = false;
894 if (i == 0 && (*xstate_bv & (XFEATURE_ENABLED_SSE |
895 XFEATURE_ENABLED_AVX)) != 0) {
896 /*
897 * x87 area was not saved by XSAVEOPT,
898 * but one of XMM or AVX was. Then we need
899 * to preserve MXCSR from being overwritten
900 * with the default value.
901 */
902 s = (struct savefpu *)sa;
903 mxcsr = s->sv_env.en_mxcsr;
904 mxcsr_mask = s->sv_env.en_mxcsr_mask;
905 do_mxcsr = true;
906 }
907 bcopy((char *)fpu_initialstate +
908 xsave_area_desc[i].offset,
909 sa + xsave_area_desc[i].offset,
910 xsave_area_desc[i].size);
911 if (do_mxcsr) {
912 s->sv_env.en_mxcsr = mxcsr;
913 s->sv_env.en_mxcsr_mask = mxcsr_mask;
914 }
915 *xstate_bv |= bit;
916 }
917 }
918 critical_exit();
919 return (owned);
920 }
921
922 void
923 fpuuserinited(struct thread *td)
924 {
925 struct pcb *pcb;
926
927 CRITICAL_ASSERT(td);
928 pcb = td->td_pcb;
929 if (PCB_USER_FPU(pcb))
930 set_pcb_flags(pcb,
931 PCB_FPUINITDONE | PCB_USERFPUINITDONE);
932 else
933 set_pcb_flags(pcb, PCB_FPUINITDONE);
934 }
935
936 int
937 fpusetxstate(struct thread *td, char *xfpustate, size_t xfpustate_size)
938 {
939 struct xstate_hdr *hdr, *ehdr;
940 size_t len, max_len;
941 uint64_t bv;
942
943 /* XXXKIB should we clear all extended state in xstate_bv instead ? */
944 if (xfpustate == NULL)
945 return (0);
946 if (!use_xsave)
947 return (EOPNOTSUPP);
948
949 len = xfpustate_size;
950 if (len < sizeof(struct xstate_hdr))
951 return (EINVAL);
952 max_len = cpu_max_ext_state_size - sizeof(struct savefpu);
953 if (len > max_len)
954 return (EINVAL);
955
956 ehdr = (struct xstate_hdr *)xfpustate;
957 bv = ehdr->xstate_bv;
958
959 /*
960 * Avoid #gp.
961 */
962 if (bv & ~xsave_mask)
963 return (EINVAL);
964
965 hdr = (struct xstate_hdr *)(get_pcb_user_save_td(td) + 1);
966
967 hdr->xstate_bv = bv;
968 bcopy(xfpustate + sizeof(struct xstate_hdr),
969 (char *)(hdr + 1), len - sizeof(struct xstate_hdr));
970
971 return (0);
972 }
973
974 /*
975 * Set the state of the FPU.
976 */
977 int
978 fpusetregs(struct thread *td, struct savefpu *addr, char *xfpustate,
979 size_t xfpustate_size)
980 {
981 struct pcb *pcb;
982 int error;
983
984 addr->sv_env.en_mxcsr &= cpu_mxcsr_mask;
985 pcb = td->td_pcb;
986 error = 0;
987 critical_enter();
988 if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
989 error = fpusetxstate(td, xfpustate, xfpustate_size);
990 if (error == 0) {
991 bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
992 fpurestore(get_pcb_user_save_td(td));
993 set_pcb_flags(pcb, PCB_FPUINITDONE |
994 PCB_USERFPUINITDONE);
995 }
996 } else {
997 error = fpusetxstate(td, xfpustate, xfpustate_size);
998 if (error == 0) {
999 bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
1000 fpuuserinited(td);
1001 }
1002 }
1003 critical_exit();
1004 return (error);
1005 }
1006
1007 /*
1008 * On AuthenticAMD processors, the fxrstor instruction does not restore
1009 * the x87's stored last instruction pointer, last data pointer, and last
1010 * opcode values, except in the rare case in which the exception summary
1011 * (ES) bit in the x87 status word is set to 1.
1012 *
1013 * In order to avoid leaking this information across processes, we clean
1014 * these values by performing a dummy load before executing fxrstor().
1015 */
1016 static void
1017 fpu_clean_state(void)
1018 {
1019 static float dummy_variable = 0.0;
1020 u_short status;
1021
1022 /*
1023 * Clear the ES bit in the x87 status word if it is currently
1024 * set, in order to avoid causing a fault in the upcoming load.
1025 */
1026 fnstsw(&status);
1027 if (status & 0x80)
1028 fnclex();
1029
1030 /*
1031 * Load the dummy variable into the x87 stack. This mangles
1032 * the x87 stack, but we don't care since we're about to call
1033 * fxrstor() anyway.
1034 */
1035 __asm __volatile("ffree %%st(7); flds %0" : : "m" (dummy_variable));
1036 }
1037
1038 /*
1039 * This really sucks. We want the acpi version only, but it requires
1040 * the isa_if.h file in order to get the definitions.
1041 */
1042 #include "opt_isa.h"
1043 #ifdef DEV_ISA
1044 #include <isa/isavar.h>
1045 /*
1046 * This sucks up the legacy ISA support assignments from PNPBIOS/ACPI.
1047 */
1048 static struct isa_pnp_id fpupnp_ids[] = {
1049 { 0x040cd041, "Legacy ISA coprocessor support" }, /* PNP0C04 */
1050 { 0 }
1051 };
1052
1053 static int
1054 fpupnp_probe(device_t dev)
1055 {
1056 int result;
1057
1058 result = ISA_PNP_PROBE(device_get_parent(dev), dev, fpupnp_ids);
1059 if (result <= 0)
1060 device_quiet(dev);
1061 return (result);
1062 }
1063
1064 static int
1065 fpupnp_attach(device_t dev)
1066 {
1067
1068 return (0);
1069 }
1070
1071 static device_method_t fpupnp_methods[] = {
1072 /* Device interface */
1073 DEVMETHOD(device_probe, fpupnp_probe),
1074 DEVMETHOD(device_attach, fpupnp_attach),
1075 { 0, 0 }
1076 };
1077
1078 static driver_t fpupnp_driver = {
1079 "fpupnp",
1080 fpupnp_methods,
1081 1, /* no softc */
1082 };
1083
1084 DRIVER_MODULE(fpupnp, acpi, fpupnp_driver, 0, 0);
1085 ISA_PNP_INFO(fpupnp_ids);
1086 #endif /* DEV_ISA */
1087
1088 static MALLOC_DEFINE(M_FPUKERN_CTX, "fpukern_ctx",
1089 "Kernel contexts for FPU state");
1090
1091 #define FPU_KERN_CTX_FPUINITDONE 0x01
1092 #define FPU_KERN_CTX_DUMMY 0x02 /* avoided save for the kern thread */
1093 #define FPU_KERN_CTX_INUSE 0x04
1094
1095 struct fpu_kern_ctx {
1096 struct savefpu *prev;
1097 uint32_t flags;
1098 char hwstate1[];
1099 };
1100
1101 static inline size_t __pure2
1102 fpu_kern_alloc_sz(u_int max_est)
1103 {
1104 return (sizeof(struct fpu_kern_ctx) + XSAVE_AREA_ALIGN + max_est);
1105 }
1106
1107 static inline int __pure2
1108 fpu_kern_malloc_flags(u_int fpflags)
1109 {
1110 return (((fpflags & FPU_KERN_NOWAIT) ? M_NOWAIT : M_WAITOK) | M_ZERO);
1111 }
1112
1113 struct fpu_kern_ctx *
1114 fpu_kern_alloc_ctx_domain(int domain, u_int flags)
1115 {
1116 return (malloc_domainset(fpu_kern_alloc_sz(cpu_max_ext_state_size),
1117 M_FPUKERN_CTX, DOMAINSET_PREF(domain),
1118 fpu_kern_malloc_flags(flags)));
1119 }
1120
1121 struct fpu_kern_ctx *
1122 fpu_kern_alloc_ctx(u_int flags)
1123 {
1124 return (malloc(fpu_kern_alloc_sz(cpu_max_ext_state_size),
1125 M_FPUKERN_CTX, fpu_kern_malloc_flags(flags)));
1126 }
1127
1128 void
1129 fpu_kern_free_ctx(struct fpu_kern_ctx *ctx)
1130 {
1131
1132 KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) == 0, ("free'ing inuse ctx"));
1133 /* XXXKIB clear the memory ? */
1134 free(ctx, M_FPUKERN_CTX);
1135 }
1136
1137 static struct savefpu *
1138 fpu_kern_ctx_savefpu(struct fpu_kern_ctx *ctx)
1139 {
1140 vm_offset_t p;
1141
1142 p = (vm_offset_t)&ctx->hwstate1;
1143 p = roundup2(p, XSAVE_AREA_ALIGN);
1144 return ((struct savefpu *)p);
1145 }
1146
1147 void
1148 fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags)
1149 {
1150 struct pcb *pcb;
1151
1152 pcb = td->td_pcb;
1153 KASSERT((flags & FPU_KERN_NOCTX) != 0 || ctx != NULL,
1154 ("ctx is required when !FPU_KERN_NOCTX"));
1155 KASSERT(ctx == NULL || (ctx->flags & FPU_KERN_CTX_INUSE) == 0,
1156 ("using inuse ctx"));
1157 KASSERT((pcb->pcb_flags & PCB_FPUNOSAVE) == 0,
1158 ("recursive fpu_kern_enter while in PCB_FPUNOSAVE state"));
1159
1160 if ((flags & FPU_KERN_NOCTX) != 0) {
1161 critical_enter();
1162 fpu_enable();
1163 if (curthread == PCPU_GET(fpcurthread)) {
1164 fpusave(curpcb->pcb_save);
1165 PCPU_SET(fpcurthread, NULL);
1166 } else {
1167 KASSERT(PCPU_GET(fpcurthread) == NULL,
1168 ("invalid fpcurthread"));
1169 }
1170
1171 /*
1172 * This breaks XSAVEOPT tracker, but
1173 * PCB_FPUNOSAVE state is supposed to never need to
1174 * save FPU context at all.
1175 */
1176 fpurestore(fpu_initialstate);
1177 set_pcb_flags(pcb, PCB_KERNFPU | PCB_FPUNOSAVE |
1178 PCB_FPUINITDONE);
1179 return;
1180 }
1181 if ((flags & FPU_KERN_KTHR) != 0 && is_fpu_kern_thread(0)) {
1182 ctx->flags = FPU_KERN_CTX_DUMMY | FPU_KERN_CTX_INUSE;
1183 return;
1184 }
1185 critical_enter();
1186 KASSERT(!PCB_USER_FPU(pcb) || pcb->pcb_save ==
1187 get_pcb_user_save_pcb(pcb), ("mangled pcb_save"));
1188 ctx->flags = FPU_KERN_CTX_INUSE;
1189 if ((pcb->pcb_flags & PCB_FPUINITDONE) != 0)
1190 ctx->flags |= FPU_KERN_CTX_FPUINITDONE;
1191 fpuexit(td);
1192 ctx->prev = pcb->pcb_save;
1193 pcb->pcb_save = fpu_kern_ctx_savefpu(ctx);
1194 set_pcb_flags(pcb, PCB_KERNFPU);
1195 clear_pcb_flags(pcb, PCB_FPUINITDONE);
1196 critical_exit();
1197 }
1198
1199 int
1200 fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx)
1201 {
1202 struct pcb *pcb;
1203
1204 pcb = td->td_pcb;
1205
1206 if ((pcb->pcb_flags & PCB_FPUNOSAVE) != 0) {
1207 KASSERT(ctx == NULL, ("non-null ctx after FPU_KERN_NOCTX"));
1208 KASSERT(PCPU_GET(fpcurthread) == NULL,
1209 ("non-NULL fpcurthread for PCB_FPUNOSAVE"));
1210 CRITICAL_ASSERT(td);
1211
1212 clear_pcb_flags(pcb, PCB_FPUNOSAVE | PCB_FPUINITDONE);
1213 fpu_disable();
1214 } else {
1215 KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) != 0,
1216 ("leaving not inuse ctx"));
1217 ctx->flags &= ~FPU_KERN_CTX_INUSE;
1218
1219 if (is_fpu_kern_thread(0) &&
1220 (ctx->flags & FPU_KERN_CTX_DUMMY) != 0)
1221 return (0);
1222 KASSERT((ctx->flags & FPU_KERN_CTX_DUMMY) == 0,
1223 ("dummy ctx"));
1224 critical_enter();
1225 if (curthread == PCPU_GET(fpcurthread))
1226 fpudrop();
1227 pcb->pcb_save = ctx->prev;
1228 }
1229
1230 if (pcb->pcb_save == get_pcb_user_save_pcb(pcb)) {
1231 if ((pcb->pcb_flags & PCB_USERFPUINITDONE) != 0) {
1232 set_pcb_flags(pcb, PCB_FPUINITDONE);
1233 if ((pcb->pcb_flags & PCB_KERNFPU_THR) == 0)
1234 clear_pcb_flags(pcb, PCB_KERNFPU);
1235 } else if ((pcb->pcb_flags & PCB_KERNFPU_THR) == 0)
1236 clear_pcb_flags(pcb, PCB_FPUINITDONE | PCB_KERNFPU);
1237 } else {
1238 if ((ctx->flags & FPU_KERN_CTX_FPUINITDONE) != 0)
1239 set_pcb_flags(pcb, PCB_FPUINITDONE);
1240 else
1241 clear_pcb_flags(pcb, PCB_FPUINITDONE);
1242 KASSERT(!PCB_USER_FPU(pcb), ("unpaired fpu_kern_leave"));
1243 }
1244 critical_exit();
1245 return (0);
1246 }
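/*
 * Usage sketch (not compiled; the consumer names are hypothetical): a
 * kernel subsystem that wants to execute SSE/AVX instructions brackets
 * them with fpu_kern_enter()/fpu_kern_leave(), either with a context
 * allocated up front or, for short non-sleeping sections, with
 * FPU_KERN_NOCTX and no context at all.
 */
#if 0
static struct fpu_kern_ctx *example_ctx;

static void
example_attach(void)
{
	/* May sleep; typically done once at attach time. */
	example_ctx = fpu_kern_alloc_ctx(0);
}

static void
example_simd_work(void)
{
	fpu_kern_enter(curthread, example_ctx, FPU_KERN_NORMAL);
	/* SSE/AVX instructions may be used here. */
	fpu_kern_leave(curthread, example_ctx);
}

static void
example_simd_work_noctx(void)
{
	/* Runs inside a critical section; must not sleep, keep it short. */
	fpu_kern_enter(curthread, NULL, FPU_KERN_NOCTX);
	/* short SSE/AVX sequence */
	fpu_kern_leave(curthread, NULL);
}
#endif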
1247
1248 int
1249 fpu_kern_thread(u_int flags)
1250 {
1251
1252 KASSERT((curthread->td_pflags & TDP_KTHREAD) != 0,
1253 ("Only kthread may use fpu_kern_thread"));
1254 KASSERT(curpcb->pcb_save == get_pcb_user_save_pcb(curpcb),
1255 ("mangled pcb_save"));
1256 KASSERT(PCB_USER_FPU(curpcb), ("recursive call"));
1257
1258 set_pcb_flags(curpcb, PCB_KERNFPU | PCB_KERNFPU_THR);
1259 return (0);
1260 }
1261
1262 int
1263 is_fpu_kern_thread(u_int flags)
1264 {
1265
1266 if ((curthread->td_pflags & TDP_KTHREAD) == 0)
1267 return (0);
1268 return ((curpcb->pcb_flags & PCB_KERNFPU_THR) != 0);
1269 }
1270
1271 /*
1272 * FPU save area alloc/free/init utility routines
1273 */
1274 struct savefpu *
1275 fpu_save_area_alloc(void)
1276 {
1277
1278 return (uma_zalloc(fpu_save_area_zone, M_WAITOK));
1279 }
1280
1281 void
1282 fpu_save_area_free(struct savefpu *fsa)
1283 {
1284
1285 uma_zfree(fpu_save_area_zone, fsa);
1286 }
1287
1288 void
1289 fpu_save_area_reset(struct savefpu *fsa)
1290 {
1291
1292 bcopy(fpu_initialstate, fsa, cpu_max_ext_state_size);
1293 }
1294
1295 static __inline void
1296 xsave_extfeature_check(uint64_t feature)
1297 {
1298
1299 KASSERT((feature & (feature - 1)) == 0,
1300 ("%s: invalid XFEATURE 0x%lx", __func__, feature));
1301 KASSERT(ilog2(feature) < flsl(xsave_mask),
1302 ("%s: unsupported XFEATURE 0x%lx", __func__, feature));
1303 }
1304
1305 static __inline void
1306 xsave_extstate_bv_check(uint64_t xstate_bv)
1307 {
1308 KASSERT(xstate_bv != 0 && ilog2(xstate_bv) < flsl(xsave_mask),
1309 ("%s: invalid XSTATE_BV 0x%lx", __func__, xstate_bv));
1310 }
1311
1312 /*
1313 * Returns whether the XFEATURE 'feature' is supported as a user state
1314 * or supervisor state component.
1315 */
1316 bool
1317 xsave_extfeature_supported(uint64_t feature, bool supervisor)
1318 {
1319 int idx;
1320
1321 KASSERT(use_xsave, ("%s: XSAVE not supported", __func__));
1322 xsave_extfeature_check(feature);
1323
1324 if ((xsave_mask & feature) == 0)
1325 return (false);
1326 idx = ilog2(feature);
1327 return (((xsave_area_desc[idx].flags & CPUID_EXTSTATE_SUPERVISOR) != 0) ==
1328 supervisor);
1329 }
1330
1331 /*
1332 * Returns whether the given XSAVE extension is supported.
1333 */
1334 bool
1335 xsave_extension_supported(uint64_t extension)
1336 {
1337 KASSERT(use_xsave, ("%s: XSAVE not supported", __func__));
1338
1339 return ((xsave_extensions & extension) != 0);
1340 }
1341
1342 /*
1343 * Returns offset for XFEATURE 'feature' given the requested feature bitmap
1344 * 'xstate_bv', and extended region format ('compact').
1345 */
1346 size_t
1347 xsave_area_offset(uint64_t xstate_bv, uint64_t feature,
1348 bool compact)
1349 {
1350 int i, idx;
1351 size_t offs;
1352 struct xsave_area_elm_descr *xep;
1353
1354 KASSERT(use_xsave, ("%s: XSAVE not supported", __func__));
1355 xsave_extstate_bv_check(xstate_bv);
1356 xsave_extfeature_check(feature);
1357
1358 idx = ilog2(feature);
1359 if (!compact)
1360 return (xsave_area_desc[idx].offset);
1361 offs = sizeof(struct savefpu) + sizeof(struct xstate_hdr);
1362 xstate_bv &= ~(XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE);
1363 while ((i = ffs(xstate_bv) - 1) > 0 && i < idx) {
1364 xep = &xsave_area_desc[i];
1365 if ((xep->flags & CPUID_EXTSTATE_ALIGNED) != 0)
1366 offs = roundup2(offs, 64);
1367 offs += xep->size;
1368 xstate_bv &= ~((uint64_t)1 << i);
1369 }
1370
1371 return (offs);
1372 }
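/*
 * Example (illustrative): with xstate_bv = x87 | SSE | AVX and the
 * compacted format, the extended region starts right after the legacy
 * area and XSAVE header (512 + 64 = 576 bytes); since no other enabled
 * component precedes AVX, its offset is 576.  In the standard format the
 * offset instead comes straight from xsave_area_desc[], as filled from
 * CPUID leaf 0xd.
 */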
1373
1374 /*
1375 * Returns the XSAVE area size for the requested feature bitmap
1376 * 'xstate_bv' and extended region format ('compact').
1377 */
1378 size_t
1379 xsave_area_size(uint64_t xstate_bv, bool compact)
1380 {
1381 int last_idx;
1382
1383 KASSERT(use_xsave, ("%s: XSAVE not supported", __func__));
1384 xsave_extstate_bv_check(xstate_bv);
1385
1386 last_idx = ilog2(xstate_bv);
1387
1388 return (xsave_area_offset(xstate_bv, (uint64_t)1 << last_idx, compact) +
1389 xsave_area_desc[last_idx].size);
1390 }
1391
1392 size_t
1393 xsave_area_hdr_offset(void)
1394 {
1395 return (sizeof(struct savefpu));
1396 }
1397