xref: /freebsd/sys/amd64/amd64/fpu.c (revision eacee0ff7ec955b32e09515246bd97b6edcd2b0f)
1 /*-
2  * Copyright (c) 1990 William Jolitz.
3  * Copyright (c) 1991 The Regents of the University of California.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by the University of
17  *	California, Berkeley and its contributors.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	from: @(#)npx.c	7.2 (Berkeley) 5/12/91
35  * $FreeBSD$
36  */
37 
38 #include "opt_cpu.h"
39 #include "opt_debug_npx.h"
40 #include "opt_isa.h"
41 #include "opt_math_emulate.h"
42 #include "opt_npx.h"
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/bus.h>
47 #include <sys/kernel.h>
48 #include <sys/lock.h>
49 #include <sys/malloc.h>
50 #include <sys/module.h>
51 #include <sys/mutex.h>
53 #include <sys/proc.h>
54 #include <sys/sysctl.h>
55 #include <machine/bus.h>
56 #include <sys/rman.h>
57 #ifdef NPX_DEBUG
58 #include <sys/syslog.h>
59 #endif
60 #include <sys/signalvar.h>
61 #include <sys/user.h>
62 
63 #ifndef SMP
64 #include <machine/asmacros.h>
65 #endif
66 #include <machine/cputypes.h>
67 #include <machine/frame.h>
68 #include <machine/md_var.h>
69 #include <machine/pcb.h>
70 #include <machine/psl.h>
71 #ifndef SMP
72 #include <machine/clock.h>
73 #endif
74 #include <machine/resource.h>
75 #include <machine/specialreg.h>
76 #include <machine/segments.h>
77 
78 #ifndef SMP
79 #include <i386/isa/icu.h>
80 #ifdef PC98
81 #include <pc98/pc98/pc98.h>
82 #else
83 #include <i386/isa/isa.h>
84 #endif
85 #endif
86 #include <i386/isa/intr_machdep.h>
87 #ifdef DEV_ISA
88 #include <isa/isavar.h>
89 #endif
90 
91 /*
92  * 387 and 287 Numeric Coprocessor Extension (NPX) Driver.
93  */
94 
95 /* Configuration flags. */
96 #define	NPX_DISABLE_I586_OPTIMIZED_BCOPY	(1 << 0)
97 #define	NPX_DISABLE_I586_OPTIMIZED_BZERO	(1 << 1)
98 #define	NPX_DISABLE_I586_OPTIMIZED_COPYIO	(1 << 2)
99 #define	NPX_PREFER_EMULATOR			(1 << 3)
100 
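/*
 * npx_attach() reads these flags via resource_int_value("npx", 0, "flags"),
 * so they can be supplied as a device hint.  An illustrative example (not
 * part of this file) for /boot/device.hints:
 *
 *	hint.npx.0.flags="0x8"		(NPX_PREFER_EMULATOR)
 */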
101 #ifdef	__GNUC__
102 
103 #define	fldcw(addr)		__asm("fldcw %0" : : "m" (*(addr)))
104 #define	fnclex()		__asm("fnclex")
105 #define	fninit()		__asm("fninit")
106 #define	fnsave(addr)		__asm __volatile("fnsave %0" : "=m" (*(addr)))
107 #define	fnstcw(addr)		__asm __volatile("fnstcw %0" : "=m" (*(addr)))
108 #define	fnstsw(addr)		__asm __volatile("fnstsw %0" : "=m" (*(addr)))
109 #define	fp_divide_by_0()	__asm("fldz; fld1; fdiv %st,%st(1); fnop")
110 #define	frstor(addr)		__asm("frstor %0" : : "m" (*(addr)))
111 #ifdef CPU_ENABLE_SSE
112 #define	fxrstor(addr)		__asm("fxrstor %0" : : "m" (*(addr)))
113 #define	fxsave(addr)		__asm __volatile("fxsave %0" : "=m" (*(addr)))
114 #endif
115 #define	start_emulating()	__asm("smsw %%ax; orb %0,%%al; lmsw %%ax" \
116 				      : : "n" (CR0_TS) : "ax")
117 #define	stop_emulating()	__asm("clts")
118 
119 #else	/* not __GNUC__ */
120 
121 void	fldcw		__P((caddr_t addr));
122 void	fnclex		__P((void));
123 void	fninit		__P((void));
124 void	fnsave		__P((caddr_t addr));
125 void	fnstcw		__P((caddr_t addr));
126 void	fnstsw		__P((caddr_t addr));
127 void	fp_divide_by_0	__P((void));
128 void	frstor		__P((caddr_t addr));
129 #ifdef CPU_ENABLE_SSE
130 void	fxsave		__P((caddr_t addr));
131 void	fxrstor		__P((caddr_t addr));
132 #endif
133 void	start_emulating	__P((void));
134 void	stop_emulating	__P((void));
135 
136 #endif	/* __GNUC__ */
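/*
 * Usage sketch for the emulation-control pair above (this mirrors what
 * npxsave() below actually does): CR0_TS is cleared around real FPU access
 * and set again afterwards so that the next FPU instruction traps to
 * npxdna() via a DNA (device not available) fault.
 *
 *	stop_emulating();	-- clear CR0_TS; FPU instructions execute
 *	fpusave(addr);		-- fnsave/fxsave the current state
 *	start_emulating();	-- set CR0_TS; next FPU instruction faults
 */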
137 
138 #ifdef CPU_ENABLE_SSE
139 #define GET_FPU_CW(thread) \
140 	(cpu_fxsr ? \
141 		(thread)->td_pcb->pcb_save.sv_xmm.sv_env.en_cw : \
142 		(thread)->td_pcb->pcb_save.sv_87.sv_env.en_cw)
143 #define GET_FPU_SW(thread) \
144 	(cpu_fxsr ? \
145 		(thread)->td_pcb->pcb_save.sv_xmm.sv_env.en_sw : \
146 		(thread)->td_pcb->pcb_save.sv_87.sv_env.en_sw)
147 #define GET_FPU_EXSW_PTR(pcb) \
148 	(cpu_fxsr ? \
149 		&(pcb)->pcb_save.sv_xmm.sv_ex_sw : \
150 		&(pcb)->pcb_save.sv_87.sv_ex_sw)
151 #else /* CPU_ENABLE_SSE */
152 #define GET_FPU_CW(thread) \
153 	(thread->td_pcb->pcb_save.sv_87.sv_env.en_cw)
154 #define GET_FPU_SW(thread) \
155 	(thread->td_pcb->pcb_save.sv_87.sv_env.en_sw)
156 #define GET_FPU_EXSW_PTR(pcb) \
157 	(&(pcb)->pcb_save.sv_87.sv_ex_sw)
158 #endif /* CPU_ENABLE_SSE */
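/*
 * Usage sketch (as in npxtrap() below): these macros hide whether the saved
 * state is in fnsave (sv_87) or fxsave (sv_xmm) layout, e.g.
 *
 *	control = GET_FPU_CW(curthread);
 *	status = GET_FPU_SW(curthread);
 */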
159 
160 typedef u_char bool_t;
161 
162 static	int	npx_attach	__P((device_t dev));
163 static	void	npx_identify	__P((driver_t *driver, device_t parent));
164 #ifndef SMP
165 static	void	npx_intr	__P((void *));
166 #endif
167 static	int	npx_probe	__P((device_t dev));
168 static	void	fpusave		__P((union savefpu *));
169 static	void	fpurstor	__P((union savefpu *));
170 #ifdef I586_CPU_XXX
171 static	long	timezero	__P((const char *funcname,
172 				     void (*func)(void *buf, size_t len)));
173 #endif /* I586_CPU */
174 
175 int	hw_float;		/* XXX currently just alias for npx_exists */
176 
177 SYSCTL_INT(_hw,HW_FLOATINGPT, floatingpoint,
178 	CTLFLAG_RD, &hw_float, 0,
179 	"Floatingpoint instructions executed in hardware");
180 
181 #ifndef SMP
182 static	volatile u_int		npx_intrs_while_probing;
183 static	volatile u_int		npx_traps_while_probing;
184 #endif
185 
186 static	bool_t			npx_ex16;
187 static	bool_t			npx_exists;
188 static	bool_t			npx_irq13;
189 
190 #ifndef SMP
191 alias_for_inthand_t probetrap;
192 __asm("								\n\
193 	.text							\n\
194 	.p2align 2,0x90						\n\
195 	.type	" __XSTRING(CNAME(probetrap)) ",@function	\n\
196 " __XSTRING(CNAME(probetrap)) ":				\n\
197 	ss							\n\
198 	incl	" __XSTRING(CNAME(npx_traps_while_probing)) "	\n\
199 	fnclex							\n\
200 	iret							\n\
201 ");
202 #endif /* !SMP */
203 
204 /*
205  * Identify routine.  Create a connection point on our parent for probing.
206  */
207 static void
208 npx_identify(driver, parent)
209 	driver_t *driver;
210 	device_t parent;
211 {
212 	device_t child;
213 
214 	child = BUS_ADD_CHILD(parent, 0, "npx", 0);
215 	if (child == NULL)
216 		panic("npx_identify");
217 }
218 
219 #ifndef SMP
220 /*
221  * Do minimal handling of npx interrupts to convert them to traps.
222  */
223 static void
224 npx_intr(dummy)
225 	void *dummy;
226 {
227 	struct thread *td;
228 
229 #ifndef SMP
230 	npx_intrs_while_probing++;
231 #endif
232 
233 	/*
234 	 * The BUSY# latch must be cleared in all cases so that the next
235 	 * unmasked npx exception causes an interrupt.
236 	 */
237 #ifdef PC98
238 	outb(0xf8, 0);
239 #else
240 	outb(0xf0, 0);
241 #endif
242 
243 	/*
244 	 * fpcurthread is normally non-null here.  In that case, schedule an
245 	 * AST to finish the exception handling in the correct context
246 	 * (this interrupt may occur after the thread has entered the
247 	 * kernel via a syscall or an interrupt).  Otherwise, the npx
248 	 * state of the thread that caused this interrupt must have been
249 	 * pushed to the thread's pcb, and clearing of the busy latch
250 	 * above has finished the (essentially null) handling of this
251 	 * interrupt.  Control will eventually return to the instruction
252 	 * that caused it and it will repeat.  We will eventually (usually
253 	 * soon) win the race to handle the interrupt properly.
254 	 */
255 	td = PCPU_GET(fpcurthread);
256 	if (td != NULL) {
257 		td->td_pcb->pcb_flags |= PCB_NPXTRAP;
258 		mtx_lock_spin(&sched_lock);
259 		td->td_kse->ke_flags |= KEF_ASTPENDING;
260 		mtx_unlock_spin(&sched_lock);
261 	}
262 }
263 #endif /* !SMP */
264 
265 /*
266  * Probe routine.  Initialize cr0 to give correct behaviour for [f]wait
267  * whether the device exists or not (XXX should be elsewhere).  Set flags
268  * to tell npxattach() what to do.  Modify device struct if npx doesn't
269  * need to use interrupts.  Return 0 if device exists.
270  */
271 static int
272 npx_probe(dev)
273 	device_t dev;
274 {
275 #ifndef SMP
276 	struct gate_descriptor save_idt_npxtrap;
277 	struct resource *ioport_res, *irq_res;
278 	void *irq_cookie;
279 	int ioport_rid, irq_num, irq_rid;
280 	u_short control;
281 	u_short status;
282 
283 	save_idt_npxtrap = idt[16];
284 	setidt(16, probetrap, SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
285 	ioport_rid = 0;
286 	ioport_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &ioport_rid,
287 	    IO_NPX, IO_NPX, IO_NPXSIZE, RF_ACTIVE);
288 	if (ioport_res == NULL)
289 		panic("npx: can't get ports");
290 #ifdef PC98
291 	if (resource_int_value("npx", 0, "irq", &irq_num) != 0)
292 		irq_num = 8;
293 #else
294 	if (resource_int_value("npx", 0, "irq", &irq_num) != 0)
295 		irq_num = 13;
296 #endif
297 	irq_rid = 0;
298 	irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &irq_rid, irq_num,
299 	    irq_num, 1, RF_ACTIVE);
300 	if (irq_res == NULL)
301 		panic("npx: can't get IRQ");
302 	if (bus_setup_intr(dev, irq_res, INTR_TYPE_MISC | INTR_FAST, npx_intr,
303 	    NULL, &irq_cookie) != 0)
304 		panic("npx: can't create intr");
305 #endif /* !SMP */
306 
307 	/*
308 	 * Partially reset the coprocessor, if any.  Some BIOS's don't reset
309 	 * it after a warm boot.
310 	 */
311 #ifdef PC98
312 	outb(0xf8,0);
313 #else
314 	outb(0xf1, 0);		/* full reset on some systems, NOP on others */
315 	outb(0xf0, 0);		/* clear BUSY# latch */
316 #endif
317 	/*
318 	 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
319 	 * instructions.  We must set the CR0_MP bit and use the CR0_TS
320 	 * bit to control the trap, because setting the CR0_EM bit does
321 	 * not cause WAIT instructions to trap.  It's important to trap
322 	 * WAIT instructions - otherwise the "wait" variants of no-wait
323 	 * control instructions would degenerate to the "no-wait" variants
324 	 * after FP context switches but work correctly otherwise.  It's
325 	 * particularly important to trap WAITs when there is no NPX -
326 	 * otherwise the "wait" variants would always degenerate.
327 	 *
328 	 * Try setting CR0_NE to get correct error reporting on 486DX's.
329 	 * Setting it should fail or do nothing on lesser processors.
330 	 */
331 	load_cr0(rcr0() | CR0_MP | CR0_NE);
332 	/*
333 	 * But don't trap while we're probing.
334 	 */
335 	stop_emulating();
336 	/*
337 	 * Finish resetting the coprocessor, if any.  If there is an error
338 	 * pending, then we may get a bogus IRQ13, but npx_intr() will handle
339 	 * it OK.  Bogus halts have never been observed, but we enabled
340 	 * IRQ13 and cleared the BUSY# latch early to handle them anyway.
341 	 */
342 	fninit();
343 
344 	device_set_desc(dev, "math processor");
345 
346 #ifdef SMP
347 
348 	/*
349 	 * Exception 16 MUST work for SMP.
350 	 */
351 	npx_ex16 = hw_float = npx_exists = 1;
352 	return (0);
353 
354 #else /* !SMP */
355 
356 	/*
357 	 * Don't use fwait here because it might hang.
358 	 * Don't use fnop here because it usually hangs if there is no FPU.
359 	 */
360 	DELAY(1000);		/* wait for any IRQ13 */
361 #ifdef DIAGNOSTIC
362 	if (npx_intrs_while_probing != 0)
363 		printf("fninit caused %u bogus npx interrupt(s)\n",
364 		       npx_intrs_while_probing);
365 	if (npx_traps_while_probing != 0)
366 		printf("fninit caused %u bogus npx trap(s)\n",
367 		       npx_traps_while_probing);
368 #endif
369 	/*
370 	 * Check for a status of mostly zero.
371 	 */
372 	status = 0x5a5a;
373 	fnstsw(&status);
374 	if ((status & 0xb8ff) == 0) {
375 		/*
376 		 * Good, now check for a proper control word.
377 		 */
378 		control = 0x5a5a;
379 		fnstcw(&control);
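		/*
		 * Worked expectation (a sketch): fninit loads a control word
		 * of 0x037f, and 0x037f & 0x1f3f == 0x033f, so a real FPU
		 * should pass the test below.
		 */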
380 		if ((control & 0x1f3f) == 0x033f) {
381 			hw_float = npx_exists = 1;
382 			/*
383 			 * We have an npx, now divide by 0 to see if exception
384 			 * 16 works.
385 			 */
386 			control &= ~(1 << 2);	/* enable divide by 0 trap */
387 			fldcw(&control);
388 #ifdef FPU_ERROR_BROKEN
389 			/*
390 			 * FPU error signal doesn't work on some CPU
391 			 * accelerator board.
392 			 */
393 			npx_ex16 = 1;
394 			return (0);
395 #endif
396 			npx_traps_while_probing = npx_intrs_while_probing = 0;
397 			fp_divide_by_0();
398 			if (npx_traps_while_probing != 0) {
399 				/*
400 				 * Good, exception 16 works.
401 				 */
402 				npx_ex16 = 1;
403 				goto no_irq13;
404 			}
405 			if (npx_intrs_while_probing != 0) {
406 				/*
407 				 * Bad, we are stuck with IRQ13.
408 				 */
409 				npx_irq13 = 1;
410 				idt[16] = save_idt_npxtrap;
411 				return (0);
412 			}
413 			/*
414 			 * Worse, even IRQ13 is broken.  Use emulator.
415 			 */
416 		}
417 	}
418 	/*
419 	 * Probe failed, but we want to get to npxattach to initialize the
420 	 * emulator and say that it has been installed.  XXX handle devices
421 	 * that aren't really devices better.
422 	 */
423 	/* FALLTHROUGH */
424 no_irq13:
425 	idt[16] = save_idt_npxtrap;
426 	bus_teardown_intr(dev, irq_res, irq_cookie);
427 
428 	/*
429 	 * XXX hack around brokenness of bus_teardown_intr().  If we left the
430 	 * irq active then we would get it instead of exception 16.
431 	 */
432 	mtx_lock_spin(&icu_lock);
433 	INTRDIS(1 << irq_num);
434 	mtx_unlock_spin(&icu_lock);
435 
436 	bus_release_resource(dev, SYS_RES_IRQ, irq_rid, irq_res);
437 	bus_release_resource(dev, SYS_RES_IOPORT, ioport_rid, ioport_res);
438 	return (0);
439 
440 #endif /* SMP */
441 }
442 
443 /*
444  * Attach routine - announce which it is, and wire into system
445  */
446 static int
447 npx_attach(dev)
448 	device_t dev;
449 {
450 	int flags;
451 
452 	if (resource_int_value("npx", 0, "flags", &flags) != 0)
453 		flags = 0;
454 
455 	if (flags)
456 		device_printf(dev, "flags 0x%x ", flags);
457 	if (npx_irq13) {
458 		device_printf(dev, "using IRQ 13 interface\n");
459 	} else {
460 #if defined(MATH_EMULATE) || defined(GPL_MATH_EMULATE)
461 		if (npx_ex16) {
462 			if (!(flags & NPX_PREFER_EMULATOR))
463 				device_printf(dev, "INT 16 interface\n");
464 			else {
465 				device_printf(dev, "FPU exists, but flags request "
466 				    "emulator\n");
467 				hw_float = npx_exists = 0;
468 			}
469 		} else if (npx_exists) {
470 			device_printf(dev, "error reporting broken; using 387 emulator\n");
471 			hw_float = npx_exists = 0;
472 		} else
473 			device_printf(dev, "387 emulator\n");
474 #else
475 		if (npx_ex16) {
476 			device_printf(dev, "INT 16 interface\n");
477 			if (flags & NPX_PREFER_EMULATOR) {
478 				device_printf(dev, "emulator requested, but none compiled "
479 				    "into kernel, using FPU\n");
480 			}
481 		} else
482 			device_printf(dev, "no 387 emulator in kernel and no FPU!\n");
483 #endif
484 	}
485 	npxinit(__INITIAL_NPXCW__);
486 
487 #ifdef I586_CPU_XXX
488 	if (cpu_class == CPUCLASS_586 && npx_ex16 && npx_exists &&
489 	    timezero("i586_bzero()", i586_bzero) <
490 	    timezero("bzero()", bzero) * 4 / 5) {
491 		if (!(flags & NPX_DISABLE_I586_OPTIMIZED_BCOPY)) {
492 			bcopy_vector = i586_bcopy;
493 			ovbcopy_vector = i586_bcopy;
494 		}
495 		if (!(flags & NPX_DISABLE_I586_OPTIMIZED_BZERO))
496 			bzero = i586_bzero;
497 		if (!(flags & NPX_DISABLE_I586_OPTIMIZED_COPYIO)) {
498 			copyin_vector = i586_copyin;
499 			copyout_vector = i586_copyout;
500 		}
501 	}
502 #endif
503 
504 	return (0);		/* XXX unused */
505 }
506 
507 /*
508  * Initialize floating point unit.
509  */
510 void
511 npxinit(control)
512 	u_short control;
513 {
514 	static union savefpu dummy;
515 	critical_t savecrit;
516 
517 	if (!npx_exists)
518 		return;
519 	/*
520 	 * fninit has the same h/w bugs as fnsave.  Use the detoxified
521 	 * fnsave to throw away any junk in the fpu.  npxsave() initializes
522 	 * the fpu and sets fpcurthread = NULL as important side effects.
523 	 */
524 	savecrit = cpu_critical_enter();
525 	npxsave(&dummy);
526 	stop_emulating();
527 #ifdef CPU_ENABLE_SSE
528 	/* XXX npxsave() doesn't actually initialize the fpu in the SSE case. */
529 	if (cpu_fxsr)
530 		fninit();
531 #endif
532 	fldcw(&control);
533 	if (PCPU_GET(curpcb) != NULL)
534 		fpusave(&PCPU_GET(curpcb)->pcb_save);
535 	start_emulating();
536 	cpu_critical_exit(savecrit);
537 }
538 
539 /*
540  * Free coprocessor (if we have it).
541  */
542 void
543 npxexit(td)
544 	struct thread *td;
545 {
546 	critical_t savecrit;
547 
548 	savecrit = cpu_critical_enter();
549 	if (td == PCPU_GET(fpcurthread))
550 		npxsave(&PCPU_GET(curpcb)->pcb_save);
551 	cpu_critical_exit(savecrit);
552 #ifdef NPX_DEBUG
553 	if (npx_exists) {
554 		u_int	masked_exceptions;
555 
556 		masked_exceptions = PCPU_GET(curpcb)->pcb_save.sv_87.sv_env.en_cw
557 		    & PCPU_GET(curpcb)->pcb_save.sv_87.sv_env.en_sw & 0x7f;
558 		/*
559 		 * Log exceptions that would have trapped with the old
560 		 * control word (overflow, divide by 0, and invalid operand).
561 		 */
562 		if (masked_exceptions & 0x0d)
563 			log(LOG_ERR,
564 	"pid %d (%s) exited with masked floating point exceptions 0x%02x\n",
565 			    td->td_proc->p_pid, td->td_proc->p_comm,
566 			    masked_exceptions);
567 	}
568 #endif
569 }
570 
571 /*
572  * The following mechanism is used to ensure that the FPE_... value
573  * that is passed as a trapcode to the signal handler of the user
574  * process does not have more than one bit set.
575  *
576  * Multiple bits may be set if the user process modifies the control
577  * word while a status word bit is already set.  While this is a sign
578  * of bad coding, we have no choice but to narrow them down to one
579  * bit, since we must not send a trapcode that is not exactly one of
580  * the FPE_ macros.
581  *
582  * The mechanism has a static table with 128 entries.  Each combination
583  * of the 7 FPU status word exception bits directly translates to a
584  * position in this table, where a single FPE_... value is stored.
585  * This FPE_... value stored there is considered the "most important"
586  * of the exception bits and will be sent as the signal code.  The
587  * precedence of the bits is based upon Intel Document "Numerical
588  * Applications", Chapter "Special Computational Situations".
589  *
590  * The macro to choose one of these values does these steps: 1) Throw
591  * away status word bits that cannot be masked.  2) Throw away the bits
592  * currently masked in the control word, assuming the user isn't
593  * interested in them anymore.  3) Reinsert status word bit 7 (stack
594  * fault) if it is set, which cannot be masked but must be preserved.
595  * 4) Use the remaining bits to point into the trapcode table.
596  *
597  * The 6 maskable bits in order of their preference, as stated in the
598  * above referenced Intel manual:
599  * 1  Invalid operation (FP_X_INV)
600  * 1a   Stack underflow
601  * 1b   Stack overflow
602  * 1c   Operand of unsupported format
603  * 1d   SNaN operand.
604  * 2  QNaN operand (not an exception, irrelevant here)
605  * 3  Any other invalid-operation not mentioned above or zero divide
606  *      (FP_X_INV, FP_X_DZ)
607  * 4  Denormal operand (FP_X_DNML)
608  * 5  Numeric over/underflow (FP_X_OFL, FP_X_UFL)
609  * 6  Inexact result (FP_X_IMP)
610  */
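/*
 * Worked example of the lookup done in npxtrap() (the values here are
 * hypothetical): with a status word of 0x0021 (INV | IMP) and a control
 * word whose low bits are 0x32 (DNML, UFL and IMP masked), step 2 keeps
 * only the unmasked bits, ~0x32 & 0x3f == 0x0d, step 3 re-inserts the
 * stack-fault bit to give the mask 0x4d, and step 4 indexes the table with
 * 0x0021 & 0x4d == 0x01, so FPE_FLTINV is returned and the masked inexact
 * exception is ignored.
 */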
611 static char fpetable[128] = {
612 	0,
613 	FPE_FLTINV,	/*  1 - INV */
614 	FPE_FLTUND,	/*  2 - DNML */
615 	FPE_FLTINV,	/*  3 - INV | DNML */
616 	FPE_FLTDIV,	/*  4 - DZ */
617 	FPE_FLTINV,	/*  5 - INV | DZ */
618 	FPE_FLTDIV,	/*  6 - DNML | DZ */
619 	FPE_FLTINV,	/*  7 - INV | DNML | DZ */
620 	FPE_FLTOVF,	/*  8 - OFL */
621 	FPE_FLTINV,	/*  9 - INV | OFL */
622 	FPE_FLTUND,	/*  A - DNML | OFL */
623 	FPE_FLTINV,	/*  B - INV | DNML | OFL */
624 	FPE_FLTDIV,	/*  C - DZ | OFL */
625 	FPE_FLTINV,	/*  D - INV | DZ | OFL */
626 	FPE_FLTDIV,	/*  E - DNML | DZ | OFL */
627 	FPE_FLTINV,	/*  F - INV | DNML | DZ | OFL */
628 	FPE_FLTUND,	/* 10 - UFL */
629 	FPE_FLTINV,	/* 11 - INV | UFL */
630 	FPE_FLTUND,	/* 12 - DNML | UFL */
631 	FPE_FLTINV,	/* 13 - INV | DNML | UFL */
632 	FPE_FLTDIV,	/* 14 - DZ | UFL */
633 	FPE_FLTINV,	/* 15 - INV | DZ | UFL */
634 	FPE_FLTDIV,	/* 16 - DNML | DZ | UFL */
635 	FPE_FLTINV,	/* 17 - INV | DNML | DZ | UFL */
636 	FPE_FLTOVF,	/* 18 - OFL | UFL */
637 	FPE_FLTINV,	/* 19 - INV | OFL | UFL */
638 	FPE_FLTUND,	/* 1A - DNML | OFL | UFL */
639 	FPE_FLTINV,	/* 1B - INV | DNML | OFL | UFL */
640 	FPE_FLTDIV,	/* 1C - DZ | OFL | UFL */
641 	FPE_FLTINV,	/* 1D - INV | DZ | OFL | UFL */
642 	FPE_FLTDIV,	/* 1E - DNML | DZ | OFL | UFL */
643 	FPE_FLTINV,	/* 1F - INV | DNML | DZ | OFL | UFL */
644 	FPE_FLTRES,	/* 20 - IMP */
645 	FPE_FLTINV,	/* 21 - INV | IMP */
646 	FPE_FLTUND,	/* 22 - DNML | IMP */
647 	FPE_FLTINV,	/* 23 - INV | DNML | IMP */
648 	FPE_FLTDIV,	/* 24 - DZ | IMP */
649 	FPE_FLTINV,	/* 25 - INV | DZ | IMP */
650 	FPE_FLTDIV,	/* 26 - DNML | DZ | IMP */
651 	FPE_FLTINV,	/* 27 - INV | DNML | DZ | IMP */
652 	FPE_FLTOVF,	/* 28 - OFL | IMP */
653 	FPE_FLTINV,	/* 29 - INV | OFL | IMP */
654 	FPE_FLTUND,	/* 2A - DNML | OFL | IMP */
655 	FPE_FLTINV,	/* 2B - INV | DNML | OFL | IMP */
656 	FPE_FLTDIV,	/* 2C - DZ | OFL | IMP */
657 	FPE_FLTINV,	/* 2D - INV | DZ | OFL | IMP */
658 	FPE_FLTDIV,	/* 2E - DNML | DZ | OFL | IMP */
659 	FPE_FLTINV,	/* 2F - INV | DNML | DZ | OFL | IMP */
660 	FPE_FLTUND,	/* 30 - UFL | IMP */
661 	FPE_FLTINV,	/* 31 - INV | UFL | IMP */
662 	FPE_FLTUND,	/* 32 - DNML | UFL | IMP */
663 	FPE_FLTINV,	/* 33 - INV | DNML | UFL | IMP */
664 	FPE_FLTDIV,	/* 34 - DZ | UFL | IMP */
665 	FPE_FLTINV,	/* 35 - INV | DZ | UFL | IMP */
666 	FPE_FLTDIV,	/* 36 - DNML | DZ | UFL | IMP */
667 	FPE_FLTINV,	/* 37 - INV | DNML | DZ | UFL | IMP */
668 	FPE_FLTOVF,	/* 38 - OFL | UFL | IMP */
669 	FPE_FLTINV,	/* 39 - INV | OFL | UFL | IMP */
670 	FPE_FLTUND,	/* 3A - DNML | OFL | UFL | IMP */
671 	FPE_FLTINV,	/* 3B - INV | DNML | OFL | UFL | IMP */
672 	FPE_FLTDIV,	/* 3C - DZ | OFL | UFL | IMP */
673 	FPE_FLTINV,	/* 3D - INV | DZ | OFL | UFL | IMP */
674 	FPE_FLTDIV,	/* 3E - DNML | DZ | OFL | UFL | IMP */
675 	FPE_FLTINV,	/* 3F - INV | DNML | DZ | OFL | UFL | IMP */
676 	FPE_FLTSUB,	/* 40 - STK */
677 	FPE_FLTSUB,	/* 41 - INV | STK */
678 	FPE_FLTUND,	/* 42 - DNML | STK */
679 	FPE_FLTSUB,	/* 43 - INV | DNML | STK */
680 	FPE_FLTDIV,	/* 44 - DZ | STK */
681 	FPE_FLTSUB,	/* 45 - INV | DZ | STK */
682 	FPE_FLTDIV,	/* 46 - DNML | DZ | STK */
683 	FPE_FLTSUB,	/* 47 - INV | DNML | DZ | STK */
684 	FPE_FLTOVF,	/* 48 - OFL | STK */
685 	FPE_FLTSUB,	/* 49 - INV | OFL | STK */
686 	FPE_FLTUND,	/* 4A - DNML | OFL | STK */
687 	FPE_FLTSUB,	/* 4B - INV | DNML | OFL | STK */
688 	FPE_FLTDIV,	/* 4C - DZ | OFL | STK */
689 	FPE_FLTSUB,	/* 4D - INV | DZ | OFL | STK */
690 	FPE_FLTDIV,	/* 4E - DNML | DZ | OFL | STK */
691 	FPE_FLTSUB,	/* 4F - INV | DNML | DZ | OFL | STK */
692 	FPE_FLTUND,	/* 50 - UFL | STK */
693 	FPE_FLTSUB,	/* 51 - INV | UFL | STK */
694 	FPE_FLTUND,	/* 52 - DNML | UFL | STK */
695 	FPE_FLTSUB,	/* 53 - INV | DNML | UFL | STK */
696 	FPE_FLTDIV,	/* 54 - DZ | UFL | STK */
697 	FPE_FLTSUB,	/* 55 - INV | DZ | UFL | STK */
698 	FPE_FLTDIV,	/* 56 - DNML | DZ | UFL | STK */
699 	FPE_FLTSUB,	/* 57 - INV | DNML | DZ | UFL | STK */
700 	FPE_FLTOVF,	/* 58 - OFL | UFL | STK */
701 	FPE_FLTSUB,	/* 59 - INV | OFL | UFL | STK */
702 	FPE_FLTUND,	/* 5A - DNML | OFL | UFL | STK */
703 	FPE_FLTSUB,	/* 5B - INV | DNML | OFL | UFL | STK */
704 	FPE_FLTDIV,	/* 5C - DZ | OFL | UFL | STK */
705 	FPE_FLTSUB,	/* 5D - INV | DZ | OFL | UFL | STK */
706 	FPE_FLTDIV,	/* 5E - DNML | DZ | OFL | UFL | STK */
707 	FPE_FLTSUB,	/* 5F - INV | DNML | DZ | OFL | UFL | STK */
708 	FPE_FLTRES,	/* 60 - IMP | STK */
709 	FPE_FLTSUB,	/* 61 - INV | IMP | STK */
710 	FPE_FLTUND,	/* 62 - DNML | IMP | STK */
711 	FPE_FLTSUB,	/* 63 - INV | DNML | IMP | STK */
712 	FPE_FLTDIV,	/* 64 - DZ | IMP | STK */
713 	FPE_FLTSUB,	/* 65 - INV | DZ | IMP | STK */
714 	FPE_FLTDIV,	/* 66 - DNML | DZ | IMP | STK */
715 	FPE_FLTSUB,	/* 67 - INV | DNML | DZ | IMP | STK */
716 	FPE_FLTOVF,	/* 68 - OFL | IMP | STK */
717 	FPE_FLTSUB,	/* 69 - INV | OFL | IMP | STK */
718 	FPE_FLTUND,	/* 6A - DNML | OFL | IMP | STK */
719 	FPE_FLTSUB,	/* 6B - INV | DNML | OFL | IMP | STK */
720 	FPE_FLTDIV,	/* 6C - DZ | OFL | IMP | STK */
721 	FPE_FLTSUB,	/* 6D - INV | DZ | OFL | IMP | STK */
722 	FPE_FLTDIV,	/* 6E - DNML | DZ | OFL | IMP | STK */
723 	FPE_FLTSUB,	/* 6F - INV | DNML | DZ | OFL | IMP | STK */
724 	FPE_FLTUND,	/* 70 - UFL | IMP | STK */
725 	FPE_FLTSUB,	/* 71 - INV | UFL | IMP | STK */
726 	FPE_FLTUND,	/* 72 - DNML | UFL | IMP | STK */
727 	FPE_FLTSUB,	/* 73 - INV | DNML | UFL | IMP | STK */
728 	FPE_FLTDIV,	/* 74 - DZ | UFL | IMP | STK */
729 	FPE_FLTSUB,	/* 75 - INV | DZ | UFL | IMP | STK */
730 	FPE_FLTDIV,	/* 76 - DNML | DZ | UFL | IMP | STK */
731 	FPE_FLTSUB,	/* 77 - INV | DNML | DZ | UFL | IMP | STK */
732 	FPE_FLTOVF,	/* 78 - OFL | UFL | IMP | STK */
733 	FPE_FLTSUB,	/* 79 - INV | OFL | UFL | IMP | STK */
734 	FPE_FLTUND,	/* 7A - DNML | OFL | UFL | IMP | STK */
735 	FPE_FLTSUB,	/* 7B - INV | DNML | OFL | UFL | IMP | STK */
736 	FPE_FLTDIV,	/* 7C - DZ | OFL | UFL | IMP | STK */
737 	FPE_FLTSUB,	/* 7D - INV | DZ | OFL | UFL | IMP | STK */
738 	FPE_FLTDIV,	/* 7E - DNML | DZ | OFL | UFL | IMP | STK */
739 	FPE_FLTSUB,	/* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */
740 };
741 
742 /*
743  * Preserve the FP status word, clear FP exceptions, then generate a SIGFPE.
744  *
745  * Clearing exceptions is necessary mainly to avoid IRQ13 bugs.  We now
746  * depend on longjmp() restoring a usable state.  Restoring the state
747  * or examining it might fail if we didn't clear exceptions.
748  *
749  * The error code chosen will be one of the FPE_... macros. It will be
750  * sent as the second argument to old BSD-style signal handlers and as
751  * "siginfo_t->si_code" (second argument) to SA_SIGINFO signal handlers.
752  *
753  * XXX the FP state is not preserved across signal handlers.  So signal
754  * handlers cannot afford to do FP unless they preserve the state or
755  * longjmp() out.  Both preserving the state and longjmp()ing may be
756  * destroyed by IRQ13 bugs.  Clearing FP exceptions is not an acceptable
757  * solution for signals other than SIGFPE.
758  */
759 int
760 npxtrap()
761 {
762 	critical_t savecrit;
763 	u_short control, status;
764 	u_long *exstat;
765 
766 	if (!npx_exists) {
767 		printf("npxtrap: fpcurthread = %p, curthread = %p, npx_exists = %d\n",
768 		       PCPU_GET(fpcurthread), curthread, npx_exists);
769 		panic("npxtrap from nowhere");
770 	}
771 	savecrit = cpu_critical_enter();
772 
773 	/*
774 	 * Interrupt handling (for another interrupt) may have pushed the
775 	 * state to memory.  Fetch the relevant parts of the state from
776 	 * wherever they are.
777 	 */
778 	if (PCPU_GET(fpcurthread) != curthread) {
779 		control = GET_FPU_CW(curthread);
780 		status = GET_FPU_SW(curthread);
781 	} else {
782 		fnstcw(&control);
783 		fnstsw(&status);
784 	}
785 
786 	exstat = GET_FPU_EXSW_PTR(curthread->td_pcb);
787 	*exstat = status;
788 	if (PCPU_GET(fpcurthread) != curthread)
789 		GET_FPU_SW(curthread) &= ~0x80bf;
790 	else
791 		fnclex();
792 	cpu_critical_exit(savecrit);
793 	return (fpetable[status & ((~control & 0x3f) | 0x40)]);
794 }
795 
796 /*
797  * Implement device not available (DNA) exception
798  *
799  * It would be better to switch FP context here (if curthread != fpcurthread)
800  * and not necessarily for every context switch, but it is too hard to
801  * access foreign pcb's.
802  */
803 int
804 npxdna()
805 {
806 	u_long *exstat;
807 	critical_t s;
808 
809 	if (!npx_exists)
810 		return (0);
811 	if (PCPU_GET(fpcurthread) != NULL) {
812 		printf("npxdna: fpcurthread = %p, curthread = %p\n",
813 		       PCPU_GET(fpcurthread), curthread);
814 		panic("npxdna");
815 	}
816 	s = cpu_critical_enter();
817 	stop_emulating();
818 	/*
819 	 * Record new context early in case frstor causes an IRQ13.
820 	 */
821 	PCPU_SET(fpcurthread, curthread);
822 
823 	exstat = GET_FPU_EXSW_PTR(PCPU_GET(curpcb));
824 	*exstat = 0;
825 	/*
826 	 * The following frstor may cause an IRQ13 when the state being
827 	 * restored has a pending error.  The error will appear to have been
828 	 * triggered by the current (npx) user instruction even when that
829 	 * instruction is a no-wait instruction that should not trigger an
830 	 * error (e.g., fnclex).  On at least one 486 system all of the
831 	 * no-wait instructions are broken the same as frstor, so our
832 	 * treatment does not amplify the breakage.  On at least one
833 	 * 386/Cyrix 387 system, fnclex works correctly while frstor and
834 	 * fnsave are broken, so our treatment breaks fnclex if it is the
835 	 * first FPU instruction after a context switch.
836 	 */
837 	fpurstor(&PCPU_GET(curpcb)->pcb_save);
838 	cpu_critical_exit(s);
839 
840 	return (1);
841 }
842 
843 /*
844  * Wrapper for fnsave instruction, partly to handle hardware bugs.  When npx
845  * exceptions are reported via IRQ13, spurious IRQ13's may be triggered by
846  * no-wait npx instructions.  See the Intel application note AP-578 for
847  * details.  This doesn't cause any additional complications here.  IRQ13's
848  * are inherently asynchronous unless the CPU is frozen to deliver them --
849  * one that started in userland may be delivered many instructions later,
850  * after the process has entered the kernel.  It may even be delivered after
851  * the fnsave here completes.  A spurious IRQ13 for the fnsave is handled in
852  * the same way as a very-late-arriving non-spurious IRQ13 from user mode:
853  * it is normally ignored at first because we set fpcurthread to NULL; it is
854  * normally retriggered in npxdna() after return to user mode.
855  *
856  * npxsave() must be called with interrupts disabled, so that it clears
857  * fpcurthread atomically with saving the state.  We require callers to do the
858  * disabling, since most callers need to disable interrupts anyway to call
859  * npxsave() atomically with checking fpcurthread.
860  *
861  * A previous version of npxsave() went to great lengths to excecute fnsave
862  * with interrupts enabled in case executing it froze the CPU.  This case
863  * can't happen, at least for Intel CPU/NPX's.  Spurious IRQ13's don't imply
864  * spurious freezes.
865  */
866 void
867 npxsave(addr)
868 	union savefpu *addr;
869 {
870 
871 	stop_emulating();
872 	fpusave(addr);
873 
874 	start_emulating();
875 	PCPU_SET(fpcurthread, NULL);
876 }
877 
878 static void
879 fpusave(addr)
880 	union savefpu *addr;
881 {
882 
883 #ifdef CPU_ENABLE_SSE
884 	if (cpu_fxsr)
885 		fxsave(addr);
886 	else
887 #endif
888 		fnsave(addr);
889 }
890 
891 static void
892 fpurstor(addr)
893 	union savefpu *addr;
894 {
895 
896 #ifdef CPU_ENABLE_SSE
897 	if (cpu_fxsr)
898 		fxrstor(addr);
899 	else
900 #endif
901 		frstor(addr);
902 }
903 
904 #ifdef I586_CPU_XXX
905 static long
906 timezero(funcname, func)
907 	const char *funcname;
908 	void (*func) __P((void *buf, size_t len));
909 
910 {
911 	void *buf;
912 #define	BUFSIZE		1048576
913 	long usec;
914 	struct timeval finish, start;
915 
916 	buf = malloc(BUFSIZE, M_TEMP, M_NOWAIT);
917 	if (buf == NULL)
918 		return (BUFSIZE);
919 	microtime(&start);
920 	(*func)(buf, BUFSIZE);
921 	microtime(&finish);
922 	usec = 1000000 * (finish.tv_sec - start.tv_sec) +
923 	    finish.tv_usec - start.tv_usec;
924 	if (usec <= 0)
925 		usec = 1;
926 	if (bootverbose)
927 		printf("%s bandwidth = %u kBps\n", funcname,
928 		    (u_int32_t)(((BUFSIZE >> 10) * 1000000) / usec));
929 	free(buf, M_TEMP);
930 	return (usec);
931 }
932 #endif /* I586_CPU */
933 
934 static device_method_t npx_methods[] = {
935 	/* Device interface */
936 	DEVMETHOD(device_identify,	npx_identify),
937 	DEVMETHOD(device_probe,		npx_probe),
938 	DEVMETHOD(device_attach,	npx_attach),
939 	DEVMETHOD(device_detach,	bus_generic_detach),
940 	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
941 	DEVMETHOD(device_suspend,	bus_generic_suspend),
942 	DEVMETHOD(device_resume,	bus_generic_resume),
943 
944 	{ 0, 0 }
945 };
946 
947 static driver_t npx_driver = {
948 	"npx",
949 	npx_methods,
950 	1,			/* no softc */
951 };
952 
953 static devclass_t npx_devclass;
954 
955 /*
956  * We prefer to attach to the root nexus so that the usual case (exception 16)
957  * doesn't describe the processor as being `on isa'.
958  */
959 DRIVER_MODULE(npx, nexus, npx_driver, npx_devclass, 0, 0);
960 
961 #ifdef DEV_ISA
962 /*
963  * This sucks up the legacy ISA support assignments from PNPBIOS/ACPI.
964  */
965 static struct isa_pnp_id npxisa_ids[] = {
966 	{ 0x040cd041, "Legacy ISA coprocessor support" }, /* PNP0C04 */
967 	{ 0 }
968 };
969 
970 static int
971 npxisa_probe(device_t dev)
972 {
973 	int result;
974 	if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev, npxisa_ids)) <= 0) {
975 		device_quiet(dev);
976 	}
977 	return(result);
978 }
979 
980 static int
981 npxisa_attach(device_t dev)
982 {
983 	return (0);
984 }
985 
986 static device_method_t npxisa_methods[] = {
987 	/* Device interface */
988 	DEVMETHOD(device_probe,		npxisa_probe),
989 	DEVMETHOD(device_attach,	npxisa_attach),
990 	DEVMETHOD(device_detach,	bus_generic_detach),
991 	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
992 	DEVMETHOD(device_suspend,	bus_generic_suspend),
993 	DEVMETHOD(device_resume,	bus_generic_resume),
994 
995 	{ 0, 0 }
996 };
997 
998 static driver_t npxisa_driver = {
999 	"npxisa",
1000 	npxisa_methods,
1001 	1,			/* no softc */
1002 };
1003 
1004 static devclass_t npxisa_devclass;
1005 
1006 DRIVER_MODULE(npxisa, isa, npxisa_driver, npxisa_devclass, 0, 0);
1007 #ifndef PC98
1008 DRIVER_MODULE(npxisa, acpi, npxisa_driver, npxisa_devclass, 0, 0);
1009 #endif
1010 #endif /* DEV_ISA */
1011