xref: /freebsd/sys/amd64/amd64/fpu.c (revision 17d6c636720d00f77e5d098daf4c278f89d84f7b)
1 /*-
2  * Copyright (c) 1990 William Jolitz.
3  * Copyright (c) 1991 The Regents of the University of California.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by the University of
17  *	California, Berkeley and its contributors.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	from: @(#)npx.c	7.2 (Berkeley) 5/12/91
35  * $FreeBSD$
36  */
37 
38 #include "opt_cpu.h"
39 #include "opt_debug_npx.h"
40 #include "opt_math_emulate.h"
41 #include "opt_npx.h"
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/bus.h>
46 #include <sys/kernel.h>
47 #include <sys/lock.h>
48 #include <sys/malloc.h>
49 #include <sys/module.h>
50 #include <sys/mutex.h>
52 #include <sys/proc.h>
53 #include <sys/sysctl.h>
54 #include <machine/bus.h>
55 #include <sys/rman.h>
56 #ifdef NPX_DEBUG
57 #include <sys/syslog.h>
58 #endif
59 #include <sys/signalvar.h>
60 #include <sys/user.h>
61 
62 #ifndef SMP
63 #include <machine/asmacros.h>
64 #endif
65 #include <machine/cputypes.h>
66 #include <machine/frame.h>
67 #include <machine/md_var.h>
68 #include <machine/pcb.h>
69 #include <machine/psl.h>
70 #ifndef SMP
71 #include <machine/clock.h>
72 #endif
73 #include <machine/resource.h>
74 #include <machine/specialreg.h>
75 #include <machine/segments.h>
76 
77 #ifndef SMP
78 #include <i386/isa/icu.h>
79 #ifdef PC98
80 #include <pc98/pc98/pc98.h>
81 #else
82 #include <i386/isa/isa.h>
83 #endif
84 #endif
85 #include <i386/isa/intr_machdep.h>
86 #include <isa/isavar.h>
87 
88 /*
89  * 387 and 287 Numeric Coprocessor Extension (NPX) Driver.
90  */
91 
92 /* Configuration flags. */
93 #define	NPX_DISABLE_I586_OPTIMIZED_BCOPY	(1 << 0)
94 #define	NPX_DISABLE_I586_OPTIMIZED_BZERO	(1 << 1)
95 #define	NPX_DISABLE_I586_OPTIMIZED_COPYIO	(1 << 2)
96 #define	NPX_PREFER_EMULATOR			(1 << 3)
97 
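/*
 * Illustrative note (not from the original source): npx_attach() below picks
 * these flags up from the "flags" value configured for the npx0 device, so a
 * kernel config line roughly like
 *
 *	device	npx0	at nexus? port IO_NPX irq 13 flags 0x8
 *
 * would request NPX_PREFER_EMULATOR; the exact syntax depends on the config
 * mechanism in use.
 */
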
98 #ifdef	__GNUC__
99 
100 #define	fldcw(addr)		__asm("fldcw %0" : : "m" (*(addr)))
101 #define	fnclex()		__asm("fnclex")
102 #define	fninit()		__asm("fninit")
103 #define	fnsave(addr)		__asm __volatile("fnsave %0" : "=m" (*(addr)))
104 #define	fnstcw(addr)		__asm __volatile("fnstcw %0" : "=m" (*(addr)))
105 #define	fnstsw(addr)		__asm __volatile("fnstsw %0" : "=m" (*(addr)))
106 #define	fp_divide_by_0()	__asm("fldz; fld1; fdiv %st,%st(1); fnop")
107 #define	frstor(addr)		__asm("frstor %0" : : "m" (*(addr)))
108 #ifdef CPU_ENABLE_SSE
109 #define	fxrstor(addr)		__asm("fxrstor %0" : : "m" (*(addr)))
110 #define	fxsave(addr)		__asm __volatile("fxsave %0" : "=m" (*(addr)))
111 #endif
112 #define	start_emulating()	__asm("smsw %%ax; orb %0,%%al; lmsw %%ax" \
113 				      : : "n" (CR0_TS) : "ax")
114 #define	stop_emulating()	__asm("clts")
115 
116 #else	/* not __GNUC__ */
117 
118 void	fldcw		__P((caddr_t addr));
119 void	fnclex		__P((void));
120 void	fninit		__P((void));
121 void	fnsave		__P((caddr_t addr));
122 void	fnstcw		__P((caddr_t addr));
123 void	fnstsw		__P((caddr_t addr));
124 void	fp_divide_by_0	__P((void));
125 void	frstor		__P((caddr_t addr));
126 #ifdef CPU_ENABLE_SSE
127 void	fxsave		__P((caddr_t addr));
128 void	fxrstor		__P((caddr_t addr));
129 #endif
130 void	start_emulating	__P((void));
131 void	stop_emulating	__P((void));
132 
133 #endif	/* __GNUC__ */
134 
135 #ifdef CPU_ENABLE_SSE
136 #define GET_FPU_CW(thread) \
137 	(cpu_fxsr ? \
138 		(thread)->td_pcb->pcb_save.sv_xmm.sv_env.en_cw : \
139 		(thread)->td_pcb->pcb_save.sv_87.sv_env.en_cw)
140 #define GET_FPU_SW(thread) \
141 	(cpu_fxsr ? \
142 		(thread)->td_pcb->pcb_save.sv_xmm.sv_env.en_sw : \
143 		(thread)->td_pcb->pcb_save.sv_87.sv_env.en_sw)
144 #define GET_FPU_EXSW_PTR(pcb) \
145 	(cpu_fxsr ? \
146 		&(pcb)->pcb_save.sv_xmm.sv_ex_sw : \
147 		&(pcb)->pcb_save.sv_87.sv_ex_sw)
148 #else /* CPU_ENABLE_SSE */
149 #define GET_FPU_CW(thread) \
150 	(thread->td_pcb->pcb_save.sv_87.sv_env.en_cw)
151 #define GET_FPU_SW(thread) \
152 	(thread->td_pcb->pcb_save.sv_87.sv_env.en_sw)
153 #define GET_FPU_EXSW_PTR(pcb) \
154 	(&(pcb)->pcb_save.sv_87.sv_ex_sw)
155 #endif /* CPU_ENABLE_SSE */
156 
157 typedef u_char bool_t;
158 
159 static	int	npx_attach	__P((device_t dev));
160 static	void	npx_identify	__P((driver_t *driver, device_t parent));
161 #ifndef SMP
162 static	void	npx_intr	__P((void *));
163 #endif
164 static	int	npx_probe	__P((device_t dev));
165 static	void	fpusave		__P((union savefpu *));
166 static	void	fpurstor	__P((union savefpu *));
167 #ifdef I586_CPU_XXX
168 static	long	timezero	__P((const char *funcname,
169 				     void (*func)(void *buf, size_t len)));
170 #endif /* I586_CPU_XXX */
171 
172 int	hw_float;		/* XXX currently just an alias for npx_exists */
173 
174 SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint,
175 	CTLFLAG_RD, &hw_float, 0,
176 	"Floating point instructions executed in hardware");
177 
178 #ifndef SMP
179 static	volatile u_int		npx_intrs_while_probing;
180 static	volatile u_int		npx_traps_while_probing;
181 #endif
182 
183 static	bool_t			npx_ex16;
184 static	bool_t			npx_exists;
185 static	bool_t			npx_irq13;
186 
187 #ifndef SMP
188 alias_for_inthand_t probetrap;
189 __asm("								\n\
190 	.text							\n\
191 	.p2align 2,0x90						\n\
192 	.type	" __XSTRING(CNAME(probetrap)) ",@function	\n\
193 " __XSTRING(CNAME(probetrap)) ":				\n\
194 	ss							\n\
195 	incl	" __XSTRING(CNAME(npx_traps_while_probing)) "	\n\
196 	fnclex							\n\
197 	iret							\n\
198 ");
199 #endif /* !SMP */
200 
201 /*
202  * Identify routine.  Create a connection point on our parent for probing.
203  */
204 static void
205 npx_identify(driver, parent)
206 	driver_t *driver;
207 	device_t parent;
208 {
209 	device_t child;
210 
211 	child = BUS_ADD_CHILD(parent, 0, "npx", 0);
212 	if (child == NULL)
213 		panic("npx_identify");
214 }
215 
216 #ifndef SMP
217 /*
218  * Do minimal handling of npx interrupts to convert them to traps.
219  */
220 static void
221 npx_intr(dummy)
222 	void *dummy;
223 {
224 	struct thread *td;
225 
226 #ifndef SMP
227 	npx_intrs_while_probing++;
228 #endif
229 
230 	/*
231 	 * The BUSY# latch must be cleared in all cases so that the next
232 	 * unmasked npx exception causes an interrupt.
233 	 */
234 #ifdef PC98
235 	outb(0xf8, 0);
236 #else
237 	outb(0xf0, 0);
238 #endif
239 
240 	/*
241 	 * fpcurthread is normally non-null here.  In that case, schedule an
242 	 * AST to finish the exception handling in the correct context
243 	 * (this interrupt may occur after the thread has entered the
244 	 * kernel via a syscall or an interrupt).  Otherwise, the npx
245 	 * state of the thread that caused this interrupt must have been
246 	 * pushed to the thread's pcb, and clearing of the busy latch
247 	 * above has finished the (essentially null) handling of this
248 	 * interrupt.  Control will eventually return to the instruction
249 	 * that caused it and it will repeat.  We will eventually (usually
250 	 * soon) win the race to handle the interrupt properly.
251 	 */
252 	td = PCPU_GET(fpcurthread);
253 	if (td != NULL) {
254 		td->td_pcb->pcb_flags |= PCB_NPXTRAP;
255 		mtx_lock_spin(&sched_lock);
256 		td->td_kse->ke_flags |= KEF_ASTPENDING;
257 		mtx_unlock_spin(&sched_lock);
258 	}
259 }
260 #endif /* !SMP */
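
#if 0
/*
 * Rough sketch (illustration only, never compiled) of how the AST path is
 * expected to consume the PCB_NPXTRAP flag set above; the real code lives in
 * the generic trap/ast layer and its exact shape (and variable names) may
 * differ from this:
 */
	if (td->td_pcb->pcb_flags & PCB_NPXTRAP) {
		td->td_pcb->pcb_flags &= ~PCB_NPXTRAP;
		ucode = npxtrap();		/* FPE_... code from fpetable below */
		trapsignal(td->td_proc, SIGFPE, ucode);
	}
#endif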
261 
262 /*
263  * Probe routine.  Initialize cr0 to give correct behaviour for [f]wait
264  * whether the device exists or not (XXX should be elsewhere).  Set flags
265  * to tell npx_attach() what to do.  Modify device struct if npx doesn't
266  * need to use interrupts.  Return 0 if device exists.
267  */
268 static int
269 npx_probe(dev)
270 	device_t dev;
271 {
272 #ifndef SMP
273 	struct gate_descriptor save_idt_npxtrap;
274 	struct resource *ioport_res, *irq_res;
275 	void *irq_cookie;
276 	int ioport_rid, irq_num, irq_rid;
277 	u_short control;
278 	u_short status;
279 
280 	save_idt_npxtrap = idt[16];
281 	setidt(16, probetrap, SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
282 	ioport_rid = 0;
283 	ioport_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &ioport_rid,
284 	    IO_NPX, IO_NPX, IO_NPXSIZE, RF_ACTIVE);
285 	if (ioport_res == NULL)
286 		panic("npx: can't get ports");
287 #ifdef PC98
288 	if (resource_int_value("npx", 0, "irq", &irq_num) != 0)
289 		irq_num = 8;
290 #else
291 	if (resource_int_value("npx", 0, "irq", &irq_num) != 0)
292 		irq_num = 13;
293 #endif
294 	irq_rid = 0;
295 	irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &irq_rid, irq_num,
296 	    irq_num, 1, RF_ACTIVE);
297 	if (irq_res == NULL)
298 		panic("npx: can't get IRQ");
299 	if (bus_setup_intr(dev, irq_res, INTR_TYPE_MISC | INTR_FAST, npx_intr,
300 	    NULL, &irq_cookie) != 0)
301 		panic("npx: can't create intr");
302 #endif /* !SMP */
303 
304 	/*
305 	 * Partially reset the coprocessor, if any.  Some BIOSes don't reset
306 	 * it after a warm boot.
307 	 */
308 #ifdef PC98
309 	outb(0xf8, 0);
310 #else
311 	outb(0xf1, 0);		/* full reset on some systems, NOP on others */
312 	outb(0xf0, 0);		/* clear BUSY# latch */
313 #endif
314 	/*
315 	 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
316 	 * instructions.  We must set the CR0_MP bit and use the CR0_TS
317 	 * bit to control the trap, because setting the CR0_EM bit does
318 	 * not cause WAIT instructions to trap.  It's important to trap
319 	 * WAIT instructions - otherwise the "wait" variants of no-wait
320 	 * control instructions would degenerate to the "no-wait" variants
321 	 * after FP context switches but work correctly otherwise.  It's
322 	 * particularly important to trap WAITs when there is no NPX -
323 	 * otherwise the "wait" variants would always degenerate.
324 	 *
325 	 * Try setting CR0_NE to get correct error reporting on 486DX's.
326 	 * Setting it should fail or do nothing on lesser processors.
327 	 */
328 	load_cr0(rcr0() | CR0_MP | CR0_NE);
329 	/*
330 	 * But don't trap while we're probing.
331 	 */
332 	stop_emulating();
333 	/*
334 	 * Finish resetting the coprocessor, if any.  If there is an error
335 	 * pending, then we may get a bogus IRQ13, but npx_intr() will handle
336 	 * it OK.  Bogus halts have never been observed, but we enabled
337 	 * IRQ13 and cleared the BUSY# latch early to handle them anyway.
338 	 */
339 	fninit();
340 
341 	device_set_desc(dev, "math processor");
342 
343 #ifdef SMP
344 
345 	/*
346 	 * Exception 16 MUST work for SMP.
347 	 */
348 	npx_ex16 = hw_float = npx_exists = 1;
349 	return (0);
350 
351 #else /* !SMP */
352 
353 	/*
354 	 * Don't use fwait here because it might hang.
355 	 * Don't use fnop here because it usually hangs if there is no FPU.
356 	 */
357 	DELAY(1000);		/* wait for any IRQ13 */
358 #ifdef DIAGNOSTIC
359 	if (npx_intrs_while_probing != 0)
360 		printf("fninit caused %u bogus npx interrupt(s)\n",
361 		       npx_intrs_while_probing);
362 	if (npx_traps_while_probing != 0)
363 		printf("fninit caused %u bogus npx trap(s)\n",
364 		       npx_traps_while_probing);
365 #endif
366 	/*
367 	 * Check for a status of mostly zero.
368 	 */
369 	status = 0x5a5a;
370 	fnstsw(&status);
371 	if ((status & 0xb8ff) == 0) {
372 		/*
373 		 * Good, now check for a proper control word.
374 		 */
375 		control = 0x5a5a;
376 		fnstcw(&control);
377 		if ((control & 0x1f3f) == 0x033f) {
378 			hw_float = npx_exists = 1;
379 			/*
380 			 * We have an npx, now divide by 0 to see if exception
381 			 * 16 works.
382 			 */
383 			control &= ~(1 << 2);	/* enable divide by 0 trap */
384 			fldcw(&control);
385 #ifdef FPU_ERROR_BROKEN
386 			/*
387 			 * FPU error signal doesn't work on some CPU
388 			 * accelerator board.
389 			 */
390 			npx_ex16 = 1;
391 			return (0);
392 #endif
393 			npx_traps_while_probing = npx_intrs_while_probing = 0;
394 			fp_divide_by_0();
395 			if (npx_traps_while_probing != 0) {
396 				/*
397 				 * Good, exception 16 works.
398 				 */
399 				npx_ex16 = 1;
400 				goto no_irq13;
401 			}
402 			if (npx_intrs_while_probing != 0) {
403 				/*
404 				 * Bad, we are stuck with IRQ13.
405 				 */
406 				npx_irq13 = 1;
407 				idt[16] = save_idt_npxtrap;
408 				return (0);
409 			}
410 			/*
411 			 * Worse, even IRQ13 is broken.  Use emulator.
412 			 */
413 		}
414 	}
415 	/*
416 	 * Probe failed, but we want to get to npxattach to initialize the
417 	 * emulator and say that it has been installed.  XXX handle devices
418 	 * that aren't really devices better.
419 	 */
420 	/* FALLTHROUGH */
421 no_irq13:
422 	idt[16] = save_idt_npxtrap;
423 	bus_teardown_intr(dev, irq_res, irq_cookie);
424 
425 	/*
426 	 * XXX hack around brokenness of bus_teardown_intr().  If we left the
427 	 * irq active then we would get it instead of exception 16.
428 	 */
429 	mtx_lock_spin(&icu_lock);
430 	INTRDIS(1 << irq_num);
431 	mtx_unlock_spin(&icu_lock);
432 
433 	bus_release_resource(dev, SYS_RES_IRQ, irq_rid, irq_res);
434 	bus_release_resource(dev, SYS_RES_IOPORT, ioport_rid, ioport_res);
435 	return (0);
436 
437 #endif /* SMP */
438 }
439 
440 /*
441  * Attach routine - announce which interface is in use, and wire into the system
442  */
443 int
444 npx_attach(dev)
445 	device_t dev;
446 {
447 	int flags;
448 
449 	if (resource_int_value("npx", 0, "flags", &flags) != 0)
450 		flags = 0;
451 
452 	if (flags)
453 		device_printf(dev, "flags 0x%x ", flags);
454 	if (npx_irq13) {
455 		device_printf(dev, "using IRQ 13 interface\n");
456 	} else {
457 #if defined(MATH_EMULATE) || defined(GPL_MATH_EMULATE)
458 		if (npx_ex16) {
459 			if (!(flags & NPX_PREFER_EMULATOR))
460 				device_printf(dev, "INT 16 interface\n");
461 			else {
462 				device_printf(dev, "FPU exists, but flags request "
463 				    "emulator\n");
464 				hw_float = npx_exists = 0;
465 			}
466 		} else if (npx_exists) {
467 			device_printf(dev, "error reporting broken; using 387 emulator\n");
468 			hw_float = npx_exists = 0;
469 		} else
470 			device_printf(dev, "387 emulator\n");
471 #else
472 		if (npx_ex16) {
473 			device_printf(dev, "INT 16 interface\n");
474 			if (flags & NPX_PREFER_EMULATOR) {
475 				device_printf(dev, "emulator requested, but none compiled "
476 				    "into kernel, using FPU\n");
477 			}
478 		} else
479 			device_printf(dev, "no 387 emulator in kernel and no FPU!\n");
480 #endif
481 	}
482 	npxinit(__INITIAL_NPXCW__);
483 
484 #ifdef I586_CPU_XXX
485 	if (cpu_class == CPUCLASS_586 && npx_ex16 && npx_exists &&
486 	    timezero("i586_bzero()", i586_bzero) <
487 	    timezero("bzero()", bzero) * 4 / 5) {
488 		if (!(flags & NPX_DISABLE_I586_OPTIMIZED_BCOPY)) {
489 			bcopy_vector = i586_bcopy;
490 			ovbcopy_vector = i586_bcopy;
491 		}
492 		if (!(flags & NPX_DISABLE_I586_OPTIMIZED_BZERO))
493 			bzero = i586_bzero;
494 		if (!(flags & NPX_DISABLE_I586_OPTIMIZED_COPYIO)) {
495 			copyin_vector = i586_copyin;
496 			copyout_vector = i586_copyout;
497 		}
498 	}
499 #endif
500 
501 	return (0);		/* XXX unused */
502 }
503 
504 /*
505  * Initialize floating point unit.
506  */
507 void
508 npxinit(control)
509 	u_short control;
510 {
511 	static union savefpu dummy;
512 	critical_t savecrit;
513 
514 	if (!npx_exists)
515 		return;
516 	/*
517 	 * fninit has the same h/w bugs as fnsave.  Use the detoxified
518 	 * fnsave to throw away any junk in the fpu.  npxsave() initializes
519 	 * the fpu and sets fpcurthread = NULL as important side effects.
520 	 */
521 	savecrit = cpu_critical_enter();
522 	npxsave(&dummy);
523 	stop_emulating();
524 #ifdef CPU_ENABLE_SSE
525 	/* XXX npxsave() doesn't actually initialize the fpu in the SSE case. */
526 	if (cpu_fxsr)
527 		fninit();
528 #endif
529 	fldcw(&control);
530 	if (PCPU_GET(curpcb) != NULL)
531 		fpusave(&PCPU_GET(curpcb)->pcb_save);
532 	start_emulating();
533 	cpu_critical_exit(savecrit);
534 }
535 
536 /*
537  * Free coprocessor (if we have it).
538  */
539 void
540 npxexit(td)
541 	struct thread *td;
542 {
543 	critical_t savecrit;
544 
545 	savecrit = cpu_critical_enter();
546 	if (td == PCPU_GET(fpcurthread))
547 		npxsave(&PCPU_GET(curpcb)->pcb_save);
548 	cpu_critical_exit(savecrit);
549 #ifdef NPX_DEBUG
550 	if (npx_exists) {
551 		u_int	masked_exceptions;
552 
553 		masked_exceptions = PCPU_GET(curpcb)->pcb_save.sv_87.sv_env.en_cw
554 		    & PCPU_GET(curpcb)->pcb_save.sv_87.sv_env.en_sw & 0x7f;
555 		/*
556 		 * Log exceptions that would have trapped with the old
557 		 * control word (overflow, divide by 0, and invalid operand).
558 		 */
559 		if (masked_exceptions & 0x0d)
560 			log(LOG_ERR,
561 	"pid %d (%s) exited with masked floating point exceptions 0x%02x\n",
562 			    td->td_proc->p_pid, td->td_proc->p_comm,
563 			    masked_exceptions);
564 	}
565 #endif
566 }
567 
568 /*
569  * The following mechanism is used to ensure that the FPE_... value
570  * that is passed as a trapcode to the signal handler of the user
571  * process does not have more than one bit set.
572  *
573  * Multiple bits may be set if the user process modifies the control
574  * word while a status word bit is already set.  While this is a sign
575  * of bad coding, we have no choice but to narrow them down to one
576  * bit, since we must not send a trapcode that is not exactly one of
577  * the FPE_ macros.
578  *
579  * The mechanism has a static table with 128 entries.  Each combination
580  * of the 7 FPU status word exception bits directly translates to a
581  * position in this table, where a single FPE_... value is stored.
582  * This FPE_... value stored there is considered the "most important"
583  * of the exception bits and will be sent as the signal code.  The
584  * precedence of the bits is based upon Intel Document "Numerical
585  * Applications", Chapter "Special Computational Situations".
586  *
587  * The macro to choose one of these values does these steps: 1) Throw
588  * away status word bits that cannot be masked.  2) Throw away the bits
589  * currently masked in the control word, assuming the user isn't
590  * interested in them anymore.  3) Reinsert status word bit 7 (stack
591  * fault) if it is set, which cannot be masked but must be preserved.
592  * 4) Use the remaining bits to point into the trapcode table.
593  *
594  * The 6 maskable bits in order of their preference, as stated in the
595  * above referenced Intel manual:
596  * 1  Invalid operation (FP_X_INV)
597  * 1a   Stack underflow
598  * 1b   Stack overflow
599  * 1c   Operand of unsupported format
600  * 1d   SNaN operand.
601  * 2  QNaN operand (not an exception, irrelevant here)
602  * 3  Any other invalid-operation not mentioned above or zero divide
603  *      (FP_X_INV, FP_X_DZ)
604  * 4  Denormal operand (FP_X_DNML)
605  * 5  Numeric over/underflow (FP_X_OFL, FP_X_UFL)
606  * 6  Inexact result (FP_X_IMP)
607  */
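/*
 * Illustrative example (not part of the original comment): suppose only the
 * divide-by-zero exception is unmasked, so the low six control word mask
 * bits are 0x3b, and the status word reports DZ | IMP (0x24).  Then
 * (~control & 0x3f) == 0x04, OR-ing in the unmaskable stack fault bit gives
 * 0x44, and status & 0x44 == 0x04, which selects FPE_FLTDIV below; this is
 * exactly the lookup npxtrap() performs later in this file.
 */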
608 static char fpetable[128] = {
609 	0,
610 	FPE_FLTINV,	/*  1 - INV */
611 	FPE_FLTUND,	/*  2 - DNML */
612 	FPE_FLTINV,	/*  3 - INV | DNML */
613 	FPE_FLTDIV,	/*  4 - DZ */
614 	FPE_FLTINV,	/*  5 - INV | DZ */
615 	FPE_FLTDIV,	/*  6 - DNML | DZ */
616 	FPE_FLTINV,	/*  7 - INV | DNML | DZ */
617 	FPE_FLTOVF,	/*  8 - OFL */
618 	FPE_FLTINV,	/*  9 - INV | OFL */
619 	FPE_FLTUND,	/*  A - DNML | OFL */
620 	FPE_FLTINV,	/*  B - INV | DNML | OFL */
621 	FPE_FLTDIV,	/*  C - DZ | OFL */
622 	FPE_FLTINV,	/*  D - INV | DZ | OFL */
623 	FPE_FLTDIV,	/*  E - DNML | DZ | OFL */
624 	FPE_FLTINV,	/*  F - INV | DNML | DZ | OFL */
625 	FPE_FLTUND,	/* 10 - UFL */
626 	FPE_FLTINV,	/* 11 - INV | UFL */
627 	FPE_FLTUND,	/* 12 - DNML | UFL */
628 	FPE_FLTINV,	/* 13 - INV | DNML | UFL */
629 	FPE_FLTDIV,	/* 14 - DZ | UFL */
630 	FPE_FLTINV,	/* 15 - INV | DZ | UFL */
631 	FPE_FLTDIV,	/* 16 - DNML | DZ | UFL */
632 	FPE_FLTINV,	/* 17 - INV | DNML | DZ | UFL */
633 	FPE_FLTOVF,	/* 18 - OFL | UFL */
634 	FPE_FLTINV,	/* 19 - INV | OFL | UFL */
635 	FPE_FLTUND,	/* 1A - DNML | OFL | UFL */
636 	FPE_FLTINV,	/* 1B - INV | DNML | OFL | UFL */
637 	FPE_FLTDIV,	/* 1C - DZ | OFL | UFL */
638 	FPE_FLTINV,	/* 1D - INV | DZ | OFL | UFL */
639 	FPE_FLTDIV,	/* 1E - DNML | DZ | OFL | UFL */
640 	FPE_FLTINV,	/* 1F - INV | DNML | DZ | OFL | UFL */
641 	FPE_FLTRES,	/* 20 - IMP */
642 	FPE_FLTINV,	/* 21 - INV | IMP */
643 	FPE_FLTUND,	/* 22 - DNML | IMP */
644 	FPE_FLTINV,	/* 23 - INV | DNML | IMP */
645 	FPE_FLTDIV,	/* 24 - DZ | IMP */
646 	FPE_FLTINV,	/* 25 - INV | DZ | IMP */
647 	FPE_FLTDIV,	/* 26 - DNML | DZ | IMP */
648 	FPE_FLTINV,	/* 27 - INV | DNML | DZ | IMP */
649 	FPE_FLTOVF,	/* 28 - OFL | IMP */
650 	FPE_FLTINV,	/* 29 - INV | OFL | IMP */
651 	FPE_FLTUND,	/* 2A - DNML | OFL | IMP */
652 	FPE_FLTINV,	/* 2B - INV | DNML | OFL | IMP */
653 	FPE_FLTDIV,	/* 2C - DZ | OFL | IMP */
654 	FPE_FLTINV,	/* 2D - INV | DZ | OFL | IMP */
655 	FPE_FLTDIV,	/* 2E - DNML | DZ | OFL | IMP */
656 	FPE_FLTINV,	/* 2F - INV | DNML | DZ | OFL | IMP */
657 	FPE_FLTUND,	/* 30 - UFL | IMP */
658 	FPE_FLTINV,	/* 31 - INV | UFL | IMP */
659 	FPE_FLTUND,	/* 32 - DNML | UFL | IMP */
660 	FPE_FLTINV,	/* 33 - INV | DNML | UFL | IMP */
661 	FPE_FLTDIV,	/* 34 - DZ | UFL | IMP */
662 	FPE_FLTINV,	/* 35 - INV | DZ | UFL | IMP */
663 	FPE_FLTDIV,	/* 36 - DNML | DZ | UFL | IMP */
664 	FPE_FLTINV,	/* 37 - INV | DNML | DZ | UFL | IMP */
665 	FPE_FLTOVF,	/* 38 - OFL | UFL | IMP */
666 	FPE_FLTINV,	/* 39 - INV | OFL | UFL | IMP */
667 	FPE_FLTUND,	/* 3A - DNML | OFL | UFL | IMP */
668 	FPE_FLTINV,	/* 3B - INV | DNML | OFL | UFL | IMP */
669 	FPE_FLTDIV,	/* 3C - DZ | OFL | UFL | IMP */
670 	FPE_FLTINV,	/* 3D - INV | DZ | OFL | UFL | IMP */
671 	FPE_FLTDIV,	/* 3E - DNML | DZ | OFL | UFL | IMP */
672 	FPE_FLTINV,	/* 3F - INV | DNML | DZ | OFL | UFL | IMP */
673 	FPE_FLTSUB,	/* 40 - STK */
674 	FPE_FLTSUB,	/* 41 - INV | STK */
675 	FPE_FLTUND,	/* 42 - DNML | STK */
676 	FPE_FLTSUB,	/* 43 - INV | DNML | STK */
677 	FPE_FLTDIV,	/* 44 - DZ | STK */
678 	FPE_FLTSUB,	/* 45 - INV | DZ | STK */
679 	FPE_FLTDIV,	/* 46 - DNML | DZ | STK */
680 	FPE_FLTSUB,	/* 47 - INV | DNML | DZ | STK */
681 	FPE_FLTOVF,	/* 48 - OFL | STK */
682 	FPE_FLTSUB,	/* 49 - INV | OFL | STK */
683 	FPE_FLTUND,	/* 4A - DNML | OFL | STK */
684 	FPE_FLTSUB,	/* 4B - INV | DNML | OFL | STK */
685 	FPE_FLTDIV,	/* 4C - DZ | OFL | STK */
686 	FPE_FLTSUB,	/* 4D - INV | DZ | OFL | STK */
687 	FPE_FLTDIV,	/* 4E - DNML | DZ | OFL | STK */
688 	FPE_FLTSUB,	/* 4F - INV | DNML | DZ | OFL | STK */
689 	FPE_FLTUND,	/* 50 - UFL | STK */
690 	FPE_FLTSUB,	/* 51 - INV | UFL | STK */
691 	FPE_FLTUND,	/* 52 - DNML | UFL | STK */
692 	FPE_FLTSUB,	/* 53 - INV | DNML | UFL | STK */
693 	FPE_FLTDIV,	/* 54 - DZ | UFL | STK */
694 	FPE_FLTSUB,	/* 55 - INV | DZ | UFL | STK */
695 	FPE_FLTDIV,	/* 56 - DNML | DZ | UFL | STK */
696 	FPE_FLTSUB,	/* 57 - INV | DNML | DZ | UFL | STK */
697 	FPE_FLTOVF,	/* 58 - OFL | UFL | STK */
698 	FPE_FLTSUB,	/* 59 - INV | OFL | UFL | STK */
699 	FPE_FLTUND,	/* 5A - DNML | OFL | UFL | STK */
700 	FPE_FLTSUB,	/* 5B - INV | DNML | OFL | UFL | STK */
701 	FPE_FLTDIV,	/* 5C - DZ | OFL | UFL | STK */
702 	FPE_FLTSUB,	/* 5D - INV | DZ | OFL | UFL | STK */
703 	FPE_FLTDIV,	/* 5E - DNML | DZ | OFL | UFL | STK */
704 	FPE_FLTSUB,	/* 5F - INV | DNML | DZ | OFL | UFL | STK */
705 	FPE_FLTRES,	/* 60 - IMP | STK */
706 	FPE_FLTSUB,	/* 61 - INV | IMP | STK */
707 	FPE_FLTUND,	/* 62 - DNML | IMP | STK */
708 	FPE_FLTSUB,	/* 63 - INV | DNML | IMP | STK */
709 	FPE_FLTDIV,	/* 64 - DZ | IMP | STK */
710 	FPE_FLTSUB,	/* 65 - INV | DZ | IMP | STK */
711 	FPE_FLTDIV,	/* 66 - DNML | DZ | IMP | STK */
712 	FPE_FLTSUB,	/* 67 - INV | DNML | DZ | IMP | STK */
713 	FPE_FLTOVF,	/* 68 - OFL | IMP | STK */
714 	FPE_FLTSUB,	/* 69 - INV | OFL | IMP | STK */
715 	FPE_FLTUND,	/* 6A - DNML | OFL | IMP | STK */
716 	FPE_FLTSUB,	/* 6B - INV | DNML | OFL | IMP | STK */
717 	FPE_FLTDIV,	/* 6C - DZ | OFL | IMP | STK */
718 	FPE_FLTSUB,	/* 6D - INV | DZ | OFL | IMP | STK */
719 	FPE_FLTDIV,	/* 6E - DNML | DZ | OFL | IMP | STK */
720 	FPE_FLTSUB,	/* 6F - INV | DNML | DZ | OFL | IMP | STK */
721 	FPE_FLTUND,	/* 70 - UFL | IMP | STK */
722 	FPE_FLTSUB,	/* 71 - INV | UFL | IMP | STK */
723 	FPE_FLTUND,	/* 72 - DNML | UFL | IMP | STK */
724 	FPE_FLTSUB,	/* 73 - INV | DNML | UFL | IMP | STK */
725 	FPE_FLTDIV,	/* 74 - DZ | UFL | IMP | STK */
726 	FPE_FLTSUB,	/* 75 - INV | DZ | UFL | IMP | STK */
727 	FPE_FLTDIV,	/* 76 - DNML | DZ | UFL | IMP | STK */
728 	FPE_FLTSUB,	/* 77 - INV | DNML | DZ | UFL | IMP | STK */
729 	FPE_FLTOVF,	/* 78 - OFL | UFL | IMP | STK */
730 	FPE_FLTSUB,	/* 79 - INV | OFL | UFL | IMP | STK */
731 	FPE_FLTUND,	/* 7A - DNML | OFL | UFL | IMP | STK */
732 	FPE_FLTSUB,	/* 7B - INV | DNML | OFL | UFL | IMP | STK */
733 	FPE_FLTDIV,	/* 7C - DZ | OFL | UFL | IMP | STK */
734 	FPE_FLTSUB,	/* 7D - INV | DZ | OFL | UFL | IMP | STK */
735 	FPE_FLTDIV,	/* 7E - DNML | DZ | OFL | UFL | IMP | STK */
736 	FPE_FLTSUB,	/* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */
737 };
738 
739 /*
740  * Preserve the FP status word, clear FP exceptions, then generate a SIGFPE.
741  *
742  * Clearing exceptions is necessary mainly to avoid IRQ13 bugs.  We now
743  * depend on longjmp() restoring a usable state.  Restoring the state
744  * or examining it might fail if we didn't clear exceptions.
745  *
746  * The error code chosen will be one of the FPE_... macros.  It will be
747  * sent as the second argument to old BSD-style signal handlers and in
748  * the si_code member of the siginfo_t passed to SA_SIGINFO signal handlers.
749  *
750  * XXX the FP state is not preserved across signal handlers.  So signal
751  * handlers cannot afford to do FP unless they preserve the state or
752  * longjmp() out.  Both preserving the state and longjmp()ing may be
753  * destroyed by IRQ13 bugs.  Clearing FP exceptions is not an acceptable
754  * solution for signals other than SIGFPE.
755  */
756 int
757 npxtrap()
758 {
759 	critical_t savecrit;
760 	u_short control, status;
761 	u_long *exstat;
762 
763 	if (!npx_exists) {
764 		printf("npxtrap: fpcurthread = %p, curthread = %p, npx_exists = %d\n",
765 		       PCPU_GET(fpcurthread), curthread, npx_exists);
766 		panic("npxtrap from nowhere");
767 	}
768 	savecrit = cpu_critical_enter();
769 
770 	/*
771 	 * Interrupt handling (for another interrupt) may have pushed the
772 	 * state to memory.  Fetch the relevant parts of the state from
773 	 * wherever they are.
774 	 */
775 	if (PCPU_GET(fpcurthread) != curthread) {
776 		control = GET_FPU_CW(curthread);
777 		status = GET_FPU_SW(curthread);
778 	} else {
779 		fnstcw(&control);
780 		fnstsw(&status);
781 	}
782 
783 	exstat = GET_FPU_EXSW_PTR(curthread->td_pcb);
784 	*exstat = status;
785 	if (PCPU_GET(fpcurthread) != curthread)
786 		GET_FPU_SW(curthread) &= ~0x80bf;
787 	else
788 		fnclex();
789 	cpu_critical_exit(savecrit);
790 	return (fpetable[status & ((~control & 0x3f) | 0x40)]);
791 }
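
#if 0
/*
 * Sketch of the intended consumer (illustration only, never compiled): the
 * machine-dependent trap handler turns an exception 16 / IRQ13 event into a
 * SIGFPE whose code is npxtrap()'s return value, roughly:
 */
	case T_ARITHTRAP:	/* user arithmetic trap */
		ucode = npxtrap();
		i = SIGFPE;
		break;
#endif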
792 
793 /*
794  * Implement device not available (DNA) exception
795  *
796  * It would be better to switch FP context here (if curthread != fpcurthread)
797  * and not necessarily for every context switch, but it is too hard to
798  * access foreign pcb's.
799  */
800 int
801 npxdna()
802 {
803 	u_long *exstat;
804 	critical_t s;
805 
806 	if (!npx_exists)
807 		return (0);
808 	if (PCPU_GET(fpcurthread) != NULL) {
809 		printf("npxdna: fpcurthread = %p, curthread = %p\n",
810 		       PCPU_GET(fpcurthread), curthread);
811 		panic("npxdna");
812 	}
813 	s = cpu_critical_enter();
814 	stop_emulating();
815 	/*
816 	 * Record new context early in case frstor causes an IRQ13.
817 	 */
818 	PCPU_SET(fpcurthread, curthread);
819 
820 	exstat = GET_FPU_EXSW_PTR(PCPU_GET(curpcb));
821 	*exstat = 0;
822 	/*
823 	 * The following frstor may cause an IRQ13 when the state being
824 	 * restored has a pending error.  The error will appear to have been
825 	 * triggered by the current (npx) user instruction even when that
826 	 * instruction is a no-wait instruction that should not trigger an
827 	 * error (e.g., fnclex).  On at least one 486 system all of the
828 	 * no-wait instructions are broken the same as frstor, so our
829 	 * treatment does not amplify the breakage.  On at least one
830 	 * 386/Cyrix 387 system, fnclex works correctly while frstor and
831 	 * fnsave are broken, so our treatment breaks fnclex if it is the
832 	 * first FPU instruction after a context switch.
833 	 */
834 	fpurstor(&PCPU_GET(curpcb)->pcb_save);
835 	cpu_critical_exit(s);
836 
837 	return (1);
838 }
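
#if 0
/*
 * Sketch of the intended caller (illustration only, never compiled; label
 * names are illustrative): the trap handler restarts the faulting FP
 * instruction transparently when npxdna() reports that it installed a
 * context, roughly:
 */
	case T_DNA:
		/* transparent fault (due to context switch "late") */
		if (npxdna())
			goto out;	/* no signal; just retry the instruction */
		break;			/* otherwise fall back to emulator/signal */
#endif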
839 
840 /*
841  * Wrapper for fnsave instruction, partly to handle hardware bugs.  When npx
842  * exceptions are reported via IRQ13, spurious IRQ13's may be triggered by
843  * no-wait npx instructions.  See the Intel application note AP-578 for
844  * details.  This doesn't cause any additional complications here.  IRQ13's
845  * are inherently asynchronous unless the CPU is frozen to deliver them --
846  * one that started in userland may be delivered many instructions later,
847  * after the process has entered the kernel.  It may even be delivered after
848  * the fnsave here completes.  A spurious IRQ13 for the fnsave is handled in
849  * the same way as a very-late-arriving non-spurious IRQ13 from user mode:
850  * it is normally ignored at first because we set fpcurthread to NULL; it is
851  * normally retriggered in npxdna() after return to user mode.
852  *
853  * npxsave() must be called with interrupts disabled, so that it clears
854  * fpcurthread atomically with saving the state.  We require callers to do the
855  * disabling, since most callers need to disable interrupts anyway to call
856  * npxsave() atomically with checking fpcurthread.
857  *
858  * A previous version of npxsave() went to great lengths to execute fnsave
859  * with interrupts enabled in case executing it froze the CPU.  This case
860  * can't happen, at least for Intel CPU/NPX's.  Spurious IRQ13's don't imply
861  * spurious freezes.
862  */
863 void
864 npxsave(addr)
865 	union savefpu *addr;
866 {
867 
868 	stop_emulating();
869 	fpusave(addr);
870 
871 	start_emulating();
872 	PCPU_SET(fpcurthread, NULL);
873 }
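
#if 0
/*
 * Typical caller pattern (illustration only, never compiled), mirroring
 * npxexit() above: enter a critical section so that the fpcurthread check
 * and the save happen atomically with respect to interrupts:
 */
	savecrit = cpu_critical_enter();
	if (td == PCPU_GET(fpcurthread))
		npxsave(&td->td_pcb->pcb_save);
	cpu_critical_exit(savecrit);
#endif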
874 
875 static void
876 fpusave(addr)
877 	union savefpu *addr;
878 {
879 
880 #ifdef CPU_ENABLE_SSE
881 	if (cpu_fxsr)
882 		fxsave(addr);
883 	else
884 #endif
885 		fnsave(addr);
886 }
887 
888 static void
889 fpurstor(addr)
890 	union savefpu *addr;
891 {
892 
893 #ifdef CPU_ENABLE_SSE
894 	if (cpu_fxsr)
895 		fxrstor(addr);
896 	else
897 #endif
898 		frstor(addr);
899 }
900 
901 #ifdef I586_CPU_XXX
902 static long
903 timezero(funcname, func)
904 	const char *funcname;
905 	void (*func) __P((void *buf, size_t len));
906 
907 {
908 	void *buf;
909 #define	BUFSIZE		1048576
910 	long usec;
911 	struct timeval finish, start;
912 
913 	buf = malloc(BUFSIZE, M_TEMP, M_NOWAIT);
914 	if (buf == NULL)
915 		return (BUFSIZE);
916 	microtime(&start);
917 	(*func)(buf, BUFSIZE);
918 	microtime(&finish);
919 	usec = 1000000 * (finish.tv_sec - start.tv_sec) +
920 	    finish.tv_usec - start.tv_usec;
921 	if (usec <= 0)
922 		usec = 1;
923 	if (bootverbose)
924 		printf("%s bandwidth = %u kBps\n", funcname,
925 		    (u_int32_t)(((BUFSIZE >> 10) * 1000000) / usec));
926 	free(buf, M_TEMP);
927 	return (usec);
928 }
929 #endif /* I586_CPU_XXX */
930 
931 static device_method_t npx_methods[] = {
932 	/* Device interface */
933 	DEVMETHOD(device_identify,	npx_identify),
934 	DEVMETHOD(device_probe,		npx_probe),
935 	DEVMETHOD(device_attach,	npx_attach),
936 	DEVMETHOD(device_detach,	bus_generic_detach),
937 	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
938 	DEVMETHOD(device_suspend,	bus_generic_suspend),
939 	DEVMETHOD(device_resume,	bus_generic_resume),
940 
941 	{ 0, 0 }
942 };
943 
944 static driver_t npx_driver = {
945 	"npx",
946 	npx_methods,
947 	1,			/* no softc */
948 };
949 
950 static devclass_t npx_devclass;
951 
952 /*
953  * We prefer to attach to the root nexus so that the usual case (exception 16)
954  * doesn't describe the processor as being `on isa'.
955  */
956 DRIVER_MODULE(npx, nexus, npx_driver, npx_devclass, 0, 0);
957 
958 /*
959  * This sucks up the legacy ISA support assignments from PNPBIOS/ACPI.
960  */
961 static struct isa_pnp_id npxisa_ids[] = {
962 	{ 0x040cd041, "Legacy ISA coprocessor support" }, /* PNP0C04 */
963 	{ 0 }
964 };
965 
966 static int
967 npxisa_probe(device_t dev)
968 {
969 	int result;
970 	if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev, npxisa_ids)) <= 0) {
971 		device_quiet(dev);
972 	}
973 	return (result);
974 }
975 
976 static int
977 npxisa_attach(device_t dev)
978 {
979 	return (0);
980 }
981 
982 static device_method_t npxisa_methods[] = {
983 	/* Device interface */
984 	DEVMETHOD(device_probe,		npxisa_probe),
985 	DEVMETHOD(device_attach,	npxisa_attach),
986 	DEVMETHOD(device_detach,	bus_generic_detach),
987 	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
988 	DEVMETHOD(device_suspend,	bus_generic_suspend),
989 	DEVMETHOD(device_resume,	bus_generic_resume),
990 
991 	{ 0, 0 }
992 };
993 
994 static driver_t npxisa_driver = {
995 	"npxisa",
996 	npxisa_methods,
997 	1,			/* no softc */
998 };
999 
1000 static devclass_t npxisa_devclass;
1001 
1002 DRIVER_MODULE(npxisa, isa, npxisa_driver, npxisa_devclass, 0, 0);
1003 #ifndef PC98
1004 DRIVER_MODULE(npxisa, acpi, npxisa_driver, npxisa_devclass, 0, 0);
1005 #endif
1006 
1007