xref: /freebsd/sys/amd64/amd64/fpu.c (revision a220d00e74dd245b4fca59c5eca0c53963686325)
1 /*-
2  * Copyright (c) 1990 William Jolitz.
3  * Copyright (c) 1991 The Regents of the University of California.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by the University of
17  *	California, Berkeley and its contributors.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	from: @(#)npx.c	7.2 (Berkeley) 5/12/91
35  * $FreeBSD$
36  */
37 
38 #include "opt_cpu.h"
39 #include "opt_debug_npx.h"
40 #include "opt_math_emulate.h"
41 #include "opt_npx.h"
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/bus.h>
46 #include <sys/kernel.h>
47 #include <sys/lock.h>
48 #include <sys/malloc.h>
49 #include <sys/module.h>
50 #include <sys/mutex.h>
52 #include <sys/proc.h>
53 #include <sys/sysctl.h>
54 #include <machine/bus.h>
55 #include <sys/rman.h>
56 #ifdef NPX_DEBUG
57 #include <sys/syslog.h>
58 #endif
59 #include <sys/signalvar.h>
60 #include <sys/user.h>
61 
62 #ifndef SMP
63 #include <machine/asmacros.h>
64 #endif
65 #include <machine/cputypes.h>
66 #include <machine/frame.h>
67 #include <machine/md_var.h>
68 #include <machine/pcb.h>
69 #include <machine/psl.h>
70 #ifndef SMP
71 #include <machine/clock.h>
72 #endif
73 #include <machine/resource.h>
74 #include <machine/specialreg.h>
75 #include <machine/segments.h>
76 
77 #ifndef SMP
78 #include <i386/isa/icu.h>
79 #ifdef PC98
80 #include <pc98/pc98/pc98.h>
81 #else
82 #include <i386/isa/isa.h>
83 #endif
84 #endif
85 #include <isa/isavar.h>
86 
87 /*
88  * 387 and 287 Numeric Coprocessor Extension (NPX) Driver.
89  */
90 
91 /* Configuration flags. */
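/* Bits in the "flags" hint for npx0; examined in npx_attach(). */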
92 #define	NPX_DISABLE_I586_OPTIMIZED_BCOPY	(1 << 0)
93 #define	NPX_DISABLE_I586_OPTIMIZED_BZERO	(1 << 1)
94 #define	NPX_DISABLE_I586_OPTIMIZED_COPYIO	(1 << 2)
95 #define	NPX_PREFER_EMULATOR			(1 << 3)
96 
97 #ifdef	__GNUC__
98 
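/*
 * Inline-assembly wrappers for the FPU instructions used below.
 * start_emulating() sets CR0_TS so the next FPU/WAIT use traps (DNA);
 * stop_emulating() clears it with clts.
 */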
99 #define	fldcw(addr)		__asm("fldcw %0" : : "m" (*(addr)))
100 #define	fnclex()		__asm("fnclex")
101 #define	fninit()		__asm("fninit")
102 #define	fnsave(addr)		__asm __volatile("fnsave %0" : "=m" (*(addr)))
103 #define	fnstcw(addr)		__asm __volatile("fnstcw %0" : "=m" (*(addr)))
104 #define	fnstsw(addr)		__asm __volatile("fnstsw %0" : "=m" (*(addr)))
105 #define	fp_divide_by_0()	__asm("fldz; fld1; fdiv %st,%st(1); fnop")
106 #define	frstor(addr)		__asm("frstor %0" : : "m" (*(addr)))
107 #ifdef CPU_ENABLE_SSE
108 #define	fxrstor(addr)		__asm("fxrstor %0" : : "m" (*(addr)))
109 #define	fxsave(addr)		__asm __volatile("fxsave %0" : "=m" (*(addr)))
110 #endif
111 #define	start_emulating()	__asm("smsw %%ax; orb %0,%%al; lmsw %%ax" \
112 				      : : "n" (CR0_TS) : "ax")
113 #define	stop_emulating()	__asm("clts")
114 
115 #else	/* not __GNUC__ */
116 
117 void	fldcw		__P((caddr_t addr));
118 void	fnclex		__P((void));
119 void	fninit		__P((void));
120 void	fnsave		__P((caddr_t addr));
121 void	fnstcw		__P((caddr_t addr));
122 void	fnstsw		__P((caddr_t addr));
123 void	fp_divide_by_0	__P((void));
124 void	frstor		__P((caddr_t addr));
125 #ifdef CPU_ENABLE_SSE
126 void	fxsave		__P((caddr_t addr));
127 void	fxrstor		__P((caddr_t addr));
128 #endif
129 void	start_emulating	__P((void));
130 void	stop_emulating	__P((void));
131 
132 #endif	/* __GNUC__ */
133 
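/*
 * Accessors for the control word, status word and saved exception status
 * word in a thread's pcb save area.  When CPU_ENABLE_SSE is configured
 * they pick the FXSAVE (sv_xmm) or legacy FNSAVE (sv_87) layout at run
 * time based on cpu_fxsr; otherwise only the legacy layout exists.
 */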
134 #ifdef CPU_ENABLE_SSE
135 #define GET_FPU_CW(thread) \
136 	(cpu_fxsr ? \
137 		(thread)->td_pcb->pcb_save.sv_xmm.sv_env.en_cw : \
138 		(thread)->td_pcb->pcb_save.sv_87.sv_env.en_cw)
139 #define GET_FPU_SW(thread) \
140 	(cpu_fxsr ? \
141 		(thread)->td_pcb->pcb_save.sv_xmm.sv_env.en_sw : \
142 		(thread)->td_pcb->pcb_save.sv_87.sv_env.en_sw)
143 #define GET_FPU_EXSW_PTR(pcb) \
144 	(cpu_fxsr ? \
145 		&(pcb)->pcb_save.sv_xmm.sv_ex_sw : \
146 		&(pcb)->pcb_save.sv_87.sv_ex_sw)
147 #else /* CPU_ENABLE_SSE */
148 #define GET_FPU_CW(thread) \
149 	(thread->td_pcb->pcb_save.sv_87.sv_env.en_cw)
150 #define GET_FPU_SW(thread) \
151 	(thread->td_pcb->pcb_save.sv_87.sv_env.en_sw)
152 #define GET_FPU_EXSW_PTR(pcb) \
153 	(&(pcb)->pcb_save.sv_87.sv_ex_sw)
154 #endif /* CPU_ENABLE_SSE */
155 
156 typedef u_char bool_t;
157 
158 static	int	npx_attach	__P((device_t dev));
159 static	void	npx_identify	__P((driver_t *driver, device_t parent));
160 #ifndef SMP
161 static	void	npx_intr	__P((void *));
162 #endif
163 static	int	npx_probe	__P((device_t dev));
164 static	void	fpusave		__P((union savefpu *));
165 static	void	fpurstor	__P((union savefpu *));
166 #ifdef I586_CPU_XXX
167 static	long	timezero	__P((const char *funcname,
168 				     void (*func)(void *buf, size_t len)));
169 #endif /* I586_CPU_XXX */
170 
171 int	hw_float;		/* XXX currently just alias for npx_exists */
172 
173 SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint,
174 	CTLFLAG_RD, &hw_float, 0,
175 	"Floatingpoint instructions executed in hardware");
176 
177 #ifndef SMP
178 static	volatile u_int		npx_intrs_while_probing;
179 static	volatile u_int		npx_traps_while_probing;
180 #endif
181 
182 static	bool_t			npx_ex16;
183 static	bool_t			npx_exists;
184 static	bool_t			npx_irq13;
185 
186 #ifndef SMP
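/*
 * Minimal exception 16 handler installed while probing: it counts the
 * trap in npx_traps_while_probing, clears the exception with fnclex and
 * returns, so traps raised by the probe are recorded but otherwise ignored.
 */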
187 alias_for_inthand_t probetrap;
188 __asm("								\n\
189 	.text							\n\
190 	.p2align 2,0x90						\n\
191 	.type	" __XSTRING(CNAME(probetrap)) ",@function	\n\
192 " __XSTRING(CNAME(probetrap)) ":				\n\
193 	ss							\n\
194 	incl	" __XSTRING(CNAME(npx_traps_while_probing)) "	\n\
195 	fnclex							\n\
196 	iret							\n\
197 ");
198 #endif /* !SMP */
199 
200 /*
201  * Identify routine.  Create a connection point on our parent for probing.
202  */
203 static void
204 npx_identify(driver, parent)
205 	driver_t *driver;
206 	device_t parent;
207 {
208 	device_t child;
209 
210 	child = BUS_ADD_CHILD(parent, 0, "npx", 0);
211 	if (child == NULL)
212 		panic("npx_identify");
213 }
214 
215 #ifndef SMP
216 /*
217  * Do minimal handling of npx interrupts to convert them to traps.
218  */
219 static void
220 npx_intr(dummy)
221 	void *dummy;
222 {
223 	struct thread *td;
224 
225 #ifndef SMP
226 	npx_intrs_while_probing++;
227 #endif
228 
229 	/*
230 	 * The BUSY# latch must be cleared in all cases so that the next
231 	 * unmasked npx exception causes an interrupt.
232 	 */
233 #ifdef PC98
234 	outb(0xf8, 0);
235 #else
236 	outb(0xf0, 0);
237 #endif
238 
239 	/*
240 	 * npxthread is normally non-null here.  In that case, schedule an
241 	 * AST to finish the exception handling in the correct context
242 	 * (this interrupt may occur after the thread has entered the
243 	 * kernel via a syscall or an interrupt).  Otherwise, the npx
244 	 * state of the thread that caused this interrupt must have been
245 	 * pushed to the thread's pcb, and clearing of the busy latch
246 	 * above has finished the (essentially null) handling of this
247 	 * interrupt.  Control will eventually return to the instruction
248 	 * that caused it and it will repeat.  We will eventually (usually
249 	 * soon) win the race to handle the interrupt properly.
250 	 */
251 	td = PCPU_GET(npxthread);
252 	if (td != NULL) {
253 		td->td_pcb->pcb_flags |= PCB_NPXTRAP;
254 		mtx_lock_spin(&sched_lock);
255 		td->td_kse->ke_flags |= KEF_ASTPENDING;
256 		mtx_unlock_spin(&sched_lock);
257 	}
258 }
259 #endif /* !SMP */
260 
261 /*
262  * Probe routine.  Initialize cr0 to give correct behaviour for [f]wait
263  * whether the device exists or not (XXX should be elsewhere).  Set flags
264  * to tell npxattach() what to do.  Modify device struct if npx doesn't
265  * need to use interrupts.  Return 0 if device exists.
266  */
267 static int
268 npx_probe(dev)
269 	device_t dev;
270 {
271 #ifndef SMP
272 	struct gate_descriptor save_idt_npxtrap;
273 	struct resource *ioport_res, *irq_res;
274 	void *irq_cookie;
275 	int ioport_rid, irq_num, irq_rid;
276 	u_short control;
277 	u_short status;
278 
279 	save_idt_npxtrap = idt[16];
280 	setidt(16, probetrap, SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
281 	ioport_rid = 0;
282 	ioport_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &ioport_rid,
283 	    IO_NPX, IO_NPX, IO_NPXSIZE, RF_ACTIVE);
284 	if (ioport_res == NULL)
285 		panic("npx: can't get ports");
286 #ifdef PC98
287 	if (resource_int_value("npx", 0, "irq", &irq_num) != 0)
288 		irq_num = 8;
289 #else
290 	if (resource_int_value("npx", 0, "irq", &irq_num) != 0)
291 		irq_num = 13;
292 #endif
293 	irq_rid = 0;
294 	irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &irq_rid, irq_num,
295 	    irq_num, 1, RF_ACTIVE);
296 	if (irq_res == NULL)
297 		panic("npx: can't get IRQ");
298 	if (bus_setup_intr(dev, irq_res, INTR_TYPE_MISC | INTR_FAST, npx_intr,
299 	    NULL, &irq_cookie) != 0)
300 		panic("npx: can't create intr");
301 #endif /* !SMP */
302 
303 	/*
304 	 * Partially reset the coprocessor, if any.  Some BIOS's don't reset
305 	 * it after a warm boot.
306 	 */
307 #ifdef PC98
308 	outb(0xf8,0);
309 #else
310 	outb(0xf1, 0);		/* full reset on some systems, NOP on others */
311 	outb(0xf0, 0);		/* clear BUSY# latch */
312 #endif
313 	/*
314 	 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
315 	 * instructions.  We must set the CR0_MP bit and use the CR0_TS
316 	 * bit to control the trap, because setting the CR0_EM bit does
317 	 * not cause WAIT instructions to trap.  It's important to trap
318 	 * WAIT instructions - otherwise the "wait" variants of no-wait
319 	 * control instructions would degenerate to the "no-wait" variants
320 	 * after FP context switches but work correctly otherwise.  It's
321 	 * particularly important to trap WAITs when there is no NPX -
322 	 * otherwise the "wait" variants would always degenerate.
323 	 *
324 	 * Try setting CR0_NE to get correct error reporting on 486DX's.
325 	 * Setting it should fail or do nothing on lesser processors.
326 	 */
327 	load_cr0(rcr0() | CR0_MP | CR0_NE);
328 	/*
329 	 * But don't trap while we're probing.
330 	 */
331 	stop_emulating();
332 	/*
333 	 * Finish resetting the coprocessor, if any.  If there is an error
334 	 * pending, then we may get a bogus IRQ13, but npx_intr() will handle
335 	 * it OK.  Bogus halts have never been observed, but we enabled
336 	 * IRQ13 and cleared the BUSY# latch early to handle them anyway.
337 	 */
338 	fninit();
339 
340 	device_set_desc(dev, "math processor");
341 
342 #ifdef SMP
343 
344 	/*
345 	 * Exception 16 MUST work for SMP.
346 	 */
347 	npx_ex16 = hw_float = npx_exists = 1;
348 	return (0);
349 
350 #else /* !SMP */
351 
352 	/*
353 	 * Don't use fwait here because it might hang.
354 	 * Don't use fnop here because it usually hangs if there is no FPU.
355 	 */
356 	DELAY(1000);		/* wait for any IRQ13 */
357 #ifdef DIAGNOSTIC
358 	if (npx_intrs_while_probing != 0)
359 		printf("fninit caused %u bogus npx interrupt(s)\n",
360 		       npx_intrs_while_probing);
361 	if (npx_traps_while_probing != 0)
362 		printf("fninit caused %u bogus npx trap(s)\n",
363 		       npx_traps_while_probing);
364 #endif
365 	/*
366 	 * Check for a status of mostly zero.
367 	 */
368 	status = 0x5a5a;
369 	fnstsw(&status);
370 	if ((status & 0xb8ff) == 0) {
371 		/*
372 		 * Good, now check for a proper control word.
373 		 */
374 		control = 0x5a5a;
375 		fnstcw(&control);
376 		if ((control & 0x1f3f) == 0x033f) {
377 			hw_float = npx_exists = 1;
378 			/*
379 			 * We have an npx, now divide by 0 to see if exception
380 			 * 16 works.
381 			 */
382 			control &= ~(1 << 2);	/* enable divide by 0 trap */
383 			fldcw(&control);
384 #ifdef FPU_ERROR_BROKEN
385 			/*
386 			 * FPU error signal doesn't work on some CPU
387 			 * accelerator board.
388 			 */
389 			npx_ex16 = 1;
390 			return (0);
391 #endif
392 			npx_traps_while_probing = npx_intrs_while_probing = 0;
393 			fp_divide_by_0();
394 			if (npx_traps_while_probing != 0) {
395 				/*
396 				 * Good, exception 16 works.
397 				 */
398 				npx_ex16 = 1;
399 				goto no_irq13;
400 			}
401 			if (npx_intrs_while_probing != 0) {
402 				/*
403 				 * Bad, we are stuck with IRQ13.
404 				 */
405 				npx_irq13 = 1;
406 				idt[16] = save_idt_npxtrap;
407 				return (0);
408 			}
409 			/*
410 			 * Worse, even IRQ13 is broken.  Use emulator.
411 			 */
412 		}
413 	}
414 	/*
415 	 * Probe failed, but we want to get to npxattach to initialize the
416 	 * emulator and say that it has been installed.  XXX handle devices
417 	 * that aren't really devices better.
418 	 */
419 	/* FALLTHROUGH */
420 no_irq13:
421 	idt[16] = save_idt_npxtrap;
422 	bus_teardown_intr(dev, irq_res, irq_cookie);
423 
424 	/*
425 	 * XXX hack around brokenness of bus_teardown_intr().  If we left the
426 	 * irq active then we would get it instead of exception 16.
427 	 */
428 	INTRDIS(1 << irq_num);
429 
430 	bus_release_resource(dev, SYS_RES_IRQ, irq_rid, irq_res);
431 	bus_release_resource(dev, SYS_RES_IOPORT, ioport_rid, ioport_res);
432 	return (0);
433 
434 #endif /* SMP */
435 }
436 
437 /*
438  * Attach routine - announce which it is, and wire into system
439  */
440 int
441 npx_attach(dev)
442 	device_t dev;
443 {
444 	int flags;
445 
446 	if (resource_int_value("npx", 0, "flags", &flags) != 0)
447 		flags = 0;
448 
449 	if (flags)
450 		device_printf(dev, "flags 0x%x ", flags);
451 	if (npx_irq13) {
452 		device_printf(dev, "using IRQ 13 interface\n");
453 	} else {
454 #if defined(MATH_EMULATE) || defined(GPL_MATH_EMULATE)
455 		if (npx_ex16) {
456 			if (!(flags & NPX_PREFER_EMULATOR))
457 				device_printf(dev, "INT 16 interface\n");
458 			else {
459 				device_printf(dev, "FPU exists, but flags request "
460 				    "emulator\n");
461 				hw_float = npx_exists = 0;
462 			}
463 		} else if (npx_exists) {
464 			device_printf(dev, "error reporting broken; using 387 emulator\n");
465 			hw_float = npx_exists = 0;
466 		} else
467 			device_printf(dev, "387 emulator\n");
468 #else
469 		if (npx_ex16) {
470 			device_printf(dev, "INT 16 interface\n");
471 			if (flags & NPX_PREFER_EMULATOR) {
472 				device_printf(dev, "emulator requested, but none compiled "
473 				    "into kernel, using FPU\n");
474 			}
475 		} else
476 			device_printf(dev, "no 387 emulator in kernel and no FPU!\n");
477 #endif
478 	}
479 	npxinit(__INITIAL_NPXCW__);
480 
481 #ifdef I586_CPU_XXX
482 	if (cpu_class == CPUCLASS_586 && npx_ex16 && npx_exists &&
483 	    timezero("i586_bzero()", i586_bzero) <
484 	    timezero("bzero()", bzero) * 4 / 5) {
485 		if (!(flags & NPX_DISABLE_I586_OPTIMIZED_BCOPY)) {
486 			bcopy_vector = i586_bcopy;
487 			ovbcopy_vector = i586_bcopy;
488 		}
489 		if (!(flags & NPX_DISABLE_I586_OPTIMIZED_BZERO))
490 			bzero = i586_bzero;
491 		if (!(flags & NPX_DISABLE_I586_OPTIMIZED_COPYIO)) {
492 			copyin_vector = i586_copyin;
493 			copyout_vector = i586_copyout;
494 		}
495 	}
496 #endif
497 
498 	return (0);		/* XXX unused */
499 }
500 
501 /*
502  * Initialize floating point unit.
503  */
504 void
505 npxinit(control)
506 	u_short control;
507 {
508 	static union savefpu dummy;
509 	critical_t savecrit;
510 
511 	if (!npx_exists)
512 		return;
513 	/*
514 	 * fninit has the same h/w bugs as fnsave.  Use the detoxified
515 	 * fnsave to throw away any junk in the fpu.  npxsave() initializes
516 	 * the fpu and sets npxthread = NULL as important side effects.
517 	 */
518 	savecrit = critical_enter();
519 	npxsave(&dummy);
520 	stop_emulating();
521 #ifdef CPU_ENABLE_SSE
522 	/* XXX npxsave() doesn't actually initialize the fpu in the SSE case. */
523 	if (cpu_fxsr)
524 		fninit();
525 #endif
526 	fldcw(&control);
527 	if (PCPU_GET(curpcb) != NULL)
528 		fpusave(&PCPU_GET(curpcb)->pcb_save);
529 	start_emulating();
530 	critical_exit(savecrit);
531 }
532 
533 /*
534  * Free coprocessor (if we have it).
535  */
536 void
537 npxexit(td)
538 	struct thread *td;
539 {
540 	critical_t savecrit;
541 
542 	savecrit = critical_enter();
543 	if (td == PCPU_GET(npxthread))
544 		npxsave(&PCPU_GET(curpcb)->pcb_save);
545 	critical_exit(savecrit);
546 #ifdef NPX_DEBUG
547 	if (npx_exists) {
548 		u_int	masked_exceptions;
549 
550 		masked_exceptions = PCPU_GET(curpcb)->pcb_save.sv_87.sv_env.en_cw
551 		    & PCPU_GET(curpcb)->pcb_save.sv_87.sv_env.en_sw & 0x7f;
552 		/*
553 		 * Log exceptions that would have trapped with the old
554 		 * control word (overflow, divide by 0, and invalid operand).
555 		 */
556 		if (masked_exceptions & 0x0d)
557 			log(LOG_ERR,
558 	"pid %d (%s) exited with masked floating point exceptions 0x%02x\n",
559 			    td->td_proc->p_pid, td->td_proc->p_comm,
560 			    masked_exceptions);
561 	}
562 #endif
563 }
564 
565 /*
566  * The following mechanism is used to ensure that the FPE_... value
567  * that is passed as a trapcode to the signal handler of the user
568  * process does not have more than one bit set.
569  *
570  * Multiple bits may be set if the user process modifies the control
571  * word while a status word bit is already set.  While this is a sign
572  * of bad coding, we have no choice but to narrow them down to one
573  * bit, since we must not send a trapcode that is not exactly one of
574  * the FPE_ macros.
575  *
576  * The mechanism has a static table with 128 entries.  Each combination
577  * of the 7 FPU status word exception bits directly translates to a
578  * position in this table, where a single FPE_... value is stored.
579  * This FPE_... value stored there is considered the "most important"
580  * of the exception bits and will be sent as the signal code.  The
581  * precedence of the bits is based upon Intel Document "Numerical
582  * Applications", Chapter "Special Computational Situations".
583  *
584  * The code that chooses one of these values does these steps: 1) Throw
585  * away status word bits that cannot be masked.  2) Throw away the bits
586  * currently masked in the control word, assuming the user isn't
587  * interested in them anymore.  3) Reinsert status word bit 7 (stack
588  * fault) if it is set, which cannot be masked but must be preserved.
589  * 4) Use the remaining bits to point into the trapcode table.
590  *
591  * The 6 maskable bits in order of their preference, as stated in the
592  * above referenced Intel manual:
593  * 1  Invalid operation (FP_X_INV)
594  * 1a   Stack underflow
595  * 1b   Stack overflow
596  * 1c   Operand of unsupported format
597  * 1d   SNaN operand.
598  * 2  QNaN operand (not an exception, irrelevant here)
599  * 3  Any other invalid-operation not mentioned above or zero divide
600  *      (FP_X_INV, FP_X_DZ)
601  * 4  Denormal operand (FP_X_DNML)
602  * 5  Numeric over/underflow (FP_X_OFL, FP_X_UFL)
603  * 6  Inexact result (FP_X_IMP)
604  */
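/*
 * npxtrap() below indexes this table with
 * "status & ((~control & 0x3f) | 0x40)".  For example, with a control
 * word that masks all six exceptions only the stack-fault bit 0x40 can
 * survive, selecting either entry 0 or FPE_FLTSUB.
 */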
605 static char fpetable[128] = {
606 	0,
607 	FPE_FLTINV,	/*  1 - INV */
608 	FPE_FLTUND,	/*  2 - DNML */
609 	FPE_FLTINV,	/*  3 - INV | DNML */
610 	FPE_FLTDIV,	/*  4 - DZ */
611 	FPE_FLTINV,	/*  5 - INV | DZ */
612 	FPE_FLTDIV,	/*  6 - DNML | DZ */
613 	FPE_FLTINV,	/*  7 - INV | DNML | DZ */
614 	FPE_FLTOVF,	/*  8 - OFL */
615 	FPE_FLTINV,	/*  9 - INV | OFL */
616 	FPE_FLTUND,	/*  A - DNML | OFL */
617 	FPE_FLTINV,	/*  B - INV | DNML | OFL */
618 	FPE_FLTDIV,	/*  C - DZ | OFL */
619 	FPE_FLTINV,	/*  D - INV | DZ | OFL */
620 	FPE_FLTDIV,	/*  E - DNML | DZ | OFL */
621 	FPE_FLTINV,	/*  F - INV | DNML | DZ | OFL */
622 	FPE_FLTUND,	/* 10 - UFL */
623 	FPE_FLTINV,	/* 11 - INV | UFL */
624 	FPE_FLTUND,	/* 12 - DNML | UFL */
625 	FPE_FLTINV,	/* 13 - INV | DNML | UFL */
626 	FPE_FLTDIV,	/* 14 - DZ | UFL */
627 	FPE_FLTINV,	/* 15 - INV | DZ | UFL */
628 	FPE_FLTDIV,	/* 16 - DNML | DZ | UFL */
629 	FPE_FLTINV,	/* 17 - INV | DNML | DZ | UFL */
630 	FPE_FLTOVF,	/* 18 - OFL | UFL */
631 	FPE_FLTINV,	/* 19 - INV | OFL | UFL */
632 	FPE_FLTUND,	/* 1A - DNML | OFL | UFL */
633 	FPE_FLTINV,	/* 1B - INV | DNML | OFL | UFL */
634 	FPE_FLTDIV,	/* 1C - DZ | OFL | UFL */
635 	FPE_FLTINV,	/* 1D - INV | DZ | OFL | UFL */
636 	FPE_FLTDIV,	/* 1E - DNML | DZ | OFL | UFL */
637 	FPE_FLTINV,	/* 1F - INV | DNML | DZ | OFL | UFL */
638 	FPE_FLTRES,	/* 20 - IMP */
639 	FPE_FLTINV,	/* 21 - INV | IMP */
640 	FPE_FLTUND,	/* 22 - DNML | IMP */
641 	FPE_FLTINV,	/* 23 - INV | DNML | IMP */
642 	FPE_FLTDIV,	/* 24 - DZ | IMP */
643 	FPE_FLTINV,	/* 25 - INV | DZ | IMP */
644 	FPE_FLTDIV,	/* 26 - DNML | DZ | IMP */
645 	FPE_FLTINV,	/* 27 - INV | DNML | DZ | IMP */
646 	FPE_FLTOVF,	/* 28 - OFL | IMP */
647 	FPE_FLTINV,	/* 29 - INV | OFL | IMP */
648 	FPE_FLTUND,	/* 2A - DNML | OFL | IMP */
649 	FPE_FLTINV,	/* 2B - INV | DNML | OFL | IMP */
650 	FPE_FLTDIV,	/* 2C - DZ | OFL | IMP */
651 	FPE_FLTINV,	/* 2D - INV | DZ | OFL | IMP */
652 	FPE_FLTDIV,	/* 2E - DNML | DZ | OFL | IMP */
653 	FPE_FLTINV,	/* 2F - INV | DNML | DZ | OFL | IMP */
654 	FPE_FLTUND,	/* 30 - UFL | IMP */
655 	FPE_FLTINV,	/* 31 - INV | UFL | IMP */
656 	FPE_FLTUND,	/* 32 - DNML | UFL | IMP */
657 	FPE_FLTINV,	/* 33 - INV | DNML | UFL | IMP */
658 	FPE_FLTDIV,	/* 34 - DZ | UFL | IMP */
659 	FPE_FLTINV,	/* 35 - INV | DZ | UFL | IMP */
660 	FPE_FLTDIV,	/* 36 - DNML | DZ | UFL | IMP */
661 	FPE_FLTINV,	/* 37 - INV | DNML | DZ | UFL | IMP */
662 	FPE_FLTOVF,	/* 38 - OFL | UFL | IMP */
663 	FPE_FLTINV,	/* 39 - INV | OFL | UFL | IMP */
664 	FPE_FLTUND,	/* 3A - DNML | OFL | UFL | IMP */
665 	FPE_FLTINV,	/* 3B - INV | DNML | OFL | UFL | IMP */
666 	FPE_FLTDIV,	/* 3C - DZ | OFL | UFL | IMP */
667 	FPE_FLTINV,	/* 3D - INV | DZ | OFL | UFL | IMP */
668 	FPE_FLTDIV,	/* 3E - DNML | DZ | OFL | UFL | IMP */
669 	FPE_FLTINV,	/* 3F - INV | DNML | DZ | OFL | UFL | IMP */
670 	FPE_FLTSUB,	/* 40 - STK */
671 	FPE_FLTSUB,	/* 41 - INV | STK */
672 	FPE_FLTUND,	/* 42 - DNML | STK */
673 	FPE_FLTSUB,	/* 43 - INV | DNML | STK */
674 	FPE_FLTDIV,	/* 44 - DZ | STK */
675 	FPE_FLTSUB,	/* 45 - INV | DZ | STK */
676 	FPE_FLTDIV,	/* 46 - DNML | DZ | STK */
677 	FPE_FLTSUB,	/* 47 - INV | DNML | DZ | STK */
678 	FPE_FLTOVF,	/* 48 - OFL | STK */
679 	FPE_FLTSUB,	/* 49 - INV | OFL | STK */
680 	FPE_FLTUND,	/* 4A - DNML | OFL | STK */
681 	FPE_FLTSUB,	/* 4B - INV | DNML | OFL | STK */
682 	FPE_FLTDIV,	/* 4C - DZ | OFL | STK */
683 	FPE_FLTSUB,	/* 4D - INV | DZ | OFL | STK */
684 	FPE_FLTDIV,	/* 4E - DNML | DZ | OFL | STK */
685 	FPE_FLTSUB,	/* 4F - INV | DNML | DZ | OFL | STK */
686 	FPE_FLTUND,	/* 50 - UFL | STK */
687 	FPE_FLTSUB,	/* 51 - INV | UFL | STK */
688 	FPE_FLTUND,	/* 52 - DNML | UFL | STK */
689 	FPE_FLTSUB,	/* 53 - INV | DNML | UFL | STK */
690 	FPE_FLTDIV,	/* 54 - DZ | UFL | STK */
691 	FPE_FLTSUB,	/* 55 - INV | DZ | UFL | STK */
692 	FPE_FLTDIV,	/* 56 - DNML | DZ | UFL | STK */
693 	FPE_FLTSUB,	/* 57 - INV | DNML | DZ | UFL | STK */
694 	FPE_FLTOVF,	/* 58 - OFL | UFL | STK */
695 	FPE_FLTSUB,	/* 59 - INV | OFL | UFL | STK */
696 	FPE_FLTUND,	/* 5A - DNML | OFL | UFL | STK */
697 	FPE_FLTSUB,	/* 5B - INV | DNML | OFL | UFL | STK */
698 	FPE_FLTDIV,	/* 5C - DZ | OFL | UFL | STK */
699 	FPE_FLTSUB,	/* 5D - INV | DZ | OFL | UFL | STK */
700 	FPE_FLTDIV,	/* 5E - DNML | DZ | OFL | UFL | STK */
701 	FPE_FLTSUB,	/* 5F - INV | DNML | DZ | OFL | UFL | STK */
702 	FPE_FLTRES,	/* 60 - IMP | STK */
703 	FPE_FLTSUB,	/* 61 - INV | IMP | STK */
704 	FPE_FLTUND,	/* 62 - DNML | IMP | STK */
705 	FPE_FLTSUB,	/* 63 - INV | DNML | IMP | STK */
706 	FPE_FLTDIV,	/* 64 - DZ | IMP | STK */
707 	FPE_FLTSUB,	/* 65 - INV | DZ | IMP | STK */
708 	FPE_FLTDIV,	/* 66 - DNML | DZ | IMP | STK */
709 	FPE_FLTSUB,	/* 67 - INV | DNML | DZ | IMP | STK */
710 	FPE_FLTOVF,	/* 68 - OFL | IMP | STK */
711 	FPE_FLTSUB,	/* 69 - INV | OFL | IMP | STK */
712 	FPE_FLTUND,	/* 6A - DNML | OFL | IMP | STK */
713 	FPE_FLTSUB,	/* 6B - INV | DNML | OFL | IMP | STK */
714 	FPE_FLTDIV,	/* 6C - DZ | OFL | IMP | STK */
715 	FPE_FLTSUB,	/* 6D - INV | DZ | OFL | IMP | STK */
716 	FPE_FLTDIV,	/* 6E - DNML | DZ | OFL | IMP | STK */
717 	FPE_FLTSUB,	/* 6F - INV | DNML | DZ | OFL | IMP | STK */
718 	FPE_FLTUND,	/* 70 - UFL | IMP | STK */
719 	FPE_FLTSUB,	/* 71 - INV | UFL | IMP | STK */
720 	FPE_FLTUND,	/* 72 - DNML | UFL | IMP | STK */
721 	FPE_FLTSUB,	/* 73 - INV | DNML | UFL | IMP | STK */
722 	FPE_FLTDIV,	/* 74 - DZ | UFL | IMP | STK */
723 	FPE_FLTSUB,	/* 75 - INV | DZ | UFL | IMP | STK */
724 	FPE_FLTDIV,	/* 76 - DNML | DZ | UFL | IMP | STK */
725 	FPE_FLTSUB,	/* 77 - INV | DNML | DZ | UFL | IMP | STK */
726 	FPE_FLTOVF,	/* 78 - OFL | UFL | IMP | STK */
727 	FPE_FLTSUB,	/* 79 - INV | OFL | UFL | IMP | STK */
728 	FPE_FLTUND,	/* 7A - DNML | OFL | UFL | IMP | STK */
729 	FPE_FLTSUB,	/* 7B - INV | DNML | OFL | UFL | IMP | STK */
730 	FPE_FLTDIV,	/* 7C - DZ | OFL | UFL | IMP | STK */
731 	FPE_FLTSUB,	/* 7D - INV | DZ | OFL | UFL | IMP | STK */
732 	FPE_FLTDIV,	/* 7E - DNML | DZ | OFL | UFL | IMP | STK */
733 	FPE_FLTSUB,	/* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */
734 };
735 
736 /*
737  * Preserve the FP status word, clear FP exceptions, then generate a SIGFPE.
738  *
739  * Clearing exceptions is necessary mainly to avoid IRQ13 bugs.  We now
740  * depend on longjmp() restoring a usable state.  Restoring the state
741  * or examining it might fail if we didn't clear exceptions.
742  *
743  * The error code chosen will be one of the FPE_... macros. It will be
744  * sent as the second argument to old BSD-style signal handlers and in
745  * the si_code field of the siginfo_t passed to SA_SIGINFO signal handlers.
746  *
747  * XXX the FP state is not preserved across signal handlers.  So signal
748  * handlers cannot afford to do FP unless they preserve the state or
749  * longjmp() out.  Both preserving the state and longjmp()ing may be
750  * destroyed by IRQ13 bugs.  Clearing FP exceptions is not an acceptable
751  * solution for signals other than SIGFPE.
752  */
753 int
754 npxtrap()
755 {
756 	critical_t savecrit;
757 	u_short control, status;
758 	u_long *exstat;
759 
760 	if (!npx_exists) {
761 		printf("npxtrap: npxthread = %p, curthread = %p, npx_exists = %d\n",
762 		       PCPU_GET(npxthread), curthread, npx_exists);
763 		panic("npxtrap from nowhere");
764 	}
765 	savecrit = critical_enter();
766 
767 	/*
768 	 * Interrupt handling (for another interrupt) may have pushed the
769 	 * state to memory.  Fetch the relevant parts of the state from
770 	 * wherever they are.
771 	 */
772 	if (PCPU_GET(npxthread) != curthread) {
773 		control = GET_FPU_CW(curthread);
774 		status = GET_FPU_SW(curthread);
775 	} else {
776 		fnstcw(&control);
777 		fnstsw(&status);
778 	}
779 
780 	exstat = GET_FPU_EXSW_PTR(curthread->td_pcb);
781 	*exstat = status;
782 	if (PCPU_GET(npxthread) != curthread)
783 		GET_FPU_SW(curthread) &= ~0x80bf;
784 	else
785 		fnclex();
786 	critical_exit(savecrit);
787 	return (fpetable[status & ((~control & 0x3f) | 0x40)]);
788 }
789 
790 /*
791  * Implement device not available (DNA) exception
792  *
793  * It would be better to switch FP context here (if curthread != npxthread)
794  * and not necessarily for every context switch, but it is too hard to
795  * access foreign pcb's.
796  */
797 int
798 npxdna()
799 {
800 	u_long *exstat;
801 	critical_t s;
802 
803 	if (!npx_exists)
804 		return (0);
805 	if (PCPU_GET(npxthread) != NULL) {
806 		printf("npxdna: npxthread = %p, curthread = %p\n",
807 		       PCPU_GET(npxthread), curthread);
808 		panic("npxdna");
809 	}
810 	s = critical_enter();
811 	stop_emulating();
812 	/*
813 	 * Record new context early in case frstor causes an IRQ13.
814 	 */
815 	PCPU_SET(npxthread, curthread);
816 
817 	exstat = GET_FPU_EXSW_PTR(PCPU_GET(curpcb));
818 	*exstat = 0;
819 	/*
820 	 * The following frstor may cause an IRQ13 when the state being
821 	 * restored has a pending error.  The error will appear to have been
822 	 * triggered by the current (npx) user instruction even when that
823 	 * instruction is a no-wait instruction that should not trigger an
824 	 * error (e.g., fnclex).  On at least one 486 system all of the
825 	 * no-wait instructions are broken the same as frstor, so our
826 	 * treatment does not amplify the breakage.  On at least one
827 	 * 386/Cyrix 387 system, fnclex works correctly while frstor and
828 	 * fnsave are broken, so our treatment breaks fnclex if it is the
829 	 * first FPU instruction after a context switch.
830 	 */
831 	fpurstor(&PCPU_GET(curpcb)->pcb_save);
832 	critical_exit(s);
833 
834 	return (1);
835 }
836 
837 /*
838  * Wrapper for fnsave instruction, partly to handle hardware bugs.  When npx
839  * exceptions are reported via IRQ13, spurious IRQ13's may be triggered by
840  * no-wait npx instructions.  See the Intel application note AP-578 for
841  * details.  This doesn't cause any additional complications here.  IRQ13's
842  * are inherently asynchronous unless the CPU is frozen to deliver them --
843  * one that started in userland may be delivered many instructions later,
844  * after the process has entered the kernel.  It may even be delivered after
845  * the fnsave here completes.  A spurious IRQ13 for the fnsave is handled in
846  * the same way as a very-late-arriving non-spurious IRQ13 from user mode:
847  * it is normally ignored at first because we set npxthread to NULL; it is
848  * normally retriggered in npxdna() after return to user mode.
849  *
850  * npxsave() must be called with interrupts disabled, so that it clears
851  * npxthread atomically with saving the state.  We require callers to do the
852  * disabling, since most callers need to disable interrupts anyway to call
853  * npxsave() atomically with checking npxthread.
854  *
855  * A previous version of npxsave() went to great lengths to execute fnsave
856  * with interrupts enabled in case executing it froze the CPU.  This case
857  * can't happen, at least for Intel CPU/NPX's.  Spurious IRQ13's don't imply
858  * spurious freezes.
859  */
860 void
861 npxsave(addr)
862 	union savefpu *addr;
863 {
864 
865 	stop_emulating();
866 	fpusave(addr);
867 
868 	start_emulating();
869 	PCPU_SET(npxthread, NULL);
870 }
871 
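/* Save the FPU state, using fxsave when SSE/fxsr is available, else fnsave. */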
872 static void
873 fpusave(addr)
874 	union savefpu *addr;
875 {
876 
877 #ifdef CPU_ENABLE_SSE
878 	if (cpu_fxsr)
879 		fxsave(addr);
880 	else
881 #endif
882 		fnsave(addr);
883 }
884 
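/* Restore the FPU state, using fxrstor when SSE/fxsr is available, else frstor. */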
885 static void
886 fpurstor(addr)
887 	union savefpu *addr;
888 {
889 
890 #ifdef CPU_ENABLE_SSE
891 	if (cpu_fxsr)
892 		fxrstor(addr);
893 	else
894 #endif
895 		frstor(addr);
896 }
897 
898 #ifdef I586_CPU_XXX
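/*
 * Time one call of func() on a 1MB malloc'ed buffer and return the elapsed
 * time in microseconds (at least 1).  npx_attach() uses this to compare
 * i586_bzero() against bzero() before installing the i586 optimized
 * copy/zero routines.
 */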
899 static long
900 timezero(funcname, func)
901 	const char *funcname;
902 	void (*func) __P((void *buf, size_t len));
903 
904 {
905 	void *buf;
906 #define	BUFSIZE		1048576
907 	long usec;
908 	struct timeval finish, start;
909 
910 	buf = malloc(BUFSIZE, M_TEMP, M_NOWAIT);
911 	if (buf == NULL)
912 		return (BUFSIZE);
913 	microtime(&start);
914 	(*func)(buf, BUFSIZE);
915 	microtime(&finish);
916 	usec = 1000000 * (finish.tv_sec - start.tv_sec) +
917 	    finish.tv_usec - start.tv_usec;
918 	if (usec <= 0)
919 		usec = 1;
920 	if (bootverbose)
921 		printf("%s bandwidth = %u kBps\n", funcname,
922 		    (u_int32_t)(((BUFSIZE >> 10) * 1000000) / usec));
923 	free(buf, M_TEMP);
924 	return (usec);
925 }
926 #endif /* I586_CPU_XXX */
927 
928 static device_method_t npx_methods[] = {
929 	/* Device interface */
930 	DEVMETHOD(device_identify,	npx_identify),
931 	DEVMETHOD(device_probe,		npx_probe),
932 	DEVMETHOD(device_attach,	npx_attach),
933 	DEVMETHOD(device_detach,	bus_generic_detach),
934 	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
935 	DEVMETHOD(device_suspend,	bus_generic_suspend),
936 	DEVMETHOD(device_resume,	bus_generic_resume),
937 
938 	{ 0, 0 }
939 };
940 
941 static driver_t npx_driver = {
942 	"npx",
943 	npx_methods,
944 	1,			/* no softc */
945 };
946 
947 static devclass_t npx_devclass;
948 
949 /*
950  * We prefer to attach to the root nexus so that the usual case (exception 16)
951  * doesn't describe the processor as being `on isa'.
952  */
953 DRIVER_MODULE(npx, nexus, npx_driver, npx_devclass, 0, 0);
954 
955 /*
956  * This sucks up the legacy ISA support assignments from PNPBIOS/ACPI.
957  */
958 static struct isa_pnp_id npxisa_ids[] = {
959 	{ 0x040cd041, "Legacy ISA coprocessor support" }, /* PNP0C04 */
960 	{ 0 }
961 };
962 
963 static int
964 npxisa_probe(device_t dev)
965 {
966 	int result;
967 	if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev, npxisa_ids)) <= 0) {
968 		device_quiet(dev);
969 	}
970 	return (result);
971 }
972 
973 static int
974 npxisa_attach(device_t dev)
975 {
976 	return (0);
977 }
978 
979 static device_method_t npxisa_methods[] = {
980 	/* Device interface */
981 	DEVMETHOD(device_probe,		npxisa_probe),
982 	DEVMETHOD(device_attach,	npxisa_attach),
983 	DEVMETHOD(device_detach,	bus_generic_detach),
984 	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
985 	DEVMETHOD(device_suspend,	bus_generic_suspend),
986 	DEVMETHOD(device_resume,	bus_generic_resume),
987 
988 	{ 0, 0 }
989 };
990 
991 static driver_t npxisa_driver = {
992 	"npxisa",
993 	npxisa_methods,
994 	1,			/* no softc */
995 };
996 
997 static devclass_t npxisa_devclass;
998 
999 DRIVER_MODULE(npxisa, isa, npxisa_driver, npxisa_devclass, 0, 0);
1000 #ifndef PC98
1001 DRIVER_MODULE(npxisa, acpi, npxisa_driver, npxisa_devclass, 0, 0);
1002 #endif
1003 
1004