/*-
 * Copyright (c) 1990 William Jolitz.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)npx.c	7.2 (Berkeley) 5/12/91
 *	$Id: npx.c,v 1.51 1997/08/18 06:58:44 charnier Exp $
 */

#include "npx.h"
#if NNPX > 0

#include "opt_cpu.h"
#include "opt_math_emulate.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/conf.h>
#include <sys/proc.h>
#ifdef NPX_DEBUG
#include <sys/syslog.h>
#endif
#include <sys/signalvar.h>

#include <machine/asmacros.h>
#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/clock.h>
#include <machine/specialreg.h>

#include <i386/isa/icu.h>
#include <i386/isa/isa_device.h>
#include <i386/isa/intr_machdep.h>
#include <i386/isa/isa.h>

/*
 * 387 and 287 Numeric Coprocessor Extension (NPX) Driver.
 */

/* Configuration flags. */
#define	NPX_DISABLE_I586_OPTIMIZED_BCOPY	(1 << 0)
#define	NPX_DISABLE_I586_OPTIMIZED_BZERO	(1 << 1)
#define	NPX_DISABLE_I586_OPTIMIZED_COPYIO	(1 << 2)
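/*
 * These bits are tested against dvp->id_flags, i.e. the "flags" value
 * configured for the npx0 device, and let the config file disable the
 * i586-optimized copy/zero routines installed in npxattach().
 */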
76 
77 /* XXX - should be in header file. */
78 extern void (*bcopy_vector) __P((const void *from, void *to, size_t len));
79 extern void (*ovbcopy_vector) __P((const void *from, void *to, size_t len));
80 extern int (*copyin_vector) __P((const void *udaddr, void *kaddr, size_t len));
81 extern int (*copyout_vector) __P((const void *kaddr, void *udaddr, size_t len));
82 
83 void	i586_bcopy __P((const void *from, void *to, size_t len));
84 void	i586_bzero __P((void *buf, size_t len));
85 int	i586_copyin __P((const void *udaddr, void *kaddr, size_t len));
86 int	i586_copyout __P((const void *kaddr, void *udaddr, size_t len));
87 
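/*
 * Inline wrappers for the raw FPU control instructions.  start_emulating()
 * sets CR0_TS (via smsw/lmsw) so that the next coprocessor or wait
 * instruction traps; stop_emulating() clears CR0_TS with clts.
 */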
#ifdef	__GNUC__

#define	fldcw(addr)		__asm("fldcw %0" : : "m" (*(addr)))
#define	fnclex()		__asm("fnclex")
#define	fninit()		__asm("fninit")
#define	fnop()			__asm("fnop")
#define	fnsave(addr)		__asm("fnsave %0" : "=m" (*(addr)))
#define	fnstcw(addr)		__asm("fnstcw %0" : "=m" (*(addr)))
#define	fnstsw(addr)		__asm("fnstsw %0" : "=m" (*(addr)))
#define	fp_divide_by_0()	__asm("fldz; fld1; fdiv %st,%st(1); fnop")
#define	frstor(addr)		__asm("frstor %0" : : "m" (*(addr)))
#define	start_emulating()	__asm("smsw %%ax; orb %0,%%al; lmsw %%ax" \
				      : : "n" (CR0_TS) : "ax")
#define	stop_emulating()	__asm("clts")

#else	/* not __GNUC__ */

void	fldcw		__P((caddr_t addr));
void	fnclex		__P((void));
void	fninit		__P((void));
void	fnop		__P((void));
void	fnsave		__P((caddr_t addr));
void	fnstcw		__P((caddr_t addr));
void	fnstsw		__P((caddr_t addr));
void	fp_divide_by_0	__P((void));
void	frstor		__P((caddr_t addr));
void	start_emulating	__P((void));
void	stop_emulating	__P((void));

#endif	/* __GNUC__ */

typedef u_char bool_t;

static	int	npxattach	__P((struct isa_device *dvp));
static	int	npxprobe	__P((struct isa_device *dvp));
static	int	npxprobe1	__P((struct isa_device *dvp));

struct	isa_driver npxdriver = {
	npxprobe, npxattach, "npx",
};

int	hw_float;		/* XXX currently just alias for npx_exists */

SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint,
	CTLFLAG_RD, &hw_float, 0,
	"Floating point instructions executed in hardware");

static u_int	npx0_imask = SWI_CLOCK_MASK;

#ifndef SMP	/* XXX per-cpu on smp */
struct proc	*npxproc;		/* process owning the FPU context, if any */
#endif

static	bool_t			npx_ex16;	/* exception 16 error reporting works */
static	bool_t			npx_exists;	/* an FPU was found */
static	struct gate_descriptor	npx_idt_probeintr;	/* IDT gate for probeintr() */
static	int			npx_intrno;	/* IDT vector of the npx IRQ */
static	volatile u_int		npx_intrs_while_probing;	/* counted by probeintr() */
static	bool_t			npx_irq13;	/* error reporting via IRQ13 */
static	volatile u_int		npx_traps_while_probing;	/* counted by probetrap() */

#ifndef SMP
/*
 * Special interrupt handlers.  Someday intr0-intr15 will be used to count
 * interrupts.  We'll still need a special exception 16 handler.  The busy
 * latch stuff in probeintr() can be moved to npxprobe().
 */
inthand_t probeintr;

asm
("
	.text
	.p2align 2,0x90
" __XSTRING(CNAME(probeintr)) ":
	ss
	incl	" __XSTRING(CNAME(npx_intrs_while_probing)) "
	pushl	%eax
	movb	$0x20,%al	# EOI (asm in strings loses cpp features)
	outb	%al,$0xa0	# IO_ICU2
	outb	%al,$0x20	# IO_ICU1
	movb	$0,%al
	outb	%al,$0xf0	# clear BUSY# latch
	popl	%eax
	iret
");

inthand_t probetrap;
asm
("
	.text
	.p2align 2,0x90
" __XSTRING(CNAME(probetrap)) ":
	ss
	incl	" __XSTRING(CNAME(npx_traps_while_probing)) "
	fnclex
	iret
");
#endif /* SMP */


/*
 * Probe routine.  Initialize cr0 to give correct behaviour for [f]wait
 * whether the device exists or not (XXX should be elsewhere).  Set flags
 * to tell npxattach() what to do.  Modify device struct if npx doesn't
 * need to use interrupts.  Return -1 if the device uses no I/O registers,
 * or IO_NPXSIZE if IRQ13 will be used.
 */
static int
npxprobe(dvp)
	struct isa_device *dvp;
{
#ifdef SMP

	return npxprobe1(dvp);

#else /* SMP */

	int	result;
	u_long	save_eflags;
	u_char	save_icu1_mask;
	u_char	save_icu2_mask;
	struct	gate_descriptor save_idt_npxintr;
	struct	gate_descriptor save_idt_npxtrap;
	/*
	 * This routine is now just a wrapper for npxprobe1(), to install
	 * special npx interrupt and trap handlers, to enable npx interrupts
	 * and to disable other interrupts.  Someday isa_configure() will
	 * install suitable handlers and run with interrupts enabled so we
	 * won't need to do so much here.
	 */
	npx_intrno = NRSVIDT + ffs(dvp->id_irq) - 1;
	save_eflags = read_eflags();
	disable_intr();
	save_icu1_mask = inb(IO_ICU1 + 1);
	save_icu2_mask = inb(IO_ICU2 + 1);
	save_idt_npxintr = idt[npx_intrno];
	save_idt_npxtrap = idt[16];
	outb(IO_ICU1 + 1, ~(IRQ_SLAVE | dvp->id_irq));
	outb(IO_ICU2 + 1, ~(dvp->id_irq >> 8));
	setidt(16, probetrap, SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(npx_intrno, probeintr, SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	npx_idt_probeintr = idt[npx_intrno];
	enable_intr();
	result = npxprobe1(dvp);
	disable_intr();
	outb(IO_ICU1 + 1, save_icu1_mask);
	outb(IO_ICU2 + 1, save_icu2_mask);
	idt[npx_intrno] = save_idt_npxintr;
	idt[16] = save_idt_npxtrap;
	write_eflags(save_eflags);
	return (result);

#endif /* SMP */
}

static int
npxprobe1(dvp)
	struct isa_device *dvp;
{
	u_short control;
	u_short status;

	/*
	 * Partially reset the coprocessor, if any.  Some BIOSes don't reset
	 * it after a warm boot.
	 */
	outb(0xf1, 0);		/* full reset on some systems, NOP on others */
	outb(0xf0, 0);		/* clear BUSY# latch */
	/*
	 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
	 * instructions.  We must set the CR0_MP bit and use the CR0_TS
	 * bit to control the trap, because setting the CR0_EM bit does
	 * not cause WAIT instructions to trap.  It's important to trap
	 * WAIT instructions - otherwise the "wait" variants of no-wait
	 * control instructions would degenerate to the "no-wait" variants
	 * after FP context switches but work correctly otherwise.  It's
	 * particularly important to trap WAITs when there is no NPX -
	 * otherwise the "wait" variants would always degenerate.
	 *
	 * Try setting CR0_NE to get correct error reporting on 486DXs.
	 * Setting it should fail or do nothing on lesser processors.
	 */
	load_cr0(rcr0() | CR0_MP | CR0_NE);
	/*
	 * But don't trap while we're probing.
	 */
	stop_emulating();
	/*
	 * Finish resetting the coprocessor, if any.  If there is an error
	 * pending, then we may get a bogus IRQ13, but probeintr() will handle
	 * it OK.  Bogus halts have never been observed, but we enabled
	 * IRQ13 and cleared the BUSY# latch early to handle them anyway.
	 */
	fninit();

#ifdef SMP

	/*
	 * Exception 16 MUST work for SMP.
	 */
	npx_irq13 = 0;
	npx_ex16 = hw_float = npx_exists = 1;
	dvp->id_irq = 0;	/* zap the interrupt */
	/*
	 * special return value to flag that we do not
	 * actually use any I/O registers
	 */
	return (-1);

#else /* SMP */

	/*
	 * Don't use fwait here because it might hang.
	 * Don't use fnop here because it usually hangs if there is no FPU.
	 */
	DELAY(1000);		/* wait for any IRQ13 */
#ifdef DIAGNOSTIC
	if (npx_intrs_while_probing != 0)
		printf("fninit caused %u bogus npx interrupt(s)\n",
		       npx_intrs_while_probing);
	if (npx_traps_while_probing != 0)
		printf("fninit caused %u bogus npx trap(s)\n",
		       npx_traps_while_probing);
#endif
	/*
	 * Check for a status of mostly zero.
	 */
	status = 0x5a5a;
	fnstsw(&status);
	if ((status & 0xb8ff) == 0) {
		/*
		 * Good, now check for a proper control word.
		 */
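		/*
		 * fninit should leave the control word reading 0x037f
		 * (0x03ff on an 8087); the mask below ignores bits 6-7,
		 * which differ between coprocessor models.
		 */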
		control = 0x5a5a;
		fnstcw(&control);
		if ((control & 0x1f3f) == 0x033f) {
			hw_float = npx_exists = 1;
			/*
			 * We have an npx, now divide by 0 to see if exception
			 * 16 works.
			 */
			control &= ~(1 << 2);	/* enable divide by 0 trap */
			fldcw(&control);
			npx_traps_while_probing = npx_intrs_while_probing = 0;
			fp_divide_by_0();
			if (npx_traps_while_probing != 0) {
				/*
				 * Good, exception 16 works.
				 */
				npx_ex16 = 1;
				dvp->id_irq = 0;	/* zap the interrupt */
				/*
				 * special return value to flag that we do not
				 * actually use any I/O registers
				 */
				return (-1);
			}
			if (npx_intrs_while_probing != 0) {
				/*
				 * Bad, we are stuck with IRQ13.
				 */
				npx_irq13 = 1;
				/*
				 * npxattach would be too late to set npx0_imask.
				 */
				npx0_imask |= dvp->id_irq;
				return (IO_NPXSIZE);
			}
			/*
			 * Worse, even IRQ13 is broken.  Use emulator.
			 */
		}
	}
	/*
	 * Probe failed, but we want to get to npxattach to initialize the
	 * emulator and say that it has been installed.  XXX handle devices
	 * that aren't really devices better.
	 */
	dvp->id_irq = 0;
	/*
	 * special return value to flag that we do not
	 * actually use any I/O registers
	 */
	return (-1);

#endif /* SMP */
}

/*
 * Attach routine.  Announce which error-reporting interface was found and
 * wire the NPX into the system.
 */
int
npxattach(dvp)
	struct isa_device *dvp;
{
	/* The caller has printed "irq 13" for the npx_irq13 case. */
	if (!npx_irq13) {
		printf("npx%d: ", dvp->id_unit);
		if (npx_ex16)
			printf("INT 16 interface\n");
#if defined(MATH_EMULATE) || defined(GPL_MATH_EMULATE)
		else if (npx_exists) {
			printf("error reporting broken; using 387 emulator\n");
			hw_float = npx_exists = 0;
		} else
			printf("387 emulator\n");
#else
		else
			printf("no 387 emulator in kernel!\n");
#endif
	}
	npxinit(__INITIAL_NPXCW__);

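	/*
	 * The i586-optimized copy/zero routines use FPU registers, so they
	 * are only safe when exception 16 error reporting works.
	 */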
#if defined(I586_CPU)
	if (cpu_class == CPUCLASS_586 && npx_ex16) {
		if (!(dvp->id_flags & NPX_DISABLE_I586_OPTIMIZED_BCOPY)) {
			bcopy_vector = i586_bcopy;
			ovbcopy_vector = i586_bcopy;
		}
		if (!(dvp->id_flags & NPX_DISABLE_I586_OPTIMIZED_BZERO))
			bzero = i586_bzero;
		if (!(dvp->id_flags & NPX_DISABLE_I586_OPTIMIZED_COPYIO)) {
			copyin_vector = i586_copyin;
			copyout_vector = i586_copyout;
		}
	}
#endif

	return (1);		/* XXX unused */
}

/*
 * Initialize floating point unit.
 */
void
npxinit(control)
	u_short control;
{
	struct save87 dummy;

	if (!npx_exists)
		return;
	/*
	 * fninit has the same h/w bugs as fnsave.  Use the detoxified
	 * fnsave to throw away any junk in the fpu.  npxsave() initializes
	 * the fpu and sets npxproc = NULL as important side effects.
	 */
	npxsave(&dummy);
	stop_emulating();
	fldcw(&control);
	if (curpcb != NULL)
		fnsave(&curpcb->pcb_savefpu);
	start_emulating();
}

/*
 * Free coprocessor (if we have it).
 */
void
npxexit(p)
	struct proc *p;
{

	if (p == npxproc)
		npxsave(&curpcb->pcb_savefpu);
#ifdef NPX_DEBUG
	if (npx_exists) {
		u_int	masked_exceptions;

		masked_exceptions = curpcb->pcb_savefpu.sv_env.en_cw
				    & curpcb->pcb_savefpu.sv_env.en_sw & 0x7f;
		/*
		 * Log exceptions that would have trapped with the old
		 * control word (overflow, divide by 0, and invalid operand).
		 */
		if (masked_exceptions & 0x0d)
			log(LOG_ERR,
	"pid %d (%s) exited with masked floating point exceptions 0x%02x\n",
			    p->p_pid, p->p_comm, masked_exceptions);
	}
#endif
}

/*
 * Preserve the FP status word, clear FP exceptions, then generate a SIGFPE.
 *
 * Clearing exceptions is necessary mainly to avoid IRQ13 bugs.  We now
 * depend on longjmp() restoring a usable state.  Restoring the state
 * or examining it might fail if we didn't clear exceptions.
 *
 * XXX there is no standard way to tell SIGFPE handlers about the error
 * state.  The old interface:
 *
 *	void handler(int sig, int code, struct sigcontext *scp);
 *
 * is broken because it is non-ANSI and because the FP state is not in
 * struct sigcontext.
 *
 * XXX the FP state is not preserved across signal handlers.  So signal
 * handlers cannot afford to do FP unless they preserve the state or
 * longjmp() out.  Both preserving the state and longjmp()ing may be
 * destroyed by IRQ13 bugs.  Clearing FP exceptions is not an acceptable
 * solution for signals other than SIGFPE.
 */
void
npxintr(unit)
	int unit;
{
	int code;
	struct intrframe *frame;

	if (npxproc == NULL || !npx_exists) {
		printf("npxintr: npxproc = %p, curproc = %p, npx_exists = %d\n",
		       npxproc, curproc, npx_exists);
		panic("npxintr from nowhere");
	}
	if (npxproc != curproc) {
		printf("npxintr: npxproc = %p, curproc = %p, npx_exists = %d\n",
		       npxproc, curproc, npx_exists);
		panic("npxintr from non-current process");
	}

	outb(0xf0, 0);		/* clear the BUSY# latch */
	fnstsw(&curpcb->pcb_savefpu.sv_ex_sw);
	fnclex();
	fnop();

	/*
	 * Pass exception to process.
	 */
	frame = (struct intrframe *)&unit;	/* XXX */
	if ((ISPL(frame->if_cs) == SEL_UPL) || (frame->if_eflags & PSL_VM)) {
		/*
		 * Interrupt is essentially a trap, so we can afford to call
		 * the SIGFPE handler (if any) as soon as the interrupt
		 * returns.
		 *
		 * XXX little or nothing is gained from this, and plenty is
		 * lost - the interrupt frame has to contain the trap frame
		 * (this is otherwise only necessary for the rescheduling trap
		 * in doreti, and the frame for that could easily be set up
		 * just before it is used).
		 */
		curproc->p_md.md_regs = (struct trapframe *)&frame->if_es;
#ifdef notyet
		/*
		 * Encode the appropriate code for detailed information on
		 * this exception.
		 */
		code = XXX_ENCODE(curpcb->pcb_savefpu.sv_ex_sw);
#else
		code = 0;	/* XXX */
#endif
		trapsignal(curproc, SIGFPE, code);
	} else {
		/*
		 * Nested interrupt.  These losers occur when:
		 *	o an IRQ13 is bogusly generated at a bogus time, e.g.:
		 *		o immediately after an fnsave or frstor of an
		 *		  error state.
		 *		o a couple of 386 instructions after
		 *		  "fstpl _memvar" causes a stack overflow.
		 *	  These are especially nasty when combined with a
		 *	  trace trap.
		 *	o an IRQ13 occurs at the same time as another higher-
		 *	  priority interrupt.
		 *
		 * Treat them like a true async interrupt.
		 */
		psignal(curproc, SIGFPE);
	}
}

/*
 * Implement device not available (DNA) exception
 *
 * It would be better to switch FP context here (if curproc != npxproc)
 * and not necessarily for every context switch, but it is too hard to
 * access foreign pcb's.
 */
int
npxdna()
{
	if (!npx_exists)
		return (0);
	if (npxproc != NULL) {
		printf("npxdna: npxproc = %p, curproc = %p\n",
		       npxproc, curproc);
		panic("npxdna");
	}
	stop_emulating();
	/*
	 * Record new context early in case frstor causes an IRQ13.
	 */
	npxproc = curproc;
	curpcb->pcb_savefpu.sv_ex_sw = 0;
	/*
	 * The following frstor may cause an IRQ13 when the state being
	 * restored has a pending error.  The error will appear to have been
	 * triggered by the current (npx) user instruction even when that
	 * instruction is a no-wait instruction that should not trigger an
	 * error (e.g., fnclex).  On at least one 486 system all of the
	 * no-wait instructions are broken the same as frstor, so our
	 * treatment does not amplify the breakage.  On at least one
	 * 386/Cyrix 387 system, fnclex works correctly while frstor and
	 * fnsave are broken, so our treatment breaks fnclex if it is the
	 * first FPU instruction after a context switch.
	 */
	frstor(&curpcb->pcb_savefpu);

	return (1);
}

/*
 * Wrapper for fnsave instruction to handle h/w bugs.  If there is an error
 * pending, then fnsave generates a bogus IRQ13 on some systems.  Force
 * any IRQ13 to be handled immediately, and then ignore it.  This routine is
 * often called at splhigh so it must not use many system services.  In
 * particular, it's much easier to install a special handler than to
 * guarantee that it's safe to use npxintr() and its supporting code.
 */
void
npxsave(addr)
	struct save87 *addr;
{
#ifdef SMP

	stop_emulating();
	fnsave(addr);
	/* fnop(); */
	start_emulating();
	npxproc = NULL;

#else /* SMP */

	u_char	icu1_mask;
	u_char	icu2_mask;
	u_char	old_icu1_mask;
	u_char	old_icu2_mask;
	struct gate_descriptor	save_idt_npxintr;

	disable_intr();
	old_icu1_mask = inb(IO_ICU1 + 1);
	old_icu2_mask = inb(IO_ICU2 + 1);
	save_idt_npxintr = idt[npx_intrno];
	outb(IO_ICU1 + 1, old_icu1_mask & ~(IRQ_SLAVE | npx0_imask));
	outb(IO_ICU2 + 1, old_icu2_mask & ~(npx0_imask >> 8));
	idt[npx_intrno] = npx_idt_probeintr;
	enable_intr();
	stop_emulating();
	fnsave(addr);
	fnop();
	start_emulating();
	npxproc = NULL;
	disable_intr();
	icu1_mask = inb(IO_ICU1 + 1);	/* masks may have changed */
	icu2_mask = inb(IO_ICU2 + 1);
	outb(IO_ICU1 + 1,
	     (icu1_mask & ~npx0_imask) | (old_icu1_mask & npx0_imask));
	outb(IO_ICU2 + 1,
	     (icu2_mask & ~(npx0_imask >> 8))
	     | (old_icu2_mask & (npx0_imask >> 8)));
	idt[npx_intrno] = save_idt_npxintr;
	enable_intr();		/* back to usual state */

#endif /* SMP */
}

#endif /* NNPX > 0 */