/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2020 OmniOS Community Edition (OmniOSce) Association.
 */

/*      Copyright (c) 1990, 1991 UNIX System Laboratories, Inc. */
/*      Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T   */
/*        All Rights Reserved   */

/*      Copyright (c) 1987, 1988 Microsoft Corporation  */
/*        All Rights Reserved   */

/*
 * Copyright (c) 2009, Intel Corporation.
 * All rights reserved.
 */

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/x86_archext.h>

#if defined(__lint)
#include <sys/types.h>
#include <sys/fp.h>
#else
#include "assym.h"
#endif

#if defined(__lint)

uint_t
fpu_initial_probe(void)
{ return (0); }

#else	/* __lint */

	/*
	 * Returns zero if x87 "chip" is present(!)
	 */
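	/*
	 * (Note: after fninit, a present FPU reports a status word of
	 * zero, so the fnstsw/movzbl below yield 0; with no FPU the read
	 * returns whatever floats on the bus, typically nonzero.)
	 */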
	ENTRY_NP(fpu_initial_probe)
	CLTS
	fninit
	fnstsw	%ax
	movzbl	%al, %eax
	ret
	SET_SIZE(fpu_initial_probe)

#endif	/* __lint */

#if defined(__lint)

/*ARGSUSED*/
void
fxsave_insn(struct fxsave_state *fx)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY_NP(fxsave_insn)
	FXSAVEQ	((%rdi))
	ret
	SET_SIZE(fxsave_insn)

#elif defined(__i386)

	ENTRY_NP(fxsave_insn)
	movl	4(%esp), %eax
	fxsave	(%eax)
	ret
	SET_SIZE(fxsave_insn)

#endif

#endif	/* __lint */

#if defined(__i386)

/*
 * If (num1/num2 > num1/num3) the FPU has the FDIV bug.
 */
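/*
 * (A sketch of the check below: with num2 = 15.0 and num3 = 14.999999,
 * a correct divider yields num1/num2 < num1/num3, so the
 * fcompp/sahf/jae sequence returns 0.  The flawed Pentium FDIV lookup
 * table is inaccurate enough for these operands to reverse that
 * ordering, in which case we return 1.)
 */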

#if defined(__lint)

int
fpu_probe_pentium_fdivbug(void)
{ return (0); }

#else	/* __lint */

	ENTRY_NP(fpu_probe_pentium_fdivbug)
	fldl	.num1
	fldl	.num2
	fdivr	%st(1), %st
	fxch	%st(1)
	fdivl	.num3
	fcompp
	fstsw	%ax
	sahf
	jae	0f
	movl	$1, %eax
	ret

0:	xorl	%eax, %eax
	ret

	.align	4
.num1:	.4byte	0xbce4217d	/* 4.999999 */
	.4byte	0x4013ffff
.num2:	.4byte	0x0		/* 15.0 */
	.4byte	0x402e0000
.num3:	.4byte	0xde7210bf	/* 14.999999 */
	.4byte	0x402dffff
	SET_SIZE(fpu_probe_pentium_fdivbug)

#endif	/* __lint */

/*
 * To cope with processors that do not implement fxsave/fxrstor
 * instructions, patch hot paths in the kernel to use them only
 * when that feature has been detected.
 */
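/*
 * (Note: _HOT_PATCH(src, dst, n) is read here, from the call sites
 * below, as copying n bytes of replacement text from src over the
 * instruction at dst, with _HOT_PATCH_PROLOG/_HOT_PATCH_EPILOG
 * bracketing the sequence; the macros are defined elsewhere.)
 */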

#if defined(__lint)

void
patch_sse(void)
{}

void
patch_sse2(void)
{}

void
patch_xsave(void)
{}

#else	/* __lint */

	ENTRY_NP(patch_sse)
	_HOT_PATCH_PROLOG
	/
	/	frstor (%ebx); nop	-> fxrstor (%ebx)
	/
	_HOT_PATCH(_fxrstor_ebx_insn, _patch_fxrstor_ebx, 3)
	/
	/	lock; xorl $0, (%esp)	-> sfence; ret
	/
	_HOT_PATCH(_sfence_ret_insn, _patch_sfence_ret, 4)
	_HOT_PATCH_EPILOG
	ret
_fxrstor_ebx_insn:			/ see ndptrap_frstor()
	fxrstor	(%ebx)
_ldmxcsr_ebx_insn:			/ see resume_from_zombie()
	ldmxcsr	(%ebx)
_sfence_ret_insn:			/ see membar_producer()
	.byte	0xf, 0xae, 0xf8		/ [sfence instruction]
	ret
	SET_SIZE(patch_sse)

	ENTRY_NP(patch_sse2)
	_HOT_PATCH_PROLOG
	/
	/	lock; xorl $0, (%esp)	-> lfence; ret
	/
	_HOT_PATCH(_lfence_ret_insn, _patch_lfence_ret, 4)
	_HOT_PATCH_EPILOG
	ret
_lfence_ret_insn:			/ see membar_consumer()
	.byte	0xf, 0xae, 0xe8		/ [lfence instruction]
	ret
	SET_SIZE(patch_sse2)

	/*
	 * Patch lazy fp restore instructions in the trap handler
	 * to use xrstor instead of frstor
	 */
	ENTRY_NP(patch_xsave)
	_HOT_PATCH_PROLOG
	/
	/	frstor (%ebx); nop	-> xrstor (%ebx)
	/
	_HOT_PATCH(_xrstor_ebx_insn, _patch_xrstor_ebx, 3)
	_HOT_PATCH_EPILOG
	ret
_xrstor_ebx_insn:			/ see ndptrap_frstor()
	#xrstor (%ebx)
	.byte	0x0f, 0xae, 0x2b
	SET_SIZE(patch_xsave)
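
/*
 * (Note: xsave/xrstor appear above and below as hand-assembled .byte
 * sequences, presumably because the build assembler predates those
 * instructions; the intended instruction is given in the adjacent
 * comment in each case.)
 */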

#endif	/* __lint */
#endif	/* __i386 */

#if defined(__amd64)
#if defined(__lint)

void
patch_xsave(void)
{}

#else	/* __lint */

	/*
	 * Patch lazy fp restore instructions in the trap handler
	 * to use xrstor instead of fxrstorq
	 */
	ENTRY_NP(patch_xsave)
	pushq	%rbx
	pushq	%rbp
	pushq	%r15
	/
	/	FXRSTORQ (%rbx);	-> xrstor (%rbx)
	/ hot_patch(_xrstor_rbx_insn, _patch_xrstorq_rbx, 4)
	/
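	/ (The loop below applies the 4-byte replacement one byte at a
	/ time: per the register comments, each hot_patch_kernel_text()
	/ call takes the patch address, one instruction byte, and a
	/ count of 1.)
	/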
	leaq	_patch_xrstorq_rbx(%rip), %rbx
	leaq	_xrstor_rbx_insn(%rip), %rbp
	movq	$4, %r15
1:
	movq	%rbx, %rdi			/* patch address */
	movzbq	(%rbp), %rsi			/* instruction byte */
	movq	$1, %rdx			/* count */
	call	hot_patch_kernel_text
	addq	$1, %rbx
	addq	$1, %rbp
	subq	$1, %r15
	jnz	1b

	popq	%r15
	popq	%rbp
	popq	%rbx
	ret

_xrstor_rbx_insn:			/ see ndptrap_frstor()
	#rex.W=1 (.byte 0x48)
	#xrstor (%rbx)
	.byte	0x48, 0x0f, 0xae, 0x2b
	SET_SIZE(patch_xsave)

#endif	/* __lint */
#endif	/* __amd64 */

/*
 * One of these routines is called from any lwp with floating
 * point context as part of the prolog of a context switch.
 */
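
/*
 * (Note on the flag protocol, as read from the tests below: FPU_EN in
 * fpu_ctx.fpu_flags marks an lwp that is using the FPU, and FPU_VALID
 * is set once its state has been saved into the context structure.
 * The save is skipped unless the flags are exactly FPU_EN.)
 */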

#if defined(__lint)

/*ARGSUSED*/
void
xsave_ctxt(void *arg)
{}

/*ARGSUSED*/
void
fpxsave_ctxt(void *arg)
{}

/*ARGSUSED*/
void
fpnsave_ctxt(void *arg)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY_NP(fpxsave_ctxt)
	cmpl	$FPU_EN, FPU_CTX_FPU_FLAGS(%rdi)
	jne	1f

	movl	$_CONST(FPU_VALID|FPU_EN), FPU_CTX_FPU_FLAGS(%rdi)
	FXSAVEQ	(FPU_CTX_FPU_REGS(%rdi))

	/*
	 * On certain AMD processors, the "exception pointers" i.e. the last
	 * instruction pointer, last data pointer, and last opcode
	 * are saved by the fxsave instruction ONLY if the exception summary
	 * bit is set.
	 *
	 * To ensure that we don't leak these values into the next context
	 * on the cpu, we could just issue an fninit here, but that's
	 * rather slow and so we issue an instruction sequence that
	 * clears them more quickly, if a little obscurely.
	 */
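	/*
	 * (ES is bit 7 of the x87 status word, hence the btw $7 below;
	 * the ffree/fildl pair rewrites the exception pointers without
	 * the cost of a full fninit.)
	 */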
	btw	$7, FXSAVE_STATE_FSW(%rdi)	/* Test saved ES bit */
	jnc	0f				/* jump if ES = 0 */
	fnclex		/* clear pending x87 exceptions */
0:	ffree	%st(7)	/* clear tag bit to remove possible stack overflow */
	fildl	.fpzero_const(%rip)
			/* dummy load changes all exception pointers */
	STTS(%rsi)	/* trap on next fpu touch */
1:	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(fpxsave_ctxt)

	ENTRY_NP(xsave_ctxt)
	cmpl	$FPU_EN, FPU_CTX_FPU_FLAGS(%rdi)
	jne	1f
	movl	$_CONST(FPU_VALID|FPU_EN), FPU_CTX_FPU_FLAGS(%rdi)
	/*
	 * Set up the xsave feature bitmap in EDX:EAX; xsave stores the
	 * state components named by this mask (intersected with XCR0).
	 */
	movl	FPU_CTX_FPU_XSAVE_MASK(%rdi), %eax
	movl	FPU_CTX_FPU_XSAVE_MASK+4(%rdi), %edx
	leaq	FPU_CTX_FPU_REGS(%rdi), %rsi
	#xsave	(%rsi)
	.byte	0x0f, 0xae, 0x26

	/*
	 * (see notes above about "exception pointers")
	 * TODO: does it apply to any machine that uses xsave?
	 */
	btw	$7, FXSAVE_STATE_FSW(%rdi)	/* Test saved ES bit */
	jnc	0f				/* jump if ES = 0 */
	fnclex		/* clear pending x87 exceptions */
0:	ffree	%st(7)	/* clear tag bit to remove possible stack overflow */
	fildl	.fpzero_const(%rip)
			/* dummy load changes all exception pointers */
	STTS(%rsi)	/* trap on next fpu touch */
1:	ret
	SET_SIZE(xsave_ctxt)

#elif defined(__i386)

	ENTRY_NP(fpnsave_ctxt)
	movl	4(%esp), %eax		/* a struct fpu_ctx */
	cmpl	$FPU_EN, FPU_CTX_FPU_FLAGS(%eax)
	jne	1f

	movl	$_CONST(FPU_VALID|FPU_EN), FPU_CTX_FPU_FLAGS(%eax)
	fnsave	FPU_CTX_FPU_REGS(%eax)
			/* (fnsave also reinitializes x87 state) */
	STTS(%edx)	/* trap on next fpu touch */
1:	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(fpnsave_ctxt)

	ENTRY_NP(fpxsave_ctxt)
	movl	4(%esp), %eax		/* a struct fpu_ctx */
	cmpl	$FPU_EN, FPU_CTX_FPU_FLAGS(%eax)
	jne	1f

	movl	$_CONST(FPU_VALID|FPU_EN), FPU_CTX_FPU_FLAGS(%eax)
	fxsave	FPU_CTX_FPU_REGS(%eax)
			/* (see notes above about "exception pointers") */
	btw	$7, FXSAVE_STATE_FSW(%eax)	/* Test saved ES bit */
	jnc	0f				/* jump if ES = 0 */
	fnclex		/* clear pending x87 exceptions */
0:	ffree	%st(7)	/* clear tag bit to remove possible stack overflow */
	fildl	.fpzero_const
			/* dummy load changes all exception pointers */
	STTS(%edx)	/* trap on next fpu touch */
1:	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(fpxsave_ctxt)

	ENTRY_NP(xsave_ctxt)
	movl	4(%esp), %ecx		/* a struct fpu_ctx */
	cmpl	$FPU_EN, FPU_CTX_FPU_FLAGS(%ecx)
	jne	1f

	movl	$_CONST(FPU_VALID|FPU_EN), FPU_CTX_FPU_FLAGS(%ecx)
	movl	FPU_CTX_FPU_XSAVE_MASK(%ecx), %eax
	movl	FPU_CTX_FPU_XSAVE_MASK+4(%ecx), %edx
	leal	FPU_CTX_FPU_REGS(%ecx), %ecx
	#xsave	(%ecx)
	.byte	0x0f, 0xae, 0x21

	/*
	 * (see notes above about "exception pointers")
	 * TODO: does it apply to any machine that uses xsave?
	 */
	btw	$7, FXSAVE_STATE_FSW(%ecx)	/* Test saved ES bit */
	jnc	0f				/* jump if ES = 0 */
	fnclex		/* clear pending x87 exceptions */
0:	ffree	%st(7)	/* clear tag bit to remove possible stack overflow */
	fildl	.fpzero_const
			/* dummy load changes all exception pointers */
	STTS(%edx)	/* trap on next fpu touch */
1:	ret
	SET_SIZE(xsave_ctxt)

#endif	/* __i386 */

	.align	8
.fpzero_const:
	.4byte	0x0
	.4byte	0x0

#endif	/* __lint */


#if defined(__lint)

/*ARGSUSED*/
void
fpsave(struct fnsave_state *f)
{}

/*ARGSUSED*/
void
fpxsave(struct fxsave_state *f)
{}

/*ARGSUSED*/
void
xsave(struct xsave_state *f, uint64_t m)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY_NP(fpxsave)
	CLTS
	FXSAVEQ	((%rdi))
	fninit				/* clear exceptions, init x87 tags */
	STTS(%rdi)			/* set TS bit in %cr0 (disable FPU) */
	ret
	SET_SIZE(fpxsave)

	ENTRY_NP(xsave)
	CLTS
	movl	%esi, %eax		/* bv mask */
	movq	%rsi, %rdx
	shrq	$32, %rdx
	#xsave	(%rdi)
	.byte	0x0f, 0xae, 0x27

	fninit				/* clear exceptions, init x87 tags */
	STTS(%rdi)			/* set TS bit in %cr0 (disable FPU) */
	ret
	SET_SIZE(xsave)
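
	/*
	 * (Note: under the SysV AMD64 ABI the 64-bit mask argument m
	 * arrives in %rsi; the movl/movq/shrq sequence above splits it
	 * into the EDX:EAX pair that xsave consumes.)
	 */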

#elif defined(__i386)

	ENTRY_NP(fpsave)
	CLTS
	movl	4(%esp), %eax
	fnsave	(%eax)
	STTS(%eax)			/* set TS bit in %cr0 (disable FPU) */
	ret
	SET_SIZE(fpsave)

	ENTRY_NP(fpxsave)
	CLTS
	movl	4(%esp), %eax
	fxsave	(%eax)
	fninit				/* clear exceptions, init x87 tags */
	STTS(%eax)			/* set TS bit in %cr0 (disable FPU) */
	ret
	SET_SIZE(fpxsave)

	ENTRY_NP(xsave)
	CLTS
	movl	4(%esp), %ecx
	movl	8(%esp), %eax
	movl	12(%esp), %edx
	#xsave	(%ecx)
	.byte	0x0f, 0xae, 0x21

	fninit				/* clear exceptions, init x87 tags */
	STTS(%eax)			/* set TS bit in %cr0 (disable FPU) */
	ret
	SET_SIZE(xsave)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(__lint)

/*ARGSUSED*/
void
fprestore(struct fnsave_state *f)
{}

/*ARGSUSED*/
void
fpxrestore(struct fxsave_state *f)
{}

/*ARGSUSED*/
void
xrestore(struct xsave_state *f, uint64_t m)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY_NP(fpxrestore)
	CLTS
	FXRSTORQ	((%rdi))
	ret
	SET_SIZE(fpxrestore)

	ENTRY_NP(xrestore)
	CLTS
	movl	%esi, %eax		/* bv mask */
	movq	%rsi, %rdx
	shrq	$32, %rdx
	#xrstor	(%rdi)
	.byte	0x0f, 0xae, 0x2f
	ret
	SET_SIZE(xrestore)

#elif defined(__i386)

	ENTRY_NP(fprestore)
	CLTS
	movl	4(%esp), %eax
	frstor	(%eax)
	ret
	SET_SIZE(fprestore)

	ENTRY_NP(fpxrestore)
	CLTS
	movl	4(%esp), %eax
	fxrstor	(%eax)
	ret
	SET_SIZE(fpxrestore)

	ENTRY_NP(xrestore)
	CLTS
	movl	4(%esp), %ecx
	movl	8(%esp), %eax
	movl	12(%esp), %edx
	#xrstor	(%ecx)
	.byte	0x0f, 0xae, 0x29
	ret
	SET_SIZE(xrestore)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * Disable the floating point unit.
 */
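
/*
 * ("Disable" here means setting CR0.TS via the STTS macro; the next
 * FPU instruction then raises #NM, letting the kernel restore or
 * initialize state lazily.)
 */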

#if defined(__lint)

void
fpdisable(void)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY_NP(fpdisable)
	STTS(%rdi)			/* set TS bit in %cr0 (disable FPU) */
	ret
	SET_SIZE(fpdisable)

#elif defined(__i386)

	ENTRY_NP(fpdisable)
	STTS(%eax)
	ret
	SET_SIZE(fpdisable)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * Initialize the fpu hardware.
 */

#if defined(__lint)

void
fpinit(void)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY_NP(fpinit)
	CLTS
	cmpl	$FP_XSAVE, fp_save_mech
	je	1f

	/* fxsave */
	leaq	sse_initial(%rip), %rax
	FXRSTORQ	((%rax))		/* load clean initial state */
	ret

1:	/* xsave */
	leaq	avx_initial(%rip), %rcx
	xorl	%edx, %edx
	movl	$XFEATURE_AVX, %eax
	btl	$X86FSET_AVX, x86_featureset
	cmovael	%edx, %eax
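	/*
	 * (bt copies the tested feature bit into CF; cmovae, taken when
	 * CF is clear, zeroes %eax so that XFEATURE_AVX is requested
	 * only on AVX-capable processors.  The i386 variant below uses
	 * the same trick.)
	 */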
	orl	$(XFEATURE_LEGACY_FP | XFEATURE_SSE), %eax
	/* xrstor (%rcx) */
	.byte	0x0f, 0xae, 0x29		/* load clean initial state */
	ret
	SET_SIZE(fpinit)

#elif defined(__i386)

	ENTRY_NP(fpinit)
	CLTS
	cmpl	$FP_FXSAVE, fp_save_mech
	je	1f
	cmpl	$FP_XSAVE, fp_save_mech
	je	2f

	/* fnsave */
	fninit
	movl	$x87_initial, %eax
	frstor	(%eax)			/* load clean initial state */
	ret

1:	/* fxsave */
	movl	$sse_initial, %eax
	fxrstor	(%eax)			/* load clean initial state */
	ret

2:	/* xsave */
	movl	$avx_initial, %ecx
	xorl	%edx, %edx
	movl	$XFEATURE_AVX, %eax
	bt	$X86FSET_AVX, x86_featureset
	cmovael	%edx, %eax
	orl	$(XFEATURE_LEGACY_FP | XFEATURE_SSE), %eax
	/* xrstor (%ecx) */
	.byte	0x0f, 0xae, 0x29	/* load clean initial state */
	ret
	SET_SIZE(fpinit)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * Clears FPU exception state.
 * Returns the FP status word.
 */
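
/*
 * (In both routines below, the old value is captured with fnstsw or
 * stmxcsr before the flags are cleared, so the caller sees the
 * exception bits that were pending at entry.)
 */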

#if defined(__lint)

uint32_t
fperr_reset(void)
{ return (0); }

uint32_t
fpxerr_reset(void)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY_NP(fperr_reset)
	CLTS
	xorl	%eax, %eax
	fnstsw	%ax
	fnclex
	ret
	SET_SIZE(fperr_reset)

	ENTRY_NP(fpxerr_reset)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$0x10, %rsp		/* make some temporary space */
	CLTS
	stmxcsr	(%rsp)
	movl	(%rsp), %eax
	andl	$_BITNOT(SSE_MXCSR_EFLAGS), (%rsp)
	ldmxcsr	(%rsp)			/* clear processor exceptions */
	leave
	ret
	SET_SIZE(fpxerr_reset)

#elif defined(__i386)

	ENTRY_NP(fperr_reset)
	CLTS
	xorl	%eax, %eax
	fnstsw	%ax
	fnclex
	ret
	SET_SIZE(fperr_reset)

	ENTRY_NP(fpxerr_reset)
	CLTS
	subl	$4, %esp		/* make some temporary space */
	stmxcsr	(%esp)
	movl	(%esp), %eax
	andl	$_BITNOT(SSE_MXCSR_EFLAGS), (%esp)
	ldmxcsr	(%esp)			/* clear processor exceptions */
	addl	$4, %esp
	ret
	SET_SIZE(fpxerr_reset)

#endif	/* __i386 */
#endif	/* __lint */
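
/*
 * (fpgetcwsw below returns the x87 control word in the upper 16 bits
 * of the result and the status word in the lower 16, per the
 * fnstsw/fnstcw store layout in the routine.)
 */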

#if defined(__lint)

uint32_t
fpgetcwsw(void)
{
	return (0);
}

#else	/* __lint */

#if defined(__amd64)

	ENTRY_NP(fpgetcwsw)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$0x10, %rsp		/* make some temporary space	*/
	CLTS
	fnstsw	(%rsp)			/* store the status word	*/
	fnstcw	2(%rsp)			/* store the control word	*/
	movl	(%rsp), %eax		/* put both in %eax		*/
	leave
	ret
	SET_SIZE(fpgetcwsw)

#elif defined(__i386)

	ENTRY_NP(fpgetcwsw)
	CLTS
	subl	$4, %esp		/* make some temporary space	*/
	fnstsw	(%esp)			/* store the status word	*/
	fnstcw	2(%esp)			/* store the control word	*/
	movl	(%esp), %eax		/* put both in %eax		*/
	addl	$4, %esp
	ret
	SET_SIZE(fpgetcwsw)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * Returns the MXCSR register.
 */

#if defined(__lint)

uint32_t
fpgetmxcsr(void)
{
	return (0);
}

#else	/* __lint */

#if defined(__amd64)

	ENTRY_NP(fpgetmxcsr)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$0x10, %rsp		/* make some temporary space */
	CLTS
	stmxcsr	(%rsp)
	movl	(%rsp), %eax
	leave
	ret
	SET_SIZE(fpgetmxcsr)

#elif defined(__i386)

	ENTRY_NP(fpgetmxcsr)
	CLTS
	subl	$4, %esp		/* make some temporary space */
	stmxcsr	(%esp)
	movl	(%esp), %eax
	addl	$4, %esp
	ret
	SET_SIZE(fpgetmxcsr)

#endif	/* __i386 */
#endif	/* __lint */
804