/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1990 Andrew Moore, Talke Studio
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _MACHINE_IEEEFP_H_
#define _MACHINE_IEEEFP_H_

/* Deprecated historical FPU control interface */

#include <x86/x86_ieeefp.h>

/*
 * IEEE floating point type, constant and function definitions.
 * XXX: {FP,SSE}*FLD and {FP,SSE}*OFF are undocumented pollution.
 */

/*
 * SSE mxcsr register bit-field masks.
 */
#define	SSE_STKY_FLD	0x3f	/* exception flags */
#define	SSE_DAZ_FLD	0x40	/* Denormals are zero */
#define	SSE_MSKS_FLD	0x1f80	/* exception masks field */
#define	SSE_RND_FLD	0x6000	/* rounding control */
#define	SSE_FZ_FLD	0x8000	/* flush to zero on underflow */

/*
 * SSE mxcsr register bit-field offsets (shift counts).
 */
#define	SSE_STKY_OFF	0	/* exception flags offset */
#define	SSE_DAZ_OFF	6	/* DAZ exception mask offset */
#define	SSE_MSKS_OFF	7	/* other exception masks offset */
#define	SSE_RND_OFF	13	/* rounding control offset */
#define	SSE_FZ_OFF	15	/* flush to zero offset */
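
/*
 * Illustrative sketch: the masks and shift counts above decompose an
 * mxcsr value the same way the inlines below do, e.g. reading the SSE
 * rounding mode directly:
 *
 *	unsigned _mxcsr;
 *	fp_rnd_t _rnd;
 *
 *	__stmxcsr(&_mxcsr);
 *	_rnd = (fp_rnd_t)((_mxcsr & SSE_RND_FLD) >> SSE_RND_OFF);
 *
 * The SSE rounding-control encodings match the x87 FP_RND_FLD encodings
 * from <x86/x86_ieeefp.h>, which is what lets __fpsetround() below
 * shadow a single value into both units.
 */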

/*
 * General notes about conflicting SSE vs FP status bits.
 * This code assumes that software will not fiddle with the control
 * bits of the SSE and x87 units in such a way as to get them out of
 * sync and still expect this to work.  Break this at your peril.
 * Because this is based on the i386 port, the x87 state is used for
 * the fpget*() functions and is shadowed into the SSE state by the
 * fpset*() functions.  For the dual-source __fpgetsticky(), the two
 * sets of flags are merged together.
 */
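
/*
 * Usage sketch (illustrative; FP_RZ comes from <x86/x86_ieeefp.h>):
 * switch to truncation for a computation and restore the caller's mode
 * afterwards.  fpsetround() updates both the x87 control word and
 * mxcsr, so a later fpgetround(), which reads only the x87 state,
 * still reports the mode in effect for SSE as well.
 *
 *	fp_rnd_t _ornd;
 *
 *	_ornd = fpsetround(FP_RZ);
 *	... truncating arithmetic ...
 *	(void)fpsetround(_ornd);
 */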

static __inline fp_rnd_t
__fpgetround(void)
{
	unsigned short _cw;

	__fnstcw(&_cw);
	return ((fp_rnd_t)((_cw & FP_RND_FLD) >> FP_RND_OFF));
}

static __inline fp_rnd_t
__fpsetround(fp_rnd_t _m)
{
	fp_rnd_t _p;
	unsigned _mxcsr;
	unsigned short _cw, _newcw;

	__fnstcw(&_cw);
	_p = (fp_rnd_t)((_cw & FP_RND_FLD) >> FP_RND_OFF);
	_newcw = _cw & ~FP_RND_FLD;
	_newcw |= (_m << FP_RND_OFF) & FP_RND_FLD;
	__fnldcw(_cw, _newcw);
	__stmxcsr(&_mxcsr);
	_mxcsr &= ~SSE_RND_FLD;
	_mxcsr |= (_m << SSE_RND_OFF) & SSE_RND_FLD;
	__ldmxcsr(&_mxcsr);
	return (_p);
}

/*
 * Get or set the rounding precision for x87 arithmetic operations.
 * There is no equivalent SSE mode or control.
 */

static __inline fp_prec_t
__fpgetprec(void)
{
	unsigned short _cw;

	__fnstcw(&_cw);
	return ((fp_prec_t)((_cw & FP_PRC_FLD) >> FP_PRC_OFF));
}

static __inline fp_prec_t
__fpsetprec(fp_prec_t _m)
{
	fp_prec_t _p;
	unsigned short _cw, _newcw;

	__fnstcw(&_cw);
	_p = (fp_prec_t)((_cw & FP_PRC_FLD) >> FP_PRC_OFF);
	_newcw = _cw & ~FP_PRC_FLD;
	_newcw |= (_m << FP_PRC_OFF) & FP_PRC_FLD;
	__fnldcw(_cw, _newcw);
	return (_p);
}
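
/*
 * Usage sketch (illustrative; FP_PD comes from <x86/x86_ieeefp.h>):
 * limit x87 arithmetic to 53-bit (double) precision, e.g. to avoid
 * double rounding of doubles, then restore the previous setting.
 * This affects the x87 unit only; SSE precision is determined by the
 * instruction used.
 *
 *	fp_prec_t _oprec;
 *
 *	_oprec = fpsetprec(FP_PD);
 *	... double-precision x87 arithmetic ...
 *	(void)fpsetprec(_oprec);
 */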

/*
 * Get or set the exception mask.
 * Note that the x87 mask bits are inverted by the API -- a mask bit of 1
 * means disable for x87 and SSE, but for fp*mask() it means enable.
 */

static __inline fp_except_t
__fpgetmask(void)
{
	unsigned short _cw;

	__fnstcw(&_cw);
	return ((~_cw & FP_MSKS_FLD) >> FP_MSKS_OFF);
}

static __inline fp_except_t
__fpsetmask(fp_except_t _m)
{
	fp_except_t _p;
	unsigned _mxcsr;
	unsigned short _cw, _newcw;

	__fnstcw(&_cw);
	_p = (~_cw & FP_MSKS_FLD) >> FP_MSKS_OFF;
	_newcw = _cw & ~FP_MSKS_FLD;
	_newcw |= (~_m << FP_MSKS_OFF) & FP_MSKS_FLD;
	__fnldcw(_cw, _newcw);
	__stmxcsr(&_mxcsr);
	/* XXX should we clear non-ieee SSE_DAZ_FLD and SSE_FZ_FLD ? */
	_mxcsr &= ~SSE_MSKS_FLD;
	_mxcsr |= (~_m << SSE_MSKS_OFF) & SSE_MSKS_FLD;
	__ldmxcsr(&_mxcsr);
	return (_p);
}
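
/*
 * Usage sketch (illustrative; FP_X_INV and FP_X_DZ come from
 * <x86/x86_ieeefp.h>): unmask (i.e. trap on) invalid operation and
 * divide-by-zero only, leaving every other exception masked, and later
 * restore the previous mask.  In the fp*mask() API a set bit means the
 * exception is enabled, even though the hardware mask bits written
 * above use the opposite sense.
 *
 *	fp_except_t _omask;
 *
 *	_omask = fpsetmask(FP_X_INV | FP_X_DZ);
 *	...
 *	(void)fpsetmask(_omask);
 */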

static __inline fp_except_t
__fpgetsticky(void)
{
	unsigned _ex, _mxcsr;
	unsigned short _sw;

	__fnstsw(&_sw);
	_ex = (_sw & FP_STKY_FLD) >> FP_STKY_OFF;
	__stmxcsr(&_mxcsr);
	_ex |= (_mxcsr & SSE_STKY_FLD) >> SSE_STKY_OFF;
	return ((fp_except_t)_ex);
}
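
/*
 * Usage sketch (illustrative; FP_X_OFL comes from <x86/x86_ieeefp.h>):
 * because the x87 status word and mxcsr sticky flags are merged above,
 * a single test catches an overflow raised by either unit.
 *
 *	if (fpgetsticky() & FP_X_OFL)
 *		... an overflow has occurred since the flag was cleared ...
 */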

#if !defined(__IEEEFP_NOINLINES__)

#define	fpgetmask()	__fpgetmask()
#define	fpgetprec()	__fpgetprec()
#define	fpgetround()	__fpgetround()
#define	fpgetsticky()	__fpgetsticky()
#define	fpsetmask(m)	__fpsetmask(m)
#define	fpsetprec(m)	__fpsetprec(m)
#define	fpsetround(m)	__fpsetround(m)

#else /* __IEEEFP_NOINLINES__ */

/* Augment the userland declarations. */
__BEGIN_DECLS
extern fp_rnd_t    fpgetround(void);
extern fp_rnd_t    fpsetround(fp_rnd_t);
extern fp_except_t fpgetmask(void);
extern fp_except_t fpsetmask(fp_except_t);
extern fp_except_t fpgetsticky(void);
extern fp_except_t fpsetsticky(fp_except_t);
fp_prec_t	fpgetprec(void);
fp_prec_t	fpsetprec(fp_prec_t);
__END_DECLS

#endif /* !__IEEEFP_NOINLINES__ */

#endif /* !_MACHINE_IEEEFP_H_ */