/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004-2005 David Schultz <das@FreeBSD.ORG>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_FENV_H_
#define	_FENV_H_

#include <sys/_types.h>
#include <machine/endian.h>

#ifndef	__fenv_static
#define	__fenv_static	static
#endif

typedef	__uint32_t	fenv_t;
typedef	__uint32_t	fexcept_t;

/* Exception flags */
#define	FE_INEXACT	0x02000000
#define	FE_DIVBYZERO	0x04000000
#define	FE_UNDERFLOW	0x08000000
#define	FE_OVERFLOW	0x10000000
#define	FE_INVALID	0x20000000	/* all types of invalid FP ops */

/*
 * The PowerPC architecture has extra invalid flags that indicate the
 * specific type of invalid operation that occurred.  These flags may be
 * tested, set, and cleared---but not masked---separately.  All of
 * these bits are cleared when FE_INVALID is cleared, but only
 * FE_VXSOFT is set when FE_INVALID is explicitly set in software.
 * An illustrative example follows the definitions below.
 */
#define	FE_VXCVI	0x00000100	/* invalid integer convert */
#define	FE_VXSQRT	0x00000200	/* square root of a negative */
#define	FE_VXSOFT	0x00000400	/* software-requested exception */
#define	FE_VXVC		0x00080000	/* ordered comparison involving NaN */
#define	FE_VXIMZ	0x00100000	/* inf * 0 */
#define	FE_VXZDZ	0x00200000	/* 0 / 0 */
#define	FE_VXIDI	0x00400000	/* inf / inf */
#define	FE_VXISI	0x00800000	/* inf - inf */
#define	FE_VXSNAN	0x01000000	/* operation on a signalling NaN */
#define	FE_ALL_INVALID	(FE_VXCVI | FE_VXSQRT | FE_VXSOFT | FE_VXVC | \
			 FE_VXIMZ | FE_VXZDZ | FE_VXIDI | FE_VXISI | \
			 FE_VXSNAN | FE_INVALID)
#define	FE_ALL_EXCEPT	(FE_DIVBYZERO | FE_INEXACT | \
			 FE_ALL_INVALID | FE_OVERFLOW | FE_UNDERFLOW)
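
/*
 * Illustrative example (hypothetical caller code, not part of this
 * header): assuming <fenv.h> is included and FENV_ACCESS is on, a 0/0
 * division raises FE_INVALID together with the specific cause bit
 * FE_VXZDZ, and clearing FE_INVALID clears every FE_VX* bit as well:
 *
 *	volatile double zero = 0.0;
 *	volatile double q = zero / zero;	(sets FE_INVALID and FE_VXZDZ)
 *	if (fetestexcept(FE_ALL_INVALID) & FE_VXZDZ)
 *		handle_zero_over_zero();	(hypothetical handler)
 *	feclearexcept(FE_INVALID);		(also clears FE_VXZDZ, etc.)
 */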

/* Rounding modes */
#define	FE_TONEAREST	0x0000
#define	FE_TOWARDZERO	0x0001
#define	FE_UPWARD	0x0002
#define	FE_DOWNWARD	0x0003
#define	_ROUND_MASK	(FE_TONEAREST | FE_DOWNWARD | \
			 FE_UPWARD | FE_TOWARDZERO)

__BEGIN_DECLS

/* Default floating-point environment */
extern const fenv_t	__fe_dfl_env;
#define	FE_DFL_ENV	(&__fe_dfl_env)

/* We need to be able to map status flag positions to mask flag positions */
#define	_FPUSW_SHIFT	22
#define	_ENABLE_MASK	((FE_DIVBYZERO | FE_INEXACT | FE_INVALID | \
			 FE_OVERFLOW | FE_UNDERFLOW) >> _FPUSW_SHIFT)
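
/*
 * Worked example of the mapping (values taken from the definitions
 * above): FE_OVERFLOW is 0x10000000, the FPSCR overflow status bit (OX);
 * 0x10000000 >> 22 == 0x00000040, the corresponding overflow enable bit
 * (OE).  Likewise FE_INEXACT (0x02000000) >> 22 == 0x00000008 (XE).
 */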

#ifndef _SOFT_FLOAT
#ifdef __SPE__
#define	__mffs(__env) \
	__asm __volatile("mfspr %0, 512" : "=r" ((__env)->__bits.__reg))
#define	__mtfsf(__env) \
	__asm __volatile("mtspr 512,%0;isync" :: "r" ((__env).__bits.__reg))
#else
#define	__mffs(__env) \
	__asm __volatile("mffs %0" : "=f" ((__env)->__d))
#define	__mtfsf(__env) \
	__asm __volatile("mtfsf 255,%0" :: "f" ((__env).__d))
#endif
#else
#define	__mffs(__env)
#define	__mtfsf(__env)
#endif

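/*
 * On processors with a classic FPU, mffs copies the 32-bit FPSCR into
 * the low-order half of a floating-point register and mtfsf writes it
 * back from there, so the register is accessed through a double overlaid
 * with two 32-bit words: __reg holds the FPSCR and __junk is the unused
 * half.  The SPE variant above reads and writes the SPEFSCR (SPR 512)
 * through __reg directly.
 */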
union __fpscr {
	double __d;
	struct {
#if _BYTE_ORDER == _LITTLE_ENDIAN
		fenv_t __reg;
		__uint32_t __junk;
#else
		__uint32_t __junk;
		fenv_t __reg;
#endif
	} __bits;
};

__fenv_static inline int
feclearexcept(int __excepts)
{
	union __fpscr __r;

	if (__excepts & FE_INVALID)
		__excepts |= FE_ALL_INVALID;
	__mffs(&__r);
	__r.__bits.__reg &= ~__excepts;
	__mtfsf(__r);
	return (0);
}

__fenv_static inline int
fegetexceptflag(fexcept_t *__flagp, int __excepts)
{
	union __fpscr __r;

	__mffs(&__r);
	*__flagp = __r.__bits.__reg & __excepts;
	return (0);
}

__fenv_static inline int
fesetexceptflag(const fexcept_t *__flagp, int __excepts)
{
	union __fpscr __r;

	if (__excepts & FE_INVALID)
		__excepts |= FE_ALL_EXCEPT;
	__mffs(&__r);
	__r.__bits.__reg &= ~__excepts;
	__r.__bits.__reg |= *__flagp & __excepts;
	__mtfsf(__r);
	return (0);
}

__fenv_static inline int
feraiseexcept(int __excepts)
{
	union __fpscr __r;

	if (__excepts & FE_INVALID)
		__excepts |= FE_VXSOFT;
	__mffs(&__r);
	__r.__bits.__reg |= __excepts;
	__mtfsf(__r);
	return (0);
}

__fenv_static inline int
fetestexcept(int __excepts)
{
	union __fpscr __r;

	__mffs(&__r);
	return (__r.__bits.__reg & __excepts);
}

__fenv_static inline int
fegetround(void)
{
	union __fpscr __r;

	__mffs(&__r);
	return (__r.__bits.__reg & _ROUND_MASK);
}

__fenv_static inline int
fesetround(int __round)
{
	union __fpscr __r;

	if (__round & ~_ROUND_MASK)
		return (-1);
	__mffs(&__r);
	__r.__bits.__reg &= ~_ROUND_MASK;
	__r.__bits.__reg |= __round;
	__mtfsf(__r);
	return (0);
}

__fenv_static inline int
fegetenv(fenv_t *__envp)
{
	union __fpscr __r;

	__mffs(&__r);
	*__envp = __r.__bits.__reg;
	return (0);
}

__fenv_static inline int
feholdexcept(fenv_t *__envp)
{
	union __fpscr __r;

	__mffs(&__r);
	*__envp = __r.__bits.__reg;
	__r.__bits.__reg &= ~(FE_ALL_EXCEPT | _ENABLE_MASK);
	__mtfsf(__r);
	return (0);
}

__fenv_static inline int
fesetenv(const fenv_t *__envp)
{
	union __fpscr __r;

	__r.__bits.__reg = *__envp;
	__mtfsf(__r);
	return (0);
}

__fenv_static inline int
feupdateenv(const fenv_t *__envp)
{
	union __fpscr __r;

	__mffs(&__r);
	__r.__bits.__reg &= FE_ALL_EXCEPT;
	__r.__bits.__reg |= *__envp;
	__mtfsf(__r);
	return (0);
}

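/*
 * A common calling pattern for feholdexcept()/feupdateenv() (illustrative
 * sketch, hypothetical caller code): feholdexcept() saves the environment
 * and stops exceptions from trapping, spurious flags raised by an
 * intermediate computation are cleared, and feupdateenv() restores the
 * saved environment while keeping any flags that remain set:
 *
 *	fenv_t env;
 *	feholdexcept(&env);
 *	(...computation that may raise spurious exceptions...)
 *	feclearexcept(FE_INEXACT);	(discard an expected, harmless flag)
 *	feupdateenv(&env);
 */
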
#if __BSD_VISIBLE

/* We currently provide no external definitions of the functions below. */

static inline int
feenableexcept(int __mask)
{
	union __fpscr __r;
	fenv_t __oldmask;

	__mffs(&__r);
	__oldmask = __r.__bits.__reg;
	__r.__bits.__reg |= (__mask & FE_ALL_EXCEPT) >> _FPUSW_SHIFT;
	__mtfsf(__r);
	return ((__oldmask & _ENABLE_MASK) << _FPUSW_SHIFT);
}

static inline int
fedisableexcept(int __mask)
{
	union __fpscr __r;
	fenv_t __oldmask;

	__mffs(&__r);
	__oldmask = __r.__bits.__reg;
	__r.__bits.__reg &= ~((__mask & FE_ALL_EXCEPT) >> _FPUSW_SHIFT);
	__mtfsf(__r);
	return ((__oldmask & _ENABLE_MASK) << _FPUSW_SHIFT);
}

static inline int
fegetexcept(void)
{
	union __fpscr __r;

	__mffs(&__r);
	return ((__r.__bits.__reg & _ENABLE_MASK) << _FPUSW_SHIFT);
}
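
/*
 * Minimal usage sketch (hypothetical caller code): unmask the
 * division-by-zero exception so that it can trap, then restore the
 * previous mask.  The return value is the set of exceptions that were
 * enabled before the call:
 *
 *	int omask = feenableexcept(FE_DIVBYZERO);
 *	(...code expected not to divide by zero...)
 *	fedisableexcept(FE_ALL_EXCEPT);
 *	feenableexcept(omask);
 */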

#endif /* __BSD_VISIBLE */

__END_DECLS

#endif	/* !_FENV_H_ */