/*	$NetBSD: fpu_implode.c,v 1.6 2005/12/11 12:18:42 christos Exp $ */

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)fpu_implode.c	8.1 (Berkeley) 6/11/93
 */

/*
 * FPU subroutines: `implode' internal format numbers into the machine's
 * `packed binary' format.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/systm.h>

#include <machine/fpu.h>
#include <machine/ieee.h>
#include <machine/ieeefp.h>

#include <powerpc/fpu/fpu_arith.h>
#include <powerpc/fpu/fpu_emu.h>
#include <powerpc/fpu/fpu_extern.h>
#include <powerpc/fpu/fpu_instr.h>

static int round(struct fpemu *, struct fpn *);
static int toinf(struct fpemu *, int);

/*
 * Round a number (algorithm from Motorola MC68882 manual, modified for
 * our internal format).  Set inexact exception if rounding is required.
 * Return true iff we rounded up.
 *
 * After rounding, we discard the guard and round bits by shifting right
 * 2 bits (a la fpu_shr(), but we do not bother with fp->fp_sticky).
 * This saves effort later.
 *
 * Note that we may leave the value 2.0 in fp->fp_mant; it is the caller's
 * responsibility to fix this if necessary.
 */
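/*
 * Worked example (illustrative, not from the original source; assumes
 * FP_NG == 2 as defined in fpu_emu.h): under round-to-nearest, if the
 * two bits below the retained lsb are gr == 1 (guard clear, round set),
 * (gr & 2) == 0 and we round down, raising only FPSCR_XX/FPSCR_FI.
 * If gr == 2 with sticky clear we have an exact tie, and the retained
 * low bit (m3 & 1) decides: an odd value is bumped up to even, an even
 * value stays.  If gr == 3, or gr == 2 with sticky set, we round up.
 */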
static int
round(struct fpemu *fe, struct fpn *fp)
{
	u_int m0, m1, m2, m3;
	int gr, s;
	FPU_DECL_CARRY;

	m0 = fp->fp_mant[0];
	m1 = fp->fp_mant[1];
	m2 = fp->fp_mant[2];
	m3 = fp->fp_mant[3];
	gr = m3 & 3;
	s = fp->fp_sticky;

	/* mant >>= FP_NG */
	m3 = (m3 >> FP_NG) | (m2 << (32 - FP_NG));
	m2 = (m2 >> FP_NG) | (m1 << (32 - FP_NG));
	m1 = (m1 >> FP_NG) | (m0 << (32 - FP_NG));
	m0 >>= FP_NG;

	if ((gr | s) == 0)	/* result is exact: no rounding needed */
		goto rounddown;

	fe->fe_cx |= FPSCR_XX | FPSCR_FI;	/* inexact */

	/* Go to rounddown to round down; break to round up. */
	switch ((fe->fe_fpscr) & FPSCR_RN) {
	case FP_RN:
	default:
		/*
		 * Round only if guard is set (gr & 2).  If guard is set,
		 * but round & sticky both clear, then we want to round
		 * but have a tie, so round to even, i.e., add 1 iff odd.
		 */
		if ((gr & 2) == 0)
			goto rounddown;
		if ((gr & 1) || fp->fp_sticky || (m3 & 1))
			break;
		goto rounddown;

	case FP_RZ:
		/* Round towards zero, i.e., down. */
		goto rounddown;

	case FP_RM:
		/* Round towards -Inf: up if negative, down if positive. */
		if (fp->fp_sign)
			break;
		goto rounddown;

	case FP_RP:
		/* Round towards +Inf: up if positive, down otherwise. */
		if (!fp->fp_sign)
			break;
		goto rounddown;
	}

	/* Bump low bit of mantissa, with carry. */
	fe->fe_cx |= FPSCR_FR;

	FPU_ADDS(m3, m3, 1);
	FPU_ADDCS(m2, m2, 0);
	FPU_ADDCS(m1, m1, 0);
	FPU_ADDC(m0, m0, 0);
	fp->fp_mant[0] = m0;
	fp->fp_mant[1] = m1;
	fp->fp_mant[2] = m2;
	fp->fp_mant[3] = m3;
	return (1);

rounddown:
	fp->fp_mant[0] = m0;
	fp->fp_mant[1] = m1;
	fp->fp_mant[2] = m2;
	fp->fp_mant[3] = m3;
	return (0);
}

/*
 * For overflow: return true if overflow is to go to +/-Inf, according
 * to the sign of the overflowing result.  If false, overflow is to go
 * to the largest magnitude value instead.
 */
static int
toinf(struct fpemu *fe, int sign)
{
	int inf;

	/* look at rounding direction */
	switch ((fe->fe_fpscr) & FPSCR_RN) {
	default:
	case FP_RN:		/* the nearest value is always Inf */
		inf = 1;
		break;

	case FP_RZ:		/* toward 0 => never towards Inf */
		inf = 0;
		break;

	case FP_RP:		/* toward +Inf iff positive */
		inf = sign == 0;
		break;

	case FP_RM:		/* toward -Inf iff negative */
		inf = sign;
		break;
	}
	if (inf)
		fe->fe_cx |= FPSCR_OX;
	return (inf);
}
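/*
 * Example of the cases above (illustrative only): a positive result
 * that overflows under FP_RM (round towards -Inf) makes toinf() return
 * 0, so the caller substitutes the largest finite magnitude instead
 * (e.g. SNG_EXP(SNG_EXP_INFNAN - 1) | SNG_MASK below); under FP_RN or
 * FP_RP the same overflow goes to +Inf and FPSCR_OX is raised here.
 */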
/*
 * fpn -> int (int value returned as return value).
 *
 * N.B.: this conversion always rounds towards zero (this is a peculiarity
 * of the SPARC instruction set, from which this emulator descends).
 */
u_int
fpu_ftoi(struct fpemu *fe, struct fpn *fp)
{
	u_int i;
	int sign, exp;

	sign = fp->fp_sign;
	switch (fp->fp_class) {
	case FPC_ZERO:
		return (0);

	case FPC_NUM:
		/*
		 * If the value is >= 2^32 (exp >= 32), overflow.
		 * Otherwise shift the value right into the last mantissa
		 * word (this will not exceed 0xffffffff), shifting any
		 * guard and round bits out into the sticky bit.  Then
		 * ``round'' towards zero, i.e., just set an inexact
		 * exception if sticky is set (see round()).
		 * If the result is > 0x80000000, or is positive and
		 * equals 0x80000000, overflow; otherwise the last
		 * fraction word is the result.
		 */
		if ((exp = fp->fp_exp) >= 32)
			break;
		/* NB: the following includes exp < 0 cases */
		if (fpu_shr(fp, FP_NMANT - 1 - exp) != 0)
			fe->fe_cx |= FPSCR_UX;
		i = fp->fp_mant[3];
		if (i >= ((u_int)0x80000000 + sign))
			break;
		return (sign ? -i : i);

	default:		/* Inf, qNaN, sNaN */
		break;
	}
	/* overflow: replace any inexact exception with invalid */
	fe->fe_cx |= FPSCR_VXCVI;
	return (0x7fffffff + sign);
}

/*
 * fpn -> extended int (high 32 bits of the 64-bit value returned as
 * return value; low 32 bits left in res[1]).
 *
 * N.B.: this conversion always rounds towards zero (this is a peculiarity
 * of the SPARC instruction set, from which this emulator descends).
 */
u_int
fpu_ftox(struct fpemu *fe, struct fpn *fp, u_int *res)
{
	u_int64_t i;
	int sign, exp;

	sign = fp->fp_sign;
	switch (fp->fp_class) {
	case FPC_ZERO:
		res[1] = 0;
		return (0);

	case FPC_NUM:
		/*
		 * If the value is >= 2^64 (exp >= 64), overflow.
		 * Otherwise shift the value right into the last two
		 * mantissa words (this will not exceed
		 * 0xffffffffffffffff), shifting any guard and round bits
		 * out into the sticky bit.  Then ``round'' towards zero,
		 * i.e., just set an inexact exception if sticky is set
		 * (see round()).
		 * If the result is > 0x8000000000000000, or is positive
		 * and equals 0x8000000000000000, overflow; otherwise the
		 * last two fraction words are the result.
		 */
		if ((exp = fp->fp_exp) >= 64)
			break;
		/* NB: the following includes exp < 0 cases */
		if (fpu_shr(fp, FP_NMANT - 1 - exp) != 0)
			fe->fe_cx |= FPSCR_UX;
		i = ((u_int64_t)fp->fp_mant[2] << 32) | fp->fp_mant[3];
		if (i >= ((u_int64_t)0x8000000000000000LL + sign))
			break;
		if (sign)
			i = -i;
		res[1] = (u_int)i;
		return ((u_int)(i >> 32));

	default:		/* Inf, qNaN, sNaN */
		break;
	}
	/* overflow: replace any inexact exception with invalid */
	fe->fe_cx |= FPSCR_VXCVI;
	i = (u_int64_t)0x7fffffffffffffffLL + sign;
	res[1] = (u_int)i;
	return ((u_int)(i >> 32));
}
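/*
 * Worked example for the truncating conversions above (illustrative,
 * not from the original source): for an internal value of -3.5,
 * fp_exp is 1, so fpu_shr() drops FP_NMANT - 2 bits, leaving 3 in
 * fp_mant[3] with nonzero sticky (the .5); the sticky raises the
 * exception noted above and fpu_ftoi() returns (u_int)-3.  The
 * rounding mode is never consulted: the result is -3, not -4.
 */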
/*
 * fpn -> single (32 bit single returned as return value).
 * We assume <= 29 bits in a single-precision fraction (1.f part).
 */
u_int
fpu_ftos(struct fpemu *fe, struct fpn *fp)
{
	u_int sign = fp->fp_sign << 31;
	int exp;

#define	SNG_EXP(e)	((e) << SNG_FRACBITS)	/* makes e an exponent */
#define	SNG_MASK	(SNG_EXP(1) - 1)	/* mask for fraction */

	/* Take care of non-numbers first. */
	if (ISNAN(fp)) {
		/*
		 * Preserve upper bits of NaN, per SPARC V8 appendix N.
		 * Note that fp->fp_mant[0] has the quiet bit set,
		 * even if it is classified as a signalling NaN.
		 */
		(void) fpu_shr(fp, FP_NMANT - 1 - SNG_FRACBITS);
		exp = SNG_EXP_INFNAN;
		goto done;
	}
	if (ISINF(fp))
		return (sign | SNG_EXP(SNG_EXP_INFNAN));
	if (ISZERO(fp))
		return (sign);

	/*
	 * Normals (including subnormals).  Drop all the fraction bits
	 * (including the explicit ``implied'' 1 bit) down into the
	 * single-precision range.  If the number is subnormal, move
	 * the ``implied'' 1 into the explicit range as well, and shift
	 * right to introduce leading zeroes.  Rounding then acts
	 * differently for normals and subnormals: the largest subnormal
	 * may round to the smallest normal (1.0 x 2^minexp), or may
	 * remain subnormal.  In the latter case, signal an underflow
	 * if the result was inexact or if underflow traps are enabled.
	 *
	 * Rounding a normal, on the other hand, always produces another
	 * normal (although either way the result might be too big for
	 * single precision, and cause an overflow).  If rounding a
	 * normal produces 2.0 in the fraction, we need not adjust that
	 * fraction at all, since both 1.0 and 2.0 are zero under the
	 * fraction mask.
	 *
	 * Note that the guard and round bits vanish from the number after
	 * rounding.
	 */
	if ((exp = fp->fp_exp + SNG_EXP_BIAS) <= 0) {	/* subnormal */
		/* -NG for g,r; -SNG_FRACBITS-exp for fraction */
		(void) fpu_shr(fp, FP_NMANT - FP_NG - SNG_FRACBITS - exp);
		if (round(fe, fp) && fp->fp_mant[3] == SNG_EXP(1))
			return (sign | SNG_EXP(1) | 0);
		if ((fe->fe_cx & FPSCR_FI) ||
		    (fe->fe_fpscr & FPSCR_UX))
			fe->fe_cx |= FPSCR_UX;
		return (sign | SNG_EXP(0) | fp->fp_mant[3]);
	}
	/* -FP_NG for g,r; -1 for implied 1; -SNG_FRACBITS for fraction */
	(void) fpu_shr(fp, FP_NMANT - FP_NG - 1 - SNG_FRACBITS);
#ifdef DIAGNOSTIC
	if ((fp->fp_mant[3] & SNG_EXP(1 << FP_NG)) == 0)
		panic("fpu_ftos");
#endif
	if (round(fe, fp) && fp->fp_mant[3] == SNG_EXP(2))
		exp++;
	if (exp >= SNG_EXP_INFNAN) {
		/* overflow to inf or to max single */
		if (toinf(fe, sign))
			return (sign | SNG_EXP(SNG_EXP_INFNAN));
		return (sign | SNG_EXP(SNG_EXP_INFNAN - 1) | SNG_MASK);
	}
done:
	/* phew, made it */
	return (sign | SNG_EXP(exp) | (fp->fp_mant[3] & SNG_MASK));
}

/*
 * fpn -> double (32 bit high-order result returned; 32-bit low order result
 * left in res[1]).  Assumes <= 61 bits in double precision fraction.
 *
 * This code mimics fpu_ftos; see it for comments.
 */
u_int
fpu_ftod(struct fpemu *fe, struct fpn *fp, u_int *res)
{
	u_int sign = fp->fp_sign << 31;
	int exp;

#define	DBL_EXP(e)	((e) << (DBL_FRACBITS & 31))
#define	DBL_MASK	(DBL_EXP(1) - 1)

	if (ISNAN(fp)) {
		(void) fpu_shr(fp, FP_NMANT - 1 - DBL_FRACBITS);
		exp = DBL_EXP_INFNAN;
		goto done;
	}
	if (ISINF(fp)) {
		sign |= DBL_EXP(DBL_EXP_INFNAN);
		goto zero;
	}
	if (ISZERO(fp)) {
zero:		res[1] = 0;
		return (sign);
	}

	if ((exp = fp->fp_exp + DBL_EXP_BIAS) <= 0) {
		(void) fpu_shr(fp, FP_NMANT - FP_NG - DBL_FRACBITS - exp);
		if (round(fe, fp) && fp->fp_mant[2] == DBL_EXP(1)) {
			res[1] = 0;
			return (sign | DBL_EXP(1) | 0);
		}
		if ((fe->fe_cx & FPSCR_FI) ||
		    (fe->fe_fpscr & FPSCR_UX))
			fe->fe_cx |= FPSCR_UX;
		exp = 0;
		goto done;
	}
	(void) fpu_shr(fp, FP_NMANT - FP_NG - 1 - DBL_FRACBITS);
	if (round(fe, fp) && fp->fp_mant[2] == DBL_EXP(2))
		exp++;
	if (exp >= DBL_EXP_INFNAN) {
		fe->fe_cx |= FPSCR_OX | FPSCR_UX;
		/* overflow to inf or to max double */
		if (toinf(fe, sign)) {
			res[1] = 0;
			return (sign | DBL_EXP(DBL_EXP_INFNAN) | 0);
		}
		res[1] = ~0;
		return (sign | DBL_EXP(DBL_EXP_INFNAN - 1) | DBL_MASK);
	}
done:
	res[1] = fp->fp_mant[3];
	return (sign | DBL_EXP(exp) | (fp->fp_mant[2] & DBL_MASK));
}
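/*
 * Packing example (illustrative): for 1.0, fp_exp is 0, so the biased
 * single-precision exponent is SNG_EXP_BIAS (127) and the fraction is
 * zero, giving 0x3f800000; the double-precision result is likewise
 * 0x3ff00000 in the high word with res[1] == 0.  The overflow returns
 * above follow the same layout: exponent field SNG_EXP_INFNAN (255)
 * with a zero fraction is Inf, while SNG_EXP_INFNAN - 1 with an
 * all-ones fraction is the largest finite single.
 */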
/*
 * Implode an fpn, writing the result into the given space.
 */
void
fpu_implode(struct fpemu *fe, struct fpn *fp, int type, u_int *space)
{

	switch (type) {
	case FTYPE_LNG:
		space[0] = fpu_ftox(fe, fp, space);
		DPRINTF(FPE_REG, ("fpu_implode: long %x %x\n",
			space[0], space[1]));
		break;

	case FTYPE_INT:
		space[0] = 0;
		space[1] = fpu_ftoi(fe, fp);
		DPRINTF(FPE_REG, ("fpu_implode: int %x\n",
			space[1]));
		break;

	case FTYPE_SNG:
		space[0] = fpu_ftos(fe, fp);
		DPRINTF(FPE_REG, ("fpu_implode: single %x\n",
			space[0]));
		break;

	case FTYPE_DBL:
		space[0] = fpu_ftod(fe, fp, space);
		DPRINTF(FPE_REG, ("fpu_implode: double %x %x\n",
			space[0], space[1]));
		break;

	default:
		panic("fpu_implode: invalid type %d", type);
	}
}
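/*
 * Usage sketch (an assumption about the surrounding emulator, not part
 * of this file): after an emulated operation leaves its result in a
 * struct fpn, the instruction dispatcher packs the destination register
 * roughly as
 *
 *	fpu_implode(fe, fp, FTYPE_DBL, (u_int *)&fs->fpreg[rd]);
 *
 * where fe is the struct fpemu, fp the result, and fs/rd are
 * hypothetical names for the saved FP register file and the target
 * register number.
 */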