/*	$NetBSD: fpu_implode.c,v 1.6 2005/12/11 12:18:42 christos Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)fpu_implode.c	8.1 (Berkeley) 6/11/93
 */

/*
 * FPU subroutines: `implode' internal format numbers into the machine's
 * `packed binary' format.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/systm.h>

#include <machine/fpu.h>
#include <machine/ieee.h>
#include <machine/ieeefp.h>
#include <machine/reg.h>

#include <powerpc/fpu/fpu_arith.h>
#include <powerpc/fpu/fpu_emu.h>
#include <powerpc/fpu/fpu_extern.h>
#include <powerpc/fpu/fpu_instr.h>

static int round(struct fpemu *, struct fpn *);
static int toinf(struct fpemu *, int);

/*
 * Round a number (algorithm from Motorola MC68882 manual, modified for
 * our internal format).  Set inexact exception if rounding is required.
 * Return true iff we rounded up.
 *
 * After rounding, we discard the guard and round bits by shifting right
 * 2 bits (a la fpu_shr(), but we do not bother with fp->fp_sticky).
 * This saves effort later.
 *
 * Note that we may leave the value 2.0 in fp->fp_mant; it is the caller's
 * responsibility to fix this if necessary.
 */
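/*
 * Concretely, for the FP_RN case below (the other modes just pick a fixed
 * direction from the sign), with the two guard/round bits this file keeps
 * at the bottom of fp_mant[3]:
 *
 *	guard	round	sticky	low bit		action
 *	  0	  x	  x	  x		round down
 *	  1	  1	  x	  x		round up
 *	  1	  0	  1	  x		round up
 *	  1	  0	  0	  0		tie, low bit even: round down
 *	  1	  0	  0	  1		tie, low bit odd: round up
 *
 * ("low bit" is the lowest retained mantissa bit after the shift; this
 * table only restates the FP_RN logic of the switch below.)
 */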
static int
round(struct fpemu *fe, struct fpn *fp)
{
	u_int m0, m1, m2, m3;
	int gr, s;
	FPU_DECL_CARRY;

	m0 = fp->fp_mant[0];
	m1 = fp->fp_mant[1];
	m2 = fp->fp_mant[2];
	m3 = fp->fp_mant[3];
	gr = m3 & 3;
	s = fp->fp_sticky;

	/* mant >>= FP_NG */
	m3 = (m3 >> FP_NG) | (m2 << (32 - FP_NG));
	m2 = (m2 >> FP_NG) | (m1 << (32 - FP_NG));
	m1 = (m1 >> FP_NG) | (m0 << (32 - FP_NG));
	m0 >>= FP_NG;

	if ((gr | s) == 0)	/* result is exact: no rounding needed */
		goto rounddown;

	fe->fe_cx |= FPSCR_XX|FPSCR_FI;	/* inexact */

	/* Go to rounddown to round down; break to round up. */
	switch ((fe->fe_fpscr) & FPSCR_RN) {

	case FP_RN:
	default:
		/*
		 * Round only if guard is set (gr & 2).  If guard is set,
		 * but round & sticky both clear, then we want to round
		 * but have a tie, so round to even, i.e., add 1 iff odd.
		 */
		if ((gr & 2) == 0)
			goto rounddown;
		if ((gr & 1) || fp->fp_sticky || (m3 & 1))
			break;
		goto rounddown;

	case FP_RZ:
		/* Round towards zero, i.e., down. */
		goto rounddown;

	case FP_RM:
		/* Round towards -Inf: up if negative, down if positive. */
		if (fp->fp_sign)
			break;
		goto rounddown;

	case FP_RP:
		/* Round towards +Inf: up if positive, down otherwise. */
		if (!fp->fp_sign)
			break;
		goto rounddown;
	}

	/* Bump low bit of mantissa, with carry. */
	fe->fe_cx |= FPSCR_FR;

	FPU_ADDS(m3, m3, 1);
	FPU_ADDCS(m2, m2, 0);
	FPU_ADDCS(m1, m1, 0);
	FPU_ADDC(m0, m0, 0);
	fp->fp_mant[0] = m0;
	fp->fp_mant[1] = m1;
	fp->fp_mant[2] = m2;
	fp->fp_mant[3] = m3;
	return (1);

rounddown:
	fp->fp_mant[0] = m0;
	fp->fp_mant[1] = m1;
	fp->fp_mant[2] = m2;
	fp->fp_mant[3] = m3;
	return (0);
}

/*
 * For overflow: return true if overflow is to go to +/-Inf, according
 * to the sign of the overflowing result.  If false, overflow is to go
 * to the largest magnitude value instead.
 */
static int
toinf(struct fpemu *fe, int sign)
{
	int inf;

	/* look at rounding direction */
	switch ((fe->fe_fpscr) & FPSCR_RN) {

	default:
	case FP_RN:		/* the nearest value is always Inf */
		inf = 1;
		break;

	case FP_RZ:		/* toward 0 => never towards Inf */
		inf = 0;
		break;

	case FP_RP:		/* toward +Inf iff positive */
		inf = sign == 0;
		break;

	case FP_RM:		/* toward -Inf iff negative */
		inf = sign;
		break;
	}
	if (inf)
		fe->fe_cx |= FPSCR_OX;
	return (inf);
}

/*
 * fpn -> int (int value returned as return value).
 *
 * N.B.: this conversion always rounds towards zero (this is a peculiarity
 * of the SPARC instruction set).
 */
u_int
fpu_ftoi(struct fpemu *fe, struct fpn *fp)
{
	u_int i;
	int sign, exp;

	sign = fp->fp_sign;
	switch (fp->fp_class) {

	case FPC_ZERO:
		return (0);

	case FPC_NUM:
		/*
		 * If exp >= 2^32, overflow.  Otherwise shift value right
		 * into last mantissa word (this will not exceed 0xffffffff),
		 * shifting any guard and round bits out into the sticky
		 * bit.  Then ``round'' towards zero, i.e., just set an
		 * inexact exception if sticky is set (see round()).
		 * If the result is > 0x80000000, or is positive and equals
		 * 0x80000000, overflow; otherwise the last fraction word
		 * is the result.
		 */
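		/*
		 * A concrete case (illustrative values only): converting
		 * -3.75 arrives here with exp == 1; the shift below leaves
		 * 3 in fp_mant[3] and sweeps the .75 into the sticky bit,
		 * so the conversion returns -3, truncating toward zero as
		 * described above.
		 */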
		if ((exp = fp->fp_exp) >= 32)
			break;
		/* NB: the following includes exp < 0 cases */
		if (fpu_shr(fp, FP_NMANT - 1 - exp) != 0)
			fe->fe_cx |= FPSCR_UX;
		i = fp->fp_mant[3];
		if (i >= ((u_int)0x80000000 + sign))
			break;
		return (sign ? -i : i);

	default:		/* Inf, qNaN, sNaN */
		break;
	}
	/* overflow: replace any inexact exception with invalid */
	fe->fe_cx |= FPSCR_VXCVI;
	return (0x7fffffff + sign);
}

/*
 * fpn -> extended int (high bits of int value returned as return value).
 *
 * N.B.: this conversion always rounds towards zero (this is a peculiarity
 * of the SPARC instruction set).
 */
u_int
fpu_ftox(struct fpemu *fe, struct fpn *fp, u_int *res)
{
	u_int64_t i;
	int sign, exp;

	sign = fp->fp_sign;
	switch (fp->fp_class) {

	case FPC_ZERO:
		res[1] = 0;
		return (0);

	case FPC_NUM:
		/*
		 * If exp >= 2^64, overflow.  Otherwise shift value right
		 * into last mantissa word (this will not exceed
		 * 0xffffffffffffffff), shifting any guard and round bits
		 * out into the sticky bit.  Then ``round'' towards zero,
		 * i.e., just set an inexact exception if sticky is set
		 * (see round()).
		 * If the result is > 0x8000000000000000, or is positive
		 * and equals 0x8000000000000000, overflow; otherwise the
		 * last fraction word is the result.
		 */
		if ((exp = fp->fp_exp) >= 64)
			break;
		/* NB: the following includes exp < 0 cases */
		if (fpu_shr(fp, FP_NMANT - 1 - exp) != 0)
			fe->fe_cx |= FPSCR_UX;
		i = ((u_int64_t)fp->fp_mant[2] << 32) | fp->fp_mant[3];
		if (i >= ((u_int64_t)0x8000000000000000LL + sign))
			break;
		return (sign ? -i : i);

	default:		/* Inf, qNaN, sNaN */
		break;
	}
	/* overflow: replace any inexact exception with invalid */
	fe->fe_cx |= FPSCR_VXCVI;
	return (0x7fffffffffffffffLL + sign);
}

/*
 * fpn -> single (32 bit single returned as return value).
 * We assume <= 29 bits in a single-precision fraction (1.f part).
 */
u_int
fpu_ftos(struct fpemu *fe, struct fpn *fp)
{
	u_int sign = fp->fp_sign << 31;
	int exp;

#define	SNG_EXP(e)	((e) << SNG_FRACBITS)	/* makes e an exponent */
#define	SNG_MASK	(SNG_EXP(1) - 1)	/* mask for fraction */

	/* Take care of non-numbers first. */
	if (ISNAN(fp)) {
		/*
		 * Preserve upper bits of NaN, per SPARC V8 appendix N.
		 * Note that fp->fp_mant[0] has the quiet bit set,
		 * even if it is classified as a signalling NaN.
		 */
		(void) fpu_shr(fp, FP_NMANT - 1 - SNG_FRACBITS);
		exp = SNG_EXP_INFNAN;
		goto done;
	}
	if (ISINF(fp))
		return (sign | SNG_EXP(SNG_EXP_INFNAN));
	if (ISZERO(fp))
		return (sign);

	/*
	 * Normals (including subnormals).  Drop all the fraction bits
	 * (including the explicit ``implied'' 1 bit) down into the
	 * single-precision range.  If the number is subnormal, move
	 * the ``implied'' 1 into the explicit range as well, and shift
	 * right to introduce leading zeroes.  Rounding then acts
	 * differently for normals and subnormals: the largest subnormal
	 * may round to the smallest normal (1.0 x 2^minexp), or may
	 * remain subnormal.  In the latter case, signal an underflow
	 * if the result was inexact or if underflow traps are enabled.
	 *
	 * Rounding a normal, on the other hand, always produces another
	 * normal (although either way the result might be too big for
	 * single precision, and cause an overflow).  If rounding a
	 * normal produces 2.0 in the fraction, we need not adjust that
	 * fraction at all, since both 1.0 and 2.0 are zero under the
	 * fraction mask.
	 *
	 * Note that the guard and round bits vanish from the number after
	 * rounding.
	 */
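	/*
	 * To make the packing concrete (these are plain IEEE single-
	 * precision facts, nothing specific to this emulator): with
	 * SNG_EXP_BIAS == 127 and SNG_FRACBITS == 23, 1.0 packs as
	 * 0x3f800000, the smallest normal as 0x00800000, and the
	 * subnormal branch below yields results in the range
	 * 0x00000001..0x007fffff.
	 */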
	if ((exp = fp->fp_exp + SNG_EXP_BIAS) <= 0) {	/* subnormal */
		/* -NG for g,r; -SNG_FRACBITS-exp for fraction */
		(void) fpu_shr(fp, FP_NMANT - FP_NG - SNG_FRACBITS - exp);
		if (round(fe, fp) && fp->fp_mant[3] == SNG_EXP(1))
			return (sign | SNG_EXP(1) | 0);
		if ((fe->fe_cx & FPSCR_FI) ||
		    (fe->fe_fpscr & FPSCR_UX))
			fe->fe_cx |= FPSCR_UX;
		return (sign | SNG_EXP(0) | fp->fp_mant[3]);
	}
	/* -FP_NG for g,r; -1 for implied 1; -SNG_FRACBITS for fraction */
	(void) fpu_shr(fp, FP_NMANT - FP_NG - 1 - SNG_FRACBITS);
#ifdef DIAGNOSTIC
	if ((fp->fp_mant[3] & SNG_EXP(1 << FP_NG)) == 0)
		panic("fpu_ftos");
#endif
	if (round(fe, fp) && fp->fp_mant[3] == SNG_EXP(2))
		exp++;
	if (exp >= SNG_EXP_INFNAN) {
		/* overflow to inf or to max single */
		if (toinf(fe, sign))
			return (sign | SNG_EXP(SNG_EXP_INFNAN));
		return (sign | SNG_EXP(SNG_EXP_INFNAN - 1) | SNG_MASK);
	}
done:
	/* phew, made it */
	return (sign | SNG_EXP(exp) | (fp->fp_mant[3] & SNG_MASK));
}

/*
 * fpn -> double (32 bit high-order result returned; 32-bit low order result
 * left in res[1]).  Assumes <= 61 bits in double precision fraction.
 *
 * This code mimics fpu_ftos; see it for comments.
 */
u_int
fpu_ftod(struct fpemu *fe, struct fpn *fp, u_int *res)
{
	u_int sign = fp->fp_sign << 31;
	int exp;

#define	DBL_EXP(e)	((e) << (DBL_FRACBITS & 31))
#define	DBL_MASK	(DBL_EXP(1) - 1)

	if (ISNAN(fp)) {
		(void) fpu_shr(fp, FP_NMANT - 1 - DBL_FRACBITS);
		exp = DBL_EXP_INFNAN;
		goto done;
	}
	if (ISINF(fp)) {
		sign |= DBL_EXP(DBL_EXP_INFNAN);
		goto zero;
	}
	if (ISZERO(fp)) {
zero:		res[1] = 0;
		return (sign);
	}

	if ((exp = fp->fp_exp + DBL_EXP_BIAS) <= 0) {
		(void) fpu_shr(fp, FP_NMANT - FP_NG - DBL_FRACBITS - exp);
		if (round(fe, fp) && fp->fp_mant[2] == DBL_EXP(1)) {
			res[1] = 0;
			return (sign | DBL_EXP(1) | 0);
		}
		if ((fe->fe_cx & FPSCR_FI) ||
		    (fe->fe_fpscr & FPSCR_UX))
			fe->fe_cx |= FPSCR_UX;
		exp = 0;
		goto done;
	}
	(void) fpu_shr(fp, FP_NMANT - FP_NG - 1 - DBL_FRACBITS);
	if (round(fe, fp) && fp->fp_mant[2] == DBL_EXP(2))
		exp++;
	if (exp >= DBL_EXP_INFNAN) {
		fe->fe_cx |= FPSCR_OX | FPSCR_UX;
		if (toinf(fe, sign)) {
			res[1] = 0;
			return (sign | DBL_EXP(DBL_EXP_INFNAN) | 0);
		}
		res[1] = ~0;
		return (sign | DBL_EXP(DBL_EXP_INFNAN) | DBL_MASK);
	}
done:
	res[1] = fp->fp_mant[3];
	return (sign | DBL_EXP(exp) | (fp->fp_mant[2] & DBL_MASK));
}

/*
 * Implode an fpn, writing the result into the given space.
 */
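/*
 * A sketch of a caller (hypothetical names; the real call sites are in the
 * instruction-execution path, e.g. fpu_execute()):
 *
 *	u_int words[2];
 *	fpu_implode(fe, fp, FTYPE_DBL, words);
 *
 * For FTYPE_DBL, words[0] receives the sign/exponent/high-fraction word and
 * words[1] the low fraction word, so 1.0 comes back as 0x3ff00000 0x00000000.
 */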
void
fpu_implode(struct fpemu *fe, struct fpn *fp, int type, u_int *space)
{

	switch (type) {

	case FTYPE_LNG:
		space[0] = fpu_ftox(fe, fp, space);
		DPRINTF(FPE_REG, ("fpu_implode: long %x %x\n",
			space[0], space[1]));
		break;

	case FTYPE_INT:
		space[0] = 0;
		space[1] = fpu_ftoi(fe, fp);
		DPRINTF(FPE_REG, ("fpu_implode: int %x\n",
			space[1]));
		break;

	case FTYPE_SNG:
		space[0] = fpu_ftos(fe, fp);
		DPRINTF(FPE_REG, ("fpu_implode: single %x\n",
			space[0]));
		break;

	case FTYPE_DBL:
		space[0] = fpu_ftod(fe, fp, space);
		DPRINTF(FPE_REG, ("fpu_implode: double %x %x\n",
			space[0], space[1]));
		break;

	default:
		panic("fpu_implode: invalid type %d", type);
	}
}