1 /* $NetBSD: fpu_mul.c,v 1.4 2005/12/11 12:18:42 christos Exp $ */ 2 3 /* 4 * SPDX-License-Identifier: BSD-3-Clause 5 * 6 * Copyright (c) 1992, 1993 7 * The Regents of the University of California. All rights reserved. 8 * 9 * This software was developed by the Computer Systems Engineering group 10 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 11 * contributed to Berkeley. 12 * 13 * All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by the University of 16 * California, Lawrence Berkeley Laboratory. 17 * 18 * Redistribution and use in source and binary forms, with or without 19 * modification, are permitted provided that the following conditions 20 * are met: 21 * 1. Redistributions of source code must retain the above copyright 22 * notice, this list of conditions and the following disclaimer. 23 * 2. Redistributions in binary form must reproduce the above copyright 24 * notice, this list of conditions and the following disclaimer in the 25 * documentation and/or other materials provided with the distribution. 26 * 3. Neither the name of the University nor the names of its contributors 27 * may be used to endorse or promote products derived from this software 28 * without specific prior written permission. 29 * 30 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 31 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 33 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 34 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 35 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 36 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 37 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 38 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 39 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 40 * SUCH DAMAGE. 41 */ 42 43 /* 44 * Perform an FPU multiply (return x * y). 45 */ 46 47 #include <sys/cdefs.h> 48 #include <sys/types.h> 49 #include <sys/systm.h> 50 51 #include <machine/fpu.h> 52 53 #include <powerpc/fpu/fpu_arith.h> 54 #include <powerpc/fpu/fpu_emu.h> 55 56 /* 57 * The multiplication algorithm for normal numbers is as follows: 58 * 59 * The fraction of the product is built in the usual stepwise fashion. 60 * Each step consists of shifting the accumulator right one bit 61 * (maintaining any guard bits) and, if the next bit in y is set, 62 * adding the multiplicand (x) to the accumulator. Then, in any case, 63 * we advance one bit leftward in y. Algorithmically: 64 * 65 * A = 0; 66 * for (bit = 0; bit < FP_NMANT; bit++) { 67 * sticky |= A & 1, A >>= 1; 68 * if (Y & (1 << bit)) 69 * A += X; 70 * } 71 * 72 * (X and Y here represent the mantissas of x and y respectively.) 73 * The resultant accumulator (A) is the product's mantissa. It may 74 * be as large as 11.11111... in binary and hence may need to be 75 * shifted right, but at most one bit. 76 * 77 * Since we do not have efficient multiword arithmetic, we code the 78 * accumulator as four separate words, just like any other mantissa. 79 * We use local variables in the hope that this is faster than memory. 80 * We keep x->fp_mant in locals for the same reason. 81 * 82 * In the algorithm above, the bits in y are inspected one at a time. 
 * We will pick them up 32 at a time and then deal with those 32, one
 * at a time.  Note, however, that we know several things about y:
 *
 *	- the guard and round bits at the bottom are sure to be zero;
 *
 *	- often many low bits are zero (y is often from a single or double
 *	  precision source);
 *
 *	- bit FP_NMANT-1 is set, and FP_1*2 fits in a word.
 *
 * We can also test for 32-zero-bits swiftly.  In this case, the center
 * part of the loop---setting sticky, shifting A, and not adding---will
 * run 32 times without adding X to A.  We can do a 32-bit shift faster
 * by simply moving words.  Since zeros are common, we optimize this case.
 * Furthermore, since A is initially zero, we can omit the shift as well
 * until we reach a nonzero word.
 */

/*
 * fpu_mul: emulated floating-point multiply.
 *
 * Computes fe->fe_f1 * fe->fe_f2 and returns a pointer to the struct
 * fpn holding the product.  The result is built in place in one of the
 * two operand fpns (or comes from fpu_newnan() in the 0 * Inf case),
 * so callers must not assume the operands survive unmodified.
 * Exception status bits (FPSCR_VXSNAN, FPSCR_VXIMZ) are OR-ed into
 * fe->fe_cx as a side effect.
 */
struct fpn *
fpu_mul(struct fpemu *fe)
{
	struct fpn *x = &fe->fe_f1, *y = &fe->fe_f2;
	u_int a3, a2, a1, a0, x3, x2, x1, x0, bit, m;
	int sticky;
	FPU_DECL_CARRY;

	/*
	 * Put the `heavier' operand on the right (see fpu_emu.h).
	 * Then we will have one of the following cases, taken in the
	 * following order:
	 *
	 * - y = NaN.  Implied: if only one is a signalling NaN, y is.
	 *	The result is y.
	 * - y = Inf.  Implied: x != NaN (is 0, number, or Inf: the NaN
	 *	case was taken care of earlier).
	 *	If x = 0, the result is NaN.  Otherwise the result
	 *	is y, with its sign reversed if x is negative.
	 * - x = 0.  Implied: y is 0 or number.
	 *	The result is 0 (with XORed sign as usual).
	 * - other.  Implied: both x and y are numbers.
	 *	The result is x * y (XOR sign, multiply bits, add exponents).
	 */
	DPRINTF(FPE_REG, ("fpu_mul:\n"));
	DUMPFPN(FPE_REG, x);
	DUMPFPN(FPE_REG, y);
	DPRINTF(FPE_REG, ("=>\n"));

	/* After ORDER(), y is the class-heavier operand (NaN > Inf > ...). */
	ORDER(x, y);
	if (ISNAN(y)) {
		/* Propagate the NaN with the product's sign. */
		y->fp_sign ^= x->fp_sign;
		/*
		 * NOTE(review): VXSNAN is raised here for any NaN operand,
		 * quiet or signalling -- confirm this matches the intended
		 * FPSCR semantics for quiet-NaN propagation.
		 */
		fe->fe_cx |= FPSCR_VXSNAN;
		DUMPFPN(FPE_REG, y);
		return (y);
	}
	if (ISINF(y)) {
		/* Inf * 0 is invalid: flag it and produce a fresh NaN. */
		if (ISZERO(x)) {
			fe->fe_cx |= FPSCR_VXIMZ;
			return (fpu_newnan(fe));
		}
		y->fp_sign ^= x->fp_sign;
		DUMPFPN(FPE_REG, y);
		return (y);
	}
	if (ISZERO(x)) {
		/* 0 * finite = 0, sign is the XOR of the operand signs. */
		x->fp_sign ^= y->fp_sign;
		DUMPFPN(FPE_REG, x);
		return (x);
	}

	/*
	 * Setup.  In the code below, the mask `m' will hold the current
	 * mantissa byte from y.  The variable `bit' denotes the bit
	 * within m.  We also define some macros to deal with everything.
	 */
	x3 = x->fp_mant[3];
	x2 = x->fp_mant[2];
	x1 = x->fp_mant[1];
	x0 = x->fp_mant[0];
	sticky = a3 = a2 = a1 = a0 = 0;

#define	ADD	/* A += X */ \
	FPU_ADDS(a3, a3, x3); \
	FPU_ADDCS(a2, a2, x2); \
	FPU_ADDCS(a1, a1, x1); \
	FPU_ADDC(a0, a0, x0)

#define	SHR1	/* A >>= 1, with sticky */ \
	sticky |= a3 & 1, a3 = (a3 >> 1) | (a2 << 31), \
	a2 = (a2 >> 1) | (a1 << 31), a1 = (a1 >> 1) | (a0 << 31), a0 >>= 1

#define	SHR32	/* A >>= 32, with sticky */ \
	sticky |= a3, a3 = a2, a2 = a1, a1 = a0, a0 = 0

#define	STEP	/* each 1-bit step of the multiplication */ \
	SHR1; if (bit & m) { ADD; }; bit <<= 1

	/*
	 * We are ready to begin.  The multiply loop runs once for each
	 * of the four 32-bit words.  Some words, however, are special.
	 * As noted above, the low order bits of Y are often zero.  Even
	 * if not, the first loop can certainly skip the guard bits.
	 * The last word of y has its highest 1-bit in position FP_NMANT-1,
	 * so we stop the loop when we move past that bit.
	 */
	if ((m = y->fp_mant[3]) == 0) {
		/* SHR32; */			/* unneeded since A==0 */
	} else {
		/* Skip the guard bits: they are known to be zero. */
		bit = 1 << FP_NG;
		do {
			STEP;
		} while (bit != 0);
	}
	if ((m = y->fp_mant[2]) == 0) {
		/* Whole word of zeros: one 32-bit shift instead of 32 steps. */
		SHR32;
	} else {
		bit = 1;
		do {
			STEP;
		} while (bit != 0);
	}
	if ((m = y->fp_mant[1]) == 0) {
		SHR32;
	} else {
		bit = 1;
		do {
			STEP;
		} while (bit != 0);
	}
	/* Top word always has bit FP_NMANT-1 set, so m != 0 here. */
	m = y->fp_mant[0];		/* definitely != 0 */
	bit = 1;
	do {
		STEP;
	} while (bit <= m);		/* stop past the highest set bit */

	/*
	 * Done with mantissa calculation.  Get exponent and handle
	 * 11.111...1 case, then put result in place.  We reuse x since
	 * it already has the right class (FP_NUM).
	 */
	m = x->fp_exp + y->fp_exp;	/* `m' reused as the result exponent */
	if (a0 >= FP_2) {
		/* Product landed in [2, 4): renormalize by one bit. */
		SHR1;
		m++;
	}
	x->fp_sign ^= y->fp_sign;
	x->fp_exp = m;
	x->fp_sticky = sticky;
	x->fp_mant[3] = a3;
	x->fp_mant[2] = a2;
	x->fp_mant[1] = a1;
	x->fp_mant[0] = a0;

	DUMPFPN(FPE_REG, x);
	return (x);
}