/*	$NetBSD: fpu_mul.c,v 1.4 2005/12/11 12:18:42 christos Exp $ */

/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)fpu_mul.c	8.1 (Berkeley) 6/11/93
 */

/*
 * Perform an FPU multiply (return x * y).
 */

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/systm.h>

#include <machine/fpu.h>

#include <powerpc/fpu/fpu_arith.h>
#include <powerpc/fpu/fpu_emu.h>

/*
 * The multiplication algorithm for normal numbers is as follows:
 *
 * The fraction of the product is built in the usual stepwise fashion.
 * Each step consists of shifting the accumulator right one bit
 * (maintaining any guard bits) and, if the next bit in y is set,
 * adding the multiplicand (x) to the accumulator.  Then, in any case,
 * we advance one bit leftward in y.  Algorithmically:
 *
 *	A = 0;
 *	for (bit = 0; bit < FP_NMANT; bit++) {
 *		sticky |= A & 1, A >>= 1;
 *		if (Y & (1 << bit))
 *			A += X;
 *	}
 *
 * (X and Y here represent the mantissas of x and y respectively.)
 * The resultant accumulator (A) is the product's mantissa.  It may
 * be as large as 11.11111... in binary and hence may need to be
 * shifted right, but at most one bit.
 *
 * Since we do not have efficient multiword arithmetic, we code the
 * accumulator as four separate words, just like any other mantissa.
 * We use local variables in the hope that this is faster than memory.
 * We keep x->fp_mant in locals for the same reason.
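 *
 * As a small worked illustration of the loop above (hypothetical values,
 * using 3-bit mantissas instead of FP_NMANT bits): 1.11 * 1.11 in binary
 * is 11.0001.  Since both mantissas lie in [1.0, 10.0) binary, the product
 * is always below 100.0, which is why at most one rightward shift is ever
 * needed to renormalize; any bits shifted off the bottom are collected in
 * the sticky bit so that rounding can still account for them.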
 *
 * In the algorithm above, the bits in y are inspected one at a time.
 * We will pick them up 32 at a time and then deal with those 32, one
 * at a time.  Note, however, that we know several things about y:
 *
 *    - the guard and round bits at the bottom are sure to be zero;
 *
 *    - often many low bits are zero (y is often from a single or double
 *	precision source);
 *
 *    - bit FP_NMANT-1 is set, and FP_1*2 fits in a word.
 *
 * We can also test for 32-zero-bits swiftly.  In this case, the center
 * part of the loop---setting sticky, shifting A, and not adding---will
 * run 32 times without adding X to A.  We can do a 32-bit shift faster
 * by simply moving words.  Since zeros are common, we optimize this case.
 * Furthermore, since A is initially zero, we can omit the shift as well
 * until we reach a nonzero word.
 */
struct fpn *
fpu_mul(struct fpemu *fe)
{
	struct fpn *x = &fe->fe_f1, *y = &fe->fe_f2;
	u_int a3, a2, a1, a0, x3, x2, x1, x0, bit, m;
	int sticky;
	FPU_DECL_CARRY;

	/*
	 * Put the `heavier' operand on the right (see fpu_emu.h).
	 * Then we will have one of the following cases, taken in the
	 * following order:
	 *
	 * - y = NaN.  Implied: if only one is a signalling NaN, y is.
	 *	The result is y.
	 * - y = Inf.  Implied: x != NaN (is 0, number, or Inf: the NaN
	 *	case was taken care of earlier).
	 *	If x = 0, the result is NaN.  Otherwise the result
	 *	is y, with its sign reversed if x is negative.
	 * - x = 0.  Implied: y is 0 or number.
	 *	The result is 0 (with XORed sign as usual).
	 * - other.  Implied: both x and y are numbers.
	 *	The result is x * y (XOR sign, multiply bits, add exponents).
	 */
	DPRINTF(FPE_REG, ("fpu_mul:\n"));
	DUMPFPN(FPE_REG, x);
	DUMPFPN(FPE_REG, y);
	DPRINTF(FPE_REG, ("=>\n"));

	ORDER(x, y);
	if (ISNAN(y)) {
		y->fp_sign ^= x->fp_sign;
		fe->fe_cx |= FPSCR_VXSNAN;
		DUMPFPN(FPE_REG, y);
		return (y);
	}
	if (ISINF(y)) {
		if (ISZERO(x)) {
			fe->fe_cx |= FPSCR_VXIMZ;
			return (fpu_newnan(fe));
		}
		y->fp_sign ^= x->fp_sign;
		DUMPFPN(FPE_REG, y);
		return (y);
	}
	if (ISZERO(x)) {
		x->fp_sign ^= y->fp_sign;
		DUMPFPN(FPE_REG, x);
		return (x);
	}

	/*
	 * Setup.  In the code below, the mask `m' will hold the current
	 * mantissa word from y.  The variable `bit' denotes the bit
	 * within m.  We also define some macros to deal with everything.
	 */
	x3 = x->fp_mant[3];
	x2 = x->fp_mant[2];
	x1 = x->fp_mant[1];
	x0 = x->fp_mant[0];
	sticky = a3 = a2 = a1 = a0 = 0;

#define	ADD	/* A += X */ \
	FPU_ADDS(a3, a3, x3); \
	FPU_ADDCS(a2, a2, x2); \
	FPU_ADDCS(a1, a1, x1); \
	FPU_ADDC(a0, a0, x0)

#define	SHR1	/* A >>= 1, with sticky */ \
	sticky |= a3 & 1, a3 = (a3 >> 1) | (a2 << 31), \
	a2 = (a2 >> 1) | (a1 << 31), a1 = (a1 >> 1) | (a0 << 31), a0 >>= 1

#define	SHR32	/* A >>= 32, with sticky */ \
	sticky |= a3, a3 = a2, a2 = a1, a1 = a0, a0 = 0

#define	STEP	/* each 1-bit step of the multiplication */ \
	SHR1; if (bit & m) { ADD; }; bit <<= 1
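
	/*
	 * For illustration (hypothetical values): suppose a0 = a1 = 0,
	 * a2 = 0x00000001 and a3 = 0x00000003.  One SHR1 ORs the low bit
	 * of a3 into `sticky' and leaves a3 = 0x80000001, a2 = 0: the bit
	 * shifted out of each word reappears at the top of the next lower
	 * word, so the four words behave as one wide accumulator.  A STEP
	 * is then a single iteration of the bit-at-a-time loop described
	 * at the top of this file: shift A right one bit, add X if the
	 * current bit of `m' is set, and advance `bit' one place left.
	 */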

	/*
	 * We are ready to begin.  The multiply loop runs once for each
	 * of the four 32-bit words.  Some words, however, are special.
	 * As noted above, the low order bits of Y are often zero.  Even
	 * if not, the first loop can certainly skip the guard bits.
	 * The last word of y has its highest 1-bit in position FP_NMANT-1,
	 * so we stop the loop when we move past that bit.
	 */
	if ((m = y->fp_mant[3]) == 0) {
		/* SHR32; */			/* unneeded since A==0 */
	} else {
		bit = 1 << FP_NG;
		do {
			STEP;
		} while (bit != 0);
	}
	if ((m = y->fp_mant[2]) == 0) {
		SHR32;
	} else {
		bit = 1;
		do {
			STEP;
		} while (bit != 0);
	}
	if ((m = y->fp_mant[1]) == 0) {
		SHR32;
	} else {
		bit = 1;
		do {
			STEP;
		} while (bit != 0);
	}
	m = y->fp_mant[0];		/* definitely != 0 */
	bit = 1;
	do {
		STEP;
	} while (bit <= m);

	/*
	 * Done with mantissa calculation.  Get exponent and handle
	 * 11.111...1 case, then put result in place.  We reuse x since
	 * it already has the right class (FP_NUM).
	 */
	m = x->fp_exp + y->fp_exp;
	if (a0 >= FP_2) {
		SHR1;
		m++;
	}
	x->fp_sign ^= y->fp_sign;
	x->fp_exp = m;
	x->fp_sticky = sticky;
	x->fp_mant[3] = a3;
	x->fp_mant[2] = a2;
	x->fp_mant[1] = a1;
	x->fp_mant[0] = a0;

	DUMPFPN(FPE_REG, x);
	return (x);
}