/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 */
#include <libkern/quad.h>
/*
 * Our algorithm is based on the following.  Split incoming quad values
 * u and v (with u, v >= 0) into half-width pieces, u = 2^n u1 + u0 and
 * v = 2^n v1 + v0, where n is the number of bits in a u_int.  Regrouping
 * the product so that every partial product needs only half precision
 * gives
 *
 *	uv = (2^2n + 2^n) (u1 v1) +
 *	         (2^n)    (u1 v0 - u1 v1 + u0 v1 - u0 v0) +
 *	       (2^n + 1)  (u0 v0)
 *
 * and factoring the middle term gives
 *
 *	uv = (2^2n + 2^n) (u1 v1) +		[u1 v1 = high]
 *	         (2^n)    (u1 - u0) (v0 - v1) +	[(u1-u0)... = mid]
 *	       (2^n + 1)  (u0 v0)		[u0 v0 = low]
 *
 * The terms (u1 v1), (u1 - u0) (v0 - v1), and (u0 v0) can all be done
 * in just half the precision of the original.  (Note that either or both
 * of (u1 - u0) or (v0 - v1) may be negative.)
 *
 * Since C does not give us a `int * int = quad' operator, we split the
 * one product whose full width is needed, u0 v0, the same way once more
 * inside __lmulq() below.  The high and mid terms only feed the upper
 * half of the result, so their own upper bits may be discarded and plain
 * u_int multiplication is enough for them.
 *
 * Our product should, strictly speaking, be a `long quad', with 128
 * bits, but only the low 64 are kept, so everything at or above 2^2n
 * is simply discarded.
 */
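/*
 * As a concrete check of the decomposition above (a standalone
 * illustration, not part of libkern; the test values are arbitrary):
 * with 32-bit operands split into 16-bit halves, every term fits in a
 * uint64_t, so the three-term form can be compared against the ordinary
 * product directly.
 */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint32_t u = 0xdeadbeef, v = 0x01234567;
	uint64_t u1 = u >> 16, u0 = u & 0xffff;	/* u = 2^16 u1 + u0 */
	uint64_t v1 = v >> 16, v0 = v & 0xffff;	/* v = 2^16 v1 + v0 */
	uint64_t high = u1 * v1;		/* u1 v1 */
	uint64_t low = u0 * v0;			/* u0 v0 */
	/* mid = (u1 - u0)(v0 - v1); either factor may be negative. */
	int64_t mid = ((int64_t)u1 - (int64_t)u0) *
	    ((int64_t)v0 - (int64_t)v1);
	uint64_t uv = ((1ULL << 32) + (1ULL << 16)) * high +	/* (2^2n + 2^n) high */
	    ((uint64_t)mid << 16) +				/* (2^n) mid */
	    ((1ULL << 16) + 1) * low;				/* (2^n + 1) low */

	/* The sums wrap modulo 2^64, which is harmless here: the true
	 * 32x32 product fits in 64 bits. */
	assert(uv == (uint64_t)u * v);
	return (0);
}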
	/* __muldi3(): make both operands non-negative, remembering the sign. */
	u.q = -a, negall = 1;			/* taken when a < 0 */
	v.q = -b, negall ^= 1;			/* taken when b < 0 */
	/* Form |u1 - u0| and |v0 - v1|, tracking the sign of `mid'. */
	if (u1 >= u0)
		negmid = 0, udiff = u1 - u0;
	else
		negmid = 1, udiff = u0 - u1;
	if (v0 >= v1)
		vdiff = v0 - v1;
	else
		vdiff = v1 - v0, negmid ^= 1;
	/* Assemble the low 64 bits of the product and restore the sign. */
	prod.ul[H] = high + (negmid ? -mid : mid) + low.ul[L] +
	    low.ul[H];
	return (negall ? -prod.q : prod.q);
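/*
 * The same assembly step, written as a standalone sketch with fixed-width
 * types (illustration only; `mul64_sketch' and the test values are made up
 * here, and 32-bit halves are assumed).  Because only the low 64 bits of
 * the product are kept, `high' and `mid' can use ordinary truncating
 * 32-bit multiplication, exactly as in the excerpt above.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t
mul64_sketch(uint64_t a, uint64_t b)
{
	uint32_t u1 = a >> 32, u0 = (uint32_t)a;
	uint32_t v1 = b >> 32, v0 = (uint32_t)b;
	uint64_t low = (uint64_t)u0 * v0;	/* full 64-bit u0 v0 */
	uint32_t high = u1 * v1;		/* u1 v1 mod 2^32 */
	uint32_t udiff, vdiff, mid, prodh;
	int negmid;

	if (u1 >= u0)
		negmid = 0, udiff = u1 - u0;
	else
		negmid = 1, udiff = u0 - u1;
	if (v0 >= v1)
		vdiff = v0 - v1;
	else
		vdiff = v1 - v0, negmid ^= 1;
	mid = udiff * vdiff;			/* |u1-u0| |v0-v1| mod 2^32 */

	/* prod.ul[H] = high + (negmid ? -mid : mid) + low.ul[L] + low.ul[H] */
	prodh = high + (negmid ? -mid : mid) +
	    (uint32_t)low + (uint32_t)(low >> 32);
	return ((uint64_t)prodh << 32) | (uint32_t)low;
}

int
main(void)
{
	uint64_t a = 0x123456789abcdef0ULL, b = 0xfedcba9876543210ULL;

	assert(mul64_sketch(a, b) == a * b);	/* a * b is also mod 2^64 */
	return (0);
}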
/*
 * Multiply two 2N-bit ints to produce a 4N-bit quad, where N is half
 * the number of bits in an int (whatever that is---the code below
 * does not care as long as quad.h does its part of the bargain---but
 * typically N==16).
 *
 * The decomposition is the same as above, but here the full 4N-bit
 * result is kept.  Since N is half the width of an int,
 * we can get away with native multiplication---none of our input terms
 * is wider than N bits, so every partial product fits in a u_int.
 *
 * Note that, for u_int l, the quad-precision result l << N splits into
 * high and low ints as HHALF(l) and LHUP(l), respectively.
 */
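/*
 * The HHALF()/LHALF()/LHUP() macros come from quad.h; the definitions
 * below are stand-ins for illustration only (assuming a 32-bit u_int,
 * so N == 16).  The check shows the splitting rule quoted above.
 */
#include <assert.h>
#include <stdint.h>

#define	HALF_BITS	16				/* N */
#define	HHALF(v)	((uint32_t)(v) >> HALF_BITS)	/* high N bits */
#define	LHALF(v)	((uint32_t)(v) & ((1U << HALF_BITS) - 1))
#define	LHUP(v)		((uint32_t)(v) << HALF_BITS)	/* low half, moved up */

int
main(void)
{
	uint32_t l = 0xdeadbeef;

	/* (quad)l << N has HHALF(l) in its high int and LHUP(l) in its low int. */
	assert(((uint64_t)l << HALF_BITS) ==
	    (((uint64_t)HHALF(l) << 32) | LHUP(l)));
	return (0);
}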
	/* __lmulq(): the same small-number optimization as before (when both
	 * high halves are zero, the low product alone is the answer). */

	/* |u1 - u0| and |v0 - v1| again, remembering the sign in `neg'. */
	if (u1 >= u0)
		udiff = u1 - u0, neg = 0;
	else
		udiff = u0 - u1, neg = 1;
	if (v0 >= v1)
		vdiff = v0 - v1;
	else
		vdiff = v1 - v0, neg ^= 1;

	/* if (neg) prod -= mid << N; else prod += mid << N; */
	was = prodl;
	prodl -= LHUP(mid);			/* the `neg' branch is shown */
	prodh -= HHALF(mid) + (prodl > was);	/* (prodl > was) is the borrow */
	/* return 4N-bit product */
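/*
 * The borrow test `(prodl > was)' above has an addition counterpart:
 * after `x += y', `(x < was)' is the carry out.  A standalone
 * illustration (names and values are made up here): add two 64-bit
 * values held as 32-bit halves, propagating the carry the same way.
 */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint64_t a = 0xffffffff12345678ULL, b = 0x00000001fedcba98ULL;
	uint32_t lo = (uint32_t)a, hi = a >> 32, was;

	was = lo;
	lo += (uint32_t)b;			/* low halves; may wrap */
	hi += (uint32_t)(b >> 32) + (lo < was);	/* (lo < was) is the carry out */

	assert((((uint64_t)hi << 32) | lo) == a + b);
	return (0);
}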