Lines Matching +full:1 +full:- +full:v0
3 /*-
4 * SPDX-License-Identifier: BSD-3-Clause
10 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
16 * 1. Redistributions of source code must retain the above copyright
50 * v = 2^n v1 + v0
54 * uv = 2^2n u1 v1 + 2^n u1 v0 + 2^n v1 u0 + u0 v0
55 * = 2^2n u1 v1 + 2^n (u1 v0 + v1 u0) + u0 v0
58 * and add 2^n u0 v0 to the last term and subtract it from the middle.
62 * (2^n) (u1 v0 - u1 v1 + u0 v1 - u0 v0) +
63 * (2^n + 1) (u0 v0)
68 * (2^n) (u1 - u0) (v0 - v1) + [(u1-u0)... = mid]
69 * (2^n + 1) (u0 v0) [u0v0 = low]
71 * The terms (u1 v1), (u1 - u0) (v0 - v1), and (u0 v0) can all be done
73 * of (u1 - u0) or (v0 - v1) may be negative.)
87 * (2^2n + 2^n)(high) + (2^n)(mid) + (2^n + 1)(low)
109 #define v0 v.ul[L] in __muldi3() macro
113 * u1, u0, v1, and v0 will be directly accessible through the in __muldi3()
119 u.q = -a, negall = 1; in __muldi3()
123 v.q = -b, negall ^= 1; in __muldi3()
129 * are small. Here the product is just u0*v0. in __muldi3()
131 prod.q = __lmulq(u0, v0); in __muldi3()
139 low.q = __lmulq(u0, v0); in __muldi3()
142 negmid = 0, udiff = u1 - u0; in __muldi3()
144 negmid = 1, udiff = u0 - u1; in __muldi3()
145 if (v0 >= v1) in __muldi3()
146 vdiff = v0 - v1; in __muldi3()
148 vdiff = v1 - v0, negmid ^= 1; in __muldi3()
156 prod.ul[H] = high + (negmid ? -mid : mid) + low.ul[L] + in __muldi3()
160 return (negall ? -prod.q : prod.q); in __muldi3()
164 #undef v0 in __muldi3()
168 * Multiply two 2N-bit ints to produce a 4N-bit quad, where N is half
169 * the number of bits in an int (whatever that is---the code below
170 * does not care as long as quad.h does its part of the bargain---but
175 * we can get away with native multiplication---none of our input terms
176 * exceeds (UINT_MAX >> 1).
178 * Note that, for u_int l, the quad-precision result
187 u_int u1, u0, v1, v0, udiff, vdiff, high, mid, low; in __lmulq() local
195 v0 = LHALF(v); in __lmulq()
197 low = u0 * v0; in __lmulq()
199 /* This is the same small-number optimization as before. */ in __lmulq()
204 udiff = u1 - u0, neg = 0; in __lmulq()
206 udiff = u0 - u1, neg = 1; in __lmulq()
207 if (v0 >= v1) in __lmulq()
208 vdiff = v0 - v1; in __lmulq()
210 vdiff = v1 - v0, neg ^= 1; in __lmulq()
219 /* if (neg) prod -= mid << N; else prod += mid << N; */ in __lmulq()
222 prodl -= LHUP(mid); in __lmulq()
223 prodh -= HHALF(mid) + (prodl > was); in __lmulq()
238 /* return 4N-bit product */ in __lmulq()