
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * ...
 */

/*
 * Multiply two quads.
 *
 * Our algorithm is based on the following.  Split incoming quad values
 * u and v (where u,v >= 0) into
 *
 *	u = 2^n u1 + u0	(n = number of bits in `u_long', usu. 32)
 *
 * and
 *
 *	v = 2^n v1 + v0
 *
 * Then
 *
 *	uv = 2^2n u1 v1  +  2^n u1 v0  +  2^n v1 u0  +  u0 v0
 *	   = 2^2n u1 v1  +  2^n (u1 v0 + v1 u0)  +  u0 v0
 *
 * Now add 2^n u1 v1 to the first term and subtract it from the middle,
 * and add 2^n u0 v0 to the last term and subtract it from the middle.
 * This gives:
 *
 *	uv = (2^2n + 2^n) (u1 v1)  +
 *	         (2^n)    (u1 v0 - u1 v1 + u0 v1 - u0 v0)  +
 *	       (2^n + 1)  (u0 v0)
 *
 * Factoring the middle a bit gives us:
 *
 *	uv = (2^2n + 2^n) (u1 v1)  +		[u1v1 = high]
 *	         (2^n)    (u1 - u0) (v0 - v1)  +	[(u1-u0)... = mid]
 *	       (2^n + 1)  (u0 v0)		[u0v0 = low]
 *
 * The terms (u1 v1), (u1 - u0) (v0 - v1), and (u0 v0) can all be done
 * in half precision.  (Note that the middle term can be negative, since
 * either of (u1 - u0) or (v0 - v1) may be negative.)
 *
 * Since a quad product is kept mod 2^2n, the 2^2n part of the high term
 * drops out, and what must be assembled is
 *
 *	(2^n)(high) + (2^n)(mid) + (2^n + 1)(low)
 */
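
/*
 * As a quick sanity check of the identity above, the following sketch
 * (a standalone illustration, not part of the original file) evaluates
 * both sides for one pair of 16-bit operands, taking n = 8 instead of
 * the usual 32 so the arithmetic stays easy to eyeball:
 */
#include <assert.h>
#include <stdint.h>

static void
check_karatsuba_identity(void)
{
	uint32_t u = 0xbeef, v = 0xcafe;
	int64_t u1 = u >> 8, u0 = u & 0xff;	/* split u into halves */
	int64_t v1 = v >> 8, v0 = v & 0xff;	/* split v into halves */
	int64_t high = u1 * v1;			/* u1 v1 */
	int64_t mid = (u1 - u0) * (v0 - v1);	/* may be negative */
	int64_t low = u0 * v0;			/* u0 v0 */

	/* uv == (2^2n + 2^n)(high) + (2^n)(mid) + (2^n + 1)(low) */
	assert((int64_t)u * v ==
	    ((1 << 16) + (1 << 8)) * high + (1 << 8) * mid +
	    ((1 << 8) + 1) * low);
}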

quad_t
__muldi3(quad_t a, quad_t b)
{
	union uu u, v, low, prod;
	u_long high, mid, udiff, vdiff;
	int negall, negmid;
#define	u1	u.ul[H]
#define	u0	u.ul[L]
#define	v1	v.ul[H]
#define	v0	v.ul[L]

	/*
	 * Get u and v such that u, v >= 0.  When this is finished,
	 * u1, u0, v1, and v0 will be directly accessible through the
	 * defines, and negall records whether the result must be negated.
	 */
	if (a >= 0)
		u.q = a, negall = 0;
	else
		u.q = -a, negall = 1;
	if (b >= 0)
		v.q = b;
	else
		v.q = -b, negall ^= 1;
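	/*
	 * At this point negall == 1 iff exactly one of a and b was
	 * negative, i.e. iff the final product must come out negative.
	 */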

	if (u1 == 0 && v1 == 0) {
		/*
		 * An important optimization occurs when u1 and v1 are
		 * both 0.  This should be common since most numbers
		 * are small.  Here the product is just u0*v0.
		 */
		prod.q = __lmulq(u0, v0);
	} else {
		/* Compute the three intermediate products. */
		low.q = __lmulq(u0, v0);

		if (u1 >= u0)
			negmid = 0, udiff = u1 - u0;
		else
			negmid = 1, udiff = u0 - u1;
		if (v0 >= v1)
			vdiff = v0 - v1;
		else
			vdiff = v1 - v0, negmid ^= 1;
		mid = udiff * vdiff;

		high = u1 * v1;

		prod.ul[H] = high + (negmid ? -mid : mid) + low.ul[L] +
		    low.ul[H];
		prod.ul[L] = low.ul[L];
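		/*
		 * Only the low u_long of high and mid is used above: the
		 * result is computed mod 2^2n, where the product reduces
		 * to (2^n)(high + mid + low.ul[H] + low.ul[L]) + low.ul[L].
		 */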
	}
	return (negall ? -prod.q : prod.q);
#undef	u1
#undef	u0
#undef	v1
#undef	v0
}
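
/*
 * Both routines lean on quad.h for the half-long plumbing.  For
 * reference, here is a minimal sketch of the pieces assumed above;
 * this is an illustration of the idea, not the real header (for
 * instance, H and L are shown for a little-endian word order, while
 * the real quad.h chooses them according to byte order):
 */
#include <sys/types.h>

#define	HALF_BITS	(sizeof(u_long) * 8 / 2)	/* N */
#define	HHALF(x)	((u_long)(x) >> HALF_BITS)	/* high N bits */
#define	LHALF(x)	((u_long)(x) & ((1UL << HALF_BITS) - 1)) /* low N bits */
#define	LHUP(x)		((u_long)(x) << HALF_BITS)	/* low half, moved up */

#define	H	1	/* index of the high long within a quad */
#define	L	0	/* index of the low long within a quad */

union uu {
	quad_t	q;	/* as a signed quad */
	u_long	ul[2];	/* as two unsigned longs */
};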

/*
 * Multiply two 2N-bit longs to produce a 4N-bit quad, where N is half
 * the number of bits in a long (whatever that is---the code below
 * does not care as long as quad.h does its part of the bargain---but
 * typically N==16).
 *
 * We use the same algorithm as above; since the pieces here are only
 * N bits wide, we can get away with native multiplication---none of
 * our input terms exceeds (ULONG_MAX >> 1).
 *
 * Note that, for u_long l, the quad-precision result
 *
 *	l << N
 *
 * splits into high and low longs as HHALF(l) and LHUP(l) respectively.
 */
static quad_t
__lmulq(u_long u, u_long v)
{
	u_long u1, u0, v1, v0, udiff, vdiff, high, mid, low;
	u_long prodh, prodl, was;
	union uu prod;
	int neg;

	u1 = HHALF(u);
	u0 = LHALF(u);
	v1 = HHALF(v);
	v0 = LHALF(v);

	low = u0 * v0;

	/* This is the same small-number optimization as before. */
	if (u1 == 0 && v1 == 0)
		return (low);

	if (u1 >= u0)
		udiff = u1 - u0, neg = 0;
	else
		udiff = u0 - u1, neg = 1;
	if (v0 >= v1)
		vdiff = v0 - v1;
	else
		vdiff = v1 - v0, neg ^= 1;
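	/*
	 * neg is the XOR of the signs of (u1 - u0) and (v0 - v1), so it
	 * records whether mid = (u1 - u0)(v0 - v1) is negative, while
	 * udiff and vdiff carry the absolute values.
	 */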
	mid = udiff * vdiff;

	high = u1 * v1;

	/* prod = (high << 2N) + (high << N); */
	prodh = high + HHALF(high);
	prodl = LHUP(high);

	/* if (neg) prod -= mid << N; else prod += mid << N; */
	if (neg) {
		was = prodl;
		prodl -= LHUP(mid);
		prodh -= HHALF(mid) + (prodl > was);
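		/*
		 * (prodl > was) is the borrow out of the low word: an
		 * unsigned subtraction wrapped iff its result is larger
		 * than the minuend was.
		 */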
	} else {
		was = prodl;
		prodl += LHUP(mid);
		prodh += HHALF(mid) + (prodl < was);
	}

	/* ... the (2^N + 1)(low) term is accumulated the same way ... */

	/* return 4N-bit product */
	prod.ul[H] = prodh;
	prod.ul[L] = prodl;
	return (prod.q);
}
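
/*
 * The carry/borrow idiom used throughout __lmulq, isolated as a tiny
 * standalone sketch (again an illustration, not part of the original
 * file): for unsigned operands, an addition carried iff the sum is
 * smaller than an addend, and a subtraction borrowed iff the
 * difference is larger than the minuend.
 */
#include <assert.h>
#include <limits.h>

static void
check_carry_idiom(void)
{
	unsigned long a = ULONG_MAX - 1;
	unsigned long sum = a + 5UL;		/* wraps around to 3 */
	unsigned long diff = a - ULONG_MAX;	/* wraps around to ULONG_MAX */

	assert(sum < a);	/* sum wrapped: carry out */
	assert(diff > a);	/* difference wrapped: borrow out */
}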