Lines matching: "1", "x64", "-", "bit" (whole-word search)
1 //===----------------------------------------------------------------------===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
10 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
15 // Boost Software License - Version 1.0 - August 17th, 2003
21 // Software, and to permit third-parties to whom the Software is furnished to
28 // works are solely in the form of machine-executable object code generated by
33 // FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
40 // clang-format off
56 // We need a 64x128-bit multiplication and a subsequent 128-bit shift.
58 // The 64-bit factor is variable and passed in, the 128-bit factor comes
59 // from a lookup table. We know that the 64-bit factor only has 55
60 // significant bits (i.e., the 9 topmost bits are zeros). The 128-bit
66 // at least __j >= 115, so the result is guaranteed to fit into 179 - 115 = 64
68 // the 64x128-bit multiplication.
71 // 1. Best case: the compiler exposes a 128-bit type.
72 // We perform two 64x64-bit multiplications, add the higher 64 bits of the
73 // lower result to the higher result, and shift by __j - 64 bits.
75 // We explicitly cast from 64-bit to 128-bit, so the compiler can tell
76 // that these are only 64-bit inputs, and can map these to the best
78 // x64 machines happen to have matching assembly instructions for
79 // 64x64-bit multiplications and 128-bit shifts.
81 // 2. Second best case: the compiler exposes intrinsics for the x64 assembly
82 // instructions mentioned in 1.
84 // 3. We only have 64x64-bit instructions that return the lower 64 bits of
88 // b. Split both into 31-bit pieces, which guarantees no internal overflow,
90 // c. Split only the first factor into 31-bit pieces, which also guarantees
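// An illustrative sketch of case 1 above, assuming the compiler exposes
// unsigned __int128 (a GCC/Clang extension); the helper name is hypothetical,
// but this mirrors Ryu's reference formulation of the 64x128-bit multiply-shift:
inline uint64_t __mulShift64_sketch(const uint64_t __m, const uint64_t* const __mul, const int32_t __j) {
  // Two 64x64-bit multiplications against the halves of the 128-bit factor.
  const unsigned __int128 __b0 = static_cast<unsigned __int128>(__m) * __mul[0];
  const unsigned __int128 __b2 = static_cast<unsigned __int128>(__m) * __mul[1];
  // Add the high 64 bits of the low product to the high product,
  // then shift right by __j - 64 (the high product absorbs the carry).
  return static_cast<uint64_t>(((__b0 >> 64) + __b2) >> (__j - 64));
}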
98 const uint64_t __low1 = __ryu_umul128(__m, __mul[1], &__high1); // 64
105 return __ryu_shiftright128(__sum, __high1, static_cast<uint32_t>(__j - 64));
111 *__vm = __mulShift(4 * __m - 1 - __mmShift, __mul, __j);
118 uint64_t* const __vp, uint64_t* const __vm, const uint32_t __mmShift) { // TRANSITION, VSO-634761
119 __m <<= 1;
124 const uint64_t __mid = __tmp + __ryu_umul128(__m, __mul[1], &__hi);
128 const uint64_t __mid2 = __mid + __mul[1] + (__lo2 < __lo);
130 *__vp = __ryu_shiftright128(__mid2, __hi2, static_cast<uint32_t>(__j - 64 - 1));
132 if (__mmShift == 1) {
133 const uint64_t __lo3 = __lo - __mul[0];
134 const uint64_t __mid3 = __mid - __mul[1] - (__lo3 > __lo);
135 const uint64_t __hi3 = __hi - (__mid3 > __mid);
136 *__vm = __ryu_shiftright128(__mid3, __hi3, static_cast<uint32_t>(__j - 64 - 1));
141 const uint64_t __lo4 = __lo3 - __mul[0];
142 const uint64_t __mid4 = __mid3 - __mul[1] - (__lo4 > __lo3);
143 const uint64_t __hi4 = __hi3 - (__mid4 > __mid3);
144 *__vm = __ryu_shiftright128(__mid4, __hi4, static_cast<uint32_t>(__j - 64));
147 return __ryu_shiftright128(__mid, __hi, static_cast<uint32_t>(__j - 64 - 1));
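// The (__lo3 > __lo) and (__mid3 > __mid) comparisons above propagate borrows
// through a multiword subtraction. A standalone 128-bit sketch of the trick
// (hypothetical helper, not from this file):
inline void __sub128_sketch(uint64_t& __lo, uint64_t& __hi, const uint64_t __sub_lo, const uint64_t __sub_hi) {
  const uint64_t __new_lo = __lo - __sub_lo;
  __hi = __hi - __sub_hi - (__new_lo > __lo); // borrow iff the low limb wrapped
  __lo = __new_lo;
}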
154 // The average output length is 16.38 digits, so we check high-to-low.
155 // Function precondition: __v is not an 18, 19, or 20-digit number.
156 // (17 digits are sufficient for round-tripping.)
174 return 1;
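// A loop-form sketch of the high-to-low digit count described above; the
// library unrolls the comparisons, but the logic is equivalent (helper name
// is hypothetical):
inline uint32_t __decimalLength17_sketch(const uint64_t __v) {
  uint64_t __p10 = 10000000000000000u; // 10^16, the smallest 17-digit value
  for (uint32_t __length = 17; __length > 1; --__length) {
    if (__v >= __p10) {
      return __length;
    }
    __p10 /= 10;
  }
  return 1;
}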
188 __e2 = 1 - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS - 2;
191 __e2 = static_cast<int32_t>(__ieeeExponent) - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS - 2;
192 __m2 = (1ull << __DOUBLE_MANTISSA_BITS) | __ieeeMantissa;
194 const bool __even = (__m2 & 1) == 0;
199 // Implicit bool -> int conversion. True is 1, false is 0.
200 const uint32_t __mmShift = __ieeeMantissa != 0 || __ieeeExponent <= 1;
203 // uint64_t __mm = __mv - 1 - __mmShift;
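// Worked example: an exact power of two (zero mantissa, exponent > 1) yields
// __mmShift == 0, so the lower boundary is __mv - 1 instead of __mv - 2; the
// neighbor below a power of two is only half as far away as the one above.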
205 // Step 3: Convert to a decimal power base using 128-bit arithmetic.
211 // I tried special-casing __q == 0, but there was no effect on performance.
212 // This expression is slightly faster than max(0, __log10Pow2(__e2) - 1).
213 const uint32_t __q = __log10Pow2(__e2) - (__e2 > 3);
215 const int32_t __k = __DOUBLE_POW5_INV_BITCOUNT + __pow5bits(static_cast<int32_t>(__q)) - 1;
216 const int32_t __i = -__e2 + static_cast<int32_t>(__q) + __k;
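// For reference, __log10Pow2 computes floor(log10(2^__e)) with a fixed-point
// multiply; this is Ryu's standard formulation (valid for 0 <= __e <= 1650):
inline uint32_t __log10Pow2_sketch(const int32_t __e) {
  return (static_cast<uint32_t>(__e) * 78913) >> 18; // 78913 / 2^18 ~= log10(2)
}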
222 const uint32_t __mvMod5 = static_cast<uint32_t>(__mv) - 5 * static_cast<uint32_t>(__div5(__mv));
226 // Same as min(__e2 + (~__mm & 1), __pow5Factor(__mm)) >= __q
227 // <=> __e2 + (~__mm & 1) >= __q && __pow5Factor(__mm) >= __q
229 __vmIsTrailingZeros = __multipleOfPowerOf5(__mv - 1 - __mmShift, __q);
231 // Same as min(__e2 + 1, __pow5Factor(__mp)) >= __q.
232 __vp -= __multipleOfPowerOf5(__mv + 2, __q);
236 // This expression is slightly faster than max(0, __log10Pow5(-__e2) - 1).
237 const uint32_t __q = __log10Pow5(-__e2) - (-__e2 > 1);
239 const int32_t __i = -__e2 - static_cast<int32_t>(__q);
240 const int32_t __k = __pow5bits(__i) - __DOUBLE_POW5_BITCOUNT;
241 const int32_t __j = static_cast<int32_t>(__q) - __k;
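// Similarly, __log10Pow5 computes floor(log10(5^__e)) (valid for 0 <= __e <= 2620):
inline uint32_t __log10Pow5_sketch(const int32_t __e) {
  return (static_cast<uint32_t>(__e) * 732923) >> 20; // 732923 / 2^20 ~= log10(5)
}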
243 if (__q <= 1) {
248 // __mm = __mv - 1 - __mmShift, so it has 1 trailing 0 bit iff __mmShift == 1.
249 __vmIsTrailingZeros = __mmShift == 1;
251 // __mp = __mv + 2, so it always has at least one trailing 0 bit.
252 --__vp;
255 // We need to compute min(ntz(__mv), __pow5Factor(__mv) - __e2) >= __q - 1
256 // <=> ntz(__mv) >= __q - 1 && __pow5Factor(__mv) - __e2 >= __q - 1
257 // <=> ntz(__mv) >= __q - 1 (__e2 is negative and -__e2 >= __q)
258 // <=> (__mv & ((1 << (__q - 1)) - 1)) == 0
260 __vrIsTrailingZeros = __multipleOfPowerOf2(__mv, __q - 1);
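// __multipleOfPowerOf2 tests exactly the condition derived above:
inline bool __multipleOfPowerOf2_sketch(const uint64_t __value, const uint32_t __p) {
  return (__value & ((1ull << __p) - 1)) == 0; // the low __p bits are all zero
}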
277 const uint32_t __vmMod10 = static_cast<uint32_t>(__vm) - 10 * static_cast<uint32_t>(__vmDiv10);
279 const uint32_t __vrMod10 = static_cast<uint32_t>(__vr) - 10 * static_cast<uint32_t>(__vrDiv10);
291 const uint32_t __vmMod10 = static_cast<uint32_t>(__vm) - 10 * static_cast<uint32_t>(__vmDiv10);
297 const uint32_t __vrMod10 = static_cast<uint32_t>(__vr) - 10 * static_cast<uint32_t>(__vrDiv10);
310 // We need to take __vr + 1 if __vr is outside bounds or we need to round up.
319 const uint32_t __vrMod100 = static_cast<uint32_t>(__vr) - 100 * static_cast<uint32_t>(__vrDiv100);
327 // 0: 0.03%, 1: 13.8%, 2: 70.6%, 3: 14.0%, 4: 1.40%, 5: 0.14%, 6+: 0.02%
329 // 0: 70.6%, 1: 27.8%, 2: 1.40%, 3: 0.14%, 4+: 0.02%
337 const uint32_t __vrMod10 = static_cast<uint32_t>(__vr) - 10 * static_cast<uint32_t>(__vrDiv10);
344 // We need to take __vr + 1 if __vr is outside bounds or we need to round up.
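// In Ryu's reference code, this common-case decision reduces to roughly
// (a sketch; __lastRemovedDigit is the last digit shifted out above):
//   _Output = __vr + (__vr == __vm || __lastRemovedDigit >= 5);
// i.e. round up when the removed digits amount to at least a half, or when
// rounding down would collide with the lower bound __vm.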
361 int32_t _Scientific_exponent = _Ryu_exponent + static_cast<int32_t>(__olength) - 1;
367 if (__olength == 1) {
369 // 1e-3 | "0.001" | "1e-03"
370 // 1e4 | "10000" | "1e+04"
371 _Lower = -3;
375 // 1234e-7 | "0.0001234" | "1.234e-04"
377 _Lower = -static_cast<int32_t>(__olength + 3);
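// Worked example: with __olength == 4, _Lower == -7, so 1234e-7 still prints
// fixed as "0.0001234" (9 characters), since "1.234e-04" would be no shorter.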
390 // - if P > X >= -4, the conversion is with style f [...].
391 // - otherwise, the conversion is with style e [...]."
392 if (-4 <= _Scientific_exponent && _Scientific_exponent < 6) {
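// Worked example: 17.29 has _Scientific_exponent == 1, inside [-4, 6), so the
// fixed form "17.29" is chosen; 0.00001729 has _Scientific_exponent == -5,
// below -4, so the scientific form "1.729e-05" is chosen.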
403 // --------------|----------|---------------|----------------------|---------------------------------------
405 // 1 | 17290 | 5 | (sometimes adjusted) | when the trimmed digits are nonzero.
406 // --------------|----------|---------------|----------------------|---------------------------------------
408 // --------------|----------|---------------|----------------------|---------------------------------------
409 // -1 | 172.9 | 3 | __olength + 1 | This case can't happen for
410 // -2 | 17.29 | 2 | | __olength == 1, but no additional
411 // -3 | 1.729 | 1 | | code is needed to avoid it.
412 // --------------|----------|---------------|----------------------|---------------------------------------
413 // -4 | 0.1729 | 0 | 2 - _Ryu_exponent | C11 7.21.6.1 "The fprintf function"/8:
414 // -5 | 0.01729 | -1 | | "If a decimal-point character appears,
415 // -6 | 0.001729 | -2 | | at least one digit appears before it."
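// Worked example: "0.1729" has __olength == 4 and _Ryu_exponent == -4, so
// _Total_fixed_length == 2 - _Ryu_exponent == 6, matching its six characters.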
422 if (_Output == 1) {
424 // For example, 1e23 is exactly "99999999999999991611392", which is 23 digits instead of 24.
427 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,0,1,0,1,1,1,0,1,1,1,0,0,0,0,0,
428 1,1,0,0,1,0,1,1,1,0,0,0,0,1,1,1,1,0,0,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,1,0,1,1,0,0,0,0,1,1,1,
429 1,0,0,0,0,0,0,0,1,1,0,1,1,0,0,1,0,1,0,1,0,1,1,0,0,0,0,0,1,1,1,0,0,1,1,1,1,1,0,1,0,1,1,0,1,
430 1,0,0,0,0,0,0,0,0,0,1,1,1,0,0,1,0,0,1,0,0,1,1,1,1,0,0,1,1,0,1,1,0,1,1,0,1,0,0,0,1,0,0,0,1,
431 0,1,0,1,0,1,1,1,0,0,0,0,0,0,1,1,1,1,0,0,1,0,1,1,1,0,0,0,1,0,1,1,1,1,1,1,0,1,0,1,1,0,0,0,1,
432 1,1,0,1,1,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,1,1,0,0,0,1,0,1,0,0,0,0,0,1,1,0,
433 0,1,0,1,1,1,0,0,1,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,1,1,0,1,0,0,0,0,0,1,1,0,1,0 };
434 _Total_fixed_length -= _Adjustment[_Ryu_exponent];
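// Example from the table data above: _Adjustment[23] == 1 because the double
// nearest to 1e23 is exactly 99999999999999991611392, which prints 23 digits
// rather than the 24 that an exact 10^23 would need.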
438 _Total_fixed_length = __olength + 1;
440 _Total_fixed_length = static_cast<uint32_t>(2 - _Ryu_exponent);
443 if (_Last - _First < static_cast<ptrdiff_t>(_Total_fixed_length)) {
459 // with 17 decimal digits, which is double's round-trip limit.)
460 // _Ryu_exponent is [1, 22].
461 // Normalization adds [2, 52] (aside: at least 2 because the pre-normalized mantissa is at least 5).
467 // (That's not a problem for round-tripping, because X is close enough to the original double,
468 // but X isn't mathematically equal to the original double.) This requires a high-precision fallback.
471 // need to re-synthesize it; the original double must have been X, because Ryu wouldn't produce the
472 // same output for two different doubles X and Y). This allows Ryu's output to be used (zero-filled).
474 // (2^53 - 1) / 5^0 (for indexing), (2^53 - 1) / 5^1, ..., (2^53 - 1) / 5^22
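// Why such a table answers the round-trip question: zero-filling Ryu's digits
// asserts X == __v.__mantissa * 10^_Ryu_exponent exactly, which holds iff
// __v.__mantissa * 5^_Ryu_exponent still fits in 53 significand bits (the
// power-of-2 part is always exact). A sketch of the check, assuming the table
// above is named _Max_shifted_mantissa:
//   const bool _Exact = __v.__mantissa <= _Max_shifted_mantissa[_Ryu_exponent];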
483 #else // ^^^ 64-bit ^^^ / vvv 32-bit vvv
492 #endif // ^^^ 32-bit ^^^
505 // Print the decimal digits, left-aligned within [_First, _First + _Total_fixed_length).
508 // Print the decimal digits, right-aligned within [_First, _First + _Total_fixed_length).
512 // We prefer 32-bit operations, even on 64-bit platforms.
517 // Expensive 64-bit division.
519 uint32_t __output2 = static_cast<uint32_t>(_Output - 100000000 * __q);
525 const uint32_t __c0 = (__c % 100) << 1;
526 const uint32_t __c1 = (__c / 100) << 1;
527 const uint32_t __d0 = (__d % 100) << 1;
528 const uint32_t __d1 = (__d / 100) << 1;
530 std::memcpy(_Mid -= 2, __DIGIT_TABLE + __c0, 2);
531 std::memcpy(_Mid -= 2, __DIGIT_TABLE + __c1, 2);
532 std::memcpy(_Mid -= 2, __DIGIT_TABLE + __d0, 2);
533 std::memcpy(_Mid -= 2, __DIGIT_TABLE + __d1, 2);
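// __DIGIT_TABLE is the usual 200-byte "0001...9899" table, and (__c % 100) << 1
// is the byte offset of one two-digit pair, so each memcpy emits two digits per
// division. A self-contained sketch of the technique (hypothetical helper;
// assumes <cstring> and <cstdint>):
inline char* __write_pair_sketch(char* _Mid, const uint32_t __pair) { // __pair in [0, 99]
  static constexpr char __table[201] =
      "00010203040506070809101112131415161718192021222324"
      "25262728293031323334353637383940414243444546474849"
      "50515253545556575859606162636465666768697071727374"
      "75767778798081828384858687888990919293949596979899";
  std::memcpy(_Mid -= 2, __table + __pair * 2, 2);
  return _Mid;
}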
537 #ifdef __clang__ // TRANSITION, LLVM-38217
538 const uint32_t __c = __output2 - 10000 * (__output2 / 10000);
543 const uint32_t __c0 = (__c % 100) << 1;
544 const uint32_t __c1 = (__c / 100) << 1;
545 std::memcpy(_Mid -= 2, __DIGIT_TABLE + __c0, 2);
546 std::memcpy(_Mid -= 2, __DIGIT_TABLE + __c1, 2);
549 const uint32_t __c = (__output2 % 100) << 1;
551 std::memcpy(_Mid -= 2, __DIGIT_TABLE + __c, 2);
554 const uint32_t __c = __output2 << 1;
555 std::memcpy(_Mid -= 2, __DIGIT_TABLE + __c, 2);
557 *--_Mid = static_cast<char>('0' + __output2);
567 std::memmove(_First, _First + 1, static_cast<size_t>(_Whole_digits));
572 _First[1] = '.';
573 std::memset(_First + 2, '0', static_cast<size_t>(-_Whole_digits));
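// Worked example: 1234e-7 prints as "0.0001234"; the digits "1234" are
// right-aligned, "0." is written at the front, and -_Whole_digits == 3 zeros
// are filled in between by the memset above.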
579 const uint32_t _Total_scientific_length = __olength + (__olength > 1) // digits + possible decimal point
580 + (-100 < _Scientific_exponent && _Scientific_exponent < 100 ? 4 : 5); // + scientific exponent
581 if (_Last - _First < static_cast<ptrdiff_t>(_Total_scientific_length)) {
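// Worked example: "1.729e-05" is 4 digits, 1 decimal point, and 4 exponent
// characters ("e-05", since -100 < -5 < 100), so _Total_scientific_length == 9.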
588 // We prefer 32-bit operations, even on 64-bit platforms.
593 // Expensive 64-bit division.
595 uint32_t __output2 = static_cast<uint32_t>(_Output) - 100000000 * static_cast<uint32_t>(__q);
601 const uint32_t __c0 = (__c % 100) << 1;
602 const uint32_t __c1 = (__c / 100) << 1;
603 const uint32_t __d0 = (__d % 100) << 1;
604 const uint32_t __d1 = (__d / 100) << 1;
605 std::memcpy(__result + __olength - __i - 1, __DIGIT_TABLE + __c0, 2);
606 std::memcpy(__result + __olength - __i - 3, __DIGIT_TABLE + __c1, 2);
607 std::memcpy(__result + __olength - __i - 5, __DIGIT_TABLE + __d0, 2);
608 std::memcpy(__result + __olength - __i - 7, __DIGIT_TABLE + __d1, 2);
613 #ifdef __clang__ // TRANSITION, LLVM-38217
614 const uint32_t __c = __output2 - 10000 * (__output2 / 10000);
619 const uint32_t __c0 = (__c % 100) << 1;
620 const uint32_t __c1 = (__c / 100) << 1;
621 std::memcpy(__result + __olength - __i - 1, __DIGIT_TABLE + __c0, 2);
622 std::memcpy(__result + __olength - __i - 3, __DIGIT_TABLE + __c1, 2);
626 const uint32_t __c = (__output2 % 100) << 1;
628 std::memcpy(__result + __olength - __i - 1, __DIGIT_TABLE + __c, 2);
632 const uint32_t __c = __output2 << 1;
634 __result[2] = __DIGIT_TABLE[__c + 1];
642 if (__olength > 1) {
643 __result[1] = '.';
644 __index = __olength + 1;
646 __index = 1;
652 __result[__index++] = '-';
653 _Scientific_exponent = -_Scientific_exponent;
673 const uint64_t __m2 = (1ull << __DOUBLE_MANTISSA_BITS) | __ieeeMantissa;
674 const int32_t __e2 = static_cast<int32_t>(__ieeeExponent) - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS;
682 if (__e2 < -52) {
683 // f < 1.
687 // Since 2^52 <= __m2 < 2^53 and 0 <= -__e2 <= 52: 1 <= f = __m2 / 2^-__e2 < 2^53.
688 // Test if the lower -__e2 bits of the significand are 0, i.e. whether the fraction is 0.
689 const uint64_t __mask = (1ull << -__e2) - 1;
695 // f is an integer in the range [1, 2^53).
698 __v->__mantissa = __m2 >> -__e2;
699 __v->__exponent = 0;
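// Worked example: for 8.0, __m2 == 1ull << 52 and __e2 == -49; the low 49 bits
// of __m2 are zero, so the fraction is zero, and __v->__mantissa becomes
// __m2 >> 49 == 8 with __v->__exponent == 0.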
706 // Step 1: Decode the floating-point number, and unify normalized and subnormal cases.
712 if (_Last - _First < 5) {
728 return { _First + 1, errc{} };
732 const uint64_t __ieeeMantissa = __bits & ((1ull << __DOUBLE_MANTISSA_BITS) - 1);
736 // const uint64_t _Mantissa2 = __ieeeMantissa | (1ull << __DOUBLE_MANTISSA_BITS); // restore implicit bit
738 - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS; // bias and normalization
743 // For nonzero integers, _Exponent2 >= -52. (The minimum value occurs when _Mantissa2 * 2^_Exponent2 is 1.
744 // In that case, _Mantissa2 is the implicit 1 bit followed by 52 zeros, so _Exponent2 is -52 to shift away
746 // (as positive exponents make the range non-dense). For that dense range, Ryu will always be used:
749 // Positive exponents are the non-dense range of exactly representable integers. This contains all of the values
750 // for which Ryu can't be used (and a few Ryu-friendly values). We can save time by detecting positive
752 // (so it's okay if we call it with a Ryu-friendly value).
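// Example: every integer below 2^53 is exactly representable (the dense
// range), while from 2^53 onward the spacing is at least 2 (e.g. 2^53 + 1 is
// not representable); a positive _Exponent2 cheaply identifies that
// non-dense range.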
761 // For small integers in the range [1, 2^53), __v.__mantissa might contain trailing (decimal) zeros.
763 // (This is not needed for fixed-point notation, so it might be beneficial to trim
764 // trailing zeros in __to_chars only if needed - once fixed-point notation output is implemented.)
767 const uint32_t __r = static_cast<uint32_t>(__v.__mantissa) - 10 * static_cast<uint32_t>(__q);
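// A sketch of the trimming loop this line belongs to: strip trailing decimal
// zeros and shift them into the exponent. The structure follows Ryu's
// reference code (the library divides via its __div10 helper, as with __div5
// above; this standalone helper is hypothetical):
inline void __trim_trailing_zeros_sketch(uint64_t& __mantissa, int32_t& __exponent) {
  for (;;) {
    const uint64_t __q = __mantissa / 10;
    if (__mantissa - 10 * __q != 0) {
      break; // last decimal digit is nonzero; done
    }
    __mantissa = __q;
    ++__exponent;
  }
}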
783 // clang-format on