//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

// Copyright 2018 Ulf Adams
// Copyright (c) Microsoft Corporation. All rights reserved.

// Boost Software License - Version 1.0 - August 17th, 2003

// Permission is hereby granted, free of charge, to any person or organization
// obtaining a copy of the software and accompanying documentation covered by
// this license (the "Software") to use, reproduce, display, distribute,
// execute, and transmit the Software, and to prepare derivative works of the
// Software, and to permit third-parties to whom the Software is furnished to
// do so, all subject to the following:

// The copyright notices in the Software and this entire statement, including
// the above license grant, this restriction and the following disclaimer,
// must be included in all copies of the Software, in whole or in part, and
// all derivative works of the Software, unless such copies or derivative
// works are solely in the form of machine-executable object code generated by
// a source language processor.

// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
// SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
// FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

// Avoid formatting to keep the changes with the original code minimal.
// clang-format off

#include "__config"
#include "charconv"

#include "include/ryu/common.h"
#include "include/ryu/d2fixed.h"
#include "include/ryu/d2s.h"
#include "include/ryu/d2s_full_table.h"
#include "include/ryu/d2s_intrinsics.h"
#include "include/ryu/digit_table.h"
#include "include/ryu/ryu.h"

_LIBCPP_BEGIN_NAMESPACE_STD

// We need a 64x128-bit multiplication and a subsequent 128-bit shift.
// Multiplication:
//   The 64-bit factor is variable and passed in, the 128-bit factor comes
//   from a lookup table. We know that the 64-bit factor only has 55
//   significant bits (i.e., the 9 topmost bits are zeros). The 128-bit
//   factor only has 124 significant bits (i.e., the 4 topmost bits are
//   zeros).
// Shift:
//   In principle, the multiplication result requires 55 + 124 = 179 bits to
//   represent. However, we then shift this value to the right by __j, which is
//   at least __j >= 115, so the result is guaranteed to fit into 179 - 115 = 64
//   bits. This means that we only need the topmost 64 significant bits of
//   the 64x128-bit multiplication.
//
// There are several ways to do this:
// 1. Best case: the compiler exposes a 128-bit type.
//    We perform two 64x64-bit multiplications, add the higher 64 bits of the
//    lower result to the higher result, and shift by __j - 64 bits.
//
//    We explicitly cast from 64-bit to 128-bit, so the compiler can tell
//    that these are only 64-bit inputs, and can map these to the best
//    possible sequence of assembly instructions.
//    x64 machines happen to have matching assembly instructions for
//    64x64-bit multiplications and 128-bit shifts.
//
// 2. Second best case: the compiler exposes intrinsics for the x64 assembly
//    instructions mentioned in 1.
//
// 3. We only have 64x64 bit instructions that return the lower 64 bits of
//    the result, i.e., we have to use plain C.
//    Our inputs are less than the full width, so we have three options:
//    a. Ignore this fact and just implement the intrinsics manually.
//    b. Split both into 31-bit pieces, which guarantees no internal overflow,
//       but requires extra work upfront (unless we change the lookup table).
//    c. Split only the first factor into 31-bit pieces, which also guarantees
//       no internal overflow, but requires extra work since the intermediate
//       results are not perfectly aligned.
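
// Illustrative sketch (not part of the upstream sources): on compilers that
// expose a 128-bit type, approach 1 can be written directly. The name below is
// hypothetical; the contract mirrors __mulShift further down (__m has at most
// 55 significant bits, __mul points at the low/high 64-bit halves of the
// 128-bit factor, and 64 < __j < 128):
//
//   inline uint64_t __mulShift_via_uint128(const uint64_t __m,
//                                          const uint64_t* const __mul,
//                                          const int32_t __j) {
//     const unsigned __int128 __b0 = static_cast<unsigned __int128>(__m) * __mul[0]; // low partial product
//     const unsigned __int128 __b2 = static_cast<unsigned __int128>(__m) * __mul[1]; // high partial product
//     return static_cast<uint64_t>(((__b0 >> 64) + __b2) >> (__j - 64));
//   }
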
#ifdef _LIBCPP_INTRINSIC128

[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint64_t __mulShift(const uint64_t __m, const uint64_t* const __mul, const int32_t __j) {
  // __m is maximum 55 bits
  uint64_t __high1;                                               // 128
  const uint64_t __low1 = __ryu_umul128(__m, __mul[1], &__high1); // 64
  uint64_t __high0;                                               // 64
  (void) __ryu_umul128(__m, __mul[0], &__high0);                  // 0
  const uint64_t __sum = __high0 + __low1;
  if (__sum < __high0) {
    ++__high1; // overflow into __high1
  }
  return __ryu_shiftright128(__sum, __high1, static_cast<uint32_t>(__j - 64));
}

[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint64_t __mulShiftAll(const uint64_t __m, const uint64_t* const __mul, const int32_t __j,
  uint64_t* const __vp, uint64_t* const __vm, const uint32_t __mmShift) {
  *__vp = __mulShift(4 * __m + 2, __mul, __j);
  *__vm = __mulShift(4 * __m - 1 - __mmShift, __mul, __j);
  return __mulShift(4 * __m, __mul, __j);
}

#else // ^^^ intrinsics available ^^^ / vvv intrinsics unavailable vvv

[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline _LIBCPP_ALWAYS_INLINE uint64_t __mulShiftAll(uint64_t __m, const uint64_t* const __mul, const int32_t __j,
  uint64_t* const __vp, uint64_t* const __vm, const uint32_t __mmShift) { // TRANSITION, VSO-634761
  __m <<= 1;
  // __m is maximum 55 bits
  uint64_t __tmp;
  const uint64_t __lo = __ryu_umul128(__m, __mul[0], &__tmp);
  uint64_t __hi;
  const uint64_t __mid = __tmp + __ryu_umul128(__m, __mul[1], &__hi);
  __hi += __mid < __tmp; // overflow into __hi

  const uint64_t __lo2 = __lo + __mul[0];
  const uint64_t __mid2 = __mid + __mul[1] + (__lo2 < __lo);
  const uint64_t __hi2 = __hi + (__mid2 < __mid);
  *__vp = __ryu_shiftright128(__mid2, __hi2, static_cast<uint32_t>(__j - 64 - 1));

  if (__mmShift == 1) {
    const uint64_t __lo3 = __lo - __mul[0];
    const uint64_t __mid3 = __mid - __mul[1] - (__lo3 > __lo);
    const uint64_t __hi3 = __hi - (__mid3 > __mid);
    *__vm = __ryu_shiftright128(__mid3, __hi3, static_cast<uint32_t>(__j - 64 - 1));
  } else {
    const uint64_t __lo3 = __lo + __lo;
    const uint64_t __mid3 = __mid + __mid + (__lo3 < __lo);
    const uint64_t __hi3 = __hi + __hi + (__mid3 < __mid);
    const uint64_t __lo4 = __lo3 - __mul[0];
    const uint64_t __mid4 = __mid3 - __mul[1] - (__lo4 > __lo3);
    const uint64_t __hi4 = __hi3 - (__mid4 > __mid3);
    *__vm = __ryu_shiftright128(__mid4, __hi4, static_cast<uint32_t>(__j - 64));
  }

  return __ryu_shiftright128(__mid, __hi, static_cast<uint32_t>(__j - 64 - 1));
}

#endif // ^^^ intrinsics unavailable ^^^
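
// Why the portable fallback above works (explanatory note, not from the
// upstream sources): write __n for the original __m parameter, so that after
// `__m <<= 1` the code computes the 192-bit product __P = 2 * __n * __mul in
// (__hi, __mid, __lo). Shifting the doubled product right by __j is the same
// as shifting __P right by __j - 1, so:
//   __vr = (4 * __n    ) * __mul >> __j = __P              >> (__j - 1)
//   __vp = (4 * __n + 2) * __mul >> __j = (__P + __mul)    >> (__j - 1)
// and, for __mmShift == 1,
//   __vm = (4 * __n - 2) * __mul >> __j = (__P - __mul)    >> (__j - 1)
// while for __mmShift == 0,
//   __vm = (4 * __n - 1) * __mul >> __j = (2 * __P - __mul) >> __j
// which is why the code shifts by (__j - 64 - 1) in the first three cases and
// by (__j - 64) in the last one.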

[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint32_t __decimalLength17(const uint64_t __v) {
  // This is slightly faster than a loop.
  // The average output length is 16.38 digits, so we check high-to-low.
  // Function precondition: __v is not an 18, 19, or 20-digit number.
  // (17 digits are sufficient for round-tripping.)
  _LIBCPP_ASSERT(__v < 100000000000000000u, "");
  if (__v >= 10000000000000000u) { return 17; }
  if (__v >= 1000000000000000u) { return 16; }
  if (__v >= 100000000000000u) { return 15; }
  if (__v >= 10000000000000u) { return 14; }
  if (__v >= 1000000000000u) { return 13; }
  if (__v >= 100000000000u) { return 12; }
  if (__v >= 10000000000u) { return 11; }
  if (__v >= 1000000000u) { return 10; }
  if (__v >= 100000000u) { return 9; }
  if (__v >= 10000000u) { return 8; }
  if (__v >= 1000000u) { return 7; }
  if (__v >= 100000u) { return 6; }
  if (__v >= 10000u) { return 5; }
  if (__v >= 1000u) { return 4; }
  if (__v >= 100u) { return 3; }
  if (__v >= 10u) { return 2; }
  return 1;
}

// A floating decimal representing m * 10^e.
struct __floating_decimal_64 {
  uint64_t __mantissa;
  int32_t __exponent;
};
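
// For example (explanatory note, not from the upstream sources): the double
// 17.29 has 1729 * 10^-2 as its shortest round-tripping representation, i.e.
// { __mantissa = 1729, __exponent = -2 }, and __decimalLength17(1729) == 4.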

[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline __floating_decimal_64 __d2d(const uint64_t __ieeeMantissa, const uint32_t __ieeeExponent) {
  int32_t __e2;
  uint64_t __m2;
  if (__ieeeExponent == 0) {
    // We subtract 2 so that the bounds computation has 2 additional bits.
    __e2 = 1 - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS - 2;
    __m2 = __ieeeMantissa;
  } else {
    __e2 = static_cast<int32_t>(__ieeeExponent) - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS - 2;
    __m2 = (1ull << __DOUBLE_MANTISSA_BITS) | __ieeeMantissa;
  }
  const bool __even = (__m2 & 1) == 0;
  const bool __acceptBounds = __even;

  // Step 2: Determine the interval of valid decimal representations.
  const uint64_t __mv = 4 * __m2;
  // Implicit bool -> int conversion. True is 1, false is 0.
  const uint32_t __mmShift = __ieeeMantissa != 0 || __ieeeExponent <= 1;
  // We would compute __mp and __mm like this:
  // uint64_t __mp = 4 * __m2 + 2;
  // uint64_t __mm = __mv - 1 - __mmShift;

  // Step 3: Convert to a decimal power base using 128-bit arithmetic.
  uint64_t __vr, __vp, __vm;
  int32_t __e10;
  bool __vmIsTrailingZeros = false;
  bool __vrIsTrailingZeros = false;
  if (__e2 >= 0) {
    // I tried special-casing __q == 0, but there was no effect on performance.
    // This expression is slightly faster than max(0, __log10Pow2(__e2) - 1).
    const uint32_t __q = __log10Pow2(__e2) - (__e2 > 3);
    __e10 = static_cast<int32_t>(__q);
    const int32_t __k = __DOUBLE_POW5_INV_BITCOUNT + __pow5bits(static_cast<int32_t>(__q)) - 1;
    const int32_t __i = -__e2 + static_cast<int32_t>(__q) + __k;
    __vr = __mulShiftAll(__m2, __DOUBLE_POW5_INV_SPLIT[__q], __i, &__vp, &__vm, __mmShift);
    if (__q <= 21) {
      // This should use __q <= 22, but I think 21 is also safe. Smaller values
      // may still be safe, but it's more difficult to reason about them.
      // Only one of __mp, __mv, and __mm can be a multiple of 5, if any.
      const uint32_t __mvMod5 = static_cast<uint32_t>(__mv) - 5 * static_cast<uint32_t>(__div5(__mv));
      if (__mvMod5 == 0) {
        __vrIsTrailingZeros = __multipleOfPowerOf5(__mv, __q);
      } else if (__acceptBounds) {
        // Same as min(__e2 + (~__mm & 1), __pow5Factor(__mm)) >= __q
        // <=> __e2 + (~__mm & 1) >= __q && __pow5Factor(__mm) >= __q
        // <=> true && __pow5Factor(__mm) >= __q, since __e2 >= __q.
        __vmIsTrailingZeros = __multipleOfPowerOf5(__mv - 1 - __mmShift, __q);
      } else {
        // Same as min(__e2 + 1, __pow5Factor(__mp)) >= __q.
        __vp -= __multipleOfPowerOf5(__mv + 2, __q);
      }
    }
  } else {
    // This expression is slightly faster than max(0, __log10Pow5(-__e2) - 1).
    const uint32_t __q = __log10Pow5(-__e2) - (-__e2 > 1);
    __e10 = static_cast<int32_t>(__q) + __e2;
    const int32_t __i = -__e2 - static_cast<int32_t>(__q);
    const int32_t __k = __pow5bits(__i) - __DOUBLE_POW5_BITCOUNT;
    const int32_t __j = static_cast<int32_t>(__q) - __k;
    __vr = __mulShiftAll(__m2, __DOUBLE_POW5_SPLIT[__i], __j, &__vp, &__vm, __mmShift);
    if (__q <= 1) {
      // {__vr,__vp,__vm} is trailing zeros if {__mv,__mp,__mm} has at least __q trailing 0 bits.
      // __mv = 4 * __m2, so it always has at least two trailing 0 bits.
      __vrIsTrailingZeros = true;
      if (__acceptBounds) {
        // __mm = __mv - 1 - __mmShift, so it has 1 trailing 0 bit iff __mmShift == 1.
        __vmIsTrailingZeros = __mmShift == 1;
      } else {
        // __mp = __mv + 2, so it always has at least one trailing 0 bit.
        --__vp;
      }
    } else if (__q < 63) { // TRANSITION(ulfjack): Use a tighter bound here.
      // We need to compute min(ntz(__mv), __pow5Factor(__mv) - __e2) >= __q - 1
      // <=> ntz(__mv) >= __q - 1 && __pow5Factor(__mv) - __e2 >= __q - 1
      // <=> ntz(__mv) >= __q - 1 (__e2 is negative and -__e2 >= __q)
      // <=> (__mv & ((1 << (__q - 1)) - 1)) == 0
      // We also need to make sure that the left shift does not overflow.
      __vrIsTrailingZeros = __multipleOfPowerOf2(__mv, __q - 1);
    }
  }
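
  // Explanatory note (not from the upstream sources): at this point __vm,
  // __vr, and __vp are the images of __mm, __mv, and __mp rescaled into the
  // decimal base, __vX ~= __mX * 2^__e2 / 10^__e10. Because __e2 was lowered
  // by 2 above, f == __mv * 2^__e2 exactly, and the factor of 4 in __mv makes
  // the half-ulp and quarter-ulp interval boundaries __mp and __mm exact
  // integers.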

  // Step 4: Find the shortest decimal representation in the interval of valid representations.
  int32_t __removed = 0;
  uint8_t __lastRemovedDigit = 0;
  uint64_t _Output;
  // On average, we remove ~2 digits.
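  // Illustrative trace (hypothetical values, not from the upstream sources):
  // with __vm = 12340, __vr = 12345, __vp = 12351 and no trailing zeros, the
  // specialized loop below removes one digit (__vp/10 == 1235 > 1234 == __vm/10,
  // then 123 == 123 stops it), leaving __vr = 1234 with removed digit 5, so
  // _Output = 1235 and __removed = 1.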
  if (__vmIsTrailingZeros || __vrIsTrailingZeros) {
    // General case, which happens rarely (~0.7%).
    for (;;) {
      const uint64_t __vpDiv10 = __div10(__vp);
      const uint64_t __vmDiv10 = __div10(__vm);
      if (__vpDiv10 <= __vmDiv10) {
        break;
      }
      const uint32_t __vmMod10 = static_cast<uint32_t>(__vm) - 10 * static_cast<uint32_t>(__vmDiv10);
      const uint64_t __vrDiv10 = __div10(__vr);
      const uint32_t __vrMod10 = static_cast<uint32_t>(__vr) - 10 * static_cast<uint32_t>(__vrDiv10);
      __vmIsTrailingZeros &= __vmMod10 == 0;
      __vrIsTrailingZeros &= __lastRemovedDigit == 0;
      __lastRemovedDigit = static_cast<uint8_t>(__vrMod10);
      __vr = __vrDiv10;
      __vp = __vpDiv10;
      __vm = __vmDiv10;
      ++__removed;
    }
    if (__vmIsTrailingZeros) {
      for (;;) {
        const uint64_t __vmDiv10 = __div10(__vm);
        const uint32_t __vmMod10 = static_cast<uint32_t>(__vm) - 10 * static_cast<uint32_t>(__vmDiv10);
        if (__vmMod10 != 0) {
          break;
        }
        const uint64_t __vpDiv10 = __div10(__vp);
        const uint64_t __vrDiv10 = __div10(__vr);
        const uint32_t __vrMod10 = static_cast<uint32_t>(__vr) - 10 * static_cast<uint32_t>(__vrDiv10);
        __vrIsTrailingZeros &= __lastRemovedDigit == 0;
        __lastRemovedDigit = static_cast<uint8_t>(__vrMod10);
        __vr = __vrDiv10;
        __vp = __vpDiv10;
        __vm = __vmDiv10;
        ++__removed;
      }
    }
    if (__vrIsTrailingZeros && __lastRemovedDigit == 5 && __vr % 2 == 0) {
      // Round even if the exact number is .....50..0.
      __lastRemovedDigit = 4;
    }
    // We need to take __vr + 1 if __vr is outside bounds or we need to round up.
    _Output = __vr + ((__vr == __vm && (!__acceptBounds || !__vmIsTrailingZeros)) || __lastRemovedDigit >= 5);
  } else {
    // Specialized for the common case (~99.3%). Percentages below are relative to this.
    bool __roundUp = false;
    const uint64_t __vpDiv100 = __div100(__vp);
    const uint64_t __vmDiv100 = __div100(__vm);
    if (__vpDiv100 > __vmDiv100) { // Optimization: remove two digits at a time (~86.2%).
      const uint64_t __vrDiv100 = __div100(__vr);
      const uint32_t __vrMod100 = static_cast<uint32_t>(__vr) - 100 * static_cast<uint32_t>(__vrDiv100);
      __roundUp = __vrMod100 >= 50;
      __vr = __vrDiv100;
      __vp = __vpDiv100;
      __vm = __vmDiv100;
      __removed += 2;
    }
    // Loop iterations below (approximately), without optimization above:
    // 0: 0.03%, 1: 13.8%, 2: 70.6%, 3: 14.0%, 4: 1.40%, 5: 0.14%, 6+: 0.02%
    // Loop iterations below (approximately), with optimization above:
    // 0: 70.6%, 1: 27.8%, 2: 1.40%, 3: 0.14%, 4+: 0.02%
    for (;;) {
      const uint64_t __vpDiv10 = __div10(__vp);
      const uint64_t __vmDiv10 = __div10(__vm);
      if (__vpDiv10 <= __vmDiv10) {
        break;
      }
      const uint64_t __vrDiv10 = __div10(__vr);
      const uint32_t __vrMod10 = static_cast<uint32_t>(__vr) - 10 * static_cast<uint32_t>(__vrDiv10);
      __roundUp = __vrMod10 >= 5;
      __vr = __vrDiv10;
      __vp = __vpDiv10;
      __vm = __vmDiv10;
      ++__removed;
    }
    // We need to take __vr + 1 if __vr is outside bounds or we need to round up.
    _Output = __vr + (__vr == __vm || __roundUp);
  }
  const int32_t __exp = __e10 + __removed;

  __floating_decimal_64 __fd;
  __fd.__exponent = __exp;
  __fd.__mantissa = _Output;
  return __fd;
}
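
// Explanatory note (not from the upstream sources): as an end-to-end example,
// the smallest positive subnormal (__ieeeMantissa == 1, __ieeeExponent == 0)
// comes out of __d2d as { __mantissa = 5, __exponent = -324 }, which prints
// as "5e-324" in scientific notation.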

[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline to_chars_result __to_chars(char* const _First, char* const _Last, const __floating_decimal_64 __v,
  chars_format _Fmt, const double __f) {
  // Step 5: Print the decimal representation.
  uint64_t _Output = __v.__mantissa;
  int32_t _Ryu_exponent = __v.__exponent;
  const uint32_t __olength = __decimalLength17(_Output);
  int32_t _Scientific_exponent = _Ryu_exponent + static_cast<int32_t>(__olength) - 1;

  if (_Fmt == chars_format{}) {
    int32_t _Lower;
    int32_t _Upper;

    if (__olength == 1) {
      // Value | Fixed   | Scientific
      // 1e-3  | "0.001" | "1e-03"
      // 1e4   | "10000" | "1e+04"
      _Lower = -3;
      _Upper = 4;
    } else {
      // Value   | Fixed       | Scientific
      // 1234e-7 | "0.0001234" | "1.234e-04"
      // 1234e5  | "123400000" | "1.234e+08"
      _Lower = -static_cast<int32_t>(__olength + 3);
      _Upper = 5;
    }

    if (_Lower <= _Ryu_exponent && _Ryu_exponent <= _Upper) {
      _Fmt = chars_format::fixed;
    } else {
      _Fmt = chars_format::scientific;
    }
  } else if (_Fmt == chars_format::general) {
    // C11 7.21.6.1 "The fprintf function"/8:
    // "Let P equal [...] 6 if the precision is omitted [...].
    // Then, if a conversion with style E would have an exponent of X:
    // - if P > X >= -4, the conversion is with style f [...].
    // - otherwise, the conversion is with style e [...]."
    if (-4 <= _Scientific_exponent && _Scientific_exponent < 6) {
      _Fmt = chars_format::fixed;
    } else {
      _Fmt = chars_format::scientific;
    }
  }

  if (_Fmt == chars_format::fixed) {
    // Example: _Output == 1729, __olength == 4

    // _Ryu_exponent | Printed  | _Whole_digits | _Total_fixed_length  | Notes
    // --------------|----------|---------------|----------------------|---------------------------------------
    //             2 | 172900   |  6            | _Whole_digits        | Ryu can't be used for printing
    //             1 | 17290    |  5            | (sometimes adjusted) | when the trimmed digits are nonzero.
    // --------------|----------|---------------|----------------------|---------------------------------------
    //             0 | 1729     |  4            | _Whole_digits        | Unified length cases.
    // --------------|----------|---------------|----------------------|---------------------------------------
    //            -1 | 172.9    |  3            | __olength + 1        | This case can't happen for
    //            -2 | 17.29    |  2            |                      | __olength == 1, but no additional
    //            -3 | 1.729    |  1            |                      | code is needed to avoid it.
    // --------------|----------|---------------|----------------------|---------------------------------------
    //            -4 | 0.1729   |  0            | 2 - _Ryu_exponent    | C11 7.21.6.1 "The fprintf function"/8:
    //            -5 | 0.01729  | -1            |                      | "If a decimal-point character appears,
    //            -6 | 0.001729 | -2            |                      | at least one digit appears before it."

    const int32_t _Whole_digits = static_cast<int32_t>(__olength) + _Ryu_exponent;

    uint32_t _Total_fixed_length;
    if (_Ryu_exponent >= 0) { // cases "172900" and "1729"
      _Total_fixed_length = static_cast<uint32_t>(_Whole_digits);
      if (_Output == 1) {
        // Rounding can affect the number of digits.
        // For example, 1e23 is exactly "99999999999999991611392" which is 23 digits instead of 24.
        // We can use a lookup table to detect this and adjust the total length.
        static constexpr uint8_t _Adjustment[309] = {
          0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,0,1,0,1,1,1,0,1,1,1,0,0,0,0,0,
          1,1,0,0,1,0,1,1,1,0,0,0,0,1,1,1,1,0,0,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,1,0,1,1,0,0,0,0,1,1,1,
          1,0,0,0,0,0,0,0,1,1,0,1,1,0,0,1,0,1,0,1,0,1,1,0,0,0,0,0,1,1,1,0,0,1,1,1,1,1,0,1,0,1,1,0,1,
          1,0,0,0,0,0,0,0,0,0,1,1,1,0,0,1,0,0,1,0,0,1,1,1,1,0,0,1,1,0,1,1,0,1,1,0,1,0,0,0,1,0,0,0,1,
          0,1,0,1,0,1,1,1,0,0,0,0,0,0,1,1,1,1,0,0,1,0,1,1,1,0,0,0,1,0,1,1,1,1,1,1,0,1,0,1,1,0,0,0,1,
          1,1,0,1,1,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,1,1,0,0,0,1,0,1,0,0,0,0,0,1,1,0,
          0,1,0,1,1,1,0,0,1,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,1,1,0,1,0,0,0,0,0,1,1,0,1,0 };
        _Total_fixed_length -= _Adjustment[_Ryu_exponent];
        // _Whole_digits doesn't need to be adjusted because these cases won't refer to it later.
      }
    } else if (_Whole_digits > 0) { // case "17.29"
      _Total_fixed_length = __olength + 1;
    } else { // case "0.001729"
      _Total_fixed_length = static_cast<uint32_t>(2 - _Ryu_exponent);
    }

    if (_Last - _First < static_cast<ptrdiff_t>(_Total_fixed_length)) {
      return { _Last, errc::value_too_large };
    }

    char* _Mid;
    if (_Ryu_exponent > 0) { // case "172900"
      bool _Can_use_ryu;

      if (_Ryu_exponent > 22) { // 10^22 is the largest power of 10 that's exactly representable as a double.
        _Can_use_ryu = false;
      } else {
        // Ryu generated X: __v.__mantissa * 10^_Ryu_exponent
        // __v.__mantissa == 2^_Trailing_zero_bits * (__v.__mantissa >> _Trailing_zero_bits)
        // 10^_Ryu_exponent == 2^_Ryu_exponent * 5^_Ryu_exponent

        // _Trailing_zero_bits is [0, 56] (aside: because 2^56 is the largest power of 2
        //   with 17 decimal digits, which is double's round-trip limit.)
        // _Ryu_exponent is [1, 22].
        // Normalization adds [2, 52] (aside: at least 2 because the pre-normalized mantissa is at least 5).
        // This adds up to [3, 130], which is well below double's maximum binary exponent 1023.

        // Therefore, we just need to consider (__v.__mantissa >> _Trailing_zero_bits) * 5^_Ryu_exponent.

        // If that product would exceed 53 bits, then X can't be exactly represented as a double.
        // (That's not a problem for round-tripping, because X is close enough to the original double,
        // but X isn't mathematically equal to the original double.) This requires a high-precision fallback.

        // If the product is 53 bits or smaller, then X can be exactly represented as a double (and we don't
        // need to re-synthesize it; the original double must have been X, because Ryu wouldn't produce the
        // same output for two different doubles X and Y). This allows Ryu's output to be used (zero-filled).

        // (2^53 - 1) / 5^0 (for indexing), (2^53 - 1) / 5^1, ..., (2^53 - 1) / 5^22
        static constexpr uint64_t _Max_shifted_mantissa[23] = {
          9007199254740991u, 1801439850948198u, 360287970189639u, 72057594037927u, 14411518807585u,
          2882303761517u, 576460752303u, 115292150460u, 23058430092u, 4611686018u, 922337203u, 184467440u,
          36893488u, 7378697u, 1475739u, 295147u, 59029u, 11805u, 2361u, 472u, 94u, 18u, 3u };

        unsigned long _Trailing_zero_bits;
#ifdef _LIBCPP_HAS_BITSCAN64
        (void) _BitScanForward64(&_Trailing_zero_bits, __v.__mantissa); // __v.__mantissa is guaranteed nonzero
#else // ^^^ 64-bit ^^^ / vvv 32-bit vvv
        const uint32_t _Low_mantissa = static_cast<uint32_t>(__v.__mantissa);
        if (_Low_mantissa != 0) {
          (void) _BitScanForward(&_Trailing_zero_bits, _Low_mantissa);
        } else {
          const uint32_t _High_mantissa = static_cast<uint32_t>(__v.__mantissa >> 32); // nonzero here
          (void) _BitScanForward(&_Trailing_zero_bits, _High_mantissa);
          _Trailing_zero_bits += 32;
        }
#endif // ^^^ 32-bit ^^^
        const uint64_t _Shifted_mantissa = __v.__mantissa >> _Trailing_zero_bits;
        _Can_use_ryu = _Shifted_mantissa <= _Max_shifted_mantissa[_Ryu_exponent];
      }
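
      // Explanatory note (not from the upstream sources): e.g. for
      // __v == { __mantissa = 1729, __exponent = 2 } (the value 172900.0),
      // _Trailing_zero_bits == 0 and 1729 <= _Max_shifted_mantissa[2] ==
      // 360287970189639, so 1729 * 5^2 fits in 53 bits; X == 172900 is exactly
      // representable and Ryu's digits can simply be zero-filled.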

      if (!_Can_use_ryu) {
        // Print the integer exactly.
        // Performance note: This will redundantly perform bounds checking.
        // Performance note: This will redundantly decompose the IEEE representation.
        return __d2fixed_buffered_n(_First, _Last, __f, 0);
      }

      // _Can_use_ryu
      // Print the decimal digits, left-aligned within [_First, _First + _Total_fixed_length).
      _Mid = _First + __olength;
    } else { // cases "1729", "17.29", and "0.001729"
      // Print the decimal digits, right-aligned within [_First, _First + _Total_fixed_length).
      _Mid = _First + _Total_fixed_length;
    }

    // We prefer 32-bit operations, even on 64-bit platforms.
    // We have at most 17 digits, and uint32_t can store 9 digits.
    // If _Output doesn't fit into uint32_t, we cut off 8 digits,
    // so the rest will fit into uint32_t.
    if ((_Output >> 32) != 0) {
      // Expensive 64-bit division.
      const uint64_t __q = __div1e8(_Output);
      uint32_t __output2 = static_cast<uint32_t>(_Output - 100000000 * __q);
      _Output = __q;

      const uint32_t __c = __output2 % 10000;
      __output2 /= 10000;
      const uint32_t __d = __output2 % 10000;
      const uint32_t __c0 = (__c % 100) << 1;
      const uint32_t __c1 = (__c / 100) << 1;
      const uint32_t __d0 = (__d % 100) << 1;
      const uint32_t __d1 = (__d / 100) << 1;

      _VSTD::memcpy(_Mid -= 2, __DIGIT_TABLE + __c0, 2);
      _VSTD::memcpy(_Mid -= 2, __DIGIT_TABLE + __c1, 2);
      _VSTD::memcpy(_Mid -= 2, __DIGIT_TABLE + __d0, 2);
      _VSTD::memcpy(_Mid -= 2, __DIGIT_TABLE + __d1, 2);
    }
    uint32_t __output2 = static_cast<uint32_t>(_Output);
    while (__output2 >= 10000) {
#ifdef __clang__ // TRANSITION, LLVM-38217
      const uint32_t __c = __output2 - 10000 * (__output2 / 10000);
#else
      const uint32_t __c = __output2 % 10000;
#endif
      __output2 /= 10000;
      const uint32_t __c0 = (__c % 100) << 1;
      const uint32_t __c1 = (__c / 100) << 1;
      _VSTD::memcpy(_Mid -= 2, __DIGIT_TABLE + __c0, 2);
      _VSTD::memcpy(_Mid -= 2, __DIGIT_TABLE + __c1, 2);
    }
    if (__output2 >= 100) {
      const uint32_t __c = (__output2 % 100) << 1;
      __output2 /= 100;
      _VSTD::memcpy(_Mid -= 2, __DIGIT_TABLE + __c, 2);
    }
    if (__output2 >= 10) {
      const uint32_t __c = __output2 << 1;
      _VSTD::memcpy(_Mid -= 2, __DIGIT_TABLE + __c, 2);
    } else {
      *--_Mid = static_cast<char>('0' + __output2);
    }

    if (_Ryu_exponent > 0) { // case "172900" with _Can_use_ryu
      // Performance note: it might be more efficient to do this immediately after setting _Mid.
      _VSTD::memset(_First + __olength, '0', static_cast<size_t>(_Ryu_exponent));
    } else if (_Ryu_exponent == 0) { // case "1729"
      // Done!
    } else if (_Whole_digits > 0) { // case "17.29"
      // Performance note: moving digits might not be optimal.
      _VSTD::memmove(_First, _First + 1, static_cast<size_t>(_Whole_digits));
      _First[_Whole_digits] = '.';
    } else { // case "0.001729"
      // Performance note: a larger memset() followed by overwriting '.' might be more efficient.
      _First[0] = '0';
      _First[1] = '.';
      _VSTD::memset(_First + 2, '0', static_cast<size_t>(-_Whole_digits));
    }

    return { _First + _Total_fixed_length, errc{} };
  }
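
  // Explanatory note (not from the upstream sources): __DIGIT_TABLE is the
  // 200-character sequence "000102...9899", so the two characters of a value
  // __n in [0, 100) start at offset 2 * __n; that's why the quotients and
  // remainders above and below are shifted left by 1 before indexing.
  // For the scientific length computed next: e.g. "1.234e+08" is 9 characters
  // (4 digits + 1 decimal point + 4 for the exponent), while "1.234e+308" is
  // 10 (the exponent needs 5 characters).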
  const uint32_t _Total_scientific_length = __olength + (__olength > 1) // digits + possible decimal point
    + (-100 < _Scientific_exponent && _Scientific_exponent < 100 ? 4 : 5); // + scientific exponent
  if (_Last - _First < static_cast<ptrdiff_t>(_Total_scientific_length)) {
    return { _Last, errc::value_too_large };
  }
  char* const __result = _First;

  // Print the decimal digits.
  uint32_t __i = 0;
  // We prefer 32-bit operations, even on 64-bit platforms.
  // We have at most 17 digits, and uint32_t can store 9 digits.
  // If _Output doesn't fit into uint32_t, we cut off 8 digits,
  // so the rest will fit into uint32_t.
  if ((_Output >> 32) != 0) {
    // Expensive 64-bit division.
    const uint64_t __q = __div1e8(_Output);
    uint32_t __output2 = static_cast<uint32_t>(_Output) - 100000000 * static_cast<uint32_t>(__q);
    _Output = __q;

    const uint32_t __c = __output2 % 10000;
    __output2 /= 10000;
    const uint32_t __d = __output2 % 10000;
    const uint32_t __c0 = (__c % 100) << 1;
    const uint32_t __c1 = (__c / 100) << 1;
    const uint32_t __d0 = (__d % 100) << 1;
    const uint32_t __d1 = (__d / 100) << 1;
    _VSTD::memcpy(__result + __olength - __i - 1, __DIGIT_TABLE + __c0, 2);
    _VSTD::memcpy(__result + __olength - __i - 3, __DIGIT_TABLE + __c1, 2);
    _VSTD::memcpy(__result + __olength - __i - 5, __DIGIT_TABLE + __d0, 2);
    _VSTD::memcpy(__result + __olength - __i - 7, __DIGIT_TABLE + __d1, 2);
    __i += 8;
  }
  uint32_t __output2 = static_cast<uint32_t>(_Output);
  while (__output2 >= 10000) {
#ifdef __clang__ // TRANSITION, LLVM-38217
    const uint32_t __c = __output2 - 10000 * (__output2 / 10000);
#else
    const uint32_t __c = __output2 % 10000;
#endif
    __output2 /= 10000;
    const uint32_t __c0 = (__c % 100) << 1;
    const uint32_t __c1 = (__c / 100) << 1;
    _VSTD::memcpy(__result + __olength - __i - 1, __DIGIT_TABLE + __c0, 2);
    _VSTD::memcpy(__result + __olength - __i - 3, __DIGIT_TABLE + __c1, 2);
    __i += 4;
  }
  if (__output2 >= 100) {
    const uint32_t __c = (__output2 % 100) << 1;
    __output2 /= 100;
    _VSTD::memcpy(__result + __olength - __i - 1, __DIGIT_TABLE + __c, 2);
    __i += 2;
  }
  if (__output2 >= 10) {
    const uint32_t __c = __output2 << 1;
    // We can't use memcpy here: the decimal dot goes between these two digits.
    __result[2] = __DIGIT_TABLE[__c + 1];
    __result[0] = __DIGIT_TABLE[__c];
  } else {
    __result[0] = static_cast<char>('0' + __output2);
  }

  // Print decimal point if needed.
  uint32_t __index;
  if (__olength > 1) {
    __result[1] = '.';
    __index = __olength + 1;
  } else {
    __index = 1;
  }

  // Print the exponent.
  __result[__index++] = 'e';
  if (_Scientific_exponent < 0) {
    __result[__index++] = '-';
    _Scientific_exponent = -_Scientific_exponent;
  } else {
    __result[__index++] = '+';
  }

  if (_Scientific_exponent >= 100) {
    const int32_t __c = _Scientific_exponent % 10;
    _VSTD::memcpy(__result + __index, __DIGIT_TABLE + 2 * (_Scientific_exponent / 10), 2);
    __result[__index + 2] = static_cast<char>('0' + __c);
    __index += 3;
  } else {
    _VSTD::memcpy(__result + __index, __DIGIT_TABLE + 2 * _Scientific_exponent, 2);
    __index += 2;
  }
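
  // Explanatory note (not from the upstream sources): e.g. for
  // _Scientific_exponent == 308, the branch above copies "30" from
  // __DIGIT_TABLE and appends '0' + 8, producing "308"; two-digit exponents
  // take the else branch and are copied directly from the table.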

  return { _First + _Total_scientific_length, errc{} };
}

[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline bool __d2d_small_int(const uint64_t __ieeeMantissa, const uint32_t __ieeeExponent,
  __floating_decimal_64* const __v) {
  const uint64_t __m2 = (1ull << __DOUBLE_MANTISSA_BITS) | __ieeeMantissa;
  const int32_t __e2 = static_cast<int32_t>(__ieeeExponent) - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS;

  if (__e2 > 0) {
    // f = __m2 * 2^__e2 >= 2^53 is an integer.
    // Ignore this case for now.
    return false;
  }

  if (__e2 < -52) {
    // f < 1.
    return false;
  }

  // Since 2^52 <= __m2 < 2^53 and 0 <= -__e2 <= 52: 1 <= f = __m2 / 2^-__e2 < 2^53.
  // Test if the lower -__e2 bits of the significand are 0, i.e. whether the fraction is 0.
  const uint64_t __mask = (1ull << -__e2) - 1;
  const uint64_t __fraction = __m2 & __mask;
  if (__fraction != 0) {
    return false;
  }

  // f is an integer in the range [1, 2^53).
  // Note: __mantissa might contain trailing (decimal) 0's.
  // Note: since 2^53 < 10^16, there is no need to adjust __decimalLength17().
  __v->__mantissa = __m2 >> -__e2;
  __v->__exponent = 0;
  return true;
}
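
// Explanatory note (not from the upstream sources): for __f == 1000.0 this
// yields { __mantissa = 1000, __exponent = 0 }; the trailing-zero loop in
// __d2s_buffered_n below then trims this to { __mantissa = 1, __exponent = 3 }
// so that scientific notation prints "1e+03" instead of "1000e+00". A value
// like 0.5 is rejected by the __e2 < -52 test (its __e2 is -53) and falls
// through to the general __d2d path.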

[[nodiscard]] to_chars_result __d2s_buffered_n(char* const _First, char* const _Last, const double __f,
  const chars_format _Fmt) {

  // Step 1: Decode the floating-point number, and unify normalized and subnormal cases.
  const uint64_t __bits = __double_to_bits(__f);

  // Case distinction; exit early for the easy cases.
  if (__bits == 0) {
    if (_Fmt == chars_format::scientific) {
      if (_Last - _First < 5) {
        return { _Last, errc::value_too_large };
      }

      _VSTD::memcpy(_First, "0e+00", 5);

      return { _First + 5, errc{} };
    }

    // Print "0" for chars_format::fixed, chars_format::general, and chars_format{}.
    if (_First == _Last) {
      return { _Last, errc::value_too_large };
    }

    *_First = '0';

    return { _First + 1, errc{} };
  }

  // Decode __bits into mantissa and exponent.
  const uint64_t __ieeeMantissa = __bits & ((1ull << __DOUBLE_MANTISSA_BITS) - 1);
  const uint32_t __ieeeExponent = static_cast<uint32_t>(__bits >> __DOUBLE_MANTISSA_BITS);

  if (_Fmt == chars_format::fixed) {
    // const uint64_t _Mantissa2 = __ieeeMantissa | (1ull << __DOUBLE_MANTISSA_BITS); // restore implicit bit
    const int32_t _Exponent2 = static_cast<int32_t>(__ieeeExponent)
      - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS; // bias and normalization

    // Normal values are equal to _Mantissa2 * 2^_Exponent2.
    // (Subnormals are different, but they'll be rejected by the _Exponent2 test here, so they can be ignored.)

    // For nonzero integers, _Exponent2 >= -52. (The minimum value occurs when _Mantissa2 * 2^_Exponent2 is 1.
    // In that case, _Mantissa2 is the implicit 1 bit followed by 52 zeros, so _Exponent2 is -52 to shift away
    // the zeros.) The dense range of exactly representable integers has negative or zero exponents
    // (as positive exponents make the range non-dense). For that dense range, Ryu will always be used:
    // every digit is necessary to uniquely identify the value, so Ryu must print them all.

    // Positive exponents are the non-dense range of exactly representable integers. This contains all of the values
    // for which Ryu can't be used (and a few Ryu-friendly values). We can save time by detecting positive
    // exponents here and skipping Ryu. Calling __d2fixed_buffered_n() with precision 0 is valid for all integers
    // (so it's okay if we call it with a Ryu-friendly value).
    if (_Exponent2 > 0) {
      return __d2fixed_buffered_n(_First, _Last, __f, 0);
    }
  }

  __floating_decimal_64 __v;
  const bool __isSmallInt = __d2d_small_int(__ieeeMantissa, __ieeeExponent, &__v);
  if (__isSmallInt) {
    // For small integers in the range [1, 2^53), __v.__mantissa might contain trailing (decimal) zeros.
    // For scientific notation we need to move these zeros into the exponent.
    // (This is not needed for fixed-point notation, so it might be beneficial to trim
    // trailing zeros in __to_chars only if needed - once fixed-point notation output is implemented.)
    for (;;) {
      const uint64_t __q = __div10(__v.__mantissa);
      const uint32_t __r = static_cast<uint32_t>(__v.__mantissa) - 10 * static_cast<uint32_t>(__q);
      if (__r != 0) {
        break;
      }
      __v.__mantissa = __q;
      ++__v.__exponent;
    }
  } else {
    __v = __d2d(__ieeeMantissa, __ieeeExponent);
  }

  return __to_chars(_First, _Last, __v, _Fmt, __f);
}

_LIBCPP_END_NAMESPACE_STD

// clang-format on