//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

// Copyright 2018 Ulf Adams
// Copyright (c) Microsoft Corporation. All rights reserved.

// Boost Software License - Version 1.0 - August 17th, 2003

// Permission is hereby granted, free of charge, to any person or organization
// obtaining a copy of the software and accompanying documentation covered by
// this license (the "Software") to use, reproduce, display, distribute,
// execute, and transmit the Software, and to prepare derivative works of the
// Software, and to permit third-parties to whom the Software is furnished to
// do so, all subject to the following:

// The copyright notices in the Software and this entire statement, including
// the above license grant, this restriction and the following disclaimer,
// must be included in all copies of the Software, in whole or in part, and
// all derivative works of the Software, unless such copies or derivative
// works are solely in the form of machine-executable object code generated by
// a source language processor.

// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
// SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
// FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

// Avoid formatting to keep the changes with the original code minimal.
// clang-format off

#include "__config"
#include "charconv"
#include "cstring"
#include "system_error"

#include "include/ryu/common.h"
#include "include/ryu/d2fixed.h"
#include "include/ryu/d2fixed_full_table.h"
#include "include/ryu/d2s.h"
#include "include/ryu/d2s_intrinsics.h"
#include "include/ryu/digit_table.h"

_LIBCPP_BEGIN_NAMESPACE_STD

inline constexpr int __POW10_ADDITIONAL_BITS = 120;

#ifdef _LIBCPP_INTRINSIC128
// Returns the low 64 bits of the high 128 bits of the 256-bit product of a and b.
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint64_t __umul256_hi128_lo64(
  const uint64_t __aHi, const uint64_t __aLo, const uint64_t __bHi, const uint64_t __bLo) {
  uint64_t __b00Hi;
  const uint64_t __b00Lo = __ryu_umul128(__aLo, __bLo, &__b00Hi);
  uint64_t __b01Hi;
  const uint64_t __b01Lo = __ryu_umul128(__aLo, __bHi, &__b01Hi);
  uint64_t __b10Hi;
  const uint64_t __b10Lo = __ryu_umul128(__aHi, __bLo, &__b10Hi);
  uint64_t __b11Hi;
  const uint64_t __b11Lo = __ryu_umul128(__aHi, __bHi, &__b11Hi);
  (void) __b00Lo; // unused
  (void) __b11Hi; // unused
  const uint64_t __temp1Lo = __b10Lo + __b00Hi;
  const uint64_t __temp1Hi = __b10Hi + (__temp1Lo < __b10Lo);
  const uint64_t __temp2Lo = __b01Lo + __temp1Lo;
  const uint64_t __temp2Hi = __b01Hi + (__temp2Lo < __b01Lo);
  return __b11Lo + __temp1Hi + __temp2Hi;
}

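// Returns (__vHi * 2^64 + __vLo) mod 10^9 for the limited range of inputs produced by
// __mulShift_mod1e9 below. Informal sketch: the 128-bit constant 0x89705F4136B4A597'31680A88F8953031
// appears to be a fixed-point approximation of 2^157 / 10^9, so the high 128 bits of the 256-bit
// product are roughly (__v / 10^9) << 29; the shift by 29 recovers the quotient, and the final
// subtraction turns the quotient into the remainder.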
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint32_t __uint128_mod1e9(const uint64_t __vHi, const uint64_t __vLo) {
  // After multiplying, we're going to shift right by 29, then truncate to uint32_t.
  // This means that we need only 29 + 32 = 61 bits, so we can truncate to uint64_t before shifting.
  const uint64_t __multiplied = __umul256_hi128_lo64(__vHi, __vLo, 0x89705F4136B4A597u, 0x31680A88F8953031u);

  // For uint32_t truncation, see the __mod1e9() comment in d2s_intrinsics.h.
  const uint32_t __shifted = static_cast<uint32_t>(__multiplied >> 29);

  return static_cast<uint32_t>(__vLo) - 1000000000 * __shifted;
}
#endif // ^^^ intrinsics available ^^^

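// Computes ((__m * __mul) >> __j) mod 10^9, where __mul points to a 192-bit multiplier stored as
// three 64-bit limbs, least significant first. The trailing "// 0", "// 64", "// 128", "// 192"
// comments mark the bit offset of each partial product and partial sum within the 256-bit product.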
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint32_t __mulShift_mod1e9(const uint64_t __m, const uint64_t* const __mul, const int32_t __j) {
  uint64_t __high0;                                               // 64
  const uint64_t __low0 = __ryu_umul128(__m, __mul[0], &__high0); // 0
  uint64_t __high1;                                               // 128
  const uint64_t __low1 = __ryu_umul128(__m, __mul[1], &__high1); // 64
  uint64_t __high2;                                               // 192
  const uint64_t __low2 = __ryu_umul128(__m, __mul[2], &__high2); // 128
  const uint64_t __s0low = __low0;                  // 0
  (void) __s0low; // unused
  const uint64_t __s0high = __low1 + __high0;       // 64
  const uint32_t __c1 = __s0high < __low1;
  const uint64_t __s1low = __low2 + __high1 + __c1; // 128
  const uint32_t __c2 = __s1low < __low2; // __high1 + __c1 can't overflow, so compare against __low2
  const uint64_t __s1high = __high2 + __c2;         // 192
  _LIBCPP_ASSERT(__j >= 128, "");
  _LIBCPP_ASSERT(__j <= 180, "");
#ifdef _LIBCPP_INTRINSIC128
  const uint32_t __dist = static_cast<uint32_t>(__j - 128); // __dist: [0, 52]
  const uint64_t __shiftedhigh = __s1high >> __dist;
  const uint64_t __shiftedlow = __ryu_shiftright128(__s1low, __s1high, __dist);
  return __uint128_mod1e9(__shiftedhigh, __shiftedlow);
#else // ^^^ intrinsics available ^^^ / vvv intrinsics unavailable vvv
  if (__j < 160) { // __j: [128, 160)
    const uint64_t __r0 = __mod1e9(__s1high);
    const uint64_t __r1 = __mod1e9((__r0 << 32) | (__s1low >> 32));
    const uint64_t __r2 = ((__r1 << 32) | (__s1low & 0xffffffff));
    return __mod1e9(__r2 >> (__j - 128));
  } else { // __j: [160, 192)
    const uint64_t __r0 = __mod1e9(__s1high);
    const uint64_t __r1 = ((__r0 << 32) | (__s1low >> 32));
    return __mod1e9(__r1 >> (__j - 160));
  }
#endif // ^^^ intrinsics unavailable ^^^
}

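// Writes the __olength decimal digits of __digits to __result (no terminator); for example,
// __digits == 123456 with __olength == 6 produces "123456". Digits are peeled off the back of the
// number, four per loop iteration, using the two-character entries of __DIGIT_TABLE.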
void __append_n_digits(const uint32_t __olength, uint32_t __digits, char* const __result) {
  uint32_t __i = 0;
  while (__digits >= 10000) {
#ifdef __clang__ // TRANSITION, LLVM-38217
    const uint32_t __c = __digits - 10000 * (__digits / 10000);
#else
    const uint32_t __c = __digits % 10000;
#endif
    __digits /= 10000;
    const uint32_t __c0 = (__c % 100) << 1;
    const uint32_t __c1 = (__c / 100) << 1;
    _VSTD::memcpy(__result + __olength - __i - 2, __DIGIT_TABLE + __c0, 2);
    _VSTD::memcpy(__result + __olength - __i - 4, __DIGIT_TABLE + __c1, 2);
    __i += 4;
  }
  if (__digits >= 100) {
    const uint32_t __c = (__digits % 100) << 1;
    __digits /= 100;
    _VSTD::memcpy(__result + __olength - __i - 2, __DIGIT_TABLE + __c, 2);
    __i += 2;
  }
  if (__digits >= 10) {
    const uint32_t __c = __digits << 1;
    _VSTD::memcpy(__result + __olength - __i - 2, __DIGIT_TABLE + __c, 2);
  } else {
    __result[0] = static_cast<char>('0' + __digits);
  }
}

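// Like __append_n_digits, but inserts a decimal point after the leading digit, writing
// __olength + 1 characters in total; for example, __digits == 123456 with __olength == 6
// produces "1.23456".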
_LIBCPP_HIDE_FROM_ABI inline void __append_d_digits(const uint32_t __olength, uint32_t __digits, char* const __result) {
  uint32_t __i = 0;
  while (__digits >= 10000) {
#ifdef __clang__ // TRANSITION, LLVM-38217
    const uint32_t __c = __digits - 10000 * (__digits / 10000);
#else
    const uint32_t __c = __digits % 10000;
#endif
    __digits /= 10000;
    const uint32_t __c0 = (__c % 100) << 1;
    const uint32_t __c1 = (__c / 100) << 1;
    _VSTD::memcpy(__result + __olength + 1 - __i - 2, __DIGIT_TABLE + __c0, 2);
    _VSTD::memcpy(__result + __olength + 1 - __i - 4, __DIGIT_TABLE + __c1, 2);
    __i += 4;
  }
  if (__digits >= 100) {
    const uint32_t __c = (__digits % 100) << 1;
    __digits /= 100;
    _VSTD::memcpy(__result + __olength + 1 - __i - 2, __DIGIT_TABLE + __c, 2);
    __i += 2;
  }
  if (__digits >= 10) {
    const uint32_t __c = __digits << 1;
    __result[2] = __DIGIT_TABLE[__c + 1];
    __result[1] = '.';
    __result[0] = __DIGIT_TABLE[__c];
  } else {
    __result[1] = '.';
    __result[0] = static_cast<char>('0' + __digits);
  }
}

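// Writes the low __count decimal digits of __digits to __result, zero-padded on the left;
// for example, __digits == 56 with __count == 4 produces "0056".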
_LIBCPP_HIDE_FROM_ABI inline void __append_c_digits(const uint32_t __count, uint32_t __digits, char* const __result) {
  uint32_t __i = 0;
  for (; __i < __count - 1; __i += 2) {
    const uint32_t __c = (__digits % 100) << 1;
    __digits /= 100;
    _VSTD::memcpy(__result + __count - __i - 2, __DIGIT_TABLE + __c, 2);
  }
  if (__i < __count) {
    const char __c = static_cast<char>('0' + (__digits % 10));
    __result[__count - __i - 1] = __c;
  }
}

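// Writes __digits as exactly nine decimal digits with leading zeros; for example, 1234 becomes
// "000001234". The loop runs twice (__i == 0 and __i == 4) and emits eight digits; the ninth
// (most significant) digit lands in __result[0].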
void __append_nine_digits(uint32_t __digits, char* const __result) {
  if (__digits == 0) {
    _VSTD::memset(__result, '0', 9);
    return;
  }

  for (uint32_t __i = 0; __i < 5; __i += 4) {
#ifdef __clang__ // TRANSITION, LLVM-38217
    const uint32_t __c = __digits - 10000 * (__digits / 10000);
#else
    const uint32_t __c = __digits % 10000;
#endif
    __digits /= 10000;
    const uint32_t __c0 = (__c % 100) << 1;
    const uint32_t __c1 = (__c / 100) << 1;
    _VSTD::memcpy(__result + 7 - __i, __DIGIT_TABLE + __c0, 2);
    _VSTD::memcpy(__result + 5 - __i, __DIGIT_TABLE + __c1, 2);
  }
  __result[0] = static_cast<char>('0' + __digits);
}

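// The helpers below map a binary exponent onto the segmented power-of-10 tables from
// d2fixed_full_table.h. Each table index covers a span of 16 binary exponents (hence the 16s),
// and __lengthForIndex bounds how many 9-digit blocks the integer part of a value in that
// segment can require.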
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint32_t __indexForExponent(const uint32_t __e) {
  return (__e + 15) / 16;
}

[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint32_t __pow10BitsForIndex(const uint32_t __idx) {
  return 16 * __idx + __POW10_ADDITIONAL_BITS;
}

[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint32_t __lengthForIndex(const uint32_t __idx) {
  // +1 for ceil, +16 for mantissa, +8 to round up when dividing by 9
  return (__log10Pow2(16 * static_cast<int32_t>(__idx)) + 1 + 16 + 8) / 9;
}

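// Prints __d in fixed notation with __precision digits after the decimal point into [_First, _Last).
// On success the returned pointer is one past the last character written; if the buffer is too
// small, { _Last, errc::value_too_large } is returned, matching the to_chars convention.
// A minimal usage sketch (hypothetical caller, for illustration only):
//   char __buf[64];
//   const to_chars_result __res = std::__d2fixed_buffered_n(__buf, __buf + sizeof(__buf), 3.14, 2);
//   // on success, [__buf, __res.ptr) holds "3.14" and __res.ec == errc{}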
[[nodiscard]] to_chars_result __d2fixed_buffered_n(char* _First, char* const _Last, const double __d,
  const uint32_t __precision) {
  char* const _Original_first = _First;

  const uint64_t __bits = __double_to_bits(__d);

  // Case distinction; exit early for the easy cases.
  if (__bits == 0) {
    const int32_t _Total_zero_length = 1 // leading zero
      + static_cast<int32_t>(__precision != 0) // possible decimal point
      + static_cast<int32_t>(__precision); // zeroes after decimal point

    if (_Last - _First < _Total_zero_length) {
      return { _Last, errc::value_too_large };
    }

    *_First++ = '0';
    if (__precision > 0) {
      *_First++ = '.';
      _VSTD::memset(_First, '0', __precision);
      _First += __precision;
    }
    return { _First, errc{} };
  }

  // Decode __bits into mantissa and exponent.
  const uint64_t __ieeeMantissa = __bits & ((1ull << __DOUBLE_MANTISSA_BITS) - 1);
  const uint32_t __ieeeExponent = static_cast<uint32_t>(__bits >> __DOUBLE_MANTISSA_BITS);

  int32_t __e2;
  uint64_t __m2;
  if (__ieeeExponent == 0) {
    __e2 = 1 - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS;
    __m2 = __ieeeMantissa;
  } else {
    __e2 = static_cast<int32_t>(__ieeeExponent) - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS;
    __m2 = (1ull << __DOUBLE_MANTISSA_BITS) | __ieeeMantissa;
  }

  bool __nonzero = false;
  if (__e2 >= -52) {
    const uint32_t __idx = __e2 < 0 ? 0 : __indexForExponent(static_cast<uint32_t>(__e2));
    const uint32_t __p10bits = __pow10BitsForIndex(__idx);
    const int32_t __len = static_cast<int32_t>(__lengthForIndex(__idx));
    for (int32_t __i = __len - 1; __i >= 0; --__i) {
      const uint32_t __j = __p10bits - __e2;
      // Temporary: __j is usually around 128, and by shifting a bit, we push it to 128 or above, which is
      // a slightly faster code path in __mulShift_mod1e9. Instead, we can just increase the multipliers.
      const uint32_t __digits = __mulShift_mod1e9(__m2 << 8, __POW10_SPLIT[__POW10_OFFSET[__idx] + __i],
        static_cast<int32_t>(__j + 8));
      if (__nonzero) {
        if (_Last - _First < 9) {
          return { _Last, errc::value_too_large };
        }
        __append_nine_digits(__digits, _First);
        _First += 9;
      } else if (__digits != 0) {
        const uint32_t __olength = __decimalLength9(__digits);
        if (_Last - _First < static_cast<ptrdiff_t>(__olength)) {
          return { _Last, errc::value_too_large };
        }
        __append_n_digits(__olength, __digits, _First);
        _First += __olength;
        __nonzero = true;
      }
    }
  }
  if (!__nonzero) {
    if (_First == _Last) {
      return { _Last, errc::value_too_large };
    }
    *_First++ = '0';
  }
  if (__precision > 0) {
    if (_First == _Last) {
      return { _Last, errc::value_too_large };
    }
    *_First++ = '.';
  }
  if (__e2 < 0) {
    const int32_t __idx = -__e2 / 16;
    const uint32_t __blocks = __precision / 9 + 1;
    // 0 = don't round up; 1 = round up unconditionally; 2 = round up if odd.
    int __roundUp = 0;
    uint32_t __i = 0;
    if (__blocks <= __MIN_BLOCK_2[__idx]) {
      __i = __blocks;
      if (_Last - _First < static_cast<ptrdiff_t>(__precision)) {
        return { _Last, errc::value_too_large };
      }
      _VSTD::memset(_First, '0', __precision);
      _First += __precision;
    } else if (__i < __MIN_BLOCK_2[__idx]) {
      __i = __MIN_BLOCK_2[__idx];
      if (_Last - _First < static_cast<ptrdiff_t>(9 * __i)) {
        return { _Last, errc::value_too_large };
      }
      _VSTD::memset(_First, '0', 9 * __i);
      _First += 9 * __i;
    }
    for (; __i < __blocks; ++__i) {
      const int32_t __j = __ADDITIONAL_BITS_2 + (-__e2 - 16 * __idx);
      const uint32_t __p = __POW10_OFFSET_2[__idx] + __i - __MIN_BLOCK_2[__idx];
      if (__p >= __POW10_OFFSET_2[__idx + 1]) {
        // If the remaining digits are all 0, then we might as well use memset.
        // No rounding required in this case.
        const uint32_t __fill = __precision - 9 * __i;
        if (_Last - _First < static_cast<ptrdiff_t>(__fill)) {
          return { _Last, errc::value_too_large };
        }
        _VSTD::memset(_First, '0', __fill);
        _First += __fill;
        break;
      }
      // Temporary: __j is usually around 128, and by shifting a bit, we push it to 128 or above, which is
      // a slightly faster code path in __mulShift_mod1e9. Instead, we can just increase the multipliers.
      uint32_t __digits = __mulShift_mod1e9(__m2 << 8, __POW10_SPLIT_2[__p], __j + 8);
      if (__i < __blocks - 1) {
        if (_Last - _First < 9) {
          return { _Last, errc::value_too_large };
        }
        __append_nine_digits(__digits, _First);
        _First += 9;
      } else {
        const uint32_t __maximum = __precision - 9 * __i;
        uint32_t __lastDigit = 0;
        for (uint32_t __k = 0; __k < 9 - __maximum; ++__k) {
          __lastDigit = __digits % 10;
          __digits /= 10;
        }
        if (__lastDigit != 5) {
          __roundUp = __lastDigit > 5;
        } else {
          // Is m * 10^(additionalDigits + 1) / 2^(-__e2) integer?
          const int32_t __requiredTwos = -__e2 - static_cast<int32_t>(__precision) - 1;
          const bool __trailingZeros = __requiredTwos <= 0
            || (__requiredTwos < 60 && __multipleOfPowerOf2(__m2, static_cast<uint32_t>(__requiredTwos)));
          __roundUp = __trailingZeros ? 2 : 1;
        }
        if (__maximum > 0) {
          if (_Last - _First < static_cast<ptrdiff_t>(__maximum)) {
            return { _Last, errc::value_too_large };
          }
          __append_c_digits(__maximum, __digits, _First);
          _First += __maximum;
        }
        break;
      }
    }
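    // Propagate the rounding decision backwards through the digits already written: '9' becomes '0'
    // and carries on, and with __roundUp == 2 (round half to even) a digit is only incremented when
    // it is odd. If the carry falls off the front, a leading '1' is written, the decimal point (if
    // any) moves one position to the right, and a trailing '0' is appended, e.g. "9.99" -> "10.00".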
    if (__roundUp != 0) {
      char* _Round = _First;
      char* _Dot = _Last;
      while (true) {
        if (_Round == _Original_first) {
          _Round[0] = '1';
          if (_Dot != _Last) {
            _Dot[0] = '0';
            _Dot[1] = '.';
          }
          if (_First == _Last) {
            return { _Last, errc::value_too_large };
          }
          *_First++ = '0';
          break;
        }
        --_Round;
        const char __c = _Round[0];
        if (__c == '.') {
          _Dot = _Round;
        } else if (__c == '9') {
          _Round[0] = '0';
          __roundUp = 1;
        } else {
          if (__roundUp == 1 || __c % 2 != 0) {
            _Round[0] = __c + 1;
          }
          break;
        }
      }
    }
  } else {
    if (_Last - _First < static_cast<ptrdiff_t>(__precision)) {
      return { _Last, errc::value_too_large };
    }
    _VSTD::memset(_First, '0', __precision);
    _First += __precision;
  }
  return { _First, errc{} };
}

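// Prints __d in scientific notation with __precision digits after the decimal point into
// [_First, _Last), using the same buffer-overflow convention as __d2fixed_buffered_n above.
// A minimal usage sketch (hypothetical caller, for illustration only):
//   char __buf[64];
//   const to_chars_result __res = std::__d2exp_buffered_n(__buf, __buf + sizeof(__buf), 1234.5, 2);
//   // on success, [__buf, __res.ptr) holds "1.23e+03"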
[[nodiscard]] to_chars_result __d2exp_buffered_n(char* _First, char* const _Last, const double __d,
  uint32_t __precision) {
  char* const _Original_first = _First;

  const uint64_t __bits = __double_to_bits(__d);

  // Case distinction; exit early for the easy cases.
  if (__bits == 0) {
    const int32_t _Total_zero_length = 1 // leading zero
      + static_cast<int32_t>(__precision != 0) // possible decimal point
      + static_cast<int32_t>(__precision) // zeroes after decimal point
      + 4; // "e+00"
    if (_Last - _First < _Total_zero_length) {
      return { _Last, errc::value_too_large };
    }
    *_First++ = '0';
    if (__precision > 0) {
      *_First++ = '.';
      _VSTD::memset(_First, '0', __precision);
      _First += __precision;
    }
    _VSTD::memcpy(_First, "e+00", 4);
    _First += 4;
    return { _First, errc{} };
  }

  // Decode __bits into mantissa and exponent.
  const uint64_t __ieeeMantissa = __bits & ((1ull << __DOUBLE_MANTISSA_BITS) - 1);
  const uint32_t __ieeeExponent = static_cast<uint32_t>(__bits >> __DOUBLE_MANTISSA_BITS);

  int32_t __e2;
  uint64_t __m2;
  if (__ieeeExponent == 0) {
    __e2 = 1 - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS;
    __m2 = __ieeeMantissa;
  } else {
    __e2 = static_cast<int32_t>(__ieeeExponent) - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS;
    __m2 = (1ull << __DOUBLE_MANTISSA_BITS) | __ieeeMantissa;
  }

  const bool __printDecimalPoint = __precision > 0;
  ++__precision;
  uint32_t __digits = 0;
  uint32_t __printedDigits = 0;
  uint32_t __availableDigits = 0;
  int32_t __exp = 0;
  if (__e2 >= -52) {
    const uint32_t __idx = __e2 < 0 ? 0 : __indexForExponent(static_cast<uint32_t>(__e2));
    const uint32_t __p10bits = __pow10BitsForIndex(__idx);
    const int32_t __len = static_cast<int32_t>(__lengthForIndex(__idx));
    for (int32_t __i = __len - 1; __i >= 0; --__i) {
      const uint32_t __j = __p10bits - __e2;
      // Temporary: __j is usually around 128, and by shifting a bit, we push it to 128 or above, which is
      // a slightly faster code path in __mulShift_mod1e9. Instead, we can just increase the multipliers.
      __digits = __mulShift_mod1e9(__m2 << 8, __POW10_SPLIT[__POW10_OFFSET[__idx] + __i],
        static_cast<int32_t>(__j + 8));
      if (__printedDigits != 0) {
        if (__printedDigits + 9 > __precision) {
          __availableDigits = 9;
          break;
        }
        if (_Last - _First < 9) {
          return { _Last, errc::value_too_large };
        }
        __append_nine_digits(__digits, _First);
        _First += 9;
        __printedDigits += 9;
      } else if (__digits != 0) {
        __availableDigits = __decimalLength9(__digits);
        __exp = __i * 9 + static_cast<int32_t>(__availableDigits) - 1;
        if (__availableDigits > __precision) {
          break;
        }
        if (__printDecimalPoint) {
          if (_Last - _First < static_cast<ptrdiff_t>(__availableDigits + 1)) {
            return { _Last, errc::value_too_large };
          }
          __append_d_digits(__availableDigits, __digits, _First);
          _First += __availableDigits + 1; // +1 for decimal point
        } else {
          if (_First == _Last) {
            return { _Last, errc::value_too_large };
          }
          *_First++ = static_cast<char>('0' + __digits);
        }
        __printedDigits = __availableDigits;
        __availableDigits = 0;
      }
    }
  }

  if (__e2 < 0 && __availableDigits == 0) {
    const int32_t __idx = -__e2 / 16;
    for (int32_t __i = __MIN_BLOCK_2[__idx]; __i < 200; ++__i) {
      const int32_t __j = __ADDITIONAL_BITS_2 + (-__e2 - 16 * __idx);
      const uint32_t __p = __POW10_OFFSET_2[__idx] + static_cast<uint32_t>(__i) - __MIN_BLOCK_2[__idx];
      // Temporary: __j is usually around 128, and by shifting a bit, we push it to 128 or above, which is
      // a slightly faster code path in __mulShift_mod1e9. Instead, we can just increase the multipliers.
      __digits = (__p >= __POW10_OFFSET_2[__idx + 1]) ? 0 : __mulShift_mod1e9(__m2 << 8, __POW10_SPLIT_2[__p], __j + 8);
      if (__printedDigits != 0) {
        if (__printedDigits + 9 > __precision) {
          __availableDigits = 9;
          break;
        }
        if (_Last - _First < 9) {
          return { _Last, errc::value_too_large };
        }
        __append_nine_digits(__digits, _First);
        _First += 9;
        __printedDigits += 9;
      } else if (__digits != 0) {
        __availableDigits = __decimalLength9(__digits);
        __exp = -(__i + 1) * 9 + static_cast<int32_t>(__availableDigits) - 1;
        if (__availableDigits > __precision) {
          break;
        }
        if (__printDecimalPoint) {
          if (_Last - _First < static_cast<ptrdiff_t>(__availableDigits + 1)) {
            return { _Last, errc::value_too_large };
          }
          __append_d_digits(__availableDigits, __digits, _First);
          _First += __availableDigits + 1; // +1 for decimal point
        } else {
          if (_First == _Last) {
            return { _Last, errc::value_too_large };
          }
          *_First++ = static_cast<char>('0' + __digits);
        }
        __printedDigits = __availableDigits;
        __availableDigits = 0;
      }
    }
  }

  const uint32_t __maximum = __precision - __printedDigits;
  if (__availableDigits == 0) {
    __digits = 0;
  }
  uint32_t __lastDigit = 0;
  if (__availableDigits > __maximum) {
    for (uint32_t __k = 0; __k < __availableDigits - __maximum; ++__k) {
      __lastDigit = __digits % 10;
      __digits /= 10;
    }
  }
  // 0 = don't round up; 1 = round up unconditionally; 2 = round up if odd.
  int __roundUp = 0;
  if (__lastDigit != 5) {
    __roundUp = __lastDigit > 5;
  } else {
    // Is m * 2^__e2 * 10^(__precision + 1 - __exp) integer?
    // __precision was already increased by 1, so we don't need to write + 1 here.
    const int32_t __rexp = static_cast<int32_t>(__precision) - __exp;
    const int32_t __requiredTwos = -__e2 - __rexp;
    bool __trailingZeros = __requiredTwos <= 0
      || (__requiredTwos < 60 && __multipleOfPowerOf2(__m2, static_cast<uint32_t>(__requiredTwos)));
    if (__rexp < 0) {
      const int32_t __requiredFives = -__rexp;
      __trailingZeros = __trailingZeros && __multipleOfPowerOf5(__m2, static_cast<uint32_t>(__requiredFives));
    }
    __roundUp = __trailingZeros ? 2 : 1;
  }
  if (__printedDigits != 0) {
    if (_Last - _First < static_cast<ptrdiff_t>(__maximum)) {
      return { _Last, errc::value_too_large };
    }
    if (__digits == 0) {
      _VSTD::memset(_First, '0', __maximum);
    } else {
      __append_c_digits(__maximum, __digits, _First);
    }
    _First += __maximum;
  } else {
    if (__printDecimalPoint) {
      if (_Last - _First < static_cast<ptrdiff_t>(__maximum + 1)) {
        return { _Last, errc::value_too_large };
      }
      __append_d_digits(__maximum, __digits, _First);
      _First += __maximum + 1; // +1 for decimal point
    } else {
      if (_First == _Last) {
        return { _Last, errc::value_too_large };
      }
      *_First++ = static_cast<char>('0' + __digits);
    }
  }
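  // Same carry propagation as in __d2fixed_buffered_n, except that the decimal point is skipped in
  // place and a carry out of the leading digit increments the exponent instead of moving the point,
  // e.g. "9.99" becomes "1.00" with __exp bumped by one.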
  if (__roundUp != 0) {
    char* _Round = _First;
    while (true) {
      if (_Round == _Original_first) {
        _Round[0] = '1';
        ++__exp;
        break;
      }
      --_Round;
      const char __c = _Round[0];
      if (__c == '.') {
        // Keep going.
      } else if (__c == '9') {
        _Round[0] = '0';
        __roundUp = 1;
      } else {
        if (__roundUp == 1 || __c % 2 != 0) {
          _Round[0] = __c + 1;
        }
        break;
      }
    }
  }

  char _Sign_character;

  if (__exp < 0) {
    _Sign_character = '-';
    __exp = -__exp;
  } else {
    _Sign_character = '+';
  }

  const int _Exponent_part_length = __exp >= 100
    ? 5 // "e+NNN"
    : 4; // "e+NN"

  if (_Last - _First < _Exponent_part_length) {
    return { _Last, errc::value_too_large };
  }

  *_First++ = 'e';
  *_First++ = _Sign_character;

  if (__exp >= 100) {
    const int32_t __c = __exp % 10;
    _VSTD::memcpy(_First, __DIGIT_TABLE + 2 * (__exp / 10), 2);
    _First[2] = static_cast<char>('0' + __c);
    _First += 3;
  } else {
    _VSTD::memcpy(_First, __DIGIT_TABLE + 2 * __exp, 2);
    _First += 2;
  }

  return { _First, errc{} };
}

_LIBCPP_END_NAMESPACE_STD

// clang-format on