Lines Matching +full:64 +full:bit
60 // modulo 64.
62 // of Ryu, the shift value is always < 64.
66 _LIBCPP_ASSERT_INTERNAL(__dist < 64, "");
78 *__productHi = __temp >> 64;
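
The fragment at line 78 above writes the high half of a full 64x64-bit multiplication through an out-parameter. As a hedged sketch of that pattern (illustrative name, and assuming the compiler provides `unsigned __int128`):

    #include <cstdint>

    // Sketch only: returns the low 64 bits of __a * __b and writes the
    // high 64 bits through __productHi, using a 128-bit intermediate.
    inline uint64_t __umul128_sketch(uint64_t __a, uint64_t __b, uint64_t* __productHi) {
      const auto __temp = static_cast<unsigned __int128>(__a) * __b;
      *__productHi = static_cast<uint64_t>(__temp >> 64);
      return static_cast<uint64_t>(__temp);
    }
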
84 // of Ryu, the shift value is always < 64.
88 _LIBCPP_ASSERT_INTERNAL(__dist < 64, "");
89 auto __temp = __lo | ((unsigned __int128)__hi << 64);
90 // For x64 128-bit shifts using the `shrd` instruction and two 64-bit
91 // registers, the shift value is modulo 64. Thus the `& 63` is free.
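
Lines 88-91 above describe a 128-bit right shift whose distance is known to stay below 64, so the `& 63` matches the hardware's modulo-64 shift count (e.g. `shrd` on x64) and costs nothing. A minimal sketch of that idea, again assuming `unsigned __int128` and an illustrative name:

    #include <cassert>
    #include <cstdint>

    // Sketch only: low 64 bits of the 128-bit value (__hi:__lo) shifted
    // right by __dist; valid only for __dist < 64.
    inline uint64_t __shiftright128_sketch(uint64_t __lo, uint64_t __hi, uint32_t __dist) {
      assert(__dist < 64);
      const auto __temp = __lo | (static_cast<unsigned __int128>(__hi) << 64);
      // The mask mirrors the modulo-64 behaviour of the hardware shift, so it is free.
      return static_cast<uint64_t>(__temp >> (__dist & 63));
    }
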
128 // We don't need to handle the case __dist >= 64 here (see above).
129 _LIBCPP_ASSERT_INTERNAL(__dist < 64, "");
132 return (__hi << (64 - __dist)) | (__lo >> __dist);
133 #else // ^^^ 64-bit ^^^ / vvv 32-bit vvv
134 // Avoid a 64-bit shift by taking advantage of the range of shift values.
136 return (__hi << (64 - __dist)) | (static_cast<uint32_t>(__lo >> 32) >> (__dist - 32));
137 #endif // ^^^ 32-bit ^^^
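
Lines 128-137 above avoid a 64-bit shift on 32-bit targets: when the shift distance is known to lie in [32, 64), `__lo >> __dist` can be rewritten as a 32-bit shift of the upper word of `__lo`. A small self-contained check of that equivalence (illustrative only, not part of the file):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t __lo = 0x0123456789ABCDEFu;
      for (uint32_t __dist = 32; __dist < 64; ++__dist) {
        const uint64_t __full  = __lo >> __dist;                                      // 64-bit shift
        const uint64_t __split = static_cast<uint32_t>(__lo >> 32) >> (__dist - 32);  // 32-bit shift only
        assert(__full == __split);
      }
    }
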
144 // Returns the high 64 bits of the 128-bit product of __a and __b.
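
Line 144 above documents a helper returning the high 64 bits of a 128-bit product. Without a 128-bit integer type, that value can be assembled from four 32x32->64 partial products; the sketch below shows one standard way (illustrative name, not necessarily how the file does it):

    #include <cstdint>

    // Sketch only: high 64 bits of __a * __b built from 32-bit halves.
    inline uint64_t __umulh_sketch(uint64_t __a, uint64_t __b) {
      const uint64_t __aLo = static_cast<uint32_t>(__a), __aHi = __a >> 32;
      const uint64_t __bLo = static_cast<uint32_t>(__b), __bHi = __b >> 32;

      const uint64_t __b00 = __aLo * __bLo;
      const uint64_t __b01 = __aLo * __bHi;
      const uint64_t __b10 = __aHi * __bLo;
      const uint64_t __b11 = __aHi * __bHi;

      // Fold the carries out of the middle partial products into the high half.
      const uint64_t __mid = __b10 + (__b00 >> 32) + static_cast<uint32_t>(__b01);
      return __b11 + (__mid >> 32) + (__b01 >> 32);
    }
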
154 // On 32-bit platforms, compilers typically generate calls to library
155 // functions for 64-bit divisions, even if the divisor is a constant.
160 // in the same way as 64-bit compilers would do.
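
Lines 154-160 above explain why 64-bit division by a constant is replaced by a multiply-high plus shift on 32-bit targets, where the division would otherwise become a library call. A hedged example of the technique for x / 5 (the constant below is the one compilers emit for this divisor; the file's own constants are not visible in this listing, and the sketch leans on `unsigned __int128` for brevity):

    #include <cstdint>

    // Sketch only: floor(__x / 5) without a division instruction.
    // 0xCCCCCCCCCCCCCCCD is ceil(2^66 / 5); taking the high 64 bits of the
    // product (a shift by 64) and then shifting by 2 more divides by 2^66.
    inline uint64_t __div5_sketch(uint64_t __x) {
      const auto __product = static_cast<unsigned __int128>(__x) * 0xCCCCCCCCCCCCCCCDu;
      return static_cast<uint64_t>(__product >> 64) >> 2;
    }
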
187 // Avoid 64-bit math as much as possible.
189 // perform 32x64-bit multiplication and 64-bit subtraction.
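
Lines 187-189 above justify keeping the remainder computation in 32-bit arithmetic: the value and 10^9 times its quotient differ by less than 10^9, so their upper 32 bits agree and both sides can be truncated before the multiply and the subtract. A hedged sketch, with `__div1e9_sketch` standing in for whatever quotient helper the file actually uses:

    #include <cstdint>

    // Assumed helper for the sketch: quotient by 10^9.
    inline uint64_t __div1e9_sketch(uint64_t __x) { return __x / 1000000000u; }

    // Sketch only: __x mod 10^9 using a 32x32-bit multiply and a 32-bit subtract.
    // __x and 10^9 * (__x / 10^9) differ by less than 10^9 < 2^32, so truncating
    // both operands to 32 bits before subtracting cannot change the result.
    inline uint32_t __mod1e9_sketch(uint64_t __x) {
      return static_cast<uint32_t>(__x) - 1000000000u * static_cast<uint32_t>(__div1e9_sketch(__x));
    }
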
199 #else // ^^^ 32-bit ^^^ / vvv 64-bit vvv
225 #endif // ^^^ 64-bit ^^^
251 _LIBCPP_ASSERT_INTERNAL(__p < 64, "");