Lines Matching +full:high +full:- +full:precision
23 * n0 = ((*(int*)&one)>>29)^1; * index of high word *
24 * ix0 = *(n0+(int*)&x); * high word of x *
25 * ix1 = *((1-n0)+(int*)&x); * low word of x *
27 * value. That is non-ANSI, and, moreover, the gcc instruction
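The fragment above shows the old fdlibm idiom: pointer punning plus a runtime
endianness probe (n0) to pull the two 32-bit halves out of a double. A minimal
sketch of the union-based replacement, with illustrative names rather than the
header's actual macros, and assuming a little-endian host:

#include <stdint.h>
#include <stdio.h>

/*
 * Union punning replaces the non-ANSI pointer casts above; on a
 * little-endian host the low word comes first in memory.
 */
typedef union {
	double value;
	struct {
		uint32_t lsw;	/* low 32 bits of the double */
		uint32_t msw;	/* sign, exponent, top of fraction */
	} parts;
} double_shape;

int main(void)
{
	double_shape u;

	u.value = -2.5;
	printf("high 0x%08x low 0x%08x\n", u.parts.msw, u.parts.lsw);
	return (0);
}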
136 /* Get a 64-bit int from a double. */
172 /* Set a double from a 64-bit int. */
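Lines 136 and 172 head the 64-bit accessors. A sketch of what such a get/set
pair can look like; memcpy is the portable punning idiom (the header itself
uses a union), and these function names are illustrative:

#include <stdint.h>
#include <string.h>

/* Copy the raw IEEE-754 bit pattern of a double into a 64-bit int. */
static inline uint64_t
extract_word64(double d)
{
	uint64_t w;

	memcpy(&w, &d, sizeof(w));
	return (w);
}

/* Rebuild a double from a 64-bit bit pattern. */
static inline double
insert_word64(uint64_t w)
{
	double d;

	memcpy(&d, &w, sizeof(d));
	return (d);
}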
310 /* The above works on non-i386 too, but we use this to check v. */
316 * Attempt to get strict C99 semantics for assignment with non-C99 compilers.
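C99 requires an assignment to discard any extra evaluation precision, but
older compilers (notably gcc targeting the x87) keep values in wide registers
across assignments. A simplified sketch of the usual workaround: bounce the
value through a volatile temporary so the compiler must actually store and
reload it. The real macro adds guards so the store is skipped when it is not
needed (e.g. when FLT_EVAL_METHOD is 0).

#define STRICT_ASSIGN(type, lval, rval) do {	\
	volatile type __v = (rval);		\
	(lval) = __v;				\
} while (0)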
378 __s = __w - (a); \
379 (b) = ((a) - (__w - __s)) + ((b) - __s); \
386 * "Normalize" the terms in the infinite-precision expression a + b for
389 * the same precision as 'a' and the resulting b is the rounding error.)
396 * extra precision in a + b. This is required by C standards but broken
399 * algorithm would be destroyed by non-null strict assignments. (The
400 * compilers are correct to be broken -- the efficiency of all floating
405 * any extra precision into the type of 'a' -- 'a' should have type float_t,
408 * reduce their own extra-precision and efficiency problems. In
421 (b) = ((a) - __w) + (b); \
427 __r = __ia - __vw; \
436 (b) = ((a) - __w) + (b); \
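Lines 378-379 are the tail of Knuth's branch-free 2sum, which normalizes
a + b with no ordering requirement on the operands; lines 421 and 436 show
Dekker's cheaper variant, which needs the exponent of 'a' to be at least that
of 'b'. A self-contained demo of the Knuth form:

#include <stdio.h>

/*
 * Knuth 2sum: w is a + b rounded once, err is the exact rounding
 * error, so w + err == a + b exactly.
 */
static void
two_sum(double a, double b, double *w, double *err)
{
	double bv, av;

	*w = a + b;
	bv = *w - a;		/* the part of w contributed by b */
	av = *w - bv;		/* the part of w contributed by a */
	*err = (a - av) + (b - bv);
}

int main(void)
{
	double w, err;

	two_sum(1.0, 0x1p-60, &w, &err);
	printf("w = %a, err = %a\n", w, err);	/* err recovers 0x1p-60 */
	return (0);
}

Note this identity only holds when each operation rounds once in double; on
the x87 with extra evaluation precision it breaks, which is exactly what the
volatile hacks discussed above guard against.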
442 * Set x += c, where x is represented in extra precision as a + b.
452 * 2**20 times smaller than 'a' to give about 20 extra bits of precision.
458 * or by having |c| a few percent smaller than |a|. Pre-normalization of
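The comment starting at line 442 describes adding a small correction c to a
value kept in extra precision as a head 'a' plus tail 'b'. A minimal sketch of
one way to do that with a 2sum step; this is illustrative, not the header's
actual macro:

/*
 * Fold c into the extra-precision value a + b: 2sum produces the new
 * head and its rounding error, and the error is absorbed into the tail.
 */
static void
add_correction(double *a, double *b, double c)
{
	double w, bv, av;

	w = *a + c;
	bv = w - *a;
	av = w - bv;
	*b += (*a - av) + (c - bv);	/* tail picks up the lost bits */
	*a = w;
}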
493 * precision might depend on the runtime precision and/or on compiler
496 * runtime precision by always doing the main mixing step in long double
497 * precision. Try to reduce dependencies on optimizations by adding the
499 * precision).
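The surrounding comment (lines 493-499) is about making a result independent
of the x87 runtime rounding precision by doing the main mixing step in long
double. The function it documents is not among these matches, so the
following is only an illustrative pattern, not the header's code:

/*
 * Evaluating the sensitive step in long double fixes the intermediate
 * precision regardless of the x87 control word; the final cast then
 * performs one well-defined rounding back to double.
 */
static double
mix_hi_lo(double hi, double lo, double t)
{
	return ((double)((long double)hi * t + (long double)lo * t));
}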
533 * In particular, I*Inf is corrupted to NaN+I*Inf, and I*-0 is corrupted
534 * to -0.0+I*0.0.
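Because gcc expands x + I*y into a full complex product, I*Inf is corrupted
to NaN+I*Inf as noted above, so complex values must be built by writing the
parts directly. A sketch relying on the C99 guarantee that a complex has the
layout of a two-element real array (names illustrative; C11 code would simply
use the CMPLX() macro):

#include <complex.h>

/* Build x + I*y without ever evaluating I*y. */
static inline double complex
make_complex(double x, double y)
{
	union {
		double complex z;
		double parts[2];	/* [0] real, [1] imaginary */
	} u;

	u.parts[0] = x;
	u.parts[1] = y;
	return (u.z);
}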
583 * Extra precision causes more problems in practice, and we only centralize
594 * This casts to double to kill any extra precision. This depends in rnint()
597 * inefficient if there actually is extra precision, but is hard in rnint()
601 * the rounding precision is variable at runtime on x86 so the in rnint()
603 * rounding precision is always the default is too fragile. This in rnint()
607 return ((double)(x + 0x1.8p52) - 0x1.8p52); in rnint()
615 * extra precision case, usually without losing efficiency. in rnintf()
617 return ((float)(x + 0x1.8p23F) - 0x1.8p23F); in rnintf()
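The constant on line 607 is 1.5 * 2**52. Adding it forces the sum into
[2**52, 2**53), where adjacent doubles are exactly 1 apart, so the addition
itself rounds x to the nearest integer in the current rounding mode;
subtracting the constant recovers that integer. Using 1.5 * 2**52 rather than
2**52 keeps negative x in range as well. A small demo, assuming the default
round-to-nearest-even mode:

#include <stdio.h>

static double
round_nearest(double x)
{
	/* Valid roughly for |x| < 0x1p51; the cast kills extra precision. */
	return ((double)(x + 0x1.8p52) - 0x1.8p52);
}

int main(void)
{
	/* Halfway cases go to even: 2.5 -> 2, -1.5 -> -2, 0.5 -> 0. */
	printf("%g %g %g\n", round_nearest(2.5), round_nearest(-1.5),
	    round_nearest(0.5));
	return (0);
}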
622 * The complications for extra precision are smaller for rnintl() since it
623 * can safely assume that the rounding precision has been increased from
635 return (x + __CONCAT(0x1.8p, LDBL_MANT_DIG) / 2 - in rnintl()
690 * The following are fast floor macros for 0 <= |x| < 0x1p(N-1), where
691 * N is the precision of the type of x. These macros are used in the
692 * half-cycle trigonometric functions (e.g., sinpi(x)).
695 (j0) = (((ix) >> 23) & 0xff) - 0x7f; \
701 (j0) = (((ix) >> 20) & 0x7ff) - 0x3ff; \
706 (lx) &= ~((uint32_t)0xffffffff >> ((j0) - 20)); \
712 j0 = ix - 0x3fff + 1; \
715 (lx) &= ~((((lx) << 32)-1) >> (j0)); \
718 _m = (uint64_t)-1 >> (j0); \
729 e = u.bits.exp - 16383; \
731 m = ((1llu << 49) - 1) >> (e + 1); \
735 m = (uint64_t)-1 >> (e - 48); \
739 (ar) = (x) - (ai); \
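Lines 690-739 are mask-based floors: compute the unbiased exponent j0, then
clear the significand bits that lie below the binary point instead of calling
floor(). A self-contained sketch for double, handling positive x only (the
macros above deal with |x| and leave the sign to their callers):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Fast floor for 0 <= x < 0x1p52, by masking off the fraction bits. */
static double
fast_floor(double x)
{
	uint64_t bits, mask;
	int e;

	memcpy(&bits, &x, sizeof(bits));
	e = (int)((bits >> 52) & 0x7ff) - 0x3ff;	/* unbiased exponent */
	if (e < 0)
		return (0.0);		/* 0 <= x < 1 */
	if (e < 52) {
		mask = ((uint64_t)1 << (52 - e)) - 1;
		bits &= ~mask;		/* drop bits below the binary point */
	}
	memcpy(&x, &bits, sizeof(x));
	return (x);
}

int main(void)
{
	printf("%g %g\n", fast_floor(3.75), fast_floor(0.5));	/* 3 0 */
	return (0);
}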
743 * For a subnormal double entity split into high and low parts, compute ilogb.
751 j = -1022; in subnormal_ilogb()
753 j -= 21; in subnormal_ilogb()
758 for (; i < 0x7fffffff; i <<= 1) j -= 1; in subnormal_ilogb()
772 for (j = -126; i < 0x7fffffff; i <<= 1) j -= 1; in subnormal_ilogbf()
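A subnormal has a zero exponent field, so ilogb cannot be read off the
exponent; instead the significand is shifted left until its leading bit
reaches the implicit-bit position, decrementing the exponent estimate each
step. A sketch of that idea for float, independent of the header's exact loop:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static int
subnormal_ilogb_sketch(float x)
{
	uint32_t i;
	int j;

	memcpy(&i, &x, sizeof(i));
	i &= 0x007fffff;		/* subnormal: exponent field is 0 */
	for (j = -126; (i & 0x00800000) == 0; i <<= 1)
		j--;
	return (j);
}

int main(void)
{
	printf("%d\n", subnormal_ilogb_sketch(0x1p-130f));	/* -130 */
	return (0);
}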
789 if (!(rp)->lo_set) \
790 RETURNF((rp)->hi); \
791 RETURNF((rp)->hi + (rp)->lo); \
794 if (!(rp)->lo_set) \
795 RETURNI((rp)->hi); \
796 RETURNI((rp)->hi + (rp)->lo); \
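The RETURN macros at lines 789-796 collapse an extra-precision result held as
a head plus optional tail. A sketch of the structure they consume, inferred
from the fields used above (hi, lo, lo_set); the name of the struct itself is
illustrative:

/*
 * A kernel returns its result as a head and an optional tail; the
 * wrapper adds the tail only when it has been set.
 */
struct hi_lo {
	double hi;
	double lo;
	int lo_set;
};

static double
collapse(const struct hi_lo *rp)
{
	if (!rp->lo_set)
		return (rp->hi);
	return (rp->hi + rp->lo);
}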
809 /* double precision kernel functions */
821 /* float precision kernel functions */
839 /* long double precision kernel functions */