Lines Matching +full:double +full:- +full:precision
25 * ix1 = *((1-n0)+(int*)&x); * low word of x *
27 * value. That is non-ANSI, and, moreover, the gcc instruction
35 * A union which permits us to convert between a double and two 32 bit
49 /* A union which permits us to convert between a long double and
56 long double value;
75 long double value;
94 double value;
112 double value;
126 /* Get two 32 bit ints from a double. */
136 /* Get a 64-bit int from a double. */
144 /* Get the more significant 32 bit int from a double. */
153 /* Get the less significant 32 bit int from a double. */
162 /* Set a double from two 32 bit ints. */
172 /* Set a double from a 64-bit int. */
180 /* Set the more significant 32 bits of a double from an int. */
190 /* Set the less significant 32 bits of a double from an int. */
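The access pattern behind these comments is a union pun plus copy-in/copy-out macros. A self-contained sketch, modeled on the real EXTRACT_WORDS()/INSERT_WORDS() macros; the little-endian member order below is an assumption (the header itself selects the order at compile time):

#include <stdint.h>
#include <stdio.h>

/* Little-endian layout; the real header picks lsw/msw order at compile time. */
typedef union {
	double value;
	struct {
		uint32_t lsw;	/* less significant 32 bits */
		uint32_t msw;	/* more significant 32 bits */
	} parts;
} ieee_double_shape_type;

#define EXTRACT_WORDS(hi, lo, d) do {	\
	ieee_double_shape_type ew_u;	\
	ew_u.value = (d);		\
	(hi) = ew_u.parts.msw;		\
	(lo) = ew_u.parts.lsw;		\
} while (0)

#define INSERT_WORDS(d, hi, lo) do {	\
	ieee_double_shape_type iw_u;	\
	iw_u.parts.msw = (hi);		\
	iw_u.parts.lsw = (lo);		\
	(d) = iw_u.value;		\
} while (0)

int main(void)
{
	uint32_t hi, lo;
	double x = -2.0, y;

	EXTRACT_WORDS(hi, lo, x);
	printf("%08x %08x\n", hi, lo);		/* c0000000 00000000 */
	INSERT_WORDS(y, hi & 0x7fffffff, lo);	/* clear the sign bit */
	printf("%g\n", y);			/* 2 */
	return 0;
}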
232 * double.
245 * long double.
257 /* Get expsign as a 16 bit int from a long double. */
267 * Set an 80 bit long double from a 16 bit int expsign and a 64 bit int
280 * Set a 128 bit long double from a 16 bit int expsign and two 64 bit ints
293 /* Set expsign of a long double from a 16 bit int. */
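A sketch of the expsign accessors for an 80-bit long double; the union layout below is an assumption matching little-endian x86, not the header's actual machine-dependent struct:

#include <stdint.h>
#include <stdio.h>

/* Assumed x86 little-endian 80-bit layout: 64 mantissa bits (explicit
 * integer bit + 63 fraction bits), then sign bit and 15 exponent bits. */
union ldbl_bits {
	long double value;
	struct {
		uint64_t man;		/* integer bit + fraction */
		uint16_t expsign;	/* sign and biased exponent */
	} bits;
};

static int get_ldbl_expsign(long double x)
{
	union ldbl_bits u;

	u.value = x;
	return (u.bits.expsign);
}

static void set_ldbl_expsign(long double *x, int expsign)
{
	union ldbl_bits u;

	u.value = *x;
	u.bits.expsign = expsign;
	*x = u.value;
}

int main(void)
{
	long double x = 1.0L;

	printf("0x%x\n", get_ldbl_expsign(x));	/* 0x3fff: bias, sign 0 */
	set_ldbl_expsign(&x, 0xbfff);		/* set the sign bit */
	printf("%Lg\n", x);			/* -1 */
	return 0;
}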
304 /* Long double constants are broken on i386. */
310 /* The above works on non-i386 too, but we use this to check v. */
316 * Attempt to get strict C99 semantics for assignment with non-C99 compilers.
324 if (sizeof(type) >= sizeof(long double)) \
336 #define ENTERI() ENTERIT(long double)
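Line 324 is the sizeof() test from STRICT_ASSIGN(). A sketch of the whole macro as usually defined, bouncing narrow assignments through a volatile so the stored value is genuinely rounded to 'type':

#include <stdio.h>

/*
 * Force (rval) to be rounded to 'type' even when the compiler keeps
 * extra precision in registers; a volatile temporary defeats that.
 * Nothing wider than long double exists, so that case assigns directly.
 */
#define STRICT_ASSIGN(type, lval, rval) do {		\
	volatile type __lval;				\
							\
	if (sizeof(type) >= sizeof(long double))	\
		(lval) = (rval);			\
	else {						\
		__lval = (rval);			\
		(lval) = __lval;			\
	}						\
} while (0)

int main(void)
{
	double d;

	/* On i387, 1.0/3.0 may be evaluated in 80-bit precision; this
	 * guarantees d holds the quotient rounded to double. */
	STRICT_ASSIGN(double, d, 1.0 / 3.0);
	printf("%.17g\n", d);
	return 0;
}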
378 __s = __w - (a); \
379 (b) = ((a) - (__w - __s)) + ((b) - __s); \
386 * "Normalize" the terms in the infinite-precision expression a + b for
389 * the same precision as 'a' and the resulting b is the rounding error.)
396 * extra precision in a + b. This is required by C standards but broken
399 * algorithm would be destroyed by non-null strict assignments. (The
400 * compilers are correct to be broken -- the efficiency of all floating
405 * any extra precision into the type of 'a' -- 'a' should have type float_t,
406 * double_t or long double. b's type should be no larger than 'a's type.
408 * reduce their own extra-precision and efficiency problems. In
421 (b) = ((a) - __w) + (b); \
424 /* The next 2 assertions are weak if (a) is already long double. */ \
425 assert((long double)__ia + __ib == (long double)(a) + (b)); \
427 __r = __ia - __vw; \
436 (b) = ((a) - __w) + (b); \
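For reference, standalone function versions of the two steps: the branch-free variant from lines 378-379 works for any operands, while the fast variant asserted above requires |a| >= |b| or a == 0. A sketch:

#include <assert.h>
#include <math.h>
#include <stdio.h>

/* Knuth's branch-free two-sum (the _2sum variant): any a, b. */
static void two_sum(double a, double b, double *hi, double *lo)
{
	double w, s;

	w = a + b;
	s = w - a;
	*lo = (a - (w - s)) + (b - s);	/* exact rounding error of a + b */
	*hi = w;
}

/* Fast two-sum (the _2sumF variant): needs |a| >= |b| or a == 0. */
static void two_sum_fast(double a, double b, double *hi, double *lo)
{
	double w;

	assert(a == 0 || fabs(a) >= fabs(b));
	w = a + b;
	*lo = (a - w) + b;
	*hi = w;
}

int main(void)
{
	double hi, lo;

	two_sum(1.0, 0x1p-60, &hi, &lo);
	printf("hi=%g lo=%g\n", hi, lo);	/* lo keeps the bits hi dropped */
	two_sum_fast(1.0, 0x1p-60, &hi, &lo);
	printf("hi=%g lo=%g\n", hi, lo);
	return 0;
}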
442 * Set x += c, where x is represented in extra precision as a + b.
452 * 2**20 times smaller than 'a' to give about 20 extra bits of precision.
458 * or by having |c| a few percent smaller than |a|. Pre-normalization of
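A sketch of the operation the comment at lines 442-458 describes (not the header's exact macro): add c to the extra-precision pair a + b with one fast two-sum against the high term.

#include <stdio.h>

/* x += c where x is represented in extra precision as a + b.
 * Sketch only; assumes |a| >= |c|, as the comment above requires. */
static void dd_add(double *a, double *b, double c)
{
	double w, e;

	w = *a + c;
	e = (*a - w) + c;	/* exact rounding error of a + c */
	*b += e;		/* fold the error into the low term */
	*a = w;
}

int main(void)
{
	double a = 1.0, b = 0x1p-60;

	dd_add(&a, &b, 0x1p-53);
	printf("%.17g %g\n", a, b);
	return 0;
}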
493 * precision might depend on the runtime precision and/or on compiler
496 * runtime precision by always doing the main mixing step in long double
497 * precision. Try to reduce dependencies on optimizations by adding
498 * the 0's in different precisions (unless everything is in long double
499 * precision).
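A sketch consistent with this comment (FreeBSD's actual macro is nan_mix(); the exact definition here is an assumption): adding 0 quiets signaling NaNs, the long double addition does the main mixing at a fixed precision, and the two 0's deliberately have different precisions.

#include <math.h>
#include <stdio.h>

/* Quiet each argument by adding 0, then mix by adding in long double. */
#define nan_mix(x, y)	(((x) + 0.0L) + ((y) + 0))

int main(void)
{
	double r = nan_mix(nan("1"), 2.0);

	printf("%d\n", isnan(r) != 0);			/* 1: a NaN wins */
	printf("%g\n", (double)nan_mix(1.0, 2.0));	/* 3: plain sum */
	return 0;
}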
516 double complex f;
517 double a[2];
520 long double complex f;
521 long double a[2];
533 * In particular, I*Inf is corrupted to NaN+I*Inf, and I*-0 is corrupted
534 * to -0.0+I*0.0.
554 static __inline double complex
555 CMPLX(double x, double y)
566 static __inline long double complex
567 CMPLXL(long double x, long double y)
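A sketch of the union trick these functions rely on, using the double_complex type and REALPART()/IMAGPART() accessors shown above to build x + I*y without ever evaluating I*y:

#include <complex.h>
#include <stdio.h>

typedef union {
	double complex f;
	double a[2];
} double_complex;

#define REALPART(z)	((z).a[0])
#define IMAGPART(z)	((z).a[1])

/* Named my_cmplx to avoid C11's CMPLX macro from <complex.h>. */
static inline double complex
my_cmplx(double x, double y)
{
	double_complex z;

	/* Store the parts directly; no complex arithmetic happens. */
	REALPART(z) = x;
	IMAGPART(z) = y;
	return (z.f);
}

int main(void)
{
	double complex z = my_cmplx(0.0, -0.0);

	printf("%g %g\n", creal(z), cimag(z));	/* 0 -0: sign preserved */
	return 0;
}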
583 * Extra precision causes more problems in practice, and we only centralize
590 static inline double
594 * This casts to double to kill any extra precision. This depends
597 * inefficient if there actually is extra precision, but is hard
601 * the rounding precision is variable at runtime on x86 so the
603 * rounding precision is always the default is too fragile. This
607 return ((double)(x + 0x1.8p52) - 0x1.8p52);
615 * extra precision case, usually without losing efficiency.
617 return ((float)(x + 0x1.8p23F) - 0x1.8p23F);
622 * The complications for extra precision are smaller for rnintl() since it
623 * can safely assume that the rounding precision has been increased from
625 * optimizations from limiting the range to double. We just need it for
632 static inline long double
633 rnintl(long double x)
635 return (x + __CONCAT(0x1.8p, LDBL_MANT_DIG) / 2 -
648 sizeof(__float_t) == sizeof(long double) ? irintf(x) : \
649 sizeof(x) == sizeof(double) && \
650 sizeof(__double_t) == sizeof(long double) ? irintd(x) : \
651 sizeof(x) == sizeof(long double) ? irintl(x) : (int)(x))
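Every sizeof() comparison in irint() is a compile-time constant, so the compiler keeps only the selected branch. A sketch of the dispatch, with hypothetical stand-ins for the irint*() helpers (the real ones use inline asm where available):

#include <math.h>
#include <stdio.h>

/* Hypothetical helpers standing in for the header's irintd()/irintl(). */
static inline int irintd(double x)	{ return ((int)rint(x)); }
static inline int irintl(long double x)	{ return ((int)rintl(x)); }

/* Constant-folded type dispatch; anything else falls back to a cast. */
#define my_irint(x)						\
    (sizeof(x) == sizeof(double) ? irintd(x) :			\
    sizeof(x) == sizeof(long double) ? irintl(x) : (int)(x))

int main(void)
{
	printf("%d %d\n", my_irint(2.5), my_irint(3.5L));	/* 2 4 */
	return 0;
}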
669 irintd(double x)
680 irintl(long double x)
690 * The following are fast floor macros for 0 <= |x| < 0x1p(N-1), where
691 * N is the precision of the type of x. These macros are used in the
692 * half-cycle trigonometric functions (e.g., sinpi(x)).
695 (j0) = (((ix) >> 23) & 0xff) - 0x7f; \
701 (j0) = (((ix) >> 20) & 0x7ff) - 0x3ff; \
706 (lx) &= ~((uint32_t)0xffffffff >> ((j0) - 20)); \
712 j0 = ix - 0x3fff + 1; \
715 (lx) &= ~((((lx) << 32)-1) >> (j0)); \
718 _m = (uint64_t)-1 >> (j0); \
729 e = u.bits.exp - 16383; \
731 m = ((1llu << 49) - 1) >> (e + 1); \
735 m = (uint64_t)-1 >> (e - 48); \
739 (ar) = (x) - (ai); \
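A sketch of the bit-mask floor idea behind these macros, written out for float and nonnegative x (the macros additionally handle the sign and the split-word long double formats):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* floor(x) for 0 <= x < 0x1p22: clear the mantissa bits that lie below
 * the binary point, whose position follows from the unbiased exponent. */
static float fast_floorf(float x)
{
	uint32_t ix;
	int j0;

	memcpy(&ix, &x, sizeof(ix));
	j0 = ((ix >> 23) & 0xff) - 0x7f;	/* unbiased exponent */
	if (j0 < 0)
		return (0.0F);			/* 0 <= x < 1 */
	ix &= ~(0x007fffff >> j0);		/* drop the fraction bits */
	memcpy(&x, &ix, sizeof(x));
	return (x);
}

int main(void)
{
	printf("%g %g %g\n", fast_floorf(3.75F), fast_floorf(0.5F),
	    fast_floorf(100.99F));		/* 3 0 100 */
	return 0;
}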
743 * For a subnormal double entity split into high and low parts, compute ilogb.
751 j = -1022;
753 j -= 21;
758 for (; i < 0x7fffffff; i <<= 1) j -= 1;
772 for (j = -126; i < 0x7fffffff; i <<= 1) j -= 1;
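A self-contained sketch of the float case: shift the fraction toward the top of the word and count shifts until the leading bit reaches the integer-bit position.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* ilogb for a nonzero subnormal float: start from the smallest normal
 * exponent and decrement once per leading zero in the fraction. */
static int my_subnormal_ilogbf(float x)
{
	uint32_t i;
	int j;

	memcpy(&i, &x, sizeof(i));
	i = (i & 0x007fffff) << 8;	/* fraction, shifted toward the top */
	for (j = -126; i < 0x7fffffff; i <<= 1)
		j -= 1;
	return (j);
}

int main(void)
{
	printf("%d %d\n", my_subnormal_ilogbf(0x1p-130F),
	    my_subnormal_ilogbf(0x1p-149F));	/* -130 -149 */
	return 0;
}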
789 if (!(rp)->lo_set) \
790 RETURNF((rp)->hi); \
791 RETURNF((rp)->hi + (rp)->lo); \
794 if (!(rp)->lo_set) \
795 RETURNI((rp)->hi); \
796 RETURNI((rp)->hi + (rp)->lo); \
807 int __kernel_rem_pio2(double*,double*,int,int,int);
809 /* double precision kernel functions */
811 int __ieee754_rem_pio2(double,double*);
813 double __kernel_sin(double,double,int);
814 double __kernel_cos(double,double);
815 double __kernel_tan(double,double,int);
816 double __ldexp_exp(double,int);
818 double complex __ldexp_cexp(double complex,int);
821 /* float precision kernel functions */
823 int __ieee754_rem_pio2f(float,double*);
826 float __kernel_sindf(double);
829 float __kernel_cosdf(double);
832 float __kernel_tandf(double,int);
839 /* long double precision kernel functions */
840 long double __kernel_sinl(long double, long double, int);
841 long double __kernel_cosl(long double, long double);
842 long double __kernel_tanl(long double, long double, int);
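How these kernels compose, as a sketch of the dispatch in s_sin.c (the small-argument fast path and Inf/NaN handling are omitted, and this only links inside libm, where the kernels declared above are defined). Argument reduction writes x as n*(pi/2) + y[0] + y[1]; n mod 4 picks the quadrant:

double my_sin(double x)
{
	double y[2];
	int n;

	n = __ieee754_rem_pio2(x, y);
	switch (n & 3) {
	case 0:
		return (__kernel_sin(y[0], y[1], 1));
	case 1:
		return (__kernel_cos(y[0], y[1]));
	case 2:
		return (-__kernel_sin(y[0], y[1], 1));
	default:
		return (-__kernel_cos(y[0], y[1]));
	}
}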