Lines Matching refs:vdm
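
These matches appear to come from the Linux kernel's ARM VFP double-precision emulation (arch/arm/vfp/vfpdouble.c); "vdm" is the unpacked form of the Dm source operand. For reading the fragments below, the unpacked operand type from arch/arm/vfp/vfp.h is, paraphrased here with stdint names (the kernel uses s16/u16/u64):

    #include <stdint.h>

    struct vfp_double {
        int16_t  exponent;     /* biased by 1023; 0 = zero/denormal, 2047 = inf/NaN */
        uint16_t sign;         /* 0 or 0x8000: the IEEE sign bit kept in bit 15 */
        uint64_t significand;  /* leading 1 at bit 62 when normal; bit 63 is a guard */
    };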

208 		  struct vfp_double *vdm, u32 fpscr)  in vfp_propagate_nan()  argument
215 if (vdm) in vfp_propagate_nan()
216 tm = vfp_double_type(vdm); in vfp_propagate_nan()
232 nan = vdm; in vfp_propagate_nan()
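
vfp_propagate_nan() chooses which operand's NaN becomes the result, quietening a signalling NaN and raising the invalid-operation exception. A minimal userspace sketch of the underlying classification (nan_type, QNAN and SNAN are hypothetical names; IEEE 754 puts the quiet bit at the top of the mantissa):

    #include <stdint.h>

    #define QNAN 1
    #define SNAN 2

    static int nan_type(uint64_t d)   /* d = raw IEEE double bits */
    {
        uint64_t exp  = (d >> 52) & 0x7ff;
        uint64_t frac = d & 0xfffffffffffffULL;

        if (exp != 0x7ff || frac == 0)
            return 0;                           /* not a NaN */
        return (frac >> 51) & 1 ? QNAN : SNAN;  /* quiet bit set -> quiet NaN */
    }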
270 struct vfp_double vdm, vdd; in vfp_double_fsqrt() local
273 vfp_double_unpack(&vdm, vfp_get_double(dm)); in vfp_double_fsqrt()
274 tm = vfp_double_type(&vdm); in vfp_double_fsqrt()
279 ret = vfp_propagate_nan(vdp, &vdm, NULL, fpscr); in vfp_double_fsqrt()
280 else if (vdm.sign == 0) { in vfp_double_fsqrt()
282 vdp = &vdm; in vfp_double_fsqrt()
303 vfp_double_normalise_denormal(&vdm); in vfp_double_fsqrt()
308 if (vdm.sign) in vfp_double_fsqrt()
311 vfp_double_dump("sqrt", &vdm); in vfp_double_fsqrt()
317 vdd.exponent = ((vdm.exponent - 1023) >> 1) + 1023; in vfp_double_fsqrt()
318 vdd.significand = (u64)vfp_estimate_sqrt_significand(vdm.exponent, vdm.significand >> 32) << 31; in vfp_double_fsqrt()
322 vdm.significand >>= 1 + (vdm.exponent & 1); in vfp_double_fsqrt()
323 vdd.significand += 2 + vfp_estimate_div128to64(vdm.significand, 0, vdd.significand); in vfp_double_fsqrt()
335 vdm.significand <<= 2; in vfp_double_fsqrt()
337 sub128(&remh, &reml, vdm.significand, 0, termh, terml); in vfp_double_fsqrt()
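
Lines 317 and 322 implement the exponent-halving identity sqrt(m * 2^e) = sqrt(m') * 2^(e>>1): the odd bit of e is folded into the fraction before the significand estimate is refined. A standalone check of that identity (userspace, not kernel code):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double x = 1234.5678;
        int e;
        double m = frexp(x, &e);            /* x = m * 2^e, 0.5 <= m < 1 */

        double m2 = (e & 1) ? m * 2.0 : m;  /* fold the odd exponent bit */
        int    e2 = (e - (e & 1)) / 2;      /* halve what remains */

        printf("%.17g\n%.17g\n", sqrt(x), ldexp(sqrt(m2), e2));
        return 0;
    }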
441 struct vfp_double vdm; in vfp_double_fcvts() local
446 vfp_double_unpack(&vdm, vfp_get_double(dm)); in vfp_double_fcvts()
448 tm = vfp_double_type(&vdm); in vfp_double_fcvts()
457 vfp_double_normalise_denormal(&vdm); in vfp_double_fcvts()
459 vsd.sign = vdm.sign; in vfp_double_fcvts()
460 vsd.significand = vfp_hi64to32jamming(vdm.significand); in vfp_double_fcvts()
473 vsd.exponent = vdm.exponent - (1023 - 127); in vfp_double_fcvts()
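
vfp_double_fcvts() re-biases the exponent by the bias difference 1023 - 127 = 896 (line 473) and narrows the 64-bit significand to 32 bits with "jamming" (line 460): any discarded low bits are ORed into bit 0 so the final rounding still sees that the value was inexact. Assumed semantics of vfp_hi64to32jamming (the kernel codes it in assembly):

    #include <stdint.h>
    #include <assert.h>

    static uint32_t hi64to32jamming(uint64_t v)
    {
        /* keep the top 32 bits; a nonzero discarded half becomes sticky bit 0 */
        return (uint32_t)(v >> 32) | ((uint32_t)v != 0);
    }

    int main(void)
    {
        assert(hi64to32jamming(0x123456780000beefULL) == 0x12345679);
        return 0;
    }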
484 struct vfp_double vdm; in vfp_double_fuito() local
487 vdm.sign = 0; in vfp_double_fuito()
488 vdm.exponent = 1023 + 63 - 1; in vfp_double_fuito()
489 vdm.significand = (u64)m; in vfp_double_fuito()
491 return vfp_double_normaliseround(dd, &vdm, fpscr, 0, "fuito"); in vfp_double_fuito()
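
The constant at line 488 encodes the implicit-1 position: with the unpacked value defined as significand * 2^(exponent - 1023 - 62), an exponent of 1023 + 63 - 1 makes a raw 32-bit integer placed in the significand equal to itself, and vfp_double_normaliseround() then shifts it into normal form. A quick check of that identity:

    #include <stdint.h>
    #include <math.h>
    #include <assert.h>

    int main(void)
    {
        uint32_t m = 12345;
        int exponent = 1023 + 63 - 1;                     /* 1085 */
        /* value = significand * 2^(exponent - 1023 - 62) */
        assert(ldexp((double)m, exponent - 1023 - 62) == (double)m);
        return 0;
    }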
496 struct vfp_double vdm; in vfp_double_fsito() local
499 vdm.sign = (m & 0x80000000) >> 16; in vfp_double_fsito()
500 vdm.exponent = 1023 + 63 - 1; in vfp_double_fsito()
501 vdm.significand = vdm.sign ? -m : m; in vfp_double_fsito()
503 return vfp_double_normaliseround(dd, &vdm, fpscr, 0, "fsito"); in vfp_double_fsito()
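
The ">> 16" at line 499 is not a typo: struct vfp_double stores the sign as 0x8000 in a 16-bit field, so packed bit 31 moves down to bit 15, not to bit 0. Negating the unsigned bit pattern at line 501 then yields the magnitude. A check of both points:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint32_t m = (uint32_t)-42;               /* two's-complement bits of -42 */
        uint16_t sign = (m & 0x80000000u) >> 16;  /* 0x8000, not 1 */
        uint32_t mag  = sign ? -m : m;            /* unsigned negate = magnitude */

        assert(sign == 0x8000 && mag == 42);
        return 0;
    }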
508 struct vfp_double vdm; in vfp_double_ftoui() local
513 vfp_double_unpack(&vdm, vfp_get_double(dm)); in vfp_double_ftoui()
518 tm = vfp_double_type(&vdm); in vfp_double_ftoui()
523 vdm.sign = 0; in vfp_double_ftoui()
525 if (vdm.exponent >= 1023 + 32) { in vfp_double_ftoui()
526 d = vdm.sign ? 0 : 0xffffffff; in vfp_double_ftoui()
528 } else if (vdm.exponent >= 1023 - 1) { in vfp_double_ftoui()
529 int shift = 1023 + 63 - vdm.exponent; in vfp_double_ftoui()
535 d = (vdm.significand << 1) >> shift; in vfp_double_ftoui()
536 rem = vdm.significand << (65 - shift); in vfp_double_ftoui()
544 } else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vdm.sign != 0)) { in vfp_double_ftoui()
555 if (d && vdm.sign) { in vfp_double_ftoui()
562 if (vdm.exponent | vdm.significand) { in vfp_double_ftoui()
564 if (rmode == FPSCR_ROUND_PLUSINF && vdm.sign == 0) in vfp_double_ftoui()
566 else if (rmode == FPSCR_ROUND_MINUSINF && vdm.sign) { in vfp_double_ftoui()
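
At lines 529-536 the conversion splits the number at the binary point: with the leading 1 at significand bit 62, shift = 1023 + 63 - exponent lands the integer part in d while rem holds the discarded fraction scaled by 2^64, which the rounding-mode tests from line 544 on inspect. A worked example for 2.75 (shift is 62 here):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* 2.75 = 1.011b * 2^1, unpacked with the leading 1 at bit 62 */
        uint64_t significand = 0x5800000000000000ULL;
        int exponent = 1023 + 1;
        int shift = 1023 + 63 - exponent;            /* 62 */

        uint32_t d   = (significand << 1) >> shift;  /* integer part: 2 */
        uint64_t rem = significand << (65 - shift);  /* fraction * 2^64 */

        printf("d=%u frac=%.2f\n", d, (double)rem / 18446744073709551616.0);
        return 0;
    }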
587 struct vfp_double vdm; in vfp_double_ftosi() local
592 vfp_double_unpack(&vdm, vfp_get_double(dm)); in vfp_double_ftosi()
593 vfp_double_dump("VDM", &vdm); in vfp_double_ftosi()
598 tm = vfp_double_type(&vdm); in vfp_double_ftosi()
605 } else if (vdm.exponent >= 1023 + 32) { in vfp_double_ftosi()
607 if (vdm.sign) in vfp_double_ftosi()
610 } else if (vdm.exponent >= 1023 - 1) { in vfp_double_ftosi()
611 int shift = 1023 + 63 - vdm.exponent; /* 58 */ in vfp_double_ftosi()
614 d = (vdm.significand << 1) >> shift; in vfp_double_ftosi()
615 rem = vdm.significand << (65 - shift); in vfp_double_ftosi()
623 } else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vdm.sign != 0)) { in vfp_double_ftosi()
629 if (d > 0x7fffffff + (vdm.sign != 0)) { in vfp_double_ftosi()
630 d = 0x7fffffff + (vdm.sign != 0); in vfp_double_ftosi()
635 if (vdm.sign) in vfp_double_ftosi()
639 if (vdm.exponent | vdm.significand) { in vfp_double_ftosi()
641 if (rmode == FPSCR_ROUND_PLUSINF && vdm.sign == 0) in vfp_double_ftosi()
643 else if (rmode == FPSCR_ROUND_MINUSINF && vdm.sign) in vfp_double_ftosi()
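
The "+ (vdm.sign != 0)" at lines 629-630 encodes the asymmetric s32 range: the magnitude may reach 0x7fffffff when positive but 0x80000000 when negative. A sketch of the clamp (saturate is a hypothetical helper; the kernel also raises the invalid-operation flag when it clamps):

    #include <stdint.h>
    #include <assert.h>

    static int32_t saturate(uint32_t magnitude, int negative)
    {
        uint32_t limit = 0x7fffffffu + (negative != 0);

        if (magnitude > limit)
            magnitude = limit;       /* out of range: invalid operation */
        if (negative)
            magnitude = -magnitude;  /* negate in unsigned, as line 635 does */
        return (int32_t)magnitude;
    }

    int main(void)
    {
        assert(saturate(0x90000000u, 0) == INT32_MAX);
        assert(saturate(0x90000000u, 1) == INT32_MIN);
        return 0;
    }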
684 struct vfp_double *vdm, u32 fpscr) in vfp_double_fadd_nonnumber() argument
691 tm = vfp_double_type(vdm); in vfp_double_fadd_nonnumber()
697 if (vdn->sign ^ vdm->sign) { in vfp_double_fadd_nonnumber()
718 return vfp_propagate_nan(vdd, vdn, vdm, fpscr); in vfp_double_fadd_nonnumber()
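
vfp_double_fadd_nonnumber() handles the operand combinations ordinary addition cannot: the sign test at line 697 catches infinities of opposite sign, which have no meaningful sum and must yield a default NaN plus an invalid-operation exception, while anything involving a NaN falls through to vfp_propagate_nan() at line 718. Hardware IEEE arithmetic shows the same rule:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        printf("%f\n", INFINITY + INFINITY);  /* inf: signs agree */
        printf("%f\n", INFINITY - INFINITY);  /* nan: invalid operation */
        return 0;
    }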
726 struct vfp_double *vdm, u32 fpscr) in vfp_double_add() argument
732 vdm->significand & (1ULL << 63)) { in vfp_double_add()
735 vfp_double_dump("VDM", vdm); in vfp_double_add()
743 if (vdn->exponent < vdm->exponent) { in vfp_double_add()
745 vdn = vdm; in vfp_double_add()
746 vdm = t; in vfp_double_add()
754 return vfp_double_fadd_nonnumber(vdd, vdn, vdm, fpscr); in vfp_double_add()
766 exp_diff = vdn->exponent - vdm->exponent; in vfp_double_add()
767 m_sig = vfp_shiftright64jamming(vdm->significand, exp_diff); in vfp_double_add()
772 if (vdn->sign ^ vdm->sign) { in vfp_double_add()
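
vfp_double_add() first swaps operands so vdn carries the larger exponent (lines 743-746), then aligns vdm by the exponent difference (lines 766-767) before adding or subtracting magnitudes depending on the sign test at line 772. The alignment shift keeps a sticky record of every bit it discards; assumed semantics of vfp_shiftright64jamming (the kernel implements it in assembly):

    #include <stdint.h>
    #include <assert.h>

    static uint64_t shiftright64jamming(uint64_t val, unsigned shift)
    {
        if (shift == 0)
            return val;
        if (shift < 64)
            return (val >> shift) | ((val << (64 - shift)) != 0);
        return val != 0;             /* everything lost: sticky bit only */
    }

    int main(void)
    {
        /* the low set bit survives as sticky bit 0 */
        assert(shiftright64jamming(0x8000000000000001ULL, 4)
               == 0x0800000000000001ULL);
        return 0;
    }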
791 struct vfp_double *vdm, u32 fpscr) in vfp_double_multiply() argument
794 vfp_double_dump("VDM", vdm); in vfp_double_multiply()
801 if (vdn->exponent < vdm->exponent) { in vfp_double_multiply()
803 vdn = vdm; in vfp_double_multiply()
804 vdm = t; in vfp_double_multiply()
808 vdd->sign = vdn->sign ^ vdm->sign; in vfp_double_multiply()
814 if (vdn->significand || (vdm->exponent == 2047 && vdm->significand)) in vfp_double_multiply()
815 return vfp_propagate_nan(vdd, vdn, vdm, fpscr); in vfp_double_multiply()
816 if ((vdm->exponent | vdm->significand) == 0) { in vfp_double_multiply()
829 if ((vdm->exponent | vdm->significand) == 0) { in vfp_double_multiply()
840 vdd->exponent = vdn->exponent + vdm->exponent - 1023 + 2; in vfp_double_multiply()
841 vdd->significand = vfp_hi64multiply64(vdn->significand, vdm->significand); in vfp_double_multiply()
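
For finite operands the product exponent at line 840 is the sum of the unbiased exponents, re-biased, plus 2 to account for where the high half of the 128-bit significand product sits relative to bit 62; line 841 takes that high half with the low half jammed into the sticky bit. Assumed semantics of vfp_hi64multiply64, using the GCC/Clang __int128 extension:

    #include <stdint.h>
    #include <assert.h>

    static uint64_t hi64multiply64(uint64_t n, uint64_t m)
    {
        unsigned __int128 p = (unsigned __int128)n * m;
        return (uint64_t)(p >> 64) | ((uint64_t)p != 0);  /* high half | sticky */
    }

    int main(void)
    {
        /* (2^62)^2 = 2^124: high word is 2^60, nothing lost below */
        assert(hi64multiply64(1ULL << 62, 1ULL << 62) == 1ULL << 60);
        return 0;
    }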
853 struct vfp_double vdd, vdp, vdn, vdm; in vfp_double_multiply_accumulate() local
860 vfp_double_unpack(&vdm, vfp_get_double(dm)); in vfp_double_multiply_accumulate()
861 if (vdm.exponent == 0 && vdm.significand) in vfp_double_multiply_accumulate()
862 vfp_double_normalise_denormal(&vdm); in vfp_double_multiply_accumulate()
864 exceptions = vfp_double_multiply(&vdp, &vdn, &vdm, fpscr); in vfp_double_multiply_accumulate()
920 struct vfp_double vdd, vdn, vdm; in vfp_double_fmul() local
927 vfp_double_unpack(&vdm, vfp_get_double(dm)); in vfp_double_fmul()
928 if (vdm.exponent == 0 && vdm.significand) in vfp_double_fmul()
929 vfp_double_normalise_denormal(&vdm); in vfp_double_fmul()
931 exceptions = vfp_double_multiply(&vdd, &vdn, &vdm, fpscr); in vfp_double_fmul()
940 struct vfp_double vdd, vdn, vdm; in vfp_double_fnmul() local
947 vfp_double_unpack(&vdm, vfp_get_double(dm)); in vfp_double_fnmul()
948 if (vdm.exponent == 0 && vdm.significand) in vfp_double_fnmul()
949 vfp_double_normalise_denormal(&vdm); in vfp_double_fnmul()
951 exceptions = vfp_double_multiply(&vdd, &vdn, &vdm, fpscr); in vfp_double_fnmul()
962 struct vfp_double vdd, vdn, vdm; in vfp_double_fadd() local
969 vfp_double_unpack(&vdm, vfp_get_double(dm)); in vfp_double_fadd()
970 if (vdm.exponent == 0 && vdm.significand) in vfp_double_fadd()
971 vfp_double_normalise_denormal(&vdm); in vfp_double_fadd()
973 exceptions = vfp_double_add(&vdd, &vdn, &vdm, fpscr); in vfp_double_fadd()
983 struct vfp_double vdd, vdn, vdm; in vfp_double_fsub() local
990 vfp_double_unpack(&vdm, vfp_get_double(dm)); in vfp_double_fsub()
991 if (vdm.exponent == 0 && vdm.significand) in vfp_double_fsub()
992 vfp_double_normalise_denormal(&vdm); in vfp_double_fsub()
997 vdm.sign = vfp_sign_negate(vdm.sign); in vfp_double_fsub()
999 exceptions = vfp_double_add(&vdd, &vdn, &vdm, fpscr); in vfp_double_fsub()
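
Every two-operand entry point above (the multiply-accumulate variants, fmul, fnmul, fadd, fsub) shares the same prologue: unpack each register and, if the exponent field is 0 with a nonzero fraction (a denormal), pre-normalise it so the core routines can assume the leading 1 sits at bit 62. A standalone sketch of the effect (the kernel's vfp_double_normalise_denormal() adjusts the exponent by one less, since an exponent field of 0 encodes the same scale as 1):

    #include <stdint.h>
    #include <assert.h>

    static void normalise(uint64_t *sig, int *exp)
    {
        while (!(*sig & (1ULL << 62))) {  /* bring the leading 1 to bit 62 */
            *sig <<= 1;
            (*exp)--;
        }
    }

    int main(void)
    {
        uint64_t sig = 1ULL << 40;        /* denormal-style significand */
        int exp = 0;
        normalise(&sig, &exp);
        assert(sig == 1ULL << 62 && exp == -22);
        return 0;
    }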
1009 struct vfp_double vdd, vdn, vdm; in vfp_double_fdiv() local
1014 vfp_double_unpack(&vdm, vfp_get_double(dm)); in vfp_double_fdiv()
1016 vdd.sign = vdn.sign ^ vdm.sign; in vfp_double_fdiv()
1019 tm = vfp_double_type(&vdm); in vfp_double_fdiv()
1061 vfp_double_normalise_denormal(&vdm); in vfp_double_fdiv()
1066 vdd.exponent = vdn.exponent - vdm.exponent + 1023 - 1; in vfp_double_fdiv()
1067 vdm.significand <<= 1; in vfp_double_fdiv()
1068 if (vdm.significand <= (2 * vdn.significand)) { in vfp_double_fdiv()
1072 vdd.significand = vfp_estimate_div128to64(vdn.significand, 0, vdm.significand); in vfp_double_fdiv()
1075 mul64to128(&termh, &terml, vdm.significand, vdd.significand); in vfp_double_fdiv()
1079 add128(&remh, &reml, remh, reml, 0, vdm.significand); in vfp_double_fdiv()
1086 exceptions = vfp_propagate_nan(&vdd, &vdn, &vdm, fpscr); in vfp_double_fdiv()
1092 exceptions = vfp_propagate_nan(&vdd, &vdm, &vdn, fpscr); in vfp_double_fdiv()
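
vfp_double_fdiv() subtracts exponents and re-biases (line 1066); pre-doubling vdm.significand at line 1067 and the comparison at line 1068 keep the estimate from vfp_estimate_div128to64() in range, and lines 1075-1079 refine it by checking the remainder with a full 128-bit multiply. The exponent arithmetic itself is the plain identity below (userspace check; the fraction ratio stays in (0.5, 2), so at most one normalising shift follows):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double x = 355.0, y = 113.0;
        int ex, ey;
        double mx = frexp(x, &ex);   /* x = mx * 2^ex, 0.5 <= mx < 1 */
        double my = frexp(y, &ey);

        printf("%.17g\n%.17g\n", x / y, ldexp(mx / my, ex - ey));
        return 0;
    }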