Lines Matching full:t4

263 	uint64_t x, f, t0, t1, t2, t3, t4;  in f256_montymul()  local
295 t4 = (uint64_t)(z >> 64); in f256_montymul()
312 z = t4 + (z >> 64); in f256_montymul()
314 t4 = (uint64_t)(z >> 64); in f256_montymul()
329 t4 += (uint64_t)(z >> 64); in f256_montymul()
345 * t4 can be only 0 or 1 in f256_montymul()
347 * We can therefore subtract p from t, conditionally on t4, to in f256_montymul()
350 z = (unsigned __int128)t0 + t4; in f256_montymul()
352 z = (unsigned __int128)t1 - (t4 << 32) + (z >> 64); in f256_montymul()
356 t3 = t3 - (uint64_t)(z >> 127) - t4 + (t4 << 32); in f256_montymul()
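The four arithmetic lines above are the branchless final reduction in the __int128 code path: t4 is the carry word (0 or 1), and subtracting p exactly when t4 == 1 is done by adding t4*(2^256 - p) word by word. Below is a minimal replay of that sequence on the input 2^256 + 1, assuming GCC/Clang's unsigned __int128; the intermediate word stores (t0 = (uint64_t)z, ...) do not match "t4" and so are reconstructed here, making this an illustrative sketch rather than a verbatim excerpt:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* t4:t3:t2:t1:t0 = 2^256 + 1: a value with the carry word set,
	   so the conditional subtraction of p must fire. */
	uint64_t t0 = 1, t1 = 0, t2 = 0, t3 = 0, t4 = 1;
	unsigned __int128 z;

	/* Add t4 * (2^256 - p) word by word, as in the listing; only
	   the lines mentioning t4 appear above, the stores between
	   them are reconstructed. */
	z = (unsigned __int128)t0 + t4;
	t0 = (uint64_t)z;
	z = (unsigned __int128)t1 - (t4 << 32) + (z >> 64);
	t1 = (uint64_t)z;
	z = (unsigned __int128)t2 - (z >> 127);
	t2 = (uint64_t)z;
	t3 = t3 - (uint64_t)(z >> 127) - t4 + (t4 << 32);

	/* Expect 2^256 + 1 - p = 2^224 - 2^192 - 2^96 + 2, i.e.:
	   00000000fffffffe ffffffffffffffff ffffffff00000000
	   0000000000000002 */
	printf("%016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 "\n",
		t3, t2, t1, t0);
	return 0;
}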
365 uint64_t x, f, t0, t1, t2, t3, t4; in f256_montymul()
413 t4 = _addcarry_u64(0, t3, f, &t3); in f256_montymul()
416 (void)_subborrow_u64(k, t4, 0, &t4); in f256_montymul()
448 t4 = _addcarry_u64(0, t3, t4, &t3); in f256_montymul()
459 (void)_addcarry_u64(k, t4, 0, &t4); in f256_montymul()
475 * t4 can be only 0 or 1 in f256_montymul()
477 * We can therefore subtract p from t, conditionally on t4, to in f256_montymul()
480 k = _addcarry_u64(0, t0, t4, &t0); in f256_montymul()
481 k = _addcarry_u64(k, t1, -(t4 << 32), &t1); in f256_montymul()
482 k = _addcarry_u64(k, t2, -t4, &t2); in f256_montymul()
483 (void)_addcarry_u64(k, t3, (t4 << 32) - (t4 << 1), &t3); in f256_montymul()
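The _addcarry_u64() variant performs the same conditional subtraction as a four-word carry chain. The addends it feeds in, t4, -(t4 << 32), -t4 and (t4 << 32) - (t4 << 1), should be exactly the little-endian words of t4*(2^256 - p) for the P-256 modulus p = 2^256 - 2^224 + 2^192 + 2^96 - 1. A small portable check of that claim (illustrative sketch):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t t4 = 1;	/* carry word; only values 0 and 1 occur */

	/* Per-word addends fed to the _addcarry_u64() chain. */
	uint64_t a0 = t4;
	uint64_t a1 = -(t4 << 32);
	uint64_t a2 = -t4;
	uint64_t a3 = (t4 << 32) - (t4 << 1);

	/* They are the little-endian words of 2^256 - p
	   = 2^224 - 2^192 - 2^96 + 1, so the carry chain adds exactly
	   t4 * (2^256 - p), i.e. subtracts p modulo 2^256 when t4 = 1. */
	assert(a0 == 0x0000000000000001);
	assert(a1 == 0xFFFFFFFF00000000);
	assert(a2 == 0xFFFFFFFFFFFFFFFF);
	assert(a3 == 0x00000000FFFFFFFE);
	return 0;
}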
798 uint64_t t1[4], t2[4], t3[4], t4[4]; in p256_double() local
836 f256_montymul(t4, P->y, P->z); in p256_double()
837 f256_add(P->z, t4, t4); in p256_double()
845 f256_montysquare(t4, t3); in p256_double()
846 f256_add(t4, t4, t4); in p256_double()
847 f256_sub(P->y, P->y, t4); in p256_double()
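The p256_double() matches fit the standard Jacobian doubling formulas for a = -3 curves: m = 3*(x + z^2)*(x - z^2), s = 4*x*y^2, x' = m^2 - 2*s, y' = m*(s - x') - 8*y^4, z' = 2*y*z. Here t4 carries y*z (doubled into z') and then 8*y^4, which is consistent with t3 holding 2*y^2 at that point, since 2*(2*y^2)^2 = 8*y^4. These identities can be sanity-checked against the affine tangent formulas over a toy prime; the sketch below does so with z = 1 (the fadd/fsub/fmul/finv helpers are illustrative, not part of the code being searched):

#include <stdint.h>
#include <assert.h>

#define Q 10007u	/* toy prime standing in for the P-256 modulus */

static uint32_t fadd(uint32_t a, uint32_t b) { return (a + b) % Q; }
static uint32_t fsub(uint32_t a, uint32_t b) { return (a + Q - b) % Q; }
static uint32_t fmul(uint32_t a, uint32_t b) { return (uint32_t)(((uint64_t)a * b) % Q); }

static uint32_t finv(uint32_t a)	/* Fermat: a^(Q-2) mod Q */
{
	uint32_t r = 1, e = Q - 2;
	while (e) {
		if (e & 1) r = fmul(r, a);
		a = fmul(a, a);
		e >>= 1;
	}
	return r;
}

int main(void)
{
	/* Arbitrary x, y with y != 0; the doubling identities hold as
	   rational functions, independently of the curve constant b. */
	uint32_t x = 123, y = 456;

	/* Jacobian doubling with a = -3 and z = 1:
	   m = 3*(x + z^2)*(x - z^2), s = 4*x*y^2,
	   X = m^2 - 2*s, Y = m*(s - X) - 8*y^4, Z = 2*y*z. */
	uint32_t m = fmul(3, fmul(fadd(x, 1), fsub(x, 1)));
	uint32_t s = fmul(4, fmul(x, fmul(y, y)));
	uint32_t X = fsub(fmul(m, m), fmul(2, s));
	uint32_t y2 = fmul(y, y);
	uint32_t Y = fsub(fmul(m, fsub(s, X)), fmul(8, fmul(y2, y2)));
	uint32_t Z = fmul(2, y);

	/* Affine doubling: lambda = (3*x^2 - 3) / (2*y). */
	uint32_t lam = fmul(fsub(fmul(3, fmul(x, x)), 3), finv(fmul(2, y)));
	uint32_t xa = fsub(fmul(lam, lam), fmul(2, x));
	uint32_t ya = fsub(fmul(lam, fsub(x, xa)), y);

	/* Map (X, Y, Z) back to affine and compare. */
	uint32_t zi = finv(Z), zi2 = fmul(zi, zi);
	assert(xa == fmul(X, zi2));
	assert(ya == fmul(Y, fmul(zi2, zi)));
	return 0;
}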
899 uint64_t t1[4], t2[4], t3[4], t4[4], t5[4], t6[4], t7[4], tt; in p256_add() local
907 f256_montymul(t4, P2->z, t3); in p256_add()
908 f256_montymul(t3, P1->y, t4); in p256_add()
911 * Compute u2 = x2*z1^2 (in t2) and s2 = y2*z1^3 (in t4). in p256_add()
913 f256_montysquare(t4, P1->z); in p256_add()
914 f256_montymul(t2, P2->x, t4); in p256_add()
915 f256_montymul(t5, P1->z, t4); in p256_add()
916 f256_montymul(t4, P2->y, t5); in p256_add()
919 	* Compute h = u2 - u1 (in t2) and r = s2 - s1 (in t4). in p256_add()
924 f256_sub(t4, t4, t3); in p256_add()
925 f256_final_reduce(t4); in p256_add()
926 tt = t4[0] | t4[1] | t4[2] | t4[3]; in p256_add()
940 f256_montysquare(P1->x, t4); in p256_add()
949 f256_montymul(P1->y, t4, t6); in p256_add()
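In p256_add(), t4 successively holds z2^3, then z1^2, then s2 = y2*z1^3, and finally r = s2 - s1, which is later squared into the new x coordinate. The f256_final_reduce() call before the OR-fold matters: intermediate values may be only partially reduced, so 0 could otherwise still be represented as p and the word test would misfire. One standard constant-time way to consume such a fold is (tt | -tt) >> 63, shown in the sketch below (illustrative; the listing does not show how tt is actually used):

#include <stdint.h>
#include <assert.h>

/* Constant-time "is this 256-bit value nonzero?" given tt, the OR of
   its four words. The value must be fully reduced first. */
static uint64_t nonzero_flag(uint64_t tt)
{
	/* tt | -tt has its top bit set iff tt != 0. */
	return (tt | -tt) >> 63;
}

int main(void)
{
	uint64_t r1[4] = { 0, 0, 0, 0 };
	uint64_t r2[4] = { 0, 0, 1, 0 };

	assert(nonzero_flag(r1[0] | r1[1] | r1[2] | r1[3]) == 0);
	assert(nonzero_flag(r2[0] | r2[1] | r2[2] | r2[3]) == 1);
	return 0;
}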
1011 uint64_t t1[4], t2[4], t3[4], t4[4], t5[4], t6[4], t7[4], tt; in p256_add_mixed() local
1021 * Compute u2 = x2*z1^2 (in t2) and s2 = y2*z1^3 (in t4). in p256_add_mixed()
1023 f256_montysquare(t4, P1->z); in p256_add_mixed()
1024 f256_montymul(t2, P2->x, t4); in p256_add_mixed()
1025 f256_montymul(t5, P1->z, t4); in p256_add_mixed()
1026 f256_montymul(t4, P2->y, t5); in p256_add_mixed()
1029 	* Compute h = u2 - u1 (in t2) and r = s2 - s1 (in t4). in p256_add_mixed()
1034 f256_sub(t4, t4, t3); in p256_add_mixed()
1035 f256_final_reduce(t4); in p256_add_mixed()
1036 tt = t4[0] | t4[1] | t4[2] | t4[3]; in p256_add_mixed()
1050 f256_montysquare(P1->x, t4); in p256_add_mixed()
1059 f256_montymul(P1->y, t4, t6); in p256_add_mixed()
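p256_add_mixed() is the same addition with P2 given in affine coordinates, i.e. z2 = 1: u1 = x1 and s1 = y1 come for free, the z2^2/z2^3 products disappear, and the first use of t4 is therefore z1^2 rather than z2^3. A toy-prime sketch of that specialization (helper names illustrative):

#include <stdint.h>
#include <assert.h>

#define Q 10007u	/* toy prime */

static uint32_t fmul(uint32_t a, uint32_t b) { return (uint32_t)(((uint64_t)a * b) % Q); }
static uint32_t fsub(uint32_t a, uint32_t b) { return (a + Q - b) % Q; }

int main(void)
{
	/* P2 is affine, i.e. z2 = 1; all coordinates are arbitrary. */
	uint32_t x1 = 11, y1 = 22, z1 = 33, x2 = 44, y2 = 55, z2 = 1;

	/* General path: u1 = x1*z2^2 and s1 = y1*z2^3 ... */
	uint32_t z2sq = fmul(z2, z2);
	uint32_t u1 = fmul(x1, z2sq);
	uint32_t s1 = fmul(y1, fmul(z2, z2sq));

	/* ... collapse to u1 = x1 and s1 = y1 when z2 = 1. */
	assert(u1 == x1 && s1 == y1);

	/* The z1 side is unchanged: u2 = x2*z1^2, s2 = y2*z1^3,
	   then h = u2 - u1 (t2 above) and r = s2 - s1 (t4 above). */
	uint32_t z1sq = fmul(z1, z1);
	uint32_t h = fsub(fmul(x2, z1sq), u1);
	uint32_t r = fsub(fmul(y2, fmul(z1, z1sq)), s1);
	(void)h; (void)r;
	return 0;
}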
1128 uint64_t t1[4], t2[4], t3[4], t4[4], t5[4], t6[4], t7[4], tt, zz;
1144 * Compute u2 = x2*z1^2 (in t2) and s2 = y2*z1^3 (in t4).
1146 f256_montysquare(t4, P1->z);
1147 f256_montymul(t2, P2->x, t4);
1148 f256_montymul(t5, P1->z, t4);
1149 f256_montymul(t4, P2->y, t5);
1152 	* Compute h = u2 - u1 (in t2) and r = s2 - s1 (in t4).
1156 f256_sub(t4, t4, t3);
1163 f256_final_reduce(t4);
1164 tt = t2[0] | t2[1] | t2[2] | t2[3] | t4[0] | t4[1] | t4[2] | t4[3];
1177 f256_montysquare(P1->x, t4);
1186 f256_montymul(P1->y, t4, t6);
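In this routine the OR-fold covers both t2 (h = u2 - u1) and t4 (r = s2 - s1), so tt == 0 exactly when h and r both vanish, i.e. when P1 and P2 represent the same point and the generic addition degenerates; a complete routine must then fall back to doubling, and the m = 3*(x2^2 - 1) lines that follow are exactly the doubling slope for the affine point x2. A sketch of the combined test (illustrative):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	/* h (t2) and r (t4), both fully reduced; all-zero words here
	   model the P1 == P2 case. */
	uint64_t h[4] = { 0, 0, 0, 0 };
	uint64_t r[4] = { 0, 0, 0, 0 };

	/* tt == 0 iff h == 0 AND r == 0; folding both into one word
	   lets a single constant-time test select the doubling path. */
	uint64_t tt = h[0] | h[1] | h[2] | h[3]
		| r[0] | r[1] | r[2] | r[3];
	uint64_t same_point = 1 - ((tt | -tt) >> 63);
	assert(same_point == 1);
	return 0;
}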
1213 * Compute m = 3*(x2^2 - 1) (in t4).
1215 f256_montysquare(t4, P2->x);
1216 f256_sub(t4, t4, F256_R);
1217 f256_add(t5, t4, t4);
1218 f256_add(t4, t4, t5);
1223 f256_montysquare(t5, t4);
1231 f256_montymul(t6, t6, t4);
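Subtracting F256_R rather than a literal 1 is a Montgomery-representation detail: with field elements stored as a*R mod p (R = 2^256 for four 64-bit words), the constant 1 is represented by R mod p, which is presumably what F256_R holds, and montysquare(x2) already yields the representation of x2^2. The toy example below shows the same pattern with q = 13 and R = 16 (all names illustrative):

#include <stdint.h>
#include <assert.h>

/* Toy Montgomery arithmetic: q = 13, R = 16 (gcd(R, q) = 1). */
#define Q 13u
#define R 16u

static uint32_t to_monty(uint32_t a) { return (a * R) % Q; }

static uint32_t monty_mul(uint32_t a, uint32_t b)
{
	/* Inputs are x*R and y*R; the product must be divided by R to
	   get x*y*R. Done naively here via R^-1 = 9 mod 13 (16 = 3 mod
	   13 and 3*9 = 27 = 1 mod 13); real code uses Montgomery
	   reduction (REDC) instead. */
	return (a * b % Q) * 9 % Q;
}

int main(void)
{
	uint32_t one_m = to_monty(1);	/* the analogue of F256_R */
	uint32_t x = 7, x_m = to_monty(x);

	/* "x^2 - 1" computed entirely in the Montgomery domain ... */
	uint32_t t = (monty_mul(x_m, x_m) + Q - one_m) % Q;

	/* ... equals (x^2 - 1) mod q brought into Montgomery form. */
	assert(t == to_monty((x * x + Q - 1) % Q));
	return 0;
}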