/*
 * Copyright (c) 2018 Thomas Pornin <pornin@bolet.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "inner.h"

#if BR_INT128 || BR_UMUL128

#if BR_UMUL128
#include <intrin.h>
#endif

static const unsigned char GEN[] = {
	0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};

static const unsigned char ORDER[] = {
	0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
};

static const unsigned char *
api_generator(int curve, size_t *len)
{
	(void)curve;
	*len = 32;
	return GEN;
}

static const unsigned char *
api_order(int curve, size_t *len)
{
	(void)curve;
	*len = 32;
	return ORDER;
}

static size_t
api_xoff(int curve, size_t *len)
{
	(void)curve;
	*len = 32;
	return 0;
}

/*
 * A field element is encoded as four 64-bit integers, in basis 2^64.
 * Operations return partially reduced values, which may range up to
 * 2^255+37.
 */

#define MASK63   (((uint64_t)1 << 63) - (uint64_t)1)
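
/*
 * The element value is a[0] + a[1]*2^64 + a[2]*2^128 + a[3]*2^192.
 * Since results are only partially reduced, a value may still exceed
 * p = 2^255-19; e.g. limbs { 18, 0, 0, 2^63 } encode 2^255+18, which
 * is congruent to 19+18 = 37 modulo p (using 2^255 = 19 mod p).
 */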

/*
 * Swap two field elements, conditionally on a flag.
 */
static inline void
f255_cswap(uint64_t *a, uint64_t *b, uint32_t ctl)
{
	uint64_t m, w;

	m = -(uint64_t)ctl;
	w = m & (a[0] ^ b[0]); a[0] ^= w; b[0] ^= w;
	w = m & (a[1] ^ b[1]); a[1] ^= w; b[1] ^= w;
	w = m & (a[2] ^ b[2]); a[2] ^= w; b[2] ^= w;
	w = m & (a[3] ^ b[3]); a[3] ^= w; b[3] ^= w;
}
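
/*
 * The flag ctl must be 0 or 1; the negation turns it into an all-zeros
 * or all-ones mask, so the XOR-based swap is branchless and its memory
 * access pattern does not depend on the (secret) flag. This is part of
 * what keeps the Montgomery ladder below constant-time.
 */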

/*
 * Addition in the field.
 */
static inline void
f255_add(uint64_t *d, const uint64_t *a, const uint64_t *b)
{
#if BR_INT128

	uint64_t t0, t1, t2, t3, cc;
	unsigned __int128 z;

	z = (unsigned __int128)a[0] + (unsigned __int128)b[0];
	t0 = (uint64_t)z;
	z = (unsigned __int128)a[1] + (unsigned __int128)b[1] + (z >> 64);
	t1 = (uint64_t)z;
	z = (unsigned __int128)a[2] + (unsigned __int128)b[2] + (z >> 64);
	t2 = (uint64_t)z;
	z = (unsigned __int128)a[3] + (unsigned __int128)b[3] + (z >> 64);
	t3 = (uint64_t)z & MASK63;
	cc = (uint64_t)(z >> 63);

	/*
	 * Since operands are at most 2^255+37, the sum is at most
	 * 2^256+74; thus, the carry cc is equal to 0, 1 or 2.
	 *
	 * We use: 2^255 = 19 mod p.
	 * Since we add 0, 19 or 38 to a value that fits on 255 bits,
	 * the result is at most 2^255+37.
	 */
	z = (unsigned __int128)t0 + (unsigned __int128)(19 * cc);
	d[0] = (uint64_t)z;
	z = (unsigned __int128)t1 + (z >> 64);
	d[1] = (uint64_t)z;
	z = (unsigned __int128)t2 + (z >> 64);
	d[2] = (uint64_t)z;
	d[3] = t3 + (uint64_t)(z >> 64);

#elif BR_UMUL128

	uint64_t t0, t1, t2, t3, cc;
	unsigned char k;

	k = _addcarry_u64(0, a[0], b[0], &t0);
	k = _addcarry_u64(k, a[1], b[1], &t1);
	k = _addcarry_u64(k, a[2], b[2], &t2);
	k = _addcarry_u64(k, a[3], b[3], &t3);
	cc = (k << 1) + (t3 >> 63);
	t3 &= MASK63;

	/*
	 * Since operands are at most 2^255+37, the sum is at most
	 * 2^256+74; thus, the carry cc is equal to 0, 1 or 2.
	 *
	 * We use: 2^255 = 19 mod p.
	 * Since we add 0, 19 or 38 to a value that fits on 255 bits,
	 * the result is at most 2^255+37.
	 */
	k = _addcarry_u64(0, t0, 19 * cc, &d[0]);
	k = _addcarry_u64(k, t1, 0, &d[1]);
	k = _addcarry_u64(k, t2, 0, &d[2]);
	(void)_addcarry_u64(k, t3, 0, &d[3]);

#endif
}
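
/*
 * Worked example for the folding step: with a = b = p, the raw sum is
 * 2p = 2^256-38; truncating to 255 bits leaves 2^255-38 with cc = 1,
 * and adding 19*1 yields 2^255-19 = p, i.e. a value again congruent
 * to 0, as expected.
 */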

/*
 * Subtraction.
 * Operands may be up to 2^255+37 each; the result is partially
 * reduced, with max value 2^255+37.
 */
static inline void
f255_sub(uint64_t *d, const uint64_t *a, const uint64_t *b)
{
#if BR_INT128

	/*
	 * We compute t = 2^256 - 38 + a - b, which is necessarily
	 * positive but lower than 2^256 + 2^255, since a <= 2^255 + 37
	 * and b <= 2^255 + 37. We then subtract 0, p or 2*p, depending
	 * on the two upper bits of t (bits 255 and 256).
	 */

	uint64_t t0, t1, t2, t3, t4, cc;
	unsigned __int128 z;

	z = (unsigned __int128)a[0] - (unsigned __int128)b[0] - 38;
	t0 = (uint64_t)z;
	cc = -(uint64_t)(z >> 64);
	z = (unsigned __int128)a[1] - (unsigned __int128)b[1]
		- (unsigned __int128)cc;
	t1 = (uint64_t)z;
	cc = -(uint64_t)(z >> 64);
	z = (unsigned __int128)a[2] - (unsigned __int128)b[2]
		- (unsigned __int128)cc;
	t2 = (uint64_t)z;
	cc = -(uint64_t)(z >> 64);
	z = (unsigned __int128)a[3] - (unsigned __int128)b[3]
		- (unsigned __int128)cc;
	t3 = (uint64_t)z;
	t4 = 1 + (uint64_t)(z >> 64);

	/*
	 * We have a 257-bit result. The two top bits can be 00, 01 or 10,
	 * but not 11 (value t <= 2^256 - 38 + 2^255 + 37 = 2^256 + 2^255 - 1).
	 * Therefore, we can truncate to 255 bits, and add 0, 19 or 38.
	 * This guarantees that the result is at most 2^255+37.
	 */
	cc = (38 & -t4) + (19 & -(t3 >> 63));
	t3 &= MASK63;
	z = (unsigned __int128)t0 + (unsigned __int128)cc;
	d[0] = (uint64_t)z;
	z = (unsigned __int128)t1 + (z >> 64);
	d[1] = (uint64_t)z;
	z = (unsigned __int128)t2 + (z >> 64);
	d[2] = (uint64_t)z;
	d[3] = t3 + (uint64_t)(z >> 64);

#elif BR_UMUL128

	/*
	 * We compute t = 2^256 - 38 + a - b, which is necessarily
	 * positive but lower than 2^256 + 2^255, since a <= 2^255 + 37
	 * and b <= 2^255 + 37. We then subtract 0, p or 2*p, depending
	 * on the two upper bits of t (bits 255 and 256).
	 */

	uint64_t t0, t1, t2, t3, t4;
	unsigned char k;

	k = _subborrow_u64(0, a[0], b[0], &t0);
	k = _subborrow_u64(k, a[1], b[1], &t1);
	k = _subborrow_u64(k, a[2], b[2], &t2);
	k = _subborrow_u64(k, a[3], b[3], &t3);
	(void)_subborrow_u64(k, 1, 0, &t4);

	k = _subborrow_u64(0, t0, 38, &t0);
	k = _subborrow_u64(k, t1, 0, &t1);
	k = _subborrow_u64(k, t2, 0, &t2);
	k = _subborrow_u64(k, t3, 0, &t3);
	(void)_subborrow_u64(k, t4, 0, &t4);

	/*
	 * We have a 257-bit result. The two top bits can be 00, 01 or 10,
	 * but not 11 (value t <= 2^256 - 38 + 2^255 + 37 = 2^256 + 2^255 - 1).
	 * Therefore, we can truncate to 255 bits, and add 0, 19 or 38.
	 * This guarantees that the result is at most 2^255+37.
	 */
	t4 = (38 & -t4) + (19 & -(t3 >> 63));
	t3 &= MASK63;
	k = _addcarry_u64(0, t0, t4, &d[0]);
	k = _addcarry_u64(k, t1, 0, &d[1]);
	k = _addcarry_u64(k, t2, 0, &d[2]);
	(void)_addcarry_u64(k, t3, 0, &d[3]);

#endif
}
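
/*
 * Worked example: a = 0, b = 1 gives t = 2^256 - 39, whose top two
 * bits are 01; truncating to 255 bits leaves 2^255 - 39, and adding
 * 19 yields 2^255 - 20 = p - 1, the expected encoding of -1 mod p.
 */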

/*
 * Multiplication.
 */
static inline void
f255_mul(uint64_t *d, uint64_t *a, uint64_t *b)
{
#if BR_INT128

	unsigned __int128 z;
	uint64_t t0, t1, t2, t3, t4, t5, t6, t7, th;

	/*
	 * Compute the product a*b over plain integers.
	 */
	z = (unsigned __int128)a[0] * (unsigned __int128)b[0];
	t0 = (uint64_t)z;
	z = (unsigned __int128)a[0] * (unsigned __int128)b[1] + (z >> 64);
	t1 = (uint64_t)z;
	z = (unsigned __int128)a[0] * (unsigned __int128)b[2] + (z >> 64);
	t2 = (uint64_t)z;
	z = (unsigned __int128)a[0] * (unsigned __int128)b[3] + (z >> 64);
	t3 = (uint64_t)z;
	t4 = (uint64_t)(z >> 64);

	z = (unsigned __int128)a[1] * (unsigned __int128)b[0]
		+ (unsigned __int128)t1;
	t1 = (uint64_t)z;
	z = (unsigned __int128)a[1] * (unsigned __int128)b[1]
		+ (unsigned __int128)t2 + (z >> 64);
	t2 = (uint64_t)z;
	z = (unsigned __int128)a[1] * (unsigned __int128)b[2]
		+ (unsigned __int128)t3 + (z >> 64);
	t3 = (uint64_t)z;
	z = (unsigned __int128)a[1] * (unsigned __int128)b[3]
		+ (unsigned __int128)t4 + (z >> 64);
	t4 = (uint64_t)z;
	t5 = (uint64_t)(z >> 64);

	z = (unsigned __int128)a[2] * (unsigned __int128)b[0]
		+ (unsigned __int128)t2;
	t2 = (uint64_t)z;
	z = (unsigned __int128)a[2] * (unsigned __int128)b[1]
		+ (unsigned __int128)t3 + (z >> 64);
	t3 = (uint64_t)z;
	z = (unsigned __int128)a[2] * (unsigned __int128)b[2]
		+ (unsigned __int128)t4 + (z >> 64);
	t4 = (uint64_t)z;
	z = (unsigned __int128)a[2] * (unsigned __int128)b[3]
		+ (unsigned __int128)t5 + (z >> 64);
	t5 = (uint64_t)z;
	t6 = (uint64_t)(z >> 64);

	z = (unsigned __int128)a[3] * (unsigned __int128)b[0]
		+ (unsigned __int128)t3;
	t3 = (uint64_t)z;
	z = (unsigned __int128)a[3] * (unsigned __int128)b[1]
		+ (unsigned __int128)t4 + (z >> 64);
	t4 = (uint64_t)z;
	z = (unsigned __int128)a[3] * (unsigned __int128)b[2]
		+ (unsigned __int128)t5 + (z >> 64);
	t5 = (uint64_t)z;
	z = (unsigned __int128)a[3] * (unsigned __int128)b[3]
		+ (unsigned __int128)t6 + (z >> 64);
	t6 = (uint64_t)z;
	t7 = (uint64_t)(z >> 64);

	/*
	 * Modulo p, we have:
	 *
	 *   2^255 = 19
	 *   2^510 = 19*19 = 361
	 *
	 * We split the intermediate t into three parts, in basis
	 * 2^255. The low one will be in t0..t3; the middle one in t4..t7.
	 * The upper one can only be a single bit (th), since the
	 * multiplication operands are at most 2^255+37 each.
	 */
	th = t7 >> 62;
	t7 = ((t7 << 1) | (t6 >> 63)) & MASK63;
	t6 = (t6 << 1) | (t5 >> 63);
	t5 = (t5 << 1) | (t4 >> 63);
	t4 = (t4 << 1) | (t3 >> 63);
	t3 &= MASK63;

	/*
	 * Multiply the middle part (t4..t7) by 19. We truncate it to
	 * 255 bits; the extra bits will go along with th.
	 */
	z = (unsigned __int128)t4 * 19;
	t4 = (uint64_t)z;
	z = (unsigned __int128)t5 * 19 + (z >> 64);
	t5 = (uint64_t)z;
	z = (unsigned __int128)t6 * 19 + (z >> 64);
	t6 = (uint64_t)z;
	z = (unsigned __int128)t7 * 19 + (z >> 64);
	t7 = (uint64_t)z & MASK63;

	th = (361 & -th) + (19 * (uint64_t)(z >> 63));

	/*
	 * Add elements together.
	 * At this point:
	 *   t0..t3 fits on 255 bits.
	 *   t4..t7 fits on 255 bits.
	 *   th <= 361 + 342 = 703.
	 */
	z = (unsigned __int128)t0 + (unsigned __int128)t4
		+ (unsigned __int128)th;
	t0 = (uint64_t)z;
	z = (unsigned __int128)t1 + (unsigned __int128)t5 + (z >> 64);
	t1 = (uint64_t)z;
	z = (unsigned __int128)t2 + (unsigned __int128)t6 + (z >> 64);
	t2 = (uint64_t)z;
	z = (unsigned __int128)t3 + (unsigned __int128)t7 + (z >> 64);
	t3 = (uint64_t)z & MASK63;
	th = (uint64_t)(z >> 63);

	/*
	 * Since the sum is at most 2^256 + 703, the two upper bits, in th,
	 * can only have value 0, 1 or 2. We just add th*19, which
	 * guarantees a result of at most 2^255+37.
	 */
	z = (unsigned __int128)t0 + (19 * th);
	d[0] = (uint64_t)z;
	z = (unsigned __int128)t1 + (z >> 64);
	d[1] = (uint64_t)z;
	z = (unsigned __int128)t2 + (z >> 64);
	d[2] = (uint64_t)z;
	d[3] = t3 + (uint64_t)(z >> 64);

#elif BR_UMUL128

	uint64_t t0, t1, t2, t3, t4, t5, t6, t7, th;
	uint64_t h0, h1, h2, h3;
	unsigned char k;

	/*
	 * Compute the product a*b over plain integers.
	 */
	t0 = _umul128(a[0], b[0], &h0);
	t1 = _umul128(a[0], b[1], &h1);
	k = _addcarry_u64(0, t1, h0, &t1);
	t2 = _umul128(a[0], b[2], &h2);
	k = _addcarry_u64(k, t2, h1, &t2);
	t3 = _umul128(a[0], b[3], &h3);
	k = _addcarry_u64(k, t3, h2, &t3);
	(void)_addcarry_u64(k, h3, 0, &t4);

	k = _addcarry_u64(0, _umul128(a[1], b[0], &h0), t1, &t1);
	k = _addcarry_u64(k, _umul128(a[1], b[1], &h1), t2, &t2);
	k = _addcarry_u64(k, _umul128(a[1], b[2], &h2), t3, &t3);
	k = _addcarry_u64(k, _umul128(a[1], b[3], &h3), t4, &t4);
	t5 = k;
	k = _addcarry_u64(0, t2, h0, &t2);
	k = _addcarry_u64(k, t3, h1, &t3);
	k = _addcarry_u64(k, t4, h2, &t4);
	(void)_addcarry_u64(k, t5, h3, &t5);

	k = _addcarry_u64(0, _umul128(a[2], b[0], &h0), t2, &t2);
	k = _addcarry_u64(k, _umul128(a[2], b[1], &h1), t3, &t3);
	k = _addcarry_u64(k, _umul128(a[2], b[2], &h2), t4, &t4);
	k = _addcarry_u64(k, _umul128(a[2], b[3], &h3), t5, &t5);
	t6 = k;
	k = _addcarry_u64(0, t3, h0, &t3);
	k = _addcarry_u64(k, t4, h1, &t4);
	k = _addcarry_u64(k, t5, h2, &t5);
	(void)_addcarry_u64(k, t6, h3, &t6);

	k = _addcarry_u64(0, _umul128(a[3], b[0], &h0), t3, &t3);
	k = _addcarry_u64(k, _umul128(a[3], b[1], &h1), t4, &t4);
	k = _addcarry_u64(k, _umul128(a[3], b[2], &h2), t5, &t5);
	k = _addcarry_u64(k, _umul128(a[3], b[3], &h3), t6, &t6);
	t7 = k;
	k = _addcarry_u64(0, t4, h0, &t4);
	k = _addcarry_u64(k, t5, h1, &t5);
	k = _addcarry_u64(k, t6, h2, &t6);
	(void)_addcarry_u64(k, t7, h3, &t7);

	/*
	 * Modulo p, we have:
	 *
	 *   2^255 = 19
	 *   2^510 = 19*19 = 361
	 *
	 * We split the intermediate t into three parts, in basis
	 * 2^255. The low one will be in t0..t3; the middle one in t4..t7.
	 * The upper one can only be a single bit (th), since the
	 * multiplication operands are at most 2^255+37 each.
	 */
	th = t7 >> 62;
	t7 = ((t7 << 1) | (t6 >> 63)) & MASK63;
	t6 = (t6 << 1) | (t5 >> 63);
	t5 = (t5 << 1) | (t4 >> 63);
	t4 = (t4 << 1) | (t3 >> 63);
	t3 &= MASK63;

	/*
	 * Multiply the middle part (t4..t7) by 19. We truncate it to
	 * 255 bits; the extra bits will go along with th.
	 */
	t4 = _umul128(t4, 19, &h0);
	t5 = _umul128(t5, 19, &h1);
	t6 = _umul128(t6, 19, &h2);
	t7 = _umul128(t7, 19, &h3);
	k = _addcarry_u64(0, t5, h0, &t5);
	k = _addcarry_u64(k, t6, h1, &t6);
	k = _addcarry_u64(k, t7, h2, &t7);
	(void)_addcarry_u64(k, h3, 0, &h3);
	th = (361 & -th) + (19 * ((h3 << 1) + (t7 >> 63)));
	t7 &= MASK63;

	/*
	 * Add elements together.
	 * At this point:
	 *   t0..t3 fits on 255 bits.
	 *   t4..t7 fits on 255 bits.
	 *   th <= 361 + 342 = 703.
	 */
	k = _addcarry_u64(0, t0, t4, &t0);
	k = _addcarry_u64(k, t1, t5, &t1);
	k = _addcarry_u64(k, t2, t6, &t2);
	k = _addcarry_u64(k, t3, t7, &t3);
	t4 = k;
	k = _addcarry_u64(0, t0, th, &t0);
	k = _addcarry_u64(k, t1, 0, &t1);
	k = _addcarry_u64(k, t2, 0, &t2);
	k = _addcarry_u64(k, t3, 0, &t3);
	(void)_addcarry_u64(k, t4, 0, &t4);

	th = (t4 << 1) + (t3 >> 63);
	t3 &= MASK63;

	/*
	 * Since the sum is at most 2^256 + 703, the two upper bits, in th,
	 * can only have value 0, 1 or 2. We just add th*19, which
	 * guarantees a result of at most 2^255+37.
	 */
	k = _addcarry_u64(0, t0, 19 * th, &d[0]);
	k = _addcarry_u64(k, t1, 0, &d[1]);
	k = _addcarry_u64(k, t2, 0, &d[2]);
	(void)_addcarry_u64(k, t3, 0, &d[3]);

#endif
}
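
/*
 * Note that there is no dedicated squaring routine: callers square by
 * invoking f255_mul() with aliased arguments (e.g. f255_mul(aa, a, a)).
 * A specialized square could skip a few of the 16 64x64->128
 * multiplications, at the cost of extra code.
 */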

/*
 * Multiplication by A24 = 121665.
 */
static inline void
f255_mul_a24(uint64_t *d, const uint64_t *a)
{
#if BR_INT128

	uint64_t t0, t1, t2, t3;
	unsigned __int128 z;

	z = (unsigned __int128)a[0] * 121665;
	t0 = (uint64_t)z;
	z = (unsigned __int128)a[1] * 121665 + (z >> 64);
	t1 = (uint64_t)z;
	z = (unsigned __int128)a[2] * 121665 + (z >> 64);
	t2 = (uint64_t)z;
	z = (unsigned __int128)a[3] * 121665 + (z >> 64);
	t3 = (uint64_t)z & MASK63;

	z = (unsigned __int128)t0 + (19 * (uint64_t)(z >> 63));
	t0 = (uint64_t)z;
	z = (unsigned __int128)t1 + (z >> 64);
	t1 = (uint64_t)z;
	z = (unsigned __int128)t2 + (z >> 64);
	t2 = (uint64_t)z;
	t3 = t3 + (uint64_t)(z >> 64);

	z = (unsigned __int128)t0 + (19 & -(t3 >> 63));
	d[0] = (uint64_t)z;
	z = (unsigned __int128)t1 + (z >> 64);
	d[1] = (uint64_t)z;
	z = (unsigned __int128)t2 + (z >> 64);
	d[2] = (uint64_t)z;
	d[3] = (t3 & MASK63) + (uint64_t)(z >> 64);

#elif BR_UMUL128

	uint64_t t0, t1, t2, t3, t4, h0, h1, h2, h3;
	unsigned char k;

	t0 = _umul128(a[0], 121665, &h0);
	t1 = _umul128(a[1], 121665, &h1);
	k = _addcarry_u64(0, t1, h0, &t1);
	t2 = _umul128(a[2], 121665, &h2);
	k = _addcarry_u64(k, t2, h1, &t2);
	t3 = _umul128(a[3], 121665, &h3);
	k = _addcarry_u64(k, t3, h2, &t3);
	(void)_addcarry_u64(k, h3, 0, &t4);

	t4 = (t4 << 1) + (t3 >> 63);
	t3 &= MASK63;
	k = _addcarry_u64(0, t0, 19 * t4, &t0);
	k = _addcarry_u64(k, t1, 0, &t1);
	k = _addcarry_u64(k, t2, 0, &t2);
	(void)_addcarry_u64(k, t3, 0, &t3);

	t4 = 19 & -(t3 >> 63);
	t3 &= MASK63;
	k = _addcarry_u64(0, t0, t4, &d[0]);
	k = _addcarry_u64(k, t1, 0, &d[1]);
	k = _addcarry_u64(k, t2, 0, &d[2]);
	(void)_addcarry_u64(k, t3, 0, &d[3]);

#endif
}
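
/*
 * The constant 121665 is (A - 2) / 4 for the Curve25519 parameter
 * A = 486662; RFC 7748 names it a24 and uses it in the ladder step
 * z_2 = E * (AA + a24 * E).
 */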

/*
 * Finalize reduction.
 */
static inline void
f255_final_reduce(uint64_t *a)
{
#if BR_INT128

	uint64_t t0, t1, t2, t3, m;
	unsigned __int128 z;

	/*
	 * We add 19. If the result (in t) is below 2^255, then a[]
	 * is already less than 2^255-19, thus already reduced.
	 * Otherwise, we subtract 2^255 from t[], in which case we
	 * have t = a - (2^255-19), and that's our result.
	 */
	z = (unsigned __int128)a[0] + 19;
	t0 = (uint64_t)z;
	z = (unsigned __int128)a[1] + (z >> 64);
	t1 = (uint64_t)z;
	z = (unsigned __int128)a[2] + (z >> 64);
	t2 = (uint64_t)z;
	t3 = a[3] + (uint64_t)(z >> 64);

	m = -(t3 >> 63);
	t3 &= MASK63;
	a[0] ^= m & (a[0] ^ t0);
	a[1] ^= m & (a[1] ^ t1);
	a[2] ^= m & (a[2] ^ t2);
	a[3] ^= m & (a[3] ^ t3);

#elif BR_UMUL128

	uint64_t t0, t1, t2, t3, m;
	unsigned char k;

	/*
	 * We add 19. If the result (in t) is below 2^255, then a[]
	 * is already less than 2^255-19, thus already reduced.
	 * Otherwise, we subtract 2^255 from t[], in which case we
	 * have t = a - (2^255-19), and that's our result.
	 */
	k = _addcarry_u64(0, a[0], 19, &t0);
	k = _addcarry_u64(k, a[1], 0, &t1);
	k = _addcarry_u64(k, a[2], 0, &t2);
	(void)_addcarry_u64(k, a[3], 0, &t3);

	m = -(t3 >> 63);
	t3 &= MASK63;
	a[0] ^= m & (a[0] ^ t0);
	a[1] ^= m & (a[1] ^ t1);
	a[2] ^= m & (a[2] ^ t2);
	a[3] ^= m & (a[3] ^ t3);

#endif
}
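
/*
 * Worked example: for a = p = 2^255-19, the addition yields t = 2^255,
 * whose top bit is set; the masked selection then keeps t - 2^255 = 0,
 * the fully reduced form of p. For a = p-1, it yields t = 2^255-1, top
 * bit clear, and a is kept unchanged.
 */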

static uint32_t
api_mul(unsigned char *G, size_t Glen,
	const unsigned char *kb, size_t kblen, int curve)
{
	unsigned char k[32];
	uint64_t x1[4], x2[4], z2[4], x3[4], z3[4];
	uint32_t swap;
	int i;

	(void)curve;

	/*
	 * Points are encoded over exactly 32 bytes. Multipliers must fit
	 * in 32 bytes as well.
	 */
	if (Glen != 32 || kblen > 32) {
		return 0;
	}

	/*
	 * RFC 7748 mandates that the high bit of the last point byte must
	 * be ignored/cleared.
	 */
	x1[0] = br_dec64le(&G[ 0]);
	x1[1] = br_dec64le(&G[ 8]);
	x1[2] = br_dec64le(&G[16]);
	x1[3] = br_dec64le(&G[24]) & MASK63;

	/*
	 * We can use memset() to clear values, because exact-width types
	 * like uint64_t are guaranteed to have no padding bits or
	 * trap representations.
	 */
	memset(x2, 0, sizeof x2);
	x2[0] = 1;
	memset(z2, 0, sizeof z2);
	memcpy(x3, x1, sizeof x1);
	memcpy(z3, x2, sizeof x2);

	/*
	 * The multiplier is provided in big-endian notation, and
	 * possibly shorter than 32 bytes.
	 */
	memset(k, 0, (sizeof k) - kblen);
	memcpy(k + (sizeof k) - kblen, kb, kblen);
	k[31] &= 0xF8;
	k[0] &= 0x7F;
	k[0] |= 0x40;
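
	/*
	 * The three masks above implement RFC 7748 scalar "clamping":
	 * the three low bits are cleared (so the scalar is a multiple of
	 * the cofactor 8), bit 255 is cleared and bit 254 is set. Unlike
	 * in RFC 7748, the scalar here is big-endian, so the low bits
	 * live in k[31] and the high bits in k[0].
	 */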

	swap = 0;

	for (i = 254; i >= 0; i --) {
		uint64_t a[4], aa[4], b[4], bb[4], e[4];
		uint64_t c[4], d[4], da[4], cb[4];
		uint32_t kt;

		kt = (k[31 - (i >> 3)] >> (i & 7)) & 1;
		swap ^= kt;
		f255_cswap(x2, x3, swap);
		f255_cswap(z2, z3, swap);
		swap = kt;

		/* A = x_2 + z_2 */
		f255_add(a, x2, z2);

		/* AA = A^2 */
		f255_mul(aa, a, a);

		/* B = x_2 - z_2 */
		f255_sub(b, x2, z2);

		/* BB = B^2 */
		f255_mul(bb, b, b);

		/* E = AA - BB */
		f255_sub(e, aa, bb);

		/* C = x_3 + z_3 */
		f255_add(c, x3, z3);

		/* D = x_3 - z_3 */
		f255_sub(d, x3, z3);

		/* DA = D * A */
		f255_mul(da, d, a);

		/* CB = C * B */
		f255_mul(cb, c, b);

		/* x_3 = (DA + CB)^2 */
		f255_add(x3, da, cb);
		f255_mul(x3, x3, x3);

		/* z_3 = x_1 * (DA - CB)^2 */
		f255_sub(z3, da, cb);
		f255_mul(z3, z3, z3);
		f255_mul(z3, x1, z3);

		/* x_2 = AA * BB */
		f255_mul(x2, aa, bb);

		/* z_2 = E * (AA + a24 * E) */
		f255_mul_a24(z2, e);
		f255_add(z2, aa, z2);
		f255_mul(z2, e, z2);
	}
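
	/*
	 * This is the Montgomery ladder from RFC 7748 section 5. The
	 * loop maintains (x2:z2) = n*P and (x3:z3) = (n+1)*P, where n is
	 * the integer formed by the scalar bits processed so far; each
	 * iteration performs one combined double-and-add. One swap may
	 * still be pending when the loop exits, hence the final
	 * f255_cswap() calls below.
	 */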

	f255_cswap(x2, x3, swap);
	f255_cswap(z2, z3, swap);

	/*
	 * Compute 1/z2 = z2^(p-2). Since p = 2^255-19, we can mutualize
	 * most non-squarings. We use x1 and x3, now useless, as temporaries.
	 */
	memcpy(x1, z2, sizeof z2);
	for (i = 0; i < 15; i ++) {
		f255_mul(x1, x1, x1);
		f255_mul(x1, x1, z2);
	}
	memcpy(x3, x1, sizeof x1);
	for (i = 0; i < 14; i ++) {
		int j;

		for (j = 0; j < 16; j ++) {
			f255_mul(x3, x3, x3);
		}
		f255_mul(x3, x3, x1);
	}
	for (i = 14; i >= 0; i --) {
		f255_mul(x3, x3, x3);
		if ((0xFFEB >> i) & 1) {
			f255_mul(x3, z2, x3);
		}
	}
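
	/*
	 * The exponent is p-2 = 2^255-21, i.e. 240 one bits followed by
	 * the 15 bits 111111111101011 (0x7FEB). The first loop builds
	 * z2^(2^16-1); the second extends it to z2^(2^240-1); the third
	 * squares 15 times and multiplies by z2 exactly where bits 14
	 * to 0 of the public constant 0xFFEB are one.
	 */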

	/*
	 * Compute x2/z2. We have 1/z2 in x3.
	 */
	f255_mul(x2, x2, x3);
	f255_final_reduce(x2);

	/*
	 * Encode the final x2 value in little-endian.
	 */
	br_enc64le(G, x2[0]);
	br_enc64le(G + 8, x2[1]);
	br_enc64le(G + 16, x2[2]);
	br_enc64le(G + 24, x2[3]);
	return 1;
}
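
/*
 * Usage sketch (hypothetical caller, not part of this file): a single
 * X25519 point multiplication through the br_ec_impl interface. The
 * 32-byte buffers 'point' and 'scalar_be' are assumed to be filled by
 * the caller; note that this API takes the multiplier in big-endian
 * order, unlike the little-endian convention of RFC 7748.
 *
 *	const br_ec_impl *ec = br_ec_c25519_m64_get();
 *	if (ec != NULL && ec->mul(point, sizeof point,
 *		scalar_be, sizeof scalar_be, BR_EC_curve25519))
 *	{
 *		point now contains the resulting X coordinate
 *	}
 */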

static size_t
api_mulgen(unsigned char *R,
	const unsigned char *x, size_t xlen, int curve)
{
	const unsigned char *G;
	size_t Glen;

	G = api_generator(curve, &Glen);
	memcpy(R, G, Glen);
	api_mul(R, Glen, x, xlen, curve);
	return Glen;
}

static uint32_t
api_muladd(unsigned char *A, const unsigned char *B, size_t len,
	const unsigned char *x, size_t xlen,
	const unsigned char *y, size_t ylen, int curve)
{
	/*
	 * We don't implement this method, since it is used for ECDSA
	 * only, and there is no ECDSA over Curve25519 (which instead
	 * uses EdDSA).
	 */
	(void)A;
	(void)B;
	(void)len;
	(void)x;
	(void)xlen;
	(void)y;
	(void)ylen;
	(void)curve;
	return 0;
}
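
/*
 * The first field below is the supported-curves bit mask: 0x20000000
 * is (uint32_t)1 << BR_EC_curve25519 (curve identifier 29), i.e. this
 * implementation handles Curve25519 only.
 */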

/* see bearssl_ec.h */
const br_ec_impl br_ec_c25519_m64 = {
	(uint32_t)0x20000000,
	&api_generator,
	&api_order,
	&api_xoff,
	&api_mul,
	&api_mulgen,
	&api_muladd
};

/* see bearssl_ec.h */
const br_ec_impl *
br_ec_c25519_m64_get(void)
{
	return &br_ec_c25519_m64;
}

#else

/* see bearssl_ec.h */
const br_ec_impl *
br_ec_c25519_m64_get(void)
{
	return 0;
}

#endif