1 /*
2  * Copyright (c) 2018 Thomas Pornin <pornin@bolet.org>
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining
5  * a copy of this software and associated documentation files (the
6  * "Software"), to deal in the Software without restriction, including
7  * without limitation the rights to use, copy, modify, merge, publish,
8  * distribute, sublicense, and/or sell copies of the Software, and to
9  * permit persons to whom the Software is furnished to do so, subject to
10  * the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be
13  * included in all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 
25 #include "inner.h"
26 
27 #if BR_INT128 || BR_UMUL128
28 
29 #if BR_UMUL128
30 #include <intrin.h>
31 #endif
32 
33 static const unsigned char P256_G[] = {
34 	0x04, 0x6B, 0x17, 0xD1, 0xF2, 0xE1, 0x2C, 0x42, 0x47, 0xF8,
35 	0xBC, 0xE6, 0xE5, 0x63, 0xA4, 0x40, 0xF2, 0x77, 0x03, 0x7D,
36 	0x81, 0x2D, 0xEB, 0x33, 0xA0, 0xF4, 0xA1, 0x39, 0x45, 0xD8,
37 	0x98, 0xC2, 0x96, 0x4F, 0xE3, 0x42, 0xE2, 0xFE, 0x1A, 0x7F,
38 	0x9B, 0x8E, 0xE7, 0xEB, 0x4A, 0x7C, 0x0F, 0x9E, 0x16, 0x2B,
39 	0xCE, 0x33, 0x57, 0x6B, 0x31, 0x5E, 0xCE, 0xCB, 0xB6, 0x40,
40 	0x68, 0x37, 0xBF, 0x51, 0xF5
41 };
42 
43 static const unsigned char P256_N[] = {
44 	0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF,
45 	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBC, 0xE6, 0xFA, 0xAD,
46 	0xA7, 0x17, 0x9E, 0x84, 0xF3, 0xB9, 0xCA, 0xC2, 0xFC, 0x63,
47 	0x25, 0x51
48 };
49 
50 static const unsigned char *
51 api_generator(int curve, size_t *len)
52 {
53 	(void)curve;
54 	*len = sizeof P256_G;
55 	return P256_G;
56 }
57 
58 static const unsigned char *
59 api_order(int curve, size_t *len)
60 {
61 	(void)curve;
62 	*len = sizeof P256_N;
63 	return P256_N;
64 }
65 
66 static size_t
67 api_xoff(int curve, size_t *len)
68 {
69 	(void)curve;
70 	*len = 32;
71 	return 1;
72 }
73 
74 /*
75  * A field element is encoded as five 64-bit integers, in base 2^52.
76  * Limbs may occasionally exceed 2^52.
77  *
78  * A _partially reduced_ value is such that the following hold:
79  *   - top limb is less than 2^48 + 2^30
80  *   - the other limbs fit on 53 bits each
81  * In particular, such a value is less than twice the modulus p.
82  */
83 
84 #define BIT(n)   ((uint64_t)1 << (n))
85 #define MASK48   (BIT(48) - BIT(0))
86 #define MASK52   (BIT(52) - BIT(0))
87 
88 /* R = 2^260 mod p */
89 static const uint64_t F256_R[] = {
90 	0x0000000000010, 0xF000000000000, 0xFFFFFFFFFFFFF,
91 	0xFFEFFFFFFFFFF, 0x00000000FFFFF
92 };
93 
94 /* Curve equation is y^2 = x^3 - 3*x + B. This constant is B*R mod p
95    (Montgomery representation of B). */
96 static const uint64_t P256_B_MONTY[] = {
97 	0xDF6229C4BDDFD, 0xCA8843090D89C, 0x212ED6ACF005C,
98 	0x83415A220ABF7, 0x0C30061DD4874
99 };
100 
101 /*
102  * Addition in the field. Carry propagation is not performed.
103  * On input, limbs may be up to 63 bits each; on output, they will
104  * be up to one bit more than on input.
105  */
106 static inline void
107 f256_add(uint64_t *d, const uint64_t *a, const uint64_t *b)
108 {
109 	d[0] = a[0] + b[0];
110 	d[1] = a[1] + b[1];
111 	d[2] = a[2] + b[2];
112 	d[3] = a[3] + b[3];
113 	d[4] = a[4] + b[4];
114 }
115 
116 /*
117  * Partially reduce the provided value.
118  * Input: limbs can go up to 61 bits each.
119  * Output: partially reduced.
120  */
121 static inline void
122 f256_partial_reduce(uint64_t *a)
123 {
124 	uint64_t w, cc, s;
125 
126 	/*
127 	 * Propagate carries.
128 	 */
129 	w = a[0];
130 	a[0] = w & MASK52;
131 	cc = w >> 52;
132 	w = a[1] + cc;
133 	a[1] = w & MASK52;
134 	cc = w >> 52;
135 	w = a[2] + cc;
136 	a[2] = w & MASK52;
137 	cc = w >> 52;
138 	w = a[3] + cc;
139 	a[3] = w & MASK52;
140 	cc = w >> 52;
141 	a[4] += cc;
142 
143 	s = a[4] >> 48;             /* s < 2^14 */
144 	a[0] += s;                  /* a[0] < 2^52 + 2^14 */
145 	w = a[1] - (s << 44);
146 	a[1] = w & MASK52;          /* a[1] < 2^52 */
147 	cc = -(w >> 52) & 0xFFF;    /* cc < 16 */
148 	w = a[2] - cc;
149 	a[2] = w & MASK52;          /* a[2] < 2^52 */
150 	cc = w >> 63;               /* cc = 0 or 1 */
151 	w = a[3] - cc - (s << 36);
152 	a[3] = w & MASK52;          /* a[3] < 2^52 */
153 	cc = w >> 63;               /* cc = 0 or 1 */
154 	w = a[4] & MASK48;
155 	a[4] = w + (s << 16) - cc;  /* a[4] < 2^48 + 2^30 */
156 }
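
/*
 * Worked illustration of the folding above: since
 *   2^256 = 2^224 - 2^192 - 2^96 + 1  mod p
 * the excess s = a[4] >> 48 (the part of the value at or above bit 256)
 * is folded back into the limbs, whose weights are 2^(52*i):
 *   +s          into a[0]   (bit 0)
 *   -(s << 44)  into a[1]   (bit 96  = 52 + 44)
 *   -(s << 36)  into a[3]   (bit 192 = 156 + 36)
 *   +(s << 16)  into a[4]   (bit 224 = 208 + 16)
 * which is exactly the sequence of operations performed above.
 */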
157 
158 /*
159  * Subtraction in the field.
160  * Input: limbs must fit on 60 bits each; in particular, the complete
161  * integer will be less than 2^268 + 2^217.
162  * Output: partially reduced.
163  */
164 static inline void
165 f256_sub(uint64_t *d, const uint64_t *a, const uint64_t *b)
166 {
167 	uint64_t t[5], w, s, cc;
168 
169 	/*
170 	 * We compute d = 2^13*p + a - b; this ensures a positive
171 	 * intermediate value.
172 	 *
173 	 * Each individual addition/subtraction may yield a positive or
174 	 * negative result; thus, we need to handle a signed carry, with
175 	 * sign extension. We prefer not to use signed types (int64_t)
176 	 * because conversion from unsigned to signed is cumbersome (a
177 	 * direct cast with the top bit set is undefined behavior; instead,
178 	 * we have to use pointer aliasing, using the guaranteed properties
179 	 * of exact-width types, but this requires the compiler to optimize
180 	 * away the writes and reads from RAM), and right-shifting a
181 	 * signed negative value is implementation-defined. Therefore,
182 	 * we use a custom sign extension.
183 	 */
184 
185 	w = a[0] - b[0] - BIT(13);
186 	t[0] = w & MASK52;
187 	cc = w >> 52;
188 	cc |= -(cc & BIT(11));
189 	w = a[1] - b[1] + cc;
190 	t[1] = w & MASK52;
191 	cc = w >> 52;
192 	cc |= -(cc & BIT(11));
193 	w = a[2] - b[2] + cc;
194 	t[2] = (w & MASK52) + BIT(5);
195 	cc = w >> 52;
196 	cc |= -(cc & BIT(11));
197 	w = a[3] - b[3] + cc;
198 	t[3] = (w & MASK52) + BIT(49);
199 	cc = w >> 52;
200 	cc |= -(cc & BIT(11));
201 	t[4] = (BIT(61) - BIT(29)) + a[4] - b[4] + cc;
202 
203 	/*
204 	 * Perform partial reduction. Rule is:
205 	 *  2^256 = 2^224 - 2^192 - 2^96 + 1 mod p
206 	 *
207 	 * At that point:
208 	 *    0 <= t[0] <= 2^52 - 1
209 	 *    0 <= t[1] <= 2^52 - 1
210 	 *    2^5 <= t[2] <= 2^52 + 2^5 - 1
211 	 *    2^49 <= t[3] <= 2^52 + 2^49 - 1
212 	 *    2^59 < t[4] <= 2^61 + 2^60 - 2^29
213 	 *
214 	 * Thus, the value 's' (t[4] / 2^48) will be necessarily
215 	 * greater than 2048, and less than 12288.
216 	 */
217 	s = t[4] >> 48;
218 
219 	d[0] = t[0] + s;             /* d[0] <= 2^52 + 12287 */
220 	w = t[1] - (s << 44);
221 	d[1] = w & MASK52;           /* d[1] <= 2^52 - 1 */
222 	cc = -(w >> 52) & 0xFFF;     /* cc <= 48 */
223 	w = t[2] - cc;
224 	cc = w >> 63;                /* cc = 0 or 1 */
225 	d[2] = w + (cc << 52);       /* d[2] <= 2^52 + 31 */
226 	w = t[3] - cc - (s << 36);
227 	cc = w >> 63;                /* cc = 0 or 1 */
228 	d[3] = w + (cc << 52);       /* t[3] <= 2^52 + 2^49 - 1 */
229 	d[4] = (t[4] & MASK48) + (s << 16) - cc;  /* d[4] < 2^48 + 2^30 */
230 
231 	/*
232 	 * If s = 0, then none of the limbs is modified, and there cannot
233 	 * be an overflow; if s != 0, then (s << 16) > cc, and there is
234 	 * no overflow either.
235 	 */
236 }
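
#if 0
/*
 * Illustrative sketch, not part of the library (hence disabled): the
 * custom sign extension used in f256_sub() above. The carry extracted
 * from bits 52..63 of w is a 12-bit two's complement value; ORing in
 * -(cc & BIT(11)) replicates its sign bit over bits 12..63 without
 * using any signed type.
 */
static uint64_t
sign_extend_carry_demo(uint64_t w)
{
	uint64_t cc;

	cc = w >> 52;             /* 12-bit value, bit 11 is the sign bit */
	cc |= -(cc & BIT(11));    /* replicate bit 11 over bits 12..63 */
	return cc;
}
#endif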
237 
238 /*
239  * Montgomery multiplication in the field.
240  * Input: limbs must fit on 56 bits each.
241  * Output: partially reduced.
242  */
243 static void
244 f256_montymul(uint64_t *d, const uint64_t *a, const uint64_t *b)
245 {
246 #if BR_INT128
247 
248 	int i;
249 	uint64_t t[5];
250 
251 	t[0] = 0;
252 	t[1] = 0;
253 	t[2] = 0;
254 	t[3] = 0;
255 	t[4] = 0;
256 	for (i = 0; i < 5; i ++) {
257 		uint64_t x, f, cc, w, s;
258 		unsigned __int128 z;
259 
260 		/*
261 		 * Since limbs of a[] and b[] fit on 56 bits each,
262 		 * each individual product fits on 112 bits. Also,
263 		 * the factor f fits on 52 bits, so f<<48 fits on
264 		 * 112 bits too. This guarantees that carries (cc)
265 		 * will fit on 62 bits, thus no overflow.
266 		 *
267 		 * The operations below compute:
268 		 *   t <- (t + x*b + f*p) / 2^52
269 		 */
270 		x = a[i];
271 		z = (unsigned __int128)b[0] * (unsigned __int128)x
272 			+ (unsigned __int128)t[0];
273 		f = (uint64_t)z & MASK52;
274 		cc = (uint64_t)(z >> 52);
275 		z = (unsigned __int128)b[1] * (unsigned __int128)x
276 			+ (unsigned __int128)t[1] + cc
277 			+ ((unsigned __int128)f << 44);
278 		t[0] = (uint64_t)z & MASK52;
279 		cc = (uint64_t)(z >> 52);
280 		z = (unsigned __int128)b[2] * (unsigned __int128)x
281 			+ (unsigned __int128)t[2] + cc;
282 		t[1] = (uint64_t)z & MASK52;
283 		cc = (uint64_t)(z >> 52);
284 		z = (unsigned __int128)b[3] * (unsigned __int128)x
285 			+ (unsigned __int128)t[3] + cc
286 			+ ((unsigned __int128)f << 36);
287 		t[2] = (uint64_t)z & MASK52;
288 		cc = (uint64_t)(z >> 52);
289 		z = (unsigned __int128)b[4] * (unsigned __int128)x
290 			+ (unsigned __int128)t[4] + cc
291 			+ ((unsigned __int128)f << 48)
292 			- ((unsigned __int128)f << 16);
293 		t[3] = (uint64_t)z & MASK52;
294 		t[4] = (uint64_t)(z >> 52);
295 
296 		/*
297 		 * t[4] may be up to 62 bits here; we need to do a
298 		 * partial reduction. Note that limbs t[0] to t[3]
299 		 * fit on 52 bits each.
300 		 */
301 		s = t[4] >> 48;             /* s < 2^14 */
302 		t[0] += s;                  /* t[0] < 2^52 + 2^14 */
303 		w = t[1] - (s << 44);
304 		t[1] = w & MASK52;          /* t[1] < 2^52 */
305 		cc = -(w >> 52) & 0xFFF;    /* cc < 16 */
306 		w = t[2] - cc;
307 		t[2] = w & MASK52;          /* t[2] < 2^52 */
308 		cc = w >> 63;               /* cc = 0 or 1 */
309 		w = t[3] - cc - (s << 36);
310 		t[3] = w & MASK52;          /* t[3] < 2^52 */
311 		cc = w >> 63;               /* cc = 0 or 1 */
312 		w = t[4] & MASK48;
313 		t[4] = w + (s << 16) - cc;  /* t[4] < 2^48 + 2^30 */
314 
315 		/*
316 		 * The final t[4] cannot overflow because cc is 0 or 1,
317 		 * and cc can be 1 only if s != 0.
318 		 */
319 	}
320 
321 	d[0] = t[0];
322 	d[1] = t[1];
323 	d[2] = t[2];
324 	d[3] = t[3];
325 	d[4] = t[4];
326 
327 #elif BR_UMUL128
328 
329 	int i;
330 	uint64_t t[5];
331 
332 	t[0] = 0;
333 	t[1] = 0;
334 	t[2] = 0;
335 	t[3] = 0;
336 	t[4] = 0;
337 	for (i = 0; i < 5; i ++) {
338 		uint64_t x, f, cc, w, s, zh, zl;
339 		unsigned char k;
340 
341 		/*
342 		 * Since limbs of a[] and b[] fit on 56 bits each,
343 		 * each individual product fits on 112 bits. Also,
344 		 * the factor f fits on 52 bits, so f<<48 fits on
345 		 * 112 bits too. This guarantees that carries (cc)
346 		 * will fit on 62 bits, thus no overflow.
347 		 *
348 		 * The operations below compute:
349 		 *   t <- (t + x*b + f*p) / 2^52
350 		 */
351 		x = a[i];
352 		zl = _umul128(b[0], x, &zh);
353 		k = _addcarry_u64(0, t[0], zl, &zl);
354 		(void)_addcarry_u64(k, 0, zh, &zh);
355 		f = zl & MASK52;
356 		cc = (zl >> 52) | (zh << 12);
357 
358 		zl = _umul128(b[1], x, &zh);
359 		k = _addcarry_u64(0, t[1], zl, &zl);
360 		(void)_addcarry_u64(k, 0, zh, &zh);
361 		k = _addcarry_u64(0, cc, zl, &zl);
362 		(void)_addcarry_u64(k, 0, zh, &zh);
363 		k = _addcarry_u64(0, f << 44, zl, &zl);
364 		(void)_addcarry_u64(k, f >> 20, zh, &zh);
365 		t[0] = zl & MASK52;
366 		cc = (zl >> 52) | (zh << 12);
367 
368 		zl = _umul128(b[2], x, &zh);
369 		k = _addcarry_u64(0, t[2], zl, &zl);
370 		(void)_addcarry_u64(k, 0, zh, &zh);
371 		k = _addcarry_u64(0, cc, zl, &zl);
372 		(void)_addcarry_u64(k, 0, zh, &zh);
373 		t[1] = zl & MASK52;
374 		cc = (zl >> 52) | (zh << 12);
375 
376 		zl = _umul128(b[3], x, &zh);
377 		k = _addcarry_u64(0, t[3], zl, &zl);
378 		(void)_addcarry_u64(k, 0, zh, &zh);
379 		k = _addcarry_u64(0, cc, zl, &zl);
380 		(void)_addcarry_u64(k, 0, zh, &zh);
381 		k = _addcarry_u64(0, f << 36, zl, &zl);
382 		(void)_addcarry_u64(k, f >> 28, zh, &zh);
383 		t[2] = zl & MASK52;
384 		cc = (zl >> 52) | (zh << 12);
385 
386 		zl = _umul128(b[4], x, &zh);
387 		k = _addcarry_u64(0, t[4], zl, &zl);
388 		(void)_addcarry_u64(k, 0, zh, &zh);
389 		k = _addcarry_u64(0, cc, zl, &zl);
390 		(void)_addcarry_u64(k, 0, zh, &zh);
391 		k = _addcarry_u64(0, f << 48, zl, &zl);
392 		(void)_addcarry_u64(k, f >> 16, zh, &zh);
393 		k = _subborrow_u64(0, zl, f << 16, &zl);
394 		(void)_subborrow_u64(k, zh, f >> 48, &zh);
395 		t[3] = zl & MASK52;
396 		t[4] = (zl >> 52) | (zh << 12);
397 
398 		/*
399 		 * t[4] may be up to 62 bits here; we need to do a
400 		 * partial reduction. Note that limbs t[0] to t[3]
401 		 * fit on 52 bits each.
402 		 */
403 		s = t[4] >> 48;             /* s < 2^14 */
404 		t[0] += s;                  /* t[0] < 2^52 + 2^14 */
405 		w = t[1] - (s << 44);
406 		t[1] = w & MASK52;          /* t[1] < 2^52 */
407 		cc = -(w >> 52) & 0xFFF;    /* cc < 16 */
408 		w = t[2] - cc;
409 		t[2] = w & MASK52;          /* t[2] < 2^52 */
410 		cc = w >> 63;               /* cc = 0 or 1 */
411 		w = t[3] - cc - (s << 36);
412 		t[3] = w & MASK52;          /* t[3] < 2^52 */
413 		cc = w >> 63;               /* cc = 0 or 1 */
414 		w = t[4] & MASK48;
415 		t[4] = w + (s << 16) - cc;  /* t[4] < 2^48 + 2^30 */
416 
417 		/*
418 		 * The final t[4] cannot overflow because cc is 0 or 1,
419 		 * and cc can be 1 only if s != 0.
420 		 */
421 	}
422 
423 	d[0] = t[0];
424 	d[1] = t[1];
425 	d[2] = t[2];
426 	d[3] = t[3];
427 	d[4] = t[4];
428 
429 #endif
430 }
431 
432 /*
433  * Montgomery squaring in the field; currently a basic wrapper around
434  * multiplication (inline, should be optimized away).
435  * TODO: see if some extra speed can be gained here.
436  */
437 static inline void
438 f256_montysquare(uint64_t *d, const uint64_t *a)
439 {
440 	f256_montymul(d, a, a);
441 }
442 
443 /*
444  * Convert to Montgomery representation.
445  */
446 static void
447 f256_tomonty(uint64_t *d, const uint64_t *a)
448 {
449 	/*
450 	 * R2 = 2^520 mod p.
451 	 * If R = 2^260 mod p, then R2 = R^2 mod p; and the Montgomery
452 	 * multiplication of a by R2 is: a*R2/R = a*R mod p, i.e. the
453 	 * conversion to Montgomery representation.
454 	 */
455 	static const uint64_t R2[] = {
456 		0x0000000000300, 0xFFFFFFFF00000, 0xFFFFEFFFFFFFB,
457 		0xFDFFFFFFFFFFF, 0x0000004FFFFFF
458 	};
459 
460 	f256_montymul(d, a, R2);
461 }
462 
463 /*
464  * Convert from Montgomery representation.
465  */
466 static void
467 f256_frommonty(uint64_t *d, const uint64_t *a)
468 {
469 	/*
470 	 * Montgomery multiplication by 1 is division by 2^260 modulo p.
471 	 */
472 	static const uint64_t one[] = { 1, 0, 0, 0, 0 };
473 
474 	f256_montymul(d, a, one);
475 }
476 
477 /*
478  * Inversion in the field. If the source value is 0 modulo p, then this
479  * returns 0 or p. This function uses Montgomery representation.
480  */
481 static void
482 f256_invert(uint64_t *d, const uint64_t *a)
483 {
484 	/*
485 	 * We compute a^(p-2) mod p. The exponent pattern (from high to
486 	 * low) is:
487 	 *  - 32 bits of value 1
488 	 *  - 31 bits of value 0
489 	 *  - 1 bit of value 1
490 	 *  - 96 bits of value 0
491 	 *  - 94 bits of value 1
492 	 *  - 1 bit of value 0
493 	 *  - 1 bit of value 1
494 	 * To speed up the square-and-multiply algorithm, we precompute
495 	 * a^(2^31-1).
496 	 */
497 
498 	uint64_t r[5], t[5];
499 	int i;
500 
501 	memcpy(t, a, sizeof t);
502 	for (i = 0; i < 30; i ++) {
503 		f256_montysquare(t, t);
504 		f256_montymul(t, t, a);
505 	}
506 
507 	memcpy(r, t, sizeof t);
508 	for (i = 224; i >= 0; i --) {
509 		f256_montysquare(r, r);
510 		switch (i) {
511 		case 0:
512 		case 2:
513 		case 192:
514 		case 224:
515 			f256_montymul(r, r, a);
516 			break;
517 		case 3:
518 		case 34:
519 		case 65:
520 			f256_montymul(r, r, t);
521 			break;
522 		}
523 	}
524 	memcpy(d, r, sizeof r);
525 }
526 
527 /*
528  * Finalize reduction.
529  * Input value should be partially reduced.
530  * On output, limbs a[0] to a[3] fit on 52 bits each, limb a[4] fits
531  * on 48 bits, and the integer is less than p.
532  */
533 static inline void
534 f256_final_reduce(uint64_t *a)
535 {
536 	uint64_t r[5], t[5], w, cc;
537 	int i;
538 
539 	/*
540 	 * Propagate carries to ensure that limbs 0 to 3 fit on 52 bits.
541 	 */
542 	cc = 0;
543 	for (i = 0; i < 5; i ++) {
544 		w = a[i] + cc;
545 		r[i] = w & MASK52;
546 		cc = w >> 52;
547 	}
548 
549 	/*
550 	 * We compute t = r + (2^256 - p) = r + 2^224 - 2^192 - 2^96 + 1.
551 	 * If t < 2^256, then r < p, and we return r. Otherwise, we
552 	 * want to return r - p = t - 2^256.
553 	 */
554 
555 	/*
556 	 * Add 2^224 + 1, and propagate carries to ensure that limbs
557 	 * t[0] to t[3] fit in 52 bits each.
558 	 */
559 	w = r[0] + 1;
560 	t[0] = w & MASK52;
561 	cc = w >> 52;
562 	w = r[1] + cc;
563 	t[1] = w & MASK52;
564 	cc = w >> 52;
565 	w = r[2] + cc;
566 	t[2] = w & MASK52;
567 	cc = w >> 52;
568 	w = r[3] + cc;
569 	t[3] = w & MASK52;
570 	cc = w >> 52;
571 	t[4] = r[4] + cc + BIT(16);
572 
573 	/*
574 	 * Subtract 2^192 + 2^96. Since we just added 2^224 + 1, the
575 	 * result cannot be negative.
576 	 */
577 	w = t[1] - BIT(44);
578 	t[1] = w & MASK52;
579 	cc = w >> 63;
580 	w = t[2] - cc;
581 	t[2] = w & MASK52;
582 	cc = w >> 63;
583 	w = t[3] - BIT(36) - cc;
584 	t[3] = w & MASK52;
585 	cc = w >> 63;
586 	t[4] -= cc;
587 
588 	/*
589 	 * If the top limb t[4] fits on 48 bits, then r[] is already
590 	 * in the proper range. Otherwise, t[] is the value to return
591 	 * (truncated to 256 bits).
592 	 */
593 	cc = -(t[4] >> 48);
594 	t[4] &= MASK48;
595 	for (i = 0; i < 5; i ++) {
596 		a[i] = r[i] ^ (cc & (r[i] ^ t[i]));
597 	}
598 }
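
#if 0
/*
 * Illustrative sketch, not part of the library (hence disabled): a
 * small consistency check tying the helpers above together. With
 * R = 2^260, f256_tomonty() maps x to x*R mod p, f256_montymul()
 * maps (a*R, b*R) to a*b*R mod p, and f256_frommonty() divides by R,
 * so the sequence below should recover the plain product 2*3 = 6,
 * and an inverse that multiplies back to 1.
 */
static int
f256_monty_demo(void)
{
	static const uint64_t two[5] = { 2, 0, 0, 0, 0 };
	static const uint64_t three[5] = { 3, 0, 0, 0, 0 };
	static const uint64_t six[5] = { 6, 0, 0, 0, 0 };
	static const uint64_t one[5] = { 1, 0, 0, 0, 0 };
	uint64_t a[5], b[5], c[5], inv[5];

	f256_tomonty(a, two);         /* a = 2*R mod p */
	f256_tomonty(b, three);       /* b = 3*R mod p */
	f256_montymul(c, a, b);       /* c = 6*R mod p */
	f256_frommonty(c, c);         /* c = 6 mod p (partially reduced) */
	f256_final_reduce(c);
	if (memcmp(c, six, sizeof six) != 0) {
		return 0;
	}
	f256_invert(inv, b);          /* inv = (1/3)*R mod p */
	f256_montymul(c, inv, b);     /* c = 1*R mod p */
	f256_frommonty(c, c);
	f256_final_reduce(c);
	return memcmp(c, one, sizeof one) == 0;
}
#endif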
599 
600 /*
601  * Points in affine and Jacobian coordinates.
602  *
603  *  - In affine coordinates, the point-at-infinity cannot be encoded.
604  *  - Jacobian coordinates (X,Y,Z) correspond to affine (X/Z^2,Y/Z^3);
605  *    if Z = 0 then this is the point-at-infinity.
606  */
607 typedef struct {
608 	uint64_t x[5];
609 	uint64_t y[5];
610 } p256_affine;
611 
612 typedef struct {
613 	uint64_t x[5];
614 	uint64_t y[5];
615 	uint64_t z[5];
616 } p256_jacobian;
617 
618 /*
619  * Decode a field element (unsigned big endian notation).
620  */
621 static void
622 f256_decode(uint64_t *a, const unsigned char *buf)
623 {
624 	uint64_t w0, w1, w2, w3;
625 
626 	w3 = br_dec64be(buf +  0);
627 	w2 = br_dec64be(buf +  8);
628 	w1 = br_dec64be(buf + 16);
629 	w0 = br_dec64be(buf + 24);
630 	a[0] = w0 & MASK52;
631 	a[1] = ((w0 >> 52) | (w1 << 12)) & MASK52;
632 	a[2] = ((w1 >> 40) | (w2 << 24)) & MASK52;
633 	a[3] = ((w2 >> 28) | (w3 << 36)) & MASK52;
634 	a[4] = w3 >> 16;
635 }
636 
637 /*
638  * Encode a field element (unsigned big endian notation). The field
639  * element MUST be fully reduced.
640  */
641 static void
642 f256_encode(unsigned char *buf, const uint64_t *a)
643 {
644 	uint64_t w0, w1, w2, w3;
645 
646 	w0 = a[0] | (a[1] << 52);
647 	w1 = (a[1] >> 12) | (a[2] << 40);
648 	w2 = (a[2] >> 24) | (a[3] << 28);
649 	w3 = (a[3] >> 36) | (a[4] << 16);
650 	br_enc64be(buf +  0, w3);
651 	br_enc64be(buf +  8, w2);
652 	br_enc64be(buf + 16, w1);
653 	br_enc64be(buf + 24, w0);
654 }
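
#if 0
/*
 * Illustrative sketch, not part of the library (hence disabled):
 * f256_decode() splits a 256-bit big-endian value into limbs of
 * 52+52+52+52+48 bits, and f256_encode() is the exact bit-level
 * inverse, so the two round-trip. The sample value below is
 * arbitrary (and lower than p, hence a fully reduced element).
 */
static int
f256_codec_demo(void)
{
	static const unsigned char sample[32] = {
		0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF,
		0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10,
		0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF,
		0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10
	};
	uint64_t a[5];
	unsigned char buf[32];

	f256_decode(a, sample);
	f256_encode(buf, a);
	return memcmp(buf, sample, sizeof buf) == 0;
}
#endif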
655 
656 /*
657  * Decode a point. The returned point is in Jacobian coordinates, but
658  * with z = 1. If the encoding is invalid, or encodes a point which is
659  * not on the curve, or encodes the point at infinity, then this function
660  * returns 0. Otherwise, 1 is returned.
661  *
662  * The buffer is assumed to have length exactly 65 bytes.
663  */
664 static uint32_t
665 point_decode(p256_jacobian *P, const unsigned char *buf)
666 {
667 	uint64_t x[5], y[5], t[5], x3[5], tt;
668 	uint32_t r;
669 
670 	/*
671 	 * Header byte shall be 0x04.
672 	 */
673 	r = EQ(buf[0], 0x04);
674 
675 	/*
676 	 * Decode X and Y coordinates, and convert them into
677 	 * Montgomery representation.
678 	 */
679 	f256_decode(x, buf +  1);
680 	f256_decode(y, buf + 33);
681 	f256_tomonty(x, x);
682 	f256_tomonty(y, y);
683 
684 	/*
685 	 * Verify y^2 = x^3 + A*x + B. In curve P-256, A = -3.
686 	 * Note that the Montgomery representation of 0 is 0. We must
687 	 * take care to apply the final reduction to make sure we have
688 	 * 0 and not p.
689 	 */
690 	f256_montysquare(t, y);
691 	f256_montysquare(x3, x);
692 	f256_montymul(x3, x3, x);
693 	f256_sub(t, t, x3);
694 	f256_add(t, t, x);
695 	f256_add(t, t, x);
696 	f256_add(t, t, x);
697 	f256_sub(t, t, P256_B_MONTY);
698 	f256_final_reduce(t);
699 	tt = t[0] | t[1] | t[2] | t[3] | t[4];
700 	r &= EQ((uint32_t)(tt | (tt >> 32)), 0);
701 
702 	/*
703 	 * Return the point in Jacobian coordinates (and Montgomery
704 	 * representation).
705 	 */
706 	memcpy(P->x, x, sizeof x);
707 	memcpy(P->y, y, sizeof y);
708 	memcpy(P->z, F256_R, sizeof F256_R);
709 	return r;
710 }
711 
712 /*
713  * Final conversion for a point:
714  *  - The point is converted back to affine coordinates.
715  *  - Final reduction is performed.
716  *  - The point is encoded into the provided buffer.
717  *
718  * If the point is the point-at-infinity, all operations are performed,
719  * but the buffer contents are indeterminate, and 0 is returned. Otherwise,
720  * the encoded point is written in the buffer, and 1 is returned.
721  */
722 static uint32_t
723 point_encode(unsigned char *buf, const p256_jacobian *P)
724 {
725 	uint64_t t1[5], t2[5], z;
726 
727 	/* Set t1 = 1/z^2 and t2 = 1/z^3. */
728 	f256_invert(t2, P->z);
729 	f256_montysquare(t1, t2);
730 	f256_montymul(t2, t2, t1);
731 
732 	/* Compute affine coordinates x (in t1) and y (in t2). */
733 	f256_montymul(t1, P->x, t1);
734 	f256_montymul(t2, P->y, t2);
735 
736 	/* Convert back from Montgomery representation, and finalize
737 	   reductions. */
738 	f256_frommonty(t1, t1);
739 	f256_frommonty(t2, t2);
740 	f256_final_reduce(t1);
741 	f256_final_reduce(t2);
742 
743 	/* Encode. */
744 	buf[0] = 0x04;
745 	f256_encode(buf +  1, t1);
746 	f256_encode(buf + 33, t2);
747 
748 	/* Return success if and only if P->z != 0. */
749 	z = P->z[0] | P->z[1] | P->z[2] | P->z[3] | P->z[4];
750 	return NEQ((uint32_t)(z | z >> 32), 0);
751 }
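
#if 0
/*
 * Illustrative sketch, not part of the library (hence disabled):
 * point_decode() produces Jacobian coordinates with Z = 1 (in
 * Montgomery representation), and point_encode() converts back to
 * the 65-byte uncompressed format, so decoding the conventional
 * generator and re-encoding it yields the original bytes.
 */
static int
point_codec_demo(void)
{
	p256_jacobian P;
	unsigned char buf[65];

	if (!point_decode(&P, P256_G)) {
		return 0;
	}
	if (!point_encode(buf, &P)) {
		return 0;
	}
	return memcmp(buf, P256_G, sizeof buf) == 0;
}
#endif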
752 
753 /*
754  * Point doubling in Jacobian coordinates: point P is doubled.
755  * Note: if the source point is the point-at-infinity, then the result is
756  * still the point-at-infinity, which is correct. Moreover, if the three
757  * coordinates were zero, then they still are zero in the returned value.
758  */
759 static void
760 p256_double(p256_jacobian *P)
761 {
762 	/*
763 	 * Doubling formulas are:
764 	 *
765 	 *   s = 4*x*y^2
766 	 *   m = 3*(x + z^2)*(x - z^2)
767 	 *   x' = m^2 - 2*s
768 	 *   y' = m*(s - x') - 8*y^4
769 	 *   z' = 2*y*z
770 	 *
771 	 * These formulas work for all points, including points of order 2
772 	 * and points at infinity:
773 	 *   - If y = 0 then z' = 0. But there is no such point in P-256
774 	 *     anyway.
775 	 *   - If z = 0 then z' = 0.
776 	 */
777 	uint64_t t1[5], t2[5], t3[5], t4[5];
778 
779 	/*
780 	 * Compute z^2 in t1.
781 	 */
782 	f256_montysquare(t1, P->z);
783 
784 	/*
785 	 * Compute x-z^2 in t2 and x+z^2 in t1.
786 	 */
787 	f256_add(t2, P->x, t1);
788 	f256_sub(t1, P->x, t1);
789 
790 	/*
791 	 * Compute 3*(x+z^2)*(x-z^2) in t1.
792 	 */
793 	f256_montymul(t3, t1, t2);
794 	f256_add(t1, t3, t3);
795 	f256_add(t1, t3, t1);
796 
797 	/*
798 	 * Compute 4*x*y^2 (in t2) and 2*y^2 (in t3).
799 	 */
800 	f256_montysquare(t3, P->y);
801 	f256_add(t3, t3, t3);
802 	f256_montymul(t2, P->x, t3);
803 	f256_add(t2, t2, t2);
804 
805 	/*
806 	 * Compute x' = m^2 - 2*s.
807 	 */
808 	f256_montysquare(P->x, t1);
809 	f256_sub(P->x, P->x, t2);
810 	f256_sub(P->x, P->x, t2);
811 
812 	/*
813 	 * Compute z' = 2*y*z.
814 	 */
815 	f256_montymul(t4, P->y, P->z);
816 	f256_add(P->z, t4, t4);
817 	f256_partial_reduce(P->z);
818 
819 	/*
820 	 * Compute y' = m*(s - x') - 8*y^4. Note that we already have
821 	 * 2*y^2 in t3.
822 	 */
823 	f256_sub(t2, t2, P->x);
824 	f256_montymul(P->y, t1, t2);
825 	f256_montysquare(t4, t3);
826 	f256_add(t4, t4, t4);
827 	f256_sub(P->y, P->y, t4);
828 }
829 
830 /*
831  * Point addition (Jacobian coordinates): P1 is replaced with P1+P2.
832  * This function computes the wrong result in the following cases:
833  *
834  *   - If P1 == 0 but P2 != 0
835  *   - If P1 != 0 but P2 == 0
836  *   - If P1 == P2
837  *
838  * In all three cases, P1 is set to the point at infinity.
839  *
840  * Returned value is 0 if one of the following occurs:
841  *
842  *   - P1 and P2 have the same Y coordinate.
843  *   - P1 == 0 and P2 == 0.
844  *   - The Y coordinate of one of the points is 0 and the other point is
845  *     the point at infinity.
846  *
847  * The third case cannot actually happen with valid points, since a point
848  * with Y == 0 is a point of order 2, and there is no point of order 2 on
849  * curve P-256.
850  *
851  * Therefore, assuming that P1 != 0 and P2 != 0 on input, then the caller
852  * can apply the following:
853  *
854  *   - If the result is not the point at infinity, then it is correct.
855  *   - Otherwise, if the returned value is 1, then this is a case of
856  *     P1+P2 == 0, so the result is indeed the point at infinity.
857  *   - Otherwise, P1 == P2, so a "double" operation should have been
858  *     performed.
859  *
860  * Note that you can get a returned value of 0 with a correct result,
861  * e.g. if P1 and P2 have the same Y coordinate, but distinct X coordinates.
862  */
863 static uint32_t
864 p256_add(p256_jacobian *P1, const p256_jacobian *P2)
865 {
866 	/*
867 	 * Addition formulas are:
868 	 *
869 	 *   u1 = x1 * z2^2
870 	 *   u2 = x2 * z1^2
871 	 *   s1 = y1 * z2^3
872 	 *   s2 = y2 * z1^3
873 	 *   h = u2 - u1
874 	 *   r = s2 - s1
875 	 *   x3 = r^2 - h^3 - 2 * u1 * h^2
876 	 *   y3 = r * (u1 * h^2 - x3) - s1 * h^3
877 	 *   z3 = h * z1 * z2
878 	 */
879 	uint64_t t1[5], t2[5], t3[5], t4[5], t5[5], t6[5], t7[5], tt;
880 	uint32_t ret;
881 
882 	/*
883 	 * Compute u1 = x1*z2^2 (in t1) and s1 = y1*z2^3 (in t3).
884 	 */
885 	f256_montysquare(t3, P2->z);
886 	f256_montymul(t1, P1->x, t3);
887 	f256_montymul(t4, P2->z, t3);
888 	f256_montymul(t3, P1->y, t4);
889 
890 	/*
891 	 * Compute u2 = x2*z1^2 (in t2) and s2 = y2*z1^3 (in t4).
892 	 */
893 	f256_montysquare(t4, P1->z);
894 	f256_montymul(t2, P2->x, t4);
895 	f256_montymul(t5, P1->z, t4);
896 	f256_montymul(t4, P2->y, t5);
897 
898 	/*
899 	 * Compute h = u2 - u1 (in t2) and r = s2 - s1 (in t4).
900 	 * We need to test whether r is zero, so we perform an extra
901 	 * reduction.
902 	 */
903 	f256_sub(t2, t2, t1);
904 	f256_sub(t4, t4, t3);
905 	f256_final_reduce(t4);
906 	tt = t4[0] | t4[1] | t4[2] | t4[3] | t4[4];
907 	ret = (uint32_t)(tt | (tt >> 32));
908 	ret = (ret | -ret) >> 31;
909 
910 	/*
911 	 * Compute u1*h^2 (in t6) and h^3 (in t5);
912 	 */
913 	f256_montysquare(t7, t2);
914 	f256_montymul(t6, t1, t7);
915 	f256_montymul(t5, t7, t2);
916 
917 	/*
918 	 * Compute x3 = r^2 - h^3 - 2*u1*h^2.
919 	 */
920 	f256_montysquare(P1->x, t4);
921 	f256_sub(P1->x, P1->x, t5);
922 	f256_sub(P1->x, P1->x, t6);
923 	f256_sub(P1->x, P1->x, t6);
924 
925 	/*
926 	 * Compute y3 = r*(u1*h^2 - x3) - s1*h^3.
927 	 */
928 	f256_sub(t6, t6, P1->x);
929 	f256_montymul(P1->y, t4, t6);
930 	f256_montymul(t1, t5, t3);
931 	f256_sub(P1->y, P1->y, t1);
932 
933 	/*
934 	 * Compute z3 = h*z1*z2.
935 	 */
936 	f256_montymul(t1, P1->z, P2->z);
937 	f256_montymul(P1->z, t1, t2);
938 
939 	return ret;
940 }
941 
942 /*
943  * Point addition (mixed coordinates): P1 is replaced with P1+P2.
944  * This is a specialised function for the case when P2 is a non-zero point
945  * in affine coordinates.
946  *
947  * This function computes the wrong result in the following cases:
948  *
949  *   - If P1 == 0
950  *   - If P1 == P2
951  *
952  * In both cases, P1 is set to the point at infinity.
953  *
954  * Returned value is 0 if one of the following occurs:
955  *
956  *   - P1 and P2 have the same Y (affine) coordinate.
957  *   - The Y coordinate of P2 is 0 and P1 is the point at infinity.
958  *
959  * The second case cannot actually happen with valid points, since a point
960  * with Y == 0 is a point of order 2, and there is no point of order 2 on
961  * curve P-256.
962  *
963  * Therefore, assuming that P1 != 0 on input, then the caller
964  * can apply the following:
965  *
966  *   - If the result is not the point at infinity, then it is correct.
967  *   - Otherwise, if the returned value is 1, then this is a case of
968  *     P1+P2 == 0, so the result is indeed the point at infinity.
969  *   - Otherwise, P1 == P2, so a "double" operation should have been
970  *     performed.
971  *
972  * Again, a value of 0 may be returned in some cases where the addition
973  * result is correct.
974  */
975 static uint32_t
976 p256_add_mixed(p256_jacobian *P1, const p256_affine *P2)
977 {
978 	/*
979 	 * Addition formulas are:
980 	 *
981 	 *   u1 = x1
982 	 *   u2 = x2 * z1^2
983 	 *   s1 = y1
984 	 *   s2 = y2 * z1^3
985 	 *   h = u2 - u1
986 	 *   r = s2 - s1
987 	 *   x3 = r^2 - h^3 - 2 * u1 * h^2
988 	 *   y3 = r * (u1 * h^2 - x3) - s1 * h^3
989 	 *   z3 = h * z1
990 	 */
991 	uint64_t t1[5], t2[5], t3[5], t4[5], t5[5], t6[5], t7[5], tt;
992 	uint32_t ret;
993 
994 	/*
995 	 * Compute u1 = x1 (in t1) and s1 = y1 (in t3).
996 	 */
997 	memcpy(t1, P1->x, sizeof t1);
998 	memcpy(t3, P1->y, sizeof t3);
999 
1000 	/*
1001 	 * Compute u2 = x2*z1^2 (in t2) and s2 = y2*z1^3 (in t4).
1002 	 */
1003 	f256_montysquare(t4, P1->z);
1004 	f256_montymul(t2, P2->x, t4);
1005 	f256_montymul(t5, P1->z, t4);
1006 	f256_montymul(t4, P2->y, t5);
1007 
1008 	/*
1009 	 * Compute h = u2 - u1 (in t2) and r = s2 - s1 (in t4).
1010 	 * We need to test whether r is zero, so we perform an extra
1011 	 * reduction.
1012 	 */
1013 	f256_sub(t2, t2, t1);
1014 	f256_sub(t4, t4, t3);
1015 	f256_final_reduce(t4);
1016 	tt = t4[0] | t4[1] | t4[2] | t4[3] | t4[4];
1017 	ret = (uint32_t)(tt | (tt >> 32));
1018 	ret = (ret | -ret) >> 31;
1019 
1020 	/*
1021 	 * Compute u1*h^2 (in t6) and h^3 (in t5);
1022 	 */
1023 	f256_montysquare(t7, t2);
1024 	f256_montymul(t6, t1, t7);
1025 	f256_montymul(t5, t7, t2);
1026 
1027 	/*
1028 	 * Compute x3 = r^2 - h^3 - 2*u1*h^2.
1029 	 */
1030 	f256_montysquare(P1->x, t4);
1031 	f256_sub(P1->x, P1->x, t5);
1032 	f256_sub(P1->x, P1->x, t6);
1033 	f256_sub(P1->x, P1->x, t6);
1034 
1035 	/*
1036 	 * Compute y3 = r*(u1*h^2 - x3) - s1*h^3.
1037 	 */
1038 	f256_sub(t6, t6, P1->x);
1039 	f256_montymul(P1->y, t4, t6);
1040 	f256_montymul(t1, t5, t3);
1041 	f256_sub(P1->y, P1->y, t1);
1042 
1043 	/*
1044 	 * Compute z3 = h*z1.
1045 	 */
1046 	f256_montymul(P1->z, P1->z, t2);
1047 
1048 	return ret;
1049 }
1050 
1051 #if 0
1052 /* unused */
1053 /*
1054  * Point addition (mixed coordinates, complete): P1 is replaced with P1+P2.
1055  * This is a specialised function for the case when P2 is a non-zero point
1056  * in affine coordinates.
1057  *
1058  * This function returns the correct result in all cases.
1059  */
1060 static void
1061 p256_add_complete_mixed(p256_jacobian *P1, const p256_affine *P2)
1062 {
1063 	/*
1064 	 * Addition formulas, in the general case, are:
1065 	 *
1066 	 *   u1 = x1
1067 	 *   u2 = x2 * z1^2
1068 	 *   s1 = y1
1069 	 *   s2 = y2 * z1^3
1070 	 *   h = u2 - u1
1071 	 *   r = s2 - s1
1072 	 *   x3 = r^2 - h^3 - 2 * u1 * h^2
1073 	 *   y3 = r * (u1 * h^2 - x3) - s1 * h^3
1074 	 *   z3 = h * z1
1075 	 *
1076 	 * These formulas mishandle the two following cases:
1077 	 *
1078 	 *  - If P1 is the point-at-infinity (z1 = 0), then z3 is
1079 	 *    incorrectly set to 0.
1080 	 *
1081 	 *  - If P1 = P2, then u1 = u2 and s1 = s2, and x3, y3 and z3
1082 	 *    are all set to 0.
1083 	 *
1084 	 * However, if P1 + P2 = 0, then u1 = u2 but s1 != s2, and then
1085 	 * we correctly get z3 = 0 (the point-at-infinity).
1086 	 *
1087 	 * To fix the case P1 = 0, we perform at the end a copy of P2
1088 	 * over P1, conditional to z1 = 0.
1089 	 *
1090 	 * For P1 = P2: in that case, both h and r are set to 0, and
1091 	 * we get x3, y3 and z3 equal to 0. We can test for that
1092 	 * occurrence to make a mask which will be all-one if P1 = P2,
1093 	 * or all-zero otherwise; then we can compute the double of P2
1094 	 * and add it, combined with the mask, to (x3,y3,z3).
1095 	 *
1096 	 * Using the doubling formulas in p256_double() on (x2,y2),
1097 	 * simplifying since P2 is affine (i.e. z2 = 1, implicitly),
1098 	 * we get:
1099 	 *   s = 4*x2*y2^2
1100 	 *   m = 3*(x2 + 1)*(x2 - 1)
1101 	 *   x' = m^2 - 2*s
1102 	 *   y' = m*(s - x') - 8*y2^4
1103 	 *   z' = 2*y2
1104 	 * which requires only 6 multiplications. Added to the 11
1105 	 * multiplications of the normal mixed addition in Jacobian
1106 	 * coordinates, we get a cost of 17 multiplications in total.
1107 	 */
1108 	uint64_t t1[5], t2[5], t3[5], t4[5], t5[5], t6[5], t7[5], tt, zz;
1109 	int i;
1110 
1111 	/*
1112 	 * Set zz to -1 if P1 is the point at infinity, 0 otherwise.
1113 	 */
1114 	zz = P1->z[0] | P1->z[1] | P1->z[2] | P1->z[3] | P1->z[4];
1115 	zz = ((zz | -zz) >> 63) - (uint64_t)1;
1116 
1117 	/*
1118 	 * Compute u1 = x1 (in t1) and s1 = y1 (in t3).
1119 	 */
1120 	memcpy(t1, P1->x, sizeof t1);
1121 	memcpy(t3, P1->y, sizeof t3);
1122 
1123 	/*
1124 	 * Compute u2 = x2*z1^2 (in t2) and s2 = y2*z1^3 (in t4).
1125 	 */
1126 	f256_montysquare(t4, P1->z);
1127 	f256_montymul(t2, P2->x, t4);
1128 	f256_montymul(t5, P1->z, t4);
1129 	f256_montymul(t4, P2->y, t5);
1130 
1131 	/*
1132 	 * Compute h = u2 - u1 (in t2) and r = s2 - s1 (in t4).
1133 	 * Both are tested for zero below, after an extra reduction.
1134 	 */
1135 	f256_sub(t2, t2, t1);
1136 	f256_sub(t4, t4, t3);
1137 
1138 	/*
1139 	 * If both h = 0 and r = 0, then P1 = P2, and we want to set
1140 	 * the mask tt to -1; otherwise, the mask will be 0.
1141 	 */
1142 	f256_final_reduce(t2);
1143 	f256_final_reduce(t4);
1144 	tt = t2[0] | t2[1] | t2[2] | t2[3] | t2[4]
1145 		| t4[0] | t4[1] | t4[2] | t4[3] | t4[4];
1146 	tt = ((tt | -tt) >> 63) - (uint64_t)1;
1147 
1148 	/*
1149 	 * Compute u1*h^2 (in t6) and h^3 (in t5);
1150 	 */
1151 	f256_montysquare(t7, t2);
1152 	f256_montymul(t6, t1, t7);
1153 	f256_montymul(t5, t7, t2);
1154 
1155 	/*
1156 	 * Compute x3 = r^2 - h^3 - 2*u1*h^2.
1157 	 */
1158 	f256_montysquare(P1->x, t4);
1159 	f256_sub(P1->x, P1->x, t5);
1160 	f256_sub(P1->x, P1->x, t6);
1161 	f256_sub(P1->x, P1->x, t6);
1162 
1163 	/*
1164 	 * Compute y3 = r*(u1*h^2 - x3) - s1*h^3.
1165 	 */
1166 	f256_sub(t6, t6, P1->x);
1167 	f256_montymul(P1->y, t4, t6);
1168 	f256_montymul(t1, t5, t3);
1169 	f256_sub(P1->y, P1->y, t1);
1170 
1171 	/*
1172 	 * Compute z3 = h*z1.
1173 	 */
1174 	f256_montymul(P1->z, P1->z, t2);
1175 
1176 	/*
1177 	 * The "double" result, in case P1 = P2.
1178 	 */
1179 
1180 	/*
1181 	 * Compute z' = 2*y2 (in t1).
1182 	 */
1183 	f256_add(t1, P2->y, P2->y);
1184 	f256_partial_reduce(t1);
1185 
1186 	/*
1187 	 * Compute 2*(y2^2) (in t2) and s = 4*x2*(y2^2) (in t3).
1188 	 */
1189 	f256_montysquare(t2, P2->y);
1190 	f256_add(t2, t2, t2);
1191 	f256_add(t3, t2, t2);
1192 	f256_montymul(t3, P2->x, t3);
1193 
1194 	/*
1195 	 * Compute m = 3*(x2^2 - 1) (in t4).
1196 	 */
1197 	f256_montysquare(t4, P2->x);
1198 	f256_sub(t4, t4, F256_R);
1199 	f256_add(t5, t4, t4);
1200 	f256_add(t4, t4, t5);
1201 
1202 	/*
1203 	 * Compute x' = m^2 - 2*s (in t5).
1204 	 */
1205 	f256_montysquare(t5, t4);
1206 	f256_sub(t5, t5, t3);
1207 	f256_sub(t5, t5, t3);
1208 
1209 	/*
1210 	 * Compute y' = m*(s - x') - 8*y2^4 (in t6).
1211 	 */
1212 	f256_sub(t6, t3, t5);
1213 	f256_montymul(t6, t6, t4);
1214 	f256_montysquare(t7, t2);
1215 	f256_sub(t6, t6, t7);
1216 	f256_sub(t6, t6, t7);
1217 
1218 	/*
1219 	 * We now have the alternate (doubling) coordinates in (t5,t6,t1).
1220 	 * We combine them with (x3,y3,z3).
1221 	 */
1222 	for (i = 0; i < 5; i ++) {
1223 		P1->x[i] |= tt & t5[i];
1224 		P1->y[i] |= tt & t6[i];
1225 		P1->z[i] |= tt & t1[i];
1226 	}
1227 
1228 	/*
1229 	 * If P1 = 0, then we get z3 = 0 (which is invalid); if z1 is 0,
1230 	 * then we want to replace the result with a copy of P2. The
1231 	 * test on z1 was done at the start, in the zz mask.
1232 	 */
1233 	for (i = 0; i < 5; i ++) {
1234 		P1->x[i] ^= zz & (P1->x[i] ^ P2->x[i]);
1235 		P1->y[i] ^= zz & (P1->y[i] ^ P2->y[i]);
1236 		P1->z[i] ^= zz & (P1->z[i] ^ F256_R[i]);
1237 	}
1238 }
1239 #endif
1240 
1241 /*
1242  * Inner function for computing a point multiplication. A window is
1243  * provided, with points 1*P to 15*P in affine coordinates.
1244  *
1245  * Assumptions:
1246  *  - All provided points are valid points on the curve.
1247  *  - Multiplier is non-zero, and smaller than the curve order.
1248  *  - Everything is in Montgomery representation.
1249  */
1250 static void
1251 point_mul_inner(p256_jacobian *R, const p256_affine *W,
1252 	const unsigned char *k, size_t klen)
1253 {
1254 	p256_jacobian Q;
1255 	uint32_t qz;
1256 
1257 	memset(&Q, 0, sizeof Q);
1258 	qz = 1;
1259 	while (klen -- > 0) {
1260 		int i;
1261 		unsigned bk;
1262 
1263 		bk = *k ++;
1264 		for (i = 0; i < 2; i ++) {
1265 			uint32_t bits;
1266 			uint32_t bnz;
1267 			p256_affine T;
1268 			p256_jacobian U;
1269 			uint32_t n;
1270 			int j;
1271 			uint64_t m;
1272 
1273 			p256_double(&Q);
1274 			p256_double(&Q);
1275 			p256_double(&Q);
1276 			p256_double(&Q);
1277 			bits = (bk >> 4) & 0x0F;
1278 			bnz = NEQ(bits, 0);
1279 
1280 			/*
1281 			 * Lookup point in window. If the bits are 0,
1282 			 * we get something invalid, which is not a
1283 			 * problem because we will use it only if the
1284 			 * bits are non-zero.
1285 			 */
1286 			memset(&T, 0, sizeof T);
1287 			for (n = 0; n < 15; n ++) {
1288 				m = -(uint64_t)EQ(bits, n + 1);
1289 				T.x[0] |= m & W[n].x[0];
1290 				T.x[1] |= m & W[n].x[1];
1291 				T.x[2] |= m & W[n].x[2];
1292 				T.x[3] |= m & W[n].x[3];
1293 				T.x[4] |= m & W[n].x[4];
1294 				T.y[0] |= m & W[n].y[0];
1295 				T.y[1] |= m & W[n].y[1];
1296 				T.y[2] |= m & W[n].y[2];
1297 				T.y[3] |= m & W[n].y[3];
1298 				T.y[4] |= m & W[n].y[4];
1299 			}
1300 
1301 			U = Q;
1302 			p256_add_mixed(&U, &T);
1303 
1304 			/*
1305 			 * If qz is still 1, then Q was all-zeros, and this
1306 			 * is conserved through p256_double().
1307 			 */
1308 			m = -(uint64_t)(bnz & qz);
1309 			for (j = 0; j < 5; j ++) {
1310 				Q.x[j] ^= m & (Q.x[j] ^ T.x[j]);
1311 				Q.y[j] ^= m & (Q.y[j] ^ T.y[j]);
1312 				Q.z[j] ^= m & (Q.z[j] ^ F256_R[j]);
1313 			}
1314 			CCOPY(bnz & ~qz, &Q, &U, sizeof Q);
1315 			qz &= ~bnz;
1316 			bk <<= 4;
1317 		}
1318 	}
1319 	*R = Q;
1320 }
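
#if 0
/*
 * Illustrative sketch, not part of the library (hence disabled): the
 * constant-time selection idiom used in the window lookup above. With
 * m equal to 0 or to all-ones, x ^ (m & (x ^ y)) evaluates to x or y
 * respectively, without any data-dependent branch or memory access.
 */
static uint64_t
ct_select_demo(uint64_t x, uint64_t y, uint64_t m)
{
	return x ^ (m & (x ^ y));
}
#endif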
1321 
1322 /*
1323  * Convert a window from Jacobian to affine coordinates. A single
1324  * field inversion is used. This function works for windows up to
1325  * 32 elements.
1326  *
1327  * The destination array (aff[]) and the source array (jac[]) may
1328  * overlap, provided that the start of aff[] is not after the start of
1329  * jac[]. Even if the arrays do _not_ overlap, the source array is
1330  * modified.
1331  */
1332 static void
1333 window_to_affine(p256_affine *aff, p256_jacobian *jac, int num)
1334 {
1335 	/*
1336 	 * Convert the window points to affine coordinates. We use the
1337 	 * following trick to mutualize the inversion computation: if
1338 	 * we have z1, z2, z3, and z4, and want to invert all of them,
1339 	 * we compute u = 1/(z1*z2*z3*z4), and then we have:
1340 	 *   1/z1 = u*z2*z3*z4
1341 	 *   1/z2 = u*z1*z3*z4
1342 	 *   1/z3 = u*z1*z2*z4
1343 	 *   1/z4 = u*z1*z2*z3
1344 	 *
1345 	 * The partial products are computed recursively:
1346 	 *
1347 	 *  - on input (z_1,z_2), return (z_2,z_1) and z_1*z_2
1348 	 *  - on input (z_1,z_2,... z_n):
1349 	 *       recurse on (z_1,z_2,... z_(n/2)) -> r1 and m1
1350 	 *       recurse on (z_(n/2+1),z_(n/2+2)... z_n) -> r2 and m2
1351 	 *       multiply elements of r1 by m2 -> s1
1352 	 *       multiply elements of r2 by m1 -> s2
1353 	 *       return r1||r2 and m1*m2
1354 	 *
1355 	 * In the example below, we suppose that we have 14 elements.
1356 	 * Let z1, z2,... zE be the 14 values to invert (index noted in
1357 	 * hexadecimal, starting at 1).
1358 	 *
1359 	 *  - Depth 1:
1360 	 *      swap(z1, z2); z12 = z1*z2
1361 	 *      swap(z3, z4); z34 = z3*z4
1362 	 *      swap(z5, z6); z56 = z5*z6
1363 	 *      swap(z7, z8); z78 = z7*z8
1364 	 *      swap(z9, zA); z9A = z9*zA
1365 	 *      swap(zB, zC); zBC = zB*zC
1366 	 *      swap(zD, zE); zDE = zD*zE
1367 	 *
1368 	 *  - Depth 2:
1369 	 *      z1 <- z1*z34, z2 <- z2*z34, z3 <- z3*z12, z4 <- z4*z12
1370 	 *      z1234 = z12*z34
1371 	 *      z5 <- z5*z78, z6 <- z6*z78, z7 <- z7*z56, z8 <- z8*z56
1372 	 *      z5678 = z56*z78
1373 	 *      z9 <- z9*zBC, zA <- zA*zBC, zB <- zB*z9A, zC <- zC*z9A
1374 	 *      z9ABC = z9A*zBC
1375 	 *
1376 	 *  - Depth 3:
1377 	 *      z1 <- z1*z5678, z2 <- z2*z5678, z3 <- z3*z5678, z4 <- z4*z5678
1378 	 *      z5 <- z5*z1234, z6 <- z6*z1234, z7 <- z7*z1234, z8 <- z8*z1234
1379 	 *      z12345678 = z1234*z5678
1380 	 *      z9 <- z9*zDE, zA <- zA*zDE, zB <- zB*zDE, zC <- zC*zDE
1381 	 *      zD <- zD*z9ABC, zE <- zE*z9ABC
1382 	 *      z9ABCDE = z9ABC*zDE
1383 	 *
1384 	 *  - Depth 4:
1385 	 *      multiply z1..z8 by z9ABCDE
1386 	 *      multiply z9..zE by z12345678
1387 	 *      final z = z12345678*z9ABCDE
1388 	 */
1389 
1390 	uint64_t z[16][5];
1391 	int i, k, s;
1392 #define zt   (z[15])
1393 #define zu   (z[14])
1394 #define zv   (z[13])
1395 
1396 	/*
1397 	 * First recursion step (pairwise swapping and multiplication).
1398 	 * If there is an odd number of elements, then we "invent" an
1399 	 * extra one with coordinate Z = 1 (in Montgomery representation).
1400 	 */
1401 	for (i = 0; (i + 1) < num; i += 2) {
1402 		memcpy(zt, jac[i].z, sizeof zt);
1403 		memcpy(jac[i].z, jac[i + 1].z, sizeof zt);
1404 		memcpy(jac[i + 1].z, zt, sizeof zt);
1405 		f256_montymul(z[i >> 1], jac[i].z, jac[i + 1].z);
1406 	}
1407 	if ((num & 1) != 0) {
1408 		memcpy(z[num >> 1], jac[num - 1].z, sizeof zt);
1409 		memcpy(jac[num - 1].z, F256_R, sizeof F256_R);
1410 	}
1411 
1412 	/*
1413 	 * Perform further recursion steps. At the entry of each step,
1414 	 * the process has been done for groups of 's' points. The
1415 	 * integer k is the log2 of s.
1416 	 */
1417 	for (k = 1, s = 2; s < num; k ++, s <<= 1) {
1418 		int n;
1419 
1420 		for (i = 0; i < num; i ++) {
1421 			f256_montymul(jac[i].z, jac[i].z, z[(i >> k) ^ 1]);
1422 		}
1423 		n = (num + s - 1) >> k;
1424 		for (i = 0; i < (n >> 1); i ++) {
1425 			f256_montymul(z[i], z[i << 1], z[(i << 1) + 1]);
1426 		}
1427 		if ((n & 1) != 0) {
1428 			memmove(z[n >> 1], z[n], sizeof zt);
1429 		}
1430 	}
1431 
1432 	/*
1433 	 * Invert the final result, and convert all points.
1434 	 */
1435 	f256_invert(zt, z[0]);
1436 	for (i = 0; i < num; i ++) {
1437 		f256_montymul(zv, jac[i].z, zt);
1438 		f256_montysquare(zu, zv);
1439 		f256_montymul(zv, zv, zu);
1440 		f256_montymul(aff[i].x, jac[i].x, zu);
1441 		f256_montymul(aff[i].y, jac[i].y, zv);
1442 	}
1443 }
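
#if 0
/*
 * Illustrative sketch, not part of the library (hence disabled): the
 * batched-inversion idea used by window_to_affine(), shown on just
 * two field elements. All values are in Montgomery representation;
 * z1 and z2 are assumed to be non-zero.
 */
static void
f256_invert_two_demo(uint64_t *inv1, uint64_t *inv2,
	const uint64_t *z1, const uint64_t *z2)
{
	uint64_t m[5], u[5];

	f256_montymul(m, z1, z2);     /* m = z1*z2 */
	f256_invert(u, m);            /* u = 1/(z1*z2), single inversion */
	f256_montymul(inv1, u, z2);   /* 1/z1 = u*z2 */
	f256_montymul(inv2, u, z1);   /* 1/z2 = u*z1 */
}
#endif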
1444 
1445 /*
1446  * Multiply the provided point by an integer.
1447  * Assumptions:
1448  *  - Source point is a valid curve point.
1449  *  - Source point is not the point-at-infinity.
1450  *  - Integer is not 0, and is lower than the curve order.
1451  * If these conditions are not met, then the result is indeterminate
1452  * (but the process is still constant-time).
1453  */
1454 static void
1455 p256_mul(p256_jacobian *P, const unsigned char *k, size_t klen)
1456 {
1457 	union {
1458 		p256_affine aff[15];
1459 		p256_jacobian jac[15];
1460 	} window;
1461 	int i;
1462 
1463 	/*
1464 	 * Compute window, in Jacobian coordinates.
1465 	 */
1466 	window.jac[0] = *P;
1467 	for (i = 2; i < 16; i ++) {
1468 		window.jac[i - 1] = window.jac[(i >> 1) - 1];
1469 		if ((i & 1) == 0) {
1470 			p256_double(&window.jac[i - 1]);
1471 		} else {
1472 			p256_add(&window.jac[i - 1], &window.jac[i >> 1]);
1473 		}
1474 	}
1475 
1476 	/*
1477 	 * Convert the window points to affine coordinates. Point
1478 	 * window[0] is the source point, already in affine coordinates.
1479 	 */
1480 	window_to_affine(window.aff, window.jac, 15);
1481 
1482 	/*
1483 	 * Perform point multiplication.
1484 	 */
1485 	point_mul_inner(P, window.aff, k, klen);
1486 }
1487 
1488 /*
1489  * Precomputed window for the conventional generator: P256_Gwin[n]
1490  * contains (n+1)*G (affine coordinates, in Montgomery representation).
1491  */
1492 static const p256_affine P256_Gwin[] = {
1493 	{
1494 		{ 0x30D418A9143C1, 0xC4FEDB60179E7, 0x62251075BA95F,
1495 		  0x5C669FB732B77, 0x08905F76B5375 },
1496 		{ 0x5357CE95560A8, 0x43A19E45CDDF2, 0x21F3258B4AB8E,
1497 		  0xD8552E88688DD, 0x0571FF18A5885 }
1498 	},
1499 	{
1500 		{ 0x46D410DDD64DF, 0x0B433827D8500, 0x1490D9AA6AE3C,
1501 		  0xA3A832205038D, 0x06BB32E52DCF3 },
1502 		{ 0x48D361BEE1A57, 0xB7B236FF82F36, 0x042DBE152CD7C,
1503 		  0xA3AA9A8FB0E92, 0x08C577517A5B8 }
1504 	},
1505 	{
1506 		{ 0x3F904EEBC1272, 0x9E87D81FBFFAC, 0xCBBC98B027F84,
1507 		  0x47E46AD77DD87, 0x06936A3FD6FF7 },
1508 		{ 0x5C1FC983A7EBD, 0xC3861FE1AB04C, 0x2EE98E583E47A,
1509 		  0xC06A88208311A, 0x05F06A2AB587C }
1510 	},
1511 	{
1512 		{ 0xB50D46918DCC5, 0xD7623C17374B0, 0x100AF24650A6E,
1513 		  0x76ABCDAACACE8, 0x077362F591B01 },
1514 		{ 0xF24CE4CBABA68, 0x17AD6F4472D96, 0xDDD22E1762847,
1515 		  0x862EB6C36DEE5, 0x04B14C39CC5AB }
1516 	},
1517 	{
1518 		{ 0x8AAEC45C61F5C, 0x9D4B9537DBE1B, 0x76C20C90EC649,
1519 		  0x3C7D41CB5AAD0, 0x0907960649052 },
1520 		{ 0x9B4AE7BA4F107, 0xF75EB882BEB30, 0x7A1F6873C568E,
1521 		  0x915C540A9877E, 0x03A076BB9DD1E }
1522 	},
1523 	{
1524 		{ 0x47373E77664A1, 0xF246CEE3E4039, 0x17A3AD55AE744,
1525 		  0x673C50A961A5B, 0x03074B5964213 },
1526 		{ 0x6220D377E44BA, 0x30DFF14B593D3, 0x639F11299C2B5,
1527 		  0x75F5424D44CEF, 0x04C9916DEA07F }
1528 	},
1529 	{
1530 		{ 0x354EA0173B4F1, 0x3C23C00F70746, 0x23BB082BD2021,
1531 		  0xE03E43EAAB50C, 0x03BA5119D3123 },
1532 		{ 0xD0303F5B9D4DE, 0x17DA67BDD2847, 0xC941956742F2F,
1533 		  0x8670F933BDC77, 0x0AEDD9164E240 }
1534 	},
1535 	{
1536 		{ 0x4CD19499A78FB, 0x4BF9B345527F1, 0x2CFC6B462AB5C,
1537 		  0x30CDF90F02AF0, 0x0763891F62652 },
1538 		{ 0xA3A9532D49775, 0xD7F9EBA15F59D, 0x60BBF021E3327,
1539 		  0xF75C23C7B84BE, 0x06EC12F2C706D }
1540 	},
1541 	{
1542 		{ 0x6E8F264E20E8E, 0xC79A7A84175C9, 0xC8EB00ABE6BFE,
1543 		  0x16A4CC09C0444, 0x005B3081D0C4E },
1544 		{ 0x777AA45F33140, 0xDCE5D45E31EB7, 0xB12F1A56AF7BE,
1545 		  0xF9B2B6E019A88, 0x086659CDFD835 }
1546 	},
1547 	{
1548 		{ 0xDBD19DC21EC8C, 0x94FCF81392C18, 0x250B4998F9868,
1549 		  0x28EB37D2CD648, 0x0C61C947E4B34 },
1550 		{ 0x407880DD9E767, 0x0C83FBE080C2B, 0x9BE5D2C43A899,
1551 		  0xAB4EF7D2D6577, 0x08719A555B3B4 }
1552 	},
1553 	{
1554 		{ 0x260A6245E4043, 0x53E7FDFE0EA7D, 0xAC1AB59DE4079,
1555 		  0x072EFF3A4158D, 0x0E7090F1949C9 },
1556 		{ 0x85612B944E886, 0xE857F61C81A76, 0xAD643D250F939,
1557 		  0x88DAC0DAA891E, 0x089300244125B }
1558 	},
1559 	{
1560 		{ 0x1AA7D26977684, 0x58A345A3304B7, 0x37385EABDEDEF,
1561 		  0x155E409D29DEE, 0x0EE1DF780B83E },
1562 		{ 0x12D91CBB5B437, 0x65A8956370CAC, 0xDE6D66170ED2F,
1563 		  0xAC9B8228CFA8A, 0x0FF57C95C3238 }
1564 	},
1565 	{
1566 		{ 0x25634B2ED7097, 0x9156FD30DCCC4, 0x9E98110E35676,
1567 		  0x7594CBCD43F55, 0x038477ACC395B },
1568 		{ 0x2B90C00EE17FF, 0xF842ED2E33575, 0x1F5BC16874838,
1569 		  0x7968CD06422BD, 0x0BC0876AB9E7B }
1570 	},
1571 	{
1572 		{ 0xA35BB0CF664AF, 0x68F9707E3A242, 0x832660126E48F,
1573 		  0x72D2717BF54C6, 0x0AAE7333ED12C },
1574 		{ 0x2DB7995D586B1, 0xE732237C227B5, 0x65E7DBBE29569,
1575 		  0xBBBD8E4193E2A, 0x052706DC3EAA1 }
1576 	},
1577 	{
1578 		{ 0xD8B7BC60055BE, 0xD76E27E4B72BC, 0x81937003CC23E,
1579 		  0xA090E337424E4, 0x02AA0E43EAD3D },
1580 		{ 0x524F6383C45D2, 0x422A41B2540B8, 0x8A4797D766355,
1581 		  0xDF444EFA6DE77, 0x0042170A9079A }
1582 	},
1583 };
1584 
1585 /*
1586  * Multiply the conventional generator of the curve by the provided
1587  * integer. Return is written in *P.
1588  * integer. The result is written in *P.
1589  * Assumptions:
1590  *  - Integer is not 0, and is lower than the curve order.
1591  * If this condition is not met, then the result is indeterminate
1592  * (but the process is still constant-time).
1593  */
1594 static void
1595 p256_mulgen(p256_jacobian *P, const unsigned char *k, size_t klen)
1596 {
1597 	point_mul_inner(P, P256_Gwin, k, klen);
1598 }
1599 
1600 /*
1601  * Return 1 if all of the following hold:
1602  *  - klen <= 32
1603  *  - k != 0
1604  *  - k is lower than the curve order
1605  * Otherwise, return 0.
1606  *
1607  * Constant-time behaviour: only klen may be observable.
1608  */
1609 static uint32_t
1610 check_scalar(const unsigned char *k, size_t klen)
1611 {
1612 	uint32_t z;
1613 	int32_t c;
1614 	size_t u;
1615 
1616 	if (klen > 32) {
1617 		return 0;
1618 	}
1619 	z = 0;
1620 	for (u = 0; u < klen; u ++) {
1621 		z |= k[u];
1622 	}
1623 	if (klen == 32) {
1624 		c = 0;
1625 		for (u = 0; u < klen; u ++) {
1626 			c |= -(int32_t)EQ0(c) & CMP(k[u], P256_N[u]);
1627 		}
1628 	} else {
1629 		c = -1;
1630 	}
1631 	return NEQ(z, 0) & LT0(c);
1632 }
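
#if 0
/*
 * Illustrative sketch, not part of the library (hence disabled):
 * check_scalar() accepts the scalar 1 (short encoding) and rejects
 * the curve order itself, since valid scalars must be non-zero and
 * strictly lower than the order.
 */
static int
check_scalar_demo(void)
{
	static const unsigned char one[] = { 0x01 };

	return check_scalar(one, sizeof one) == 1
		&& check_scalar(P256_N, sizeof P256_N) == 0;
}
#endif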
1633 
1634 static uint32_t
1635 api_mul(unsigned char *G, size_t Glen,
1636 	const unsigned char *k, size_t klen, int curve)
1637 {
1638 	uint32_t r;
1639 	p256_jacobian P;
1640 
1641 	(void)curve;
1642 	if (Glen != 65) {
1643 		return 0;
1644 	}
1645 	r = check_scalar(k, klen);
1646 	r &= point_decode(&P, G);
1647 	p256_mul(&P, k, klen);
1648 	r &= point_encode(G, &P);
1649 	return r;
1650 }
1651 
1652 static size_t
1653 api_mulgen(unsigned char *R,
1654 	const unsigned char *k, size_t klen, int curve)
1655 {
1656 	p256_jacobian P;
1657 
1658 	(void)curve;
1659 	p256_mulgen(&P, k, klen);
1660 	point_encode(R, &P);
1661 	return 65;
1662 }
1663 
1664 static uint32_t
1665 api_muladd(unsigned char *A, const unsigned char *B, size_t len,
1666 	const unsigned char *x, size_t xlen,
1667 	const unsigned char *y, size_t ylen, int curve)
1668 {
1669 	/*
1670 	 * We might want to use Shamir's trick here: make a composite
1671 	 * window of u*P+v*Q points, to merge the two doubling-ladders
1672 	 * into one. This, however, has some complications:
1673 	 *
1674 	 *  - During the computation, we may hit the point-at-infinity.
1675 	 *    Thus, we would need p256_add_complete_mixed() (complete
1676 	 *    formulas for point addition), with a higher cost (17 muls
1677 	 *    instead of 11).
1678 	 *
1679 	 *  - A 4-bit window would be too large, since it would involve
1680 	 *    16*16-1 = 255 points. For the same window size as in the
1681 	 *    p256_mul() case, we would need to reduce the window size
1682 	 *    to 2 bits, and thus perform twice as many non-doubling
1683 	 *    point additions.
1684 	 *
1685 	 *  - The window may itself contain the point-at-infinity, and
1686 	 *    thus cannot, in all generality, be made of affine points.
1687 	 *    Instead, we would need to make it a window of points in
1688 	 *    Jacobian coordinates. Even p256_add_complete_mixed() would
1689 	 *    be inappropriate.
1690 	 *
1691 	 * For these reasons, the code below performs two separate
1692 	 * point multiplications, then computes the final point addition
1693 	 * (which is both a "normal" addition, and a doubling, to handle
1694 	 * all cases).
1695 	 */
1696 
1697 	p256_jacobian P, Q;
1698 	uint32_t r, t, s;
1699 	uint64_t z;
1700 
1701 	(void)curve;
1702 	if (len != 65) {
1703 		return 0;
1704 	}
1705 	r = point_decode(&P, A);
1706 	p256_mul(&P, x, xlen);
1707 	if (B == NULL) {
1708 		p256_mulgen(&Q, y, ylen);
1709 	} else {
1710 		r &= point_decode(&Q, B);
1711 		p256_mul(&Q, y, ylen);
1712 	}
1713 
1714 	/*
1715 	 * The final addition may fail in case both points are equal.
1716 	 */
1717 	t = p256_add(&P, &Q);
1718 	f256_final_reduce(P.z);
1719 	z = P.z[0] | P.z[1] | P.z[2] | P.z[3] | P.z[4];
1720 	s = EQ((uint32_t)(z | (z >> 32)), 0);
1721 	p256_double(&Q);
1722 
1723 	/*
1724 	 * If s is 1 then either P+Q = 0 (t = 1) or P = Q (t = 0). So we
1725 	 * have the following:
1726 	 *
1727 	 *   s = 0, t = 0   return P (normal addition)
1728 	 *   s = 0, t = 1   return P (normal addition)
1729 	 *   s = 1, t = 0   return Q (a 'double' case)
1730 	 *   s = 1, t = 1   report an error (P+Q = 0)
1731 	 */
1732 	CCOPY(s & ~t, &P, &Q, sizeof Q);
1733 	point_encode(A, &P);
1734 	r &= ~(s & t);
1735 	return r;
1736 }
1737 
1738 /* see bearssl_ec.h */
1739 const br_ec_impl br_ec_p256_m62 = {
1740 	(uint32_t)0x00800000,
1741 	&api_generator,
1742 	&api_order,
1743 	&api_xoff,
1744 	&api_mul,
1745 	&api_mulgen,
1746 	&api_muladd
1747 };
1748 
1749 /* see bearssl_ec.h */
1750 const br_ec_impl *
1751 br_ec_p256_m62_get(void)
1752 {
1753 	return &br_ec_p256_m62;
1754 }
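
#if 0
/*
 * Illustrative sketch, not part of the library (hence disabled): how a
 * caller might use this implementation through the br_ec_impl vtable,
 * here to derive the public point priv*G for a 32-byte private scalar.
 * BR_EC_secp256r1 is the curve identifier from bearssl_ec.h; the
 * scalar is assumed to be non-zero and lower than the curve order.
 */
static size_t
compute_public_key_demo(unsigned char *pub65, const unsigned char *priv32)
{
	const br_ec_impl *impl;

	impl = br_ec_p256_m62_get();
	/* Writes the 65-byte uncompressed point into pub65, returns 65. */
	return impl->mulgen(pub65, priv32, 32, BR_EC_secp256r1);
}
#endif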
1755 
1756 #else
1757 
1758 /* see bearssl_ec.h */
1759 const br_ec_impl *
1760 br_ec_p256_m62_get(void)
1761 {
1762 	return 0;
1763 }
1764 
1765 #endif
1766