xref: /freebsd/crypto/openssl/crypto/ec/ecp_sm2p256.c (revision 046c625e9382e17da953767b881aaa782fa73af8)
1 /*
2  * Copyright 2023-2025 The OpenSSL Project Authors. All Rights Reserved.
3  *
4  * Licensed under the Apache License 2.0 (the "License").  You may not use
5  * this file except in compliance with the License.  You can obtain a copy
6  * in the file LICENSE in the source distribution or at
7  * https://www.openssl.org/source/license.html
8  *
9  */
10 
11 /*
12  * SM2 low level APIs are deprecated for public use, but still ok for
13  * internal use.
14  */
15 #include "internal/deprecated.h"
16 
17 #include <string.h>
18 #include <openssl/err.h>
19 #include "crypto/bn.h"
20 #include "ec_local.h"
21 #include "internal/common.h"
22 #include "internal/constant_time.h"
23 
/* Number of limbs in a 256-bit field element (4 on 64-bit BN_ULONG) */
#define P256_LIMBS (256 / BN_BITS2)

#if !defined(OPENSSL_NO_SM2_PRECOMP)
/*
 * Offline-generated multiples of the SM2 base point G.
 * Layout (as indexed by ecp_sm2p256_point_G_mul_by_scalar): for byte
 * window i in [0,32) and byte value v in [0,256), entry (i*256 + v)*8
 * holds the affine point v*2^(8*i)*G as 8 limbs, X followed by Y.
 */
extern const BN_ULONG ecp_sm2p256_precomputed[8 * 32 * 256];
#endif

/* Jacobian projective point; an all-zero Z encodes the point at infinity */
typedef struct {
    BN_ULONG X[P256_LIMBS];
    BN_ULONG Y[P256_LIMBS];
    BN_ULONG Z[P256_LIMBS];
} P256_POINT;

/* Affine point (implicit Z = 1) */
typedef struct {
    BN_ULONG X[P256_LIMBS];
    BN_ULONG Y[P256_LIMBS];
} P256_POINT_AFFINE;

#if !defined(OPENSSL_NO_SM2_PRECOMP)
/* Coordinates of G, for which we have precomputed tables */
/* Limb order is little-endian: least-significant 64-bit word first */
ALIGN32 static const BN_ULONG def_xG[P256_LIMBS] = {
    0x715a4589334c74c7, 0x8fe30bbff2660be1,
    0x5f9904466a39c994, 0x32c4ae2c1f198119
};

ALIGN32 static const BN_ULONG def_yG[P256_LIMBS] = {
    0x02df32e52139f0a0, 0xd0a9877cc62a4740,
    0x59bdcee36b692153, 0xbc3736a2f4f6779c,
};
#endif

/* p and order for SM2 according to GB/T 32918.5-2017 */
ALIGN32 static const BN_ULONG def_p[P256_LIMBS] = {
    0xffffffffffffffff, 0xffffffff00000000,
    0xffffffffffffffff, 0xfffffffeffffffff
};

/* The field element 1, also used as the reference value for is_one() */
ALIGN32 static const BN_ULONG ONE[P256_LIMBS] = {1, 0, 0, 0};

/* Functions implemented in assembly */
/*
 * Most of below mentioned functions *preserve* the property of inputs
 * being fully reduced, i.e. being in [0, modulus) range. Simply put if
 * inputs are fully reduced, then output is too.
 */
/* Right shift: a >> 1 */
void bn_rshift1(BN_ULONG *a);
/* Sub: r = a - b */
void bn_sub(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b);
/* Modular div by 2: r = a / 2 mod p */
void ecp_sm2p256_div_by_2(BN_ULONG *r, const BN_ULONG *a);
/* Modular div by 2: r = a / 2 mod n, where n = ord(p) */
void ecp_sm2p256_div_by_2_mod_ord(BN_ULONG *r, const BN_ULONG *a);
/* Modular add: r = a + b mod p */
void ecp_sm2p256_add(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b);
/* Modular sub: r = a - b mod p */
void ecp_sm2p256_sub(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b);
/* Modular sub: r = a - b mod n, where n = ord(p) */
void ecp_sm2p256_sub_mod_ord(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b);
/* Modular mul by 3: out = 3 * a mod p */
void ecp_sm2p256_mul_by_3(BN_ULONG *r, const BN_ULONG *a);
/* Modular mul: r = a * b mod p */
void ecp_sm2p256_mul(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b);
/* Modular sqr: r = a ^ 2 mod p */
void ecp_sm2p256_sqr(BN_ULONG *r, const BN_ULONG *a);
88 
is_zeros(const BN_ULONG * a)89 static ossl_inline BN_ULONG is_zeros(const BN_ULONG *a)
90 {
91     BN_ULONG res;
92 
93     res = a[0] | a[1] | a[2] | a[3];
94 
95     return constant_time_is_zero_64(res);
96 }
97 
is_equal(const BN_ULONG * a,const BN_ULONG * b)98 static ossl_inline int is_equal(const BN_ULONG *a, const BN_ULONG *b)
99 {
100     BN_ULONG res;
101 
102     res = a[0] ^ b[0];
103     res |= a[1] ^ b[1];
104     res |= a[2] ^ b[2];
105     res |= a[3] ^ b[3];
106 
107     return constant_time_is_zero_64(res);
108 }
109 
is_greater(const BN_ULONG * a,const BN_ULONG * b)110 static ossl_inline int is_greater(const BN_ULONG *a, const BN_ULONG *b)
111 {
112     int i;
113 
114     for (i = P256_LIMBS - 1; i >= 0; --i) {
115         if (a[i] > b[i])
116             return 1;
117         if (a[i] < b[i])
118             return -1;
119     }
120 
121     return 0;
122 }
123 
/* Predicates on 256-bit values (little-endian limb order) */
#define is_one(a) is_equal(a, ONE)
#define is_even(a) !(a[0] & 1)
/*
 * Limb-wise Jacobian coordinate equality: the same point represented
 * with different Z scalings compares unequal here.
 */
#define is_point_equal(a, b)     \
    is_equal(a->X, b->X) &&      \
    is_equal(a->Y, b->Y) &&      \
    is_equal(a->Z, b->Z)

/* Bignum and field elements conversion */
#define ecp_sm2p256_bignum_field_elem(out, in) \
    bn_copy_words(out, in, P256_LIMBS)

/*
 * Binary algorithm for inversion in Fp (binary extended Euclid).
 * Maintains x1*in == u (mod mod) and x2*in == v (mod mod); when u or v
 * reaches 1, the matching accumulator is the inverse.
 * NOTE: expands to a bare `return;` when |in| is zero, so this macro
 * may only be used inside void functions; |out| is left untouched then.
 */
#define BN_MOD_INV(out, in, mod_div, mod_sub, mod) \
    do {                                           \
        ALIGN32 BN_ULONG u[4];                     \
        ALIGN32 BN_ULONG v[4];                     \
        ALIGN32 BN_ULONG x1[4] = {1, 0, 0, 0};     \
        ALIGN32 BN_ULONG x2[4] = {0};              \
                                                   \
        if (is_zeros(in))                          \
            return;                                \
        memcpy(u, in, 32);                         \
        memcpy(v, mod, 32);                        \
        while (!is_one(u) && !is_one(v)) {         \
            /* strip factors of 2, dividing the */ \
            /* accumulator by 2 mod the modulus */ \
            while (is_even(u)) {                   \
                bn_rshift1(u);                     \
                mod_div(x1, x1);                   \
            }                                      \
            while (is_even(v)) {                   \
                bn_rshift1(v);                     \
                mod_div(x2, x2);                   \
            }                                      \
            /* subtract smaller from larger      */\
            if (is_greater(u, v) == 1) {           \
                bn_sub(u, u, v);                   \
                mod_sub(x1, x1, x2);               \
            } else {                               \
                bn_sub(v, v, u);                   \
                mod_sub(x2, x2, x1);               \
            }                                      \
        }                                          \
        if (is_one(u))                             \
            memcpy(out, x1, 32);                   \
        else                                       \
            memcpy(out, x2, 32);                   \
    } while (0)
169 
/*
 * Modular inverse |out| = |in|^(-1) mod |p|.
 * If |in| is zero, BN_MOD_INV returns early and |out| is left unchanged.
 */
static ossl_inline void ecp_sm2p256_mod_inverse(BN_ULONG* out,
                                                const BN_ULONG* in) {
    BN_MOD_INV(out, in, ecp_sm2p256_div_by_2, ecp_sm2p256_sub, def_p);
}
175 
/* Point double: R <- P + P */
static void ecp_sm2p256_point_double(P256_POINT *R, const P256_POINT *P)
{
    unsigned int i;
    ALIGN32 BN_ULONG tmp0[P256_LIMBS];
    ALIGN32 BN_ULONG tmp1[P256_LIMBS];
    ALIGN32 BN_ULONG tmp2[P256_LIMBS];

    /* zero-check P->Z: doubling the point at infinity yields infinity */
    if (is_zeros(P->Z)) {
        for (i = 0; i < P256_LIMBS; ++i)
            R->Z[i] = 0;

        return;
    }

    /*
     * Jacobian doubling for a curve with a = -3 (SM2):
     *   M  = 3*(X - Z^2)*(X + Z^2)   (equals 3*X^2 + a*Z^4)
     *   Z' = 2*Y*Z
     *   S  = 4*X*Y^2
     *   X' = M^2 - 2*S
     *   Y' = M*(S - X') - 8*Y^4
     */
    ecp_sm2p256_sqr(tmp0, P->Z);          /* tmp0 = Z^2 */
    ecp_sm2p256_sub(tmp1, P->X, tmp0);    /* tmp1 = X - Z^2 */
    ecp_sm2p256_add(tmp0, P->X, tmp0);    /* tmp0 = X + Z^2 */
    ecp_sm2p256_mul(tmp1, tmp1, tmp0);    /* tmp1 = X^2 - Z^4 */
    ecp_sm2p256_mul_by_3(tmp1, tmp1);     /* tmp1 = M */
    ecp_sm2p256_add(R->Y, P->Y, P->Y);    /* R->Y = 2*Y */
    ecp_sm2p256_mul(R->Z, R->Y, P->Z);    /* Z' = 2*Y*Z */
    ecp_sm2p256_sqr(R->Y, R->Y);          /* R->Y = 4*Y^2 */
    ecp_sm2p256_mul(tmp2, R->Y, P->X);    /* tmp2 = S = 4*X*Y^2 */
    ecp_sm2p256_sqr(R->Y, R->Y);          /* R->Y = 16*Y^4 */
    ecp_sm2p256_div_by_2(R->Y, R->Y);     /* R->Y = 8*Y^4 */
    ecp_sm2p256_sqr(R->X, tmp1);          /* R->X = M^2 */
    ecp_sm2p256_add(tmp0, tmp2, tmp2);    /* tmp0 = 2*S */
    ecp_sm2p256_sub(R->X, R->X, tmp0);    /* X' = M^2 - 2*S */
    ecp_sm2p256_sub(tmp0, tmp2, R->X);    /* tmp0 = S - X' */
    ecp_sm2p256_mul(tmp0, tmp0, tmp1);    /* tmp0 = M*(S - X') */
    ecp_sm2p256_sub(tmp1, tmp0, R->Y);    /* Y' = M*(S - X') - 8*Y^4 */
    memcpy(R->Y, tmp1, 32);
}
211 
/* Point add affine: R <- P + Q, with P Jacobian and Q affine (Z = 1) */
static void ecp_sm2p256_point_add_affine(P256_POINT *R, const P256_POINT *P,
                                         const P256_POINT_AFFINE *Q)
{
    unsigned int i;
    ALIGN32 BN_ULONG tmp0[P256_LIMBS] = {0};
    ALIGN32 BN_ULONG tmp1[P256_LIMBS] = {0};
    ALIGN32 BN_ULONG tmp2[P256_LIMBS] = {0};
    ALIGN32 BN_ULONG tmp3[P256_LIMBS] = {0};

    /* zero-check P->Z: infinity + Q == Q, lifted to Jacobian with Z = 1 */
    if (is_zeros(P->Z)) {
        for (i = 0; i < P256_LIMBS; ++i) {
            R->X[i] = Q->X[i];
            R->Y[i] = Q->Y[i];
            R->Z[i] = 0;
        }
        R->Z[0] = 1;

        return;
    }

    /*
     * Mixed Jacobian-affine addition:
     *   U2 = x2*Z1^2, S2 = y2*Z1^3
     *   H  = U2 - X1, L  = S2 - Y1
     */
    ecp_sm2p256_sqr(tmp0, P->Z);        /* tmp0 = Z1^2 */
    ecp_sm2p256_mul(tmp1, tmp0, P->Z);  /* tmp1 = Z1^3 */
    ecp_sm2p256_mul(tmp0, tmp0, Q->X);  /* tmp0 = U2 */
    ecp_sm2p256_mul(tmp1, tmp1, Q->Y);  /* tmp1 = S2 */
    ecp_sm2p256_sub(tmp0, tmp0, P->X);  /* tmp0 = H */
    ecp_sm2p256_sub(tmp1, tmp1, P->Y);  /* tmp1 = L */

    /* zero-check tmp0, tmp1: H == 0 means the x-coordinates coincide */
    if (is_zeros(tmp0)) {
        if (is_zeros(tmp1)) {
            /* P == Q: fall back to doubling Q, lifted to Jacobian */
            P256_POINT K;

            for (i = 0; i < P256_LIMBS; ++i) {
                K.X[i] = Q->X[i];
                K.Y[i] = Q->Y[i];
                K.Z[i] = 0;
            }
            K.Z[0] = 1;
            ecp_sm2p256_point_double(R, &K);
        } else {
            /* P == -Q: the sum is the point at infinity */
            for (i = 0; i < P256_LIMBS; ++i)
                R->Z[i] = 0;
        }

        return;
    }

    /*
     *   Z3 = Z1*H
     *   X3 = L^2 - 2*X1*H^2 - H^3
     *   Y3 = L*(X1*H^2 - X3) - Y1*H^3
     */
    ecp_sm2p256_mul(R->Z, P->Z, tmp0);   /* Z3 = Z1*H */
    ecp_sm2p256_sqr(tmp2, tmp0);         /* tmp2 = H^2 */
    ecp_sm2p256_mul(tmp3, tmp2, tmp0);   /* tmp3 = H^3 */
    ecp_sm2p256_mul(tmp2, tmp2, P->X);   /* tmp2 = X1*H^2 */
    ecp_sm2p256_add(tmp0, tmp2, tmp2);   /* tmp0 = 2*X1*H^2 */
    ecp_sm2p256_sqr(R->X, tmp1);         /* R->X = L^2 */
    ecp_sm2p256_sub(R->X, R->X, tmp0);
    ecp_sm2p256_sub(R->X, R->X, tmp3);   /* X3 complete */
    ecp_sm2p256_sub(tmp2, tmp2, R->X);   /* tmp2 = X1*H^2 - X3 */
    ecp_sm2p256_mul(tmp2, tmp2, tmp1);   /* tmp2 = L*(X1*H^2 - X3) */
    ecp_sm2p256_mul(tmp3, tmp3, P->Y);   /* tmp3 = Y1*H^3 */
    ecp_sm2p256_sub(R->Y, tmp2, tmp3);   /* Y3 */
}
274 
/* Point add: R <- P + Q (both Jacobian) */
static void ecp_sm2p256_point_add(P256_POINT *R, const P256_POINT *P,
                                  const P256_POINT *Q)
{
    unsigned int i;
    ALIGN32 BN_ULONG tmp0[P256_LIMBS] = {0};
    ALIGN32 BN_ULONG tmp1[P256_LIMBS] = {0};
    ALIGN32 BN_ULONG tmp2[P256_LIMBS] = {0};

    /* zero-check P | Q ->Z: infinity + X == X */
    if (is_zeros(P->Z)) {
        for (i = 0; i < P256_LIMBS; ++i) {
            R->X[i] = Q->X[i];
            R->Y[i] = Q->Y[i];
            R->Z[i] = Q->Z[i];
        }

        return;
    } else if (is_zeros(Q->Z)) {
        for (i = 0; i < P256_LIMBS; ++i) {
            R->X[i] = P->X[i];
            R->Y[i] = P->Y[i];
            R->Z[i] = P->Z[i];
        }

        return;
    } else if (is_point_equal(P, Q)) {
        /* limb-identical representations: addition formula would give
         * H == 0, so use the doubling routine instead */
        ecp_sm2p256_point_double(R, Q);

        return;
    }

    /*
     * Full Jacobian addition:
     *   U1 = X1*Z2^2, S1 = Y1*Z2^3, U2 = X2*Z1^2, S2 = Y2*Z1^3
     *   H  = U2 - U1, L = S2 - S1
     *   Z3 = H*Z1*Z2
     *   X3 = L^2 - 2*U1*H^2 - H^3
     *   Y3 = L*(U1*H^2 - X3) - S1*H^3
     */
    ecp_sm2p256_sqr(tmp0, P->Z);         /* tmp0 = Z1^2 */
    ecp_sm2p256_mul(tmp1, tmp0, P->Z);   /* tmp1 = Z1^3 */
    ecp_sm2p256_mul(tmp0, tmp0, Q->X);   /* tmp0 = U2 */
    ecp_sm2p256_mul(tmp1, tmp1, Q->Y);   /* tmp1 = S2 */
    ecp_sm2p256_mul(R->Y, P->Y, Q->Z);   /* R->Y = Y1*Z2 */
    ecp_sm2p256_mul(R->Z, Q->Z, P->Z);   /* R->Z = Z1*Z2 */
    ecp_sm2p256_sqr(tmp2, Q->Z);         /* tmp2 = Z2^2 */
    ecp_sm2p256_mul(R->Y, tmp2, R->Y);   /* R->Y = S1 = Y1*Z2^3 */
    ecp_sm2p256_mul(R->X, tmp2, P->X);   /* R->X = U1 = X1*Z2^2 */
    ecp_sm2p256_sub(tmp0, tmp0, R->X);   /* tmp0 = H = U2 - U1 */
    ecp_sm2p256_mul(R->Z, tmp0, R->Z);   /* Z3 = H*Z1*Z2 */
    ecp_sm2p256_sub(tmp1, tmp1, R->Y);   /* tmp1 = L = S2 - S1 */
    ecp_sm2p256_sqr(tmp2, tmp0);         /* tmp2 = H^2 */
    ecp_sm2p256_mul(tmp0, tmp0, tmp2);   /* tmp0 = H^3 */
    ecp_sm2p256_mul(tmp2, tmp2, R->X);   /* tmp2 = U1*H^2 */
    ecp_sm2p256_sqr(R->X, tmp1);         /* R->X = L^2 */
    ecp_sm2p256_sub(R->X, R->X, tmp2);
    ecp_sm2p256_sub(R->X, R->X, tmp2);   /* - 2*U1*H^2 */
    ecp_sm2p256_sub(R->X, R->X, tmp0);   /* X3 complete */
    ecp_sm2p256_sub(tmp2, tmp2, R->X);   /* tmp2 = U1*H^2 - X3 */
    ecp_sm2p256_mul(tmp2, tmp1, tmp2);   /* tmp2 = L*(U1*H^2 - X3) */
    ecp_sm2p256_mul(tmp0, tmp0, R->Y);   /* tmp0 = S1*H^3 */
    ecp_sm2p256_sub(R->Y, tmp2, tmp0);   /* Y3 */
}
331 
332 #if !defined(OPENSSL_NO_SM2_PRECOMP)
/* Base point mul by scalar: k - scalar, G - base point */
static void ecp_sm2p256_point_G_mul_by_scalar(P256_POINT *R, const BN_ULONG *k)
{
    unsigned int i, index, mask = 0xff;
    P256_POINT_AFFINE Q;

    /* R starts as the point at infinity (all-zero Z) */
    memset(R, 0, sizeof(P256_POINT));

    if (is_zeros(k))
        return;

    /*
     * Fixed-window comb over the 32 bytes of |k|: table entry
     * (i*256 + v)*8 holds v*2^(8*i)*G as 8 limbs (X || Y), so the
     * result is the sum of one table point per nonzero scalar byte.
     * NOTE(review): the `if (index)` branches depend on scalar bytes,
     * so this path is not constant-time.
     */
    index = k[0] & mask;
    if (index) {
        index = index * 8;    /* 8 limbs per table entry */
        memcpy(R->X, ecp_sm2p256_precomputed + index, 32);
        memcpy(R->Y, ecp_sm2p256_precomputed + index + P256_LIMBS, 32);
        R->Z[0] = 1;          /* lift affine entry to Jacobian */
    }

    for (i = 1; i < 32; ++i) {
        /* i-th byte of the scalar (8 bytes per limb) */
        index = (k[i / 8] >> (8 * (i % 8))) & mask;

        if (index) {
            index = index + i * 256;  /* select window i's sub-table */
            index = index * 8;
            memcpy(Q.X, ecp_sm2p256_precomputed + index, 32);
            memcpy(Q.Y, ecp_sm2p256_precomputed + index + P256_LIMBS, 32);
            ecp_sm2p256_point_add_affine(R, R, &Q);
        }
    }
}
364 #endif
365 
/*
 * Affine point mul by scalar: k - scalar, P - affine point.
 * Fixed 4-bit windows over the 64 nibbles of |k|, scanned from the most
 * significant nibble down, using a per-call table of 1P..15P.
 */
static void ecp_sm2p256_point_P_mul_by_scalar(P256_POINT *R, const BN_ULONG *k,
                                              P256_POINT_AFFINE P)
{
    int i, init = 0;
    unsigned int index, mask = 0x0f;
    ALIGN64 P256_POINT precomputed[16];  /* [j] = j*P; [0] never read */

    memset(R, 0, sizeof(P256_POINT));

    if (is_zeros(k))
        return;

    /* The first value of the precomputed table is P. */
    memcpy(precomputed[1].X, P.X, 32);
    memcpy(precomputed[1].Y, P.Y, 32);
    precomputed[1].Z[0] = 1;
    precomputed[1].Z[1] = 0;
    precomputed[1].Z[2] = 0;
    precomputed[1].Z[3] = 0;

    /* The second value of the precomputed table is 2P. */
    ecp_sm2p256_point_double(&precomputed[2], &precomputed[1]);

    /* The subsequent elements are 3P, 4P, and so on. */
    for (i = 3; i < 16; ++i)
        ecp_sm2p256_point_add_affine(&precomputed[i], &precomputed[i - 1], &P);

    for (i = 64 - 1; i >= 0; --i) {
        /* i-th nibble of the scalar (16 nibbles per limb) */
        index = (k[i / 16] >> (4 * (i % 16))) & mask;

        if (init == 0) {
            /* Skip leading zero windows; seed R from the first nonzero one */
            if (index) {
                memcpy(R, &precomputed[index], sizeof(P256_POINT));
                init = 1;
            }
        } else {
            /*
             * R <- 16*R, then add the window's multiple.
             * NOTE(review): branching on the scalar nibble means this
             * routine is not constant-time.
             */
            ecp_sm2p256_point_double(R, R);
            ecp_sm2p256_point_double(R, R);
            ecp_sm2p256_point_double(R, R);
            ecp_sm2p256_point_double(R, R);
            if (index)
                ecp_sm2p256_point_add(R, R, &precomputed[index]);
        }
    }
}
414 
/* Get affine point: (x, y) = (X/Z^2, Y/Z^3) */
static void ecp_sm2p256_point_get_affine(P256_POINT_AFFINE *R,
                                         const P256_POINT *P)
{
    ALIGN32 BN_ULONG z_inv3[P256_LIMBS] = {0};
    ALIGN32 BN_ULONG z_inv2[P256_LIMBS] = {0};

    /* Already affine (Z == 1): just copy the coordinates */
    if (is_one(P->Z)) {
        memcpy(R->X, P->X, 32);
        memcpy(R->Y, P->Y, 32);
        return;
    }

    ecp_sm2p256_mod_inverse(z_inv3, P->Z);    /* z_inv3 = Z^-1 */
    ecp_sm2p256_sqr(z_inv2, z_inv3);          /* z_inv2 = Z^-2 */
    ecp_sm2p256_mul(R->X, P->X, z_inv2);      /* x = X*Z^-2 */
    ecp_sm2p256_mul(z_inv3, z_inv3, z_inv2);  /* z_inv3 = Z^-3 */
    ecp_sm2p256_mul(R->Y, P->Y, z_inv3);      /* y = Y*Z^-3 */
}
434 
435 #if !defined(OPENSSL_NO_SM2_PRECOMP)
ecp_sm2p256_is_affine_G(const EC_POINT * generator)436 static int ecp_sm2p256_is_affine_G(const EC_POINT *generator)
437 {
438     return (bn_get_top(generator->X) == P256_LIMBS)
439             && (bn_get_top(generator->Y) == P256_LIMBS)
440             && is_equal(bn_get_words(generator->X), def_xG)
441             && is_equal(bn_get_words(generator->Y), def_yG)
442             && (generator->Z_is_one == 1);
443 }
444 #endif
445 
/* r = sum(scalar[i]*point[i]) */
static int ecp_sm2p256_windowed_mul(const EC_GROUP *group,
                                    P256_POINT *r,
                                    const BIGNUM **scalar,
                                    const EC_POINT **point,
                                    size_t num, BN_CTX *ctx)
{
    unsigned int i;
    int ret = 0;
    const BIGNUM **scalars = NULL;
    ALIGN32 BN_ULONG k[P256_LIMBS] = {0};
    P256_POINT kP;
    /* union: affine and Jacobian views share storage */
    ALIGN32 union {
        P256_POINT p;
        P256_POINT_AFFINE a;
    } t, p;

    if (num > OPENSSL_MALLOC_MAX_NELEMS(P256_POINT)
        || (scalars = OPENSSL_malloc(num * sizeof(BIGNUM *))) == NULL) {
        ECerr(ERR_LIB_EC, ERR_R_MALLOC_FAILURE);
        goto err;
    }

    /* r accumulates the sum; all-zero Z encodes the point at infinity */
    memset(r, 0, sizeof(P256_POINT));

    for (i = 0; i < num; i++) {
        /* Adding infinity is a no-op */
        if (EC_POINT_is_at_infinity(group, point[i]))
            continue;

        /*
         * Reduce oversized or negative scalars into [0, order) first.
         * NOTE(review): BN_CTX_get assumes the caller has already done
         * BN_CTX_start(ctx) - true for ecp_sm2p256_points_mul.
         */
        if ((BN_num_bits(scalar[i]) > 256) || BN_is_negative(scalar[i])) {
            BIGNUM *tmp;

            if ((tmp = BN_CTX_get(ctx)) == NULL)
                goto err;
            if (!BN_nnmod(tmp, scalar[i], group->order, ctx)) {
                ECerr(ERR_LIB_EC, ERR_R_BN_LIB);
                goto err;
            }
            scalars[i] = tmp;
        } else {
            scalars[i] = scalar[i];
        }

        /* Pull the scalar and Jacobian coordinates out as raw limbs */
        if (ecp_sm2p256_bignum_field_elem(k, scalars[i]) <= 0
            || ecp_sm2p256_bignum_field_elem(p.p.X, point[i]->X) <= 0
            || ecp_sm2p256_bignum_field_elem(p.p.Y, point[i]->Y) <= 0
            || ecp_sm2p256_bignum_field_elem(p.p.Z, point[i]->Z) <= 0) {
            ECerr(ERR_LIB_EC, EC_R_COORDINATES_OUT_OF_RANGE);
            goto err;
        }

        /* kP = k * point[i]; r += kP */
        ecp_sm2p256_point_get_affine(&t.a, &p.p);
        ecp_sm2p256_point_P_mul_by_scalar(&kP, k, t.a);
        ecp_sm2p256_point_add(r, r, &kP);
    }

    ret = 1;
err:
    OPENSSL_free(scalars);
    return ret;
}
507 
508 /* r = scalar*G + sum(scalars[i]*points[i]) */
ecp_sm2p256_points_mul(const EC_GROUP * group,EC_POINT * r,const BIGNUM * scalar,size_t num,const EC_POINT * points[],const BIGNUM * scalars[],BN_CTX * ctx)509 static int ecp_sm2p256_points_mul(const EC_GROUP *group,
510                                   EC_POINT *r,
511                                   const BIGNUM *scalar,
512                                   size_t num,
513                                   const EC_POINT *points[],
514                                   const BIGNUM *scalars[], BN_CTX *ctx)
515 {
516     int ret = 0, p_is_infinity = 0;
517     const EC_POINT *generator = NULL;
518     ALIGN32 BN_ULONG k[P256_LIMBS] = {0};
519     ALIGN32 union {
520         P256_POINT p;
521         P256_POINT_AFFINE a;
522     } t, p;
523 
524     if ((num + 1) == 0 || (num + 1) > OPENSSL_MALLOC_MAX_NELEMS(void *)) {
525         ECerr(ERR_LIB_EC, ERR_R_MALLOC_FAILURE);
526         goto err;
527     }
528 
529     BN_CTX_start(ctx);
530 
531     if (scalar) {
532         generator = EC_GROUP_get0_generator(group);
533         if (generator == NULL) {
534             ECerr(ERR_LIB_EC, EC_R_UNDEFINED_GENERATOR);
535             goto err;
536         }
537 
538         if (!ecp_sm2p256_bignum_field_elem(k, scalar)) {
539             ECerr(ERR_LIB_EC, EC_R_COORDINATES_OUT_OF_RANGE);
540             goto err;
541         }
542 #if !defined(OPENSSL_NO_SM2_PRECOMP)
543         if (ecp_sm2p256_is_affine_G(generator)) {
544             ecp_sm2p256_point_G_mul_by_scalar(&p.p, k);
545         } else
546 #endif
547         {
548             /* if no precomputed table */
549             const EC_POINT *new_generator[1];
550             const BIGNUM *g_scalars[1];
551 
552             new_generator[0] = generator;
553             g_scalars[0] = scalar;
554 
555             if (!ecp_sm2p256_windowed_mul(group, &p.p, g_scalars, new_generator,
556                                           (new_generator[0] != NULL
557                                            && g_scalars[0] != NULL), ctx))
558                 goto err;
559         }
560     } else {
561         p_is_infinity = 1;
562     }
563     if (num) {
564         P256_POINT *out = &t.p;
565 
566         if (p_is_infinity)
567             out = &p.p;
568 
569         if (!ecp_sm2p256_windowed_mul(group, out, scalars, points, num, ctx))
570             goto err;
571 
572         if (!p_is_infinity)
573             ecp_sm2p256_point_add(&p.p, &p.p, out);
574     }
575 
576     /* Not constant-time, but we're only operating on the public output. */
577     if (!bn_set_words(r->X, p.p.X, P256_LIMBS)
578         || !bn_set_words(r->Y, p.p.Y, P256_LIMBS)
579         || !bn_set_words(r->Z, p.p.Z, P256_LIMBS))
580         goto err;
581     r->Z_is_one = is_equal(bn_get_words(r->Z), ONE) & 1;
582 
583     ret = 1;
584 err:
585     BN_CTX_end(ctx);
586     return ret;
587 }
588 
/*
 * Field multiplication r = a * b mod p using the SM2-specific assembly
 * routine. Inputs must fit in P256_LIMBS words. Returns 1 on success,
 * 0 on error (NULL argument, out-of-range input, or BIGNUM failure).
 */
static int ecp_sm2p256_field_mul(const EC_GROUP *group, BIGNUM *r,
                                 const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx)
{
    ALIGN32 BN_ULONG lhs[P256_LIMBS] = {0};
    ALIGN32 BN_ULONG rhs[P256_LIMBS] = {0};
    ALIGN32 BN_ULONG prod[P256_LIMBS] = {0};

    if (r == NULL || a == NULL || b == NULL)
        return 0;

    if (!ecp_sm2p256_bignum_field_elem(lhs, a)
            || !ecp_sm2p256_bignum_field_elem(rhs, b)) {
        ECerr(ERR_LIB_EC, EC_R_COORDINATES_OUT_OF_RANGE);
        return 0;
    }

    ecp_sm2p256_mul(prod, lhs, rhs);

    return bn_set_words(r, prod, P256_LIMBS) ? 1 : 0;
}
612 
/*
 * Field squaring r = a^2 mod p using the SM2-specific assembly routine.
 * Input must fit in P256_LIMBS words. Returns 1 on success, 0 on error
 * (NULL argument, out-of-range input, or BIGNUM failure).
 */
static int ecp_sm2p256_field_sqr(const EC_GROUP *group, BIGNUM *r,
                                 const BIGNUM *a, BN_CTX *ctx)
{
    ALIGN32 BN_ULONG in[P256_LIMBS] = {0};
    ALIGN32 BN_ULONG sq[P256_LIMBS] = {0};

    if (r == NULL || a == NULL)
        return 0;

    if (!ecp_sm2p256_bignum_field_elem(in, a)) {
        ECerr(ERR_LIB_EC, EC_R_COORDINATES_OUT_OF_RANGE);
        return 0;
    }

    ecp_sm2p256_sqr(sq, in);

    return bn_set_words(r, sq, P256_LIMBS) ? 1 : 0;
}
634 
/*
 * Returns the statically-allocated EC_METHOD for the SM2 curve: the
 * generic GFp-simple implementation with the scalar multiplication,
 * field multiplication and field squaring slots overridden by the
 * SM2-specific routines above. Slot order is fixed by the EC_METHOD
 * struct ABI and must not be changed.
 */
const EC_METHOD *EC_GFp_sm2p256_method(void)
{
    static const EC_METHOD ret = {
        EC_FLAGS_DEFAULT_OCT,
        NID_X9_62_prime_field,
        ossl_ec_GFp_simple_group_init,
        ossl_ec_GFp_simple_group_finish,
        ossl_ec_GFp_simple_group_clear_finish,
        ossl_ec_GFp_simple_group_copy,
        ossl_ec_GFp_simple_group_set_curve,
        ossl_ec_GFp_simple_group_get_curve,
        ossl_ec_GFp_simple_group_get_degree,
        ossl_ec_group_simple_order_bits,
        ossl_ec_GFp_simple_group_check_discriminant,
        ossl_ec_GFp_simple_point_init,
        ossl_ec_GFp_simple_point_finish,
        ossl_ec_GFp_simple_point_clear_finish,
        ossl_ec_GFp_simple_point_copy,
        ossl_ec_GFp_simple_point_set_to_infinity,
        ossl_ec_GFp_simple_point_set_affine_coordinates,
        ossl_ec_GFp_simple_point_get_affine_coordinates,
        0, 0, 0,
        ossl_ec_GFp_simple_add,
        ossl_ec_GFp_simple_dbl,
        ossl_ec_GFp_simple_invert,
        ossl_ec_GFp_simple_is_at_infinity,
        ossl_ec_GFp_simple_is_on_curve,
        ossl_ec_GFp_simple_cmp,
        ossl_ec_GFp_simple_make_affine,
        ossl_ec_GFp_simple_points_make_affine,
        ecp_sm2p256_points_mul, /* mul */
        0 /* precompute_mult */,
        0 /* have_precompute_mult */,
        ecp_sm2p256_field_mul,
        ecp_sm2p256_field_sqr,
        0 /* field_div */,
        ossl_ec_GFp_simple_field_inv,
        0 /* field_encode */,
        0 /* field_decode */,
        0 /* field_set_to_one */,
        ossl_ec_key_simple_priv2oct,
        ossl_ec_key_simple_oct2priv,
        0, /* set private */
        ossl_ec_key_simple_generate_key,
        ossl_ec_key_simple_check_key,
        ossl_ec_key_simple_generate_public_key,
        0, /* keycopy */
        0, /* keyfinish */
        ossl_ecdh_simple_compute_key,
        ossl_ecdsa_simple_sign_setup,
        ossl_ecdsa_simple_sign_sig,
        ossl_ecdsa_simple_verify_sig,
        0, /* use constant-time fallback for inverse mod order */
        0, /* blind_coordinates */
        0, /* ladder_pre */
        0, /* ladder_step */
        0  /* ladder_post */
    };

    return &ret;
}
696