1 /*
2 * Copyright (C) 2022 - This file is part of libecc project
3 *
4 * Authors:
5 * Ryad BENADJILA <ryadbenadjila@gmail.com>
6 * Arnaud EBALARD <arnaud.ebalard@ssi.gouv.fr>
7 *
8 * This software is licensed under a dual BSD and GPL v2 license.
9 * See LICENSE file at the root folder of the project.
10 */
11 #include <libecc/lib_ecc_config.h>
12 #if defined(WITH_SIG_BIGN) || defined(WITH_SIG_DBIGN)
13
14 #include <libecc/nn/nn_rand.h>
15 #include <libecc/nn/nn_mul_public.h>
16 #include <libecc/nn/nn_logical.h>
17
18 #include <libecc/sig/sig_algs_internal.h>
19 #include <libecc/sig/ec_key.h>
20 #include <libecc/utils/utils.h>
21 #ifdef VERBOSE_INNER_VALUES
22 #define EC_SIG_ALG "BIGN"
23 #endif
24 #include <libecc/utils/dbg_sig.h>
25
26 /*
27 * This is an implementation of the BIGN signature algorithm as
28 * described in the STB 34.101.45 standard
29 * (http://apmi.bsu.by/assets/files/std/bign-spec29.pdf).
30 *
 * The BIGN signature is a variation on the Schnorr signature scheme.
32 *
33 * An english high-level (less formal) description and rationale can be found
34 * in the IETF archive:
35 * https://mailarchive.ietf.org/arch/msg/cfrg/pI92HSRjMBg50NVEz32L5RciVBk/
36 *
37 * BIGN comes in two flavors: deterministic and non-deterministic. The current
38 * file implements the two.
39 *
40 * In this implementation, we are *on purpose* more lax than the STB standard regarding
41 * the so called "internal"/"external" hash function sizes and the order size:
42 * - We accept order sizes that might be different than twice the internal hash
43 * function (HASH-BELT truncated) and the size of the external hash function.
44 * - We accept security levels that might be different from {128, 192, 256}.
45 *
46 * If we strictly conform to STB 34.101.45, only orders of size exactly twice the
47 * internal hash function length are accepted, and only external hash functions of size
48 * of the order are accepted. Also only security levels of 128, 192 or 256 bits
49 * are accepted.
50 *
51 * Being more lax on these parameters allows to be compatible with more hash
52 * functions and curves.
53 *
54 * Finally, although the IETF archive in english leaves the "internal" hash functions
55 * as configurable (wrt size constraints), the STB 34.101.45 standard fixes the BELT hash
56 * function (standardized in STB 34.101.31) as the one to be used. The current file follows
57 * this mandatory requirement and uses BELT as the only possible internal hash function
58 * while the external one is configurable.
59 *
60 */
61
62 /* NOTE: BIGN uses per its standard the BELT-HASH hash function as its "internal"
63 * hash function, as well as the BELT encryption block cipher during the deterministic
64 * computation of the nonce for the deterministic version of BIGN.
65 * Hence the sanity check below.
66 */
67 #if !defined(WITH_HASH_BELT_HASH)
68 #error "BIGN and DBIGN need BELT-HASH, please activate it!"
69 #endif
70
71
/* Reverses the endianness of a buffer in place */
_reverse_endianness(u8 * buf,u16 buf_size)73 ATTRIBUTE_WARN_UNUSED_RET static inline int _reverse_endianness(u8 *buf, u16 buf_size)
74 {
75 u16 i;
76 u8 tmp;
77 int ret;
78
79 MUST_HAVE((buf != NULL), ret, err);
80
81 if(buf_size > 1){
82 for(i = 0; i < (buf_size / 2); i++){
83 tmp = buf[i];
84 buf[i] = buf[buf_size - 1 - i];
85 buf[buf_size - 1 - i] = tmp;
86 }
87 }
88
89 ret = 0;
90 err:
91 return ret;
92 }
93
94 /* The additional data for bign are specific. We provide
95 * helpers to extract them from an adata pointer.
96 */
int bign_get_oid_from_adata(const u8 *adata, u16 adata_len, const u8 **oid_ptr, u16 *oid_len)
{
	u16 hdr_oid_len, hdr_t_len;
	int ret;

	MUST_HAVE((adata != NULL) && (oid_ptr != NULL) && (oid_len != NULL), ret, err);
	MUST_HAVE((adata_len >= 4), ret, err);

	/* The 4 bytes header encodes the big endian lengths of oid and t */
	hdr_oid_len = (u16)(((u16)adata[0] << 8) | adata[1]);
	hdr_t_len = (u16)(((u16)adata[2] << 8) | adata[3]);
	/* Check overflow, then check that both elements fit after the header */
	MUST_HAVE((hdr_oid_len + hdr_t_len) >= (hdr_t_len), ret, err);
	MUST_HAVE((hdr_oid_len + hdr_t_len) <= (adata_len - 4), ret, err);

	/* The oid immediately follows the header */
	(*oid_len) = hdr_oid_len;
	(*oid_ptr) = &adata[4];

	ret = 0;
err:
	/* On any failure, make sure the outputs are neutralized */
	if(ret && (oid_ptr != NULL)){
		(*oid_ptr) = NULL;
	}
	if(ret && (oid_len != NULL)){
		(*oid_len) = 0;
	}
	return ret;
}
122
int bign_get_t_from_adata(const u8 *adata, u16 adata_len, const u8 **t_ptr, u16 *t_len)
{
	u16 hdr_oid_len, hdr_t_len;
	int ret;

	MUST_HAVE((adata != NULL) && (t_ptr != NULL) && (t_len != NULL), ret, err);
	MUST_HAVE((adata_len >= 4), ret, err);

	/* The 4 bytes header encodes the big endian lengths of oid and t */
	hdr_oid_len = (u16)(((u16)adata[0] << 8) | adata[1]);
	hdr_t_len = (u16)(((u16)adata[2] << 8) | adata[3]);
	/* Check overflow, then check that both elements fit after the header */
	MUST_HAVE((hdr_oid_len + hdr_t_len) >= (hdr_oid_len), ret, err);
	MUST_HAVE((hdr_oid_len + hdr_t_len) <= (adata_len - 4), ret, err);

	/* t is located right after the oid */
	(*t_len) = hdr_t_len;
	(*t_ptr) = &adata[4 + hdr_oid_len];

	ret = 0;
err:
	/* On any failure, make sure the outputs are neutralized */
	if(ret && (t_ptr != NULL)){
		(*t_ptr) = NULL;
	}
	if(ret && (t_len != NULL)){
		(*t_len) = 0;
	}
	return ret;
}
148
int bign_set_adata(u8 *adata, u16 adata_len, const u8 *oid, u16 oid_len, const u8 *t, u16 t_len)
{
	int ret;

	MUST_HAVE((adata != NULL), ret, err);
	/* A NULL element must come with a zero advertised length */
	MUST_HAVE((oid != NULL) || (oid_len == 0), ret, err);
	MUST_HAVE((t != NULL) || (t_len == 0), ret, err);
	/* Room for the 4 bytes header plus both elements, with overflow check */
	MUST_HAVE((adata_len >= 4), ret, err);
	MUST_HAVE(((oid_len + t_len) >= oid_len), ret, err);
	MUST_HAVE(((adata_len - 4) >= (oid_len + t_len)), ret, err);

	/* Encode the oid length (big endian) and copy the oid if any */
	if(oid == NULL){
		adata[0] = adata[1] = 0;
	}
	else{
		adata[0] = (u8)(oid_len >> 8);
		adata[1] = (u8)(oid_len & 0xff);
		ret = local_memcpy(&adata[4], oid, oid_len); EG(ret, err);
	}
	/* Encode the t length (big endian) and copy t right after the oid */
	if(t == NULL){
		adata[2] = adata[3] = 0;
	}
	else{
		adata[2] = (u8)(t_len >> 8);
		adata[3] = (u8)(t_len & 0xff);
		ret = local_memcpy(&adata[4 + oid_len], t, t_len); EG(ret, err);
	}

	ret = 0;
err:
	return ret;
}
184
185 #if defined(WITH_SIG_DBIGN)
186 /*
187 * Deterministic nonce generation function for deterministic BIGN, as
188 * described in STB 34.101.45 6.3.3.
189 *
190 * NOTE: Deterministic nonce generation for BIGN is useful against attackers
191 * in contexts where only poor RNG/entropy are available, or when nonce bits
192 * leaking can be possible through side-channel attacks.
193 * However, in contexts where fault attacks are easy to mount, deterministic
194 * BIGN can bring more security risks than regular BIGN.
195 *
196 * Depending on the context where you use the library, choose carefully if
197 * you want to use the deterministic version or not.
198 *
199 */
__bign_determinitic_nonce(nn_t k,nn_src_t q,bitcnt_t q_bit_len,nn_src_t x,const u8 * adata,u16 adata_len,const u8 * h,u8 hlen)200 ATTRIBUTE_WARN_UNUSED_RET static int __bign_determinitic_nonce(nn_t k, nn_src_t q, bitcnt_t q_bit_len,
201 nn_src_t x, const u8 *adata, u16 adata_len,
202 const u8 *h, u8 hlen)
203 {
204 int ret, cmp, iszero;
205 u8 theta[BELT_HASH_DIGEST_SIZE];
206 u8 FE2OS_D[LOCAL_MAX(BYTECEIL(CURVES_MAX_Q_BIT_LEN), 2 * BELT_HASH_DIGEST_SIZE)];
207 u8 r[((MAX_DIGEST_SIZE / BELT_BLOCK_LEN) * BELT_BLOCK_LEN) + (2 * BELT_BLOCK_LEN)];
208 u8 r_bar[((MAX_DIGEST_SIZE / BELT_BLOCK_LEN) * BELT_BLOCK_LEN) + (2 * BELT_BLOCK_LEN)];
209 u8 q_len, l;
210 unsigned int j, z, n;
211 u32 i;
212 u16 r_bar_len;
213
214 belt_hash_context belt_hash_ctx;
215 const u8 *oid_ptr = NULL;
216 const u8 *t_ptr = NULL;
217 u16 oid_len = 0, t_len = 0;
218
219 MUST_HAVE((adata != NULL) && (h != NULL), ret, err);
220 ret = nn_check_initialized(q); EG(ret, err);
221 ret = nn_check_initialized(x); EG(ret, err);
222
223 ret = local_memset(theta, 0, sizeof(theta)); EG(ret, err);
224 ret = local_memset(FE2OS_D, 0, sizeof(FE2OS_D)); EG(ret, err);
225 ret = local_memset(r_bar, 0, sizeof(r_bar)); EG(ret, err);
226
227 q_len = (u8)BYTECEIL(q_bit_len);
228
229 /* Compute l depending on the order */
230 l = (u8)BIGN_S0_LEN(q_bit_len);
231
232 /* Extract oid and t from the additional data */
233 ret = bign_get_oid_from_adata(adata, adata_len, &oid_ptr, &oid_len); EG(ret, err);
234 ret = bign_get_t_from_adata(adata, adata_len, &t_ptr, &t_len); EG(ret, err);
235
236 ret = belt_hash_init(&belt_hash_ctx); EG(ret, err);
237 ret = belt_hash_update(&belt_hash_ctx, oid_ptr, oid_len); EG(ret, err);
238
239 /* Put the private key in a string <d>2*l */
240 ret = local_memset(FE2OS_D, 0, sizeof(FE2OS_D)); EG(ret, err);
241 ret = nn_export_to_buf(&FE2OS_D[0], q_len, x); EG(ret, err);
242 ret = _reverse_endianness(&FE2OS_D[0], q_len); EG(ret, err);
243 /* Only hash the 2*l bytes of d */
244 ret = belt_hash_update(&belt_hash_ctx, &FE2OS_D[0], (u32)(2*l)); EG(ret, err);
245
246 ret = belt_hash_update(&belt_hash_ctx, t_ptr, t_len); EG(ret, err);
247
248 ret = belt_hash_final(&belt_hash_ctx, theta); EG(ret, err);
249
250 dbg_buf_print("theta", theta, BELT_HASH_DIGEST_SIZE);
251
252 /* n is the number of 128 bits blocks in H */
253 n = (hlen / BELT_BLOCK_LEN);
254
255 MUST_HAVE((hlen <= sizeof(r)), ret, err);
256 ret = local_memset(r, 0, sizeof(r));
257 ret = local_memcpy(r, h, hlen); EG(ret, err);
258 /* If we have less than two blocks for the input hash size, we use zero
259 * padding to achieve at least two blocks.
260 * NOTE: this is not in the standard but allows to be compatible with small
261 * size hash functions.
262 */
263 if(n <= 1){
264 n = 2;
265 }
266
267 /* Now iterate until the nonce is computed in [1, q-1]
268 * NOTE: we are ensured here that n >= 2, which allows us to
269 * index (n-1) and (n-2) blocks in r.
270 */
271 i = (u32)1;
272
273 while(1){
274 u8 s[BELT_BLOCK_LEN];
275 u8 i_block[BELT_BLOCK_LEN];
276 ret = local_memset(s, 0, sizeof(s)); EG(ret, err);
277
278 /* Put the xor of all n-1 elements in s */
279 for(j = 0; j < (n - 1); j++){
280 for(z = 0; z < BELT_BLOCK_LEN; z++){
281 s[z] ^= r[(BELT_BLOCK_LEN * j) + z];
282 }
283 }
284 /* Move elements left for the first n-2 elements */
285 ret = local_memcpy(&r[0], &r[BELT_BLOCK_LEN], (n - 2) * BELT_BLOCK_LEN); EG(ret, err);
286
287 /* r_n-1 = belt-block(s, theta) ^ r_n ^ <i>128 */
288 ret = local_memset(i_block, 0, sizeof(i_block)); EG(ret, err);
289 PUT_UINT32_LE(i, i_block, 0);
290 belt_encrypt(s, &r[(n - 2) * BELT_BLOCK_LEN], theta);
291 for(z = 0; z < BELT_BLOCK_LEN; z++){
292 r[((n - 2) * BELT_BLOCK_LEN) + z] ^= (r[((n - 1) * BELT_BLOCK_LEN) + z] ^ i_block[z]);
293 }
294
295 /* r_n = s */
296 ret = local_memcpy(&r[(n - 1) * BELT_BLOCK_LEN], s, BELT_BLOCK_LEN); EG(ret, err);
297
298 /* Import r_bar as a big number in little endian
299 * (truncate our import to the bitlength size of q)
300 */
301 if(q_len < (n * BELT_BLOCK_LEN)){
302 r_bar_len = q_len;
303 ret = local_memcpy(&r_bar[0], &r[0], r_bar_len); EG(ret, err);
304 /* Handle the useless bits between q_bit_len and (8 * q_len) */
305 if((q_bit_len % 8) != 0){
306 r_bar[r_bar_len - 1] &= (u8)((0x1 << (q_bit_len % 8)) - 1);
307 }
308 }
309 else{
310 /* In this case, q_len is bigger than the size of r, we need to adapt:
311 * we truncate to the size of r.
312 * NOTE: we of course lose security, but this is the explicit choice
313 * of the user using a "small" hash function with a "big" order.
314 */
315 MUST_HAVE((n * BELT_BLOCK_LEN) <= 0xffff, ret, err);
316 r_bar_len = (u16)(n * BELT_BLOCK_LEN);
317 ret = local_memcpy(&r_bar[0], &r[0], r_bar_len); EG(ret, err);
318 }
319 ret = _reverse_endianness(&r_bar[0], r_bar_len); EG(ret, err);
320 ret = nn_init_from_buf(k, &r_bar[0], r_bar_len); EG(ret, err);
321
322 /* Compare it to q */
323 ret = nn_cmp(k, q, &cmp); EG(ret, err);
324 /* Compare it to 0 */
325 ret = nn_iszero(k, &iszero); EG(ret, err);
326
327 if((i >= (2 * n)) && (cmp < 0) && (!iszero)){
328 break;
329 }
330 i += (u32)1;
331 /* If we have wrapped (meaning i > 2^32), we exit with failure */
332 MUST_HAVE((i != 0), ret, err);
333 }
334
335 ret = 0;
336 err:
337 /* Destroy local variables potentially containing sensitive data */
338 IGNORE_RET_VAL(local_memset(theta, 0, sizeof(theta)));
339 IGNORE_RET_VAL(local_memset(FE2OS_D, 0, sizeof(FE2OS_D)));
340
341 return ret;
342 }
343 #endif
344
int __bign_init_pub_key(ec_pub_key *out_pub, const ec_priv_key *in_priv,
			ec_alg_type key_type)
{
	int ret, cmp;
	nn_src_t q;
	prj_pt_src_t G;

	MUST_HAVE((out_pub != NULL), ret, err);

	/* Start from a clean public key structure */
	ret = local_memset(out_pub, 0, sizeof(ec_pub_key)); EG(ret, err);

	ret = priv_key_check_initialized_and_type(in_priv, key_type); EG(ret, err);
	q = &(in_priv->params->ec_gen_order);

	/* The private scalar must be strictly below the generator order */
	MUST_HAVE((!nn_cmp(&(in_priv->x), q, &cmp)) && (cmp < 0), ret, err);

	/* Compute Y = xG, with blinding for the scalar multiplication */
	G = &(in_priv->params->ec_gen);
	ret = prj_pt_mul_blind(&(out_pub->y), &(in_priv->x), G); EG(ret, err);

	/* Finalize the public key structure */
	out_pub->key_type = key_type;
	out_pub->params = in_priv->params;
	out_pub->magic = PUB_KEY_MAGIC;

err:
	return ret;
}
375
int __bign_siglen(u16 p_bit_len, u16 q_bit_len, u8 hsize, u8 blocksize, u8 *siglen)
{
	int ret;

	/* Sanity check all the inputs against the library maximums before
	 * deriving the signature length (which only depends on the order).
	 */
	MUST_HAVE(siglen != NULL, ret, err);
	MUST_HAVE((p_bit_len <= CURVES_MAX_P_BIT_LEN) &&
		  (q_bit_len <= CURVES_MAX_Q_BIT_LEN) &&
		  (hsize <= MAX_DIGEST_SIZE) && (blocksize <= MAX_BLOCK_SIZE), ret, err);

	(*siglen) = (u8)BIGN_SIGLEN(q_bit_len);

	ret = 0;
err:
	return ret;
}
390
391 /*
392 * Generic *internal* BIGN signature functions (init, update and finalize).
393 * Their purpose is to allow passing a specific hash function (along with
394 * its output size) and the random ephemeral key k, so that compliance
395 * tests against test vectors can be made without ugly hack in the code
396 * itself.
397 *
398 * Implementation notes:
399 *
400 * a) The BIGN algorithm makes use of the OID of the external hash function.
401 * We let the upper layer provide us with this in the "adata" field of the
402 * context.
403 *
404 */
405
406 #define BIGN_SIGN_MAGIC ((word_t)(0x63439a2b38921340ULL))
407 #define BIGN_SIGN_CHECK_INITIALIZED(A, ret, err) \
408 MUST_HAVE((((void *)(A)) != NULL) && ((A)->magic == BIGN_SIGN_MAGIC), ret, err)
409
int __bign_sign_init(struct ec_sign_context *ctx, ec_alg_type key_type)
{
	int ret;

	/* The generic signature context must already be initialized */
	ret = sig_sign_check_initialized(ctx); EG(ret, err);

	/* Sanity check the key pair and the hash mapping from the context */
	ret = key_pair_check_initialized_and_type(ctx->key_pair, key_type); EG(ret, err);
	MUST_HAVE((ctx->h != NULL) && (ctx->h->digest_size <= MAX_DIGEST_SIZE) &&
		  (ctx->h->block_size <= MAX_BLOCK_SIZE), ret, err);

	/* The additional data must be present: it carries the mandatory
	 * external hash OID used by the BIGN algorithm.
	 */
	MUST_HAVE((ctx->adata != NULL) && (ctx->adata_len != 0), ret, err);

	/* Initialize the hash context stored in the BIGN specific part of
	 * the context. Since we call a callback, sanity check our mapping.
	 */
	ret = hash_mapping_callbacks_sanity_check(ctx->h); EG(ret, err);
	ret = ctx->h->hfunc_init(&(ctx->sign_data.bign.h_ctx)); EG(ret, err);

	/* Record that init has been performed */
	ctx->sign_data.bign.magic = BIGN_SIGN_MAGIC;

err:
	return ret;
}
441
int __bign_sign_update(struct ec_sign_context *ctx,
		       const u8 *chunk, u32 chunklen, ec_alg_type key_type)
{
	int ret;

	/* Verify that both the generic context and its BIGN specific part
	 * have been initialized: this guarantees init() was called before
	 * any update() or finalize().
	 */
	ret = sig_sign_check_initialized(ctx); EG(ret, err);
	BIGN_SIGN_CHECK_INITIALIZED(&(ctx->sign_data.bign), ret, err);

	/* Sanity check the key pair from the context */
	ret = key_pair_check_initialized_and_type(ctx->key_pair, key_type); EG(ret, err);

	/* 1. Feed the current chunk to H(m).
	 * Since we call a callback, sanity check our mapping first.
	 */
	ret = hash_mapping_callbacks_sanity_check(ctx->h); EG(ret, err);
	ret = ctx->h->hfunc_update(&(ctx->sign_data.bign.h_ctx), chunk, chunklen);

err:
	return ret;
}
467
/*
 * Finalize a (D)BIGN signature: compute sig = s0 || s1 from the accumulated
 * message hash, per STB 34.101.45. The nonce k is either drawn from the
 * context's rand handler (BIGN) or derived deterministically (DBIGN).
 * When USE_SIG_BLINDING is defined, the scalar operations involving the
 * private key and the nonce are blinded with a random mask b.
 * The BIGN part of the context is cleared on exit, preventing reuse.
 */
int __bign_sign_finalize(struct ec_sign_context *ctx, u8 *sig, u8 siglen,
			 ec_alg_type key_type)
{
	int ret, cmp;
	const ec_priv_key *priv_key;
	prj_pt_src_t G;
	u8 hash[MAX_DIGEST_SIZE];
	u8 hash_belt[BELT_HASH_DIGEST_SIZE];
	u8 FE2OS_W[LOCAL_MAX(2 * BYTECEIL(CURVES_MAX_P_BIT_LEN), 2 * BIGN_S0_LEN(CURVES_MAX_Q_BIT_LEN))];
	bitcnt_t q_bit_len, p_bit_len;
	prj_pt kG;
	nn_src_t q, x;
	u8 hsize, p_len, l;
	nn k, h, tmp, s1;
	belt_hash_context belt_hash_ctx;
	const u8 *oid_ptr = NULL;
	u16 oid_len = 0;
#ifdef USE_SIG_BLINDING
	/* b is the blinding mask */
	nn b, binv;
	b.magic = binv.magic = WORD(0);
#endif

	/* Mark the local nn/point variables as uninitialized so that the
	 * cleanup path below is safe even on early errors. */
	k.magic = h.magic = WORD(0);
	tmp.magic = s1.magic = WORD(0);
	kG.magic = WORD(0);

	/*
	 * First, verify context has been initialized and private
	 * part too. This guarantees the context is an BIGN
	 * signature one and we do not finalize() before init().
	 */
	ret = sig_sign_check_initialized(ctx); EG(ret, err);
	BIGN_SIGN_CHECK_INITIALIZED(&(ctx->sign_data.bign), ret, err);
	MUST_HAVE((sig != NULL), ret, err);

	/* Additional sanity checks on input params from context */
	ret = key_pair_check_initialized_and_type(ctx->key_pair, key_type); EG(ret, err);

	/* Zero init out point */
	ret = local_memset(&kG, 0, sizeof(prj_pt)); EG(ret, err);

	/* Make things more readable */
	priv_key = &(ctx->key_pair->priv_key);
	q = &(priv_key->params->ec_gen_order);
	q_bit_len = priv_key->params->ec_gen_order_bitlen;
	p_bit_len = priv_key->params->ec_fp.p_bitlen;
	G = &(priv_key->params->ec_gen);
	p_len = (u8)BYTECEIL(p_bit_len);
	x = &(priv_key->x);
	hsize = ctx->h->digest_size;

	MUST_HAVE((priv_key->key_type == key_type), ret, err);

	/* Compute l depending on the order */
	l = (u8)BIGN_S0_LEN(q_bit_len);

	/* Sanity check */
	ret = nn_cmp(x, q, &cmp); EG(ret, err);
	/* This should not happen and means that our
	 * private key is not compliant!
	 */
	MUST_HAVE((cmp < 0), ret, err);

	dbg_nn_print("p", &(priv_key->params->ec_fp.p));
	dbg_nn_print("q", &(priv_key->params->ec_gen_order));
	dbg_priv_key_print("x", priv_key);
	dbg_ec_point_print("G", &(priv_key->params->ec_gen));
	dbg_pub_key_print("Y", &(ctx->key_pair->pub_key));

	/* Check given signature buffer length has the expected size */
	MUST_HAVE((siglen == BIGN_SIGLEN(q_bit_len)), ret, err);

	/* We check that our additional data is not NULL as it must contain
	 * the mandatory external hash OID.
	 */
	MUST_HAVE((ctx->adata != NULL) && (ctx->adata_len != 0), ret, err);

	/* 1. Compute h = H(m) */
	ret = local_memset(hash, 0, hsize); EG(ret, err);
	/* Since we call a callback, sanity check our mapping */
	ret = hash_mapping_callbacks_sanity_check(ctx->h); EG(ret, err);
	ret = ctx->h->hfunc_finalize(&(ctx->sign_data.bign.h_ctx), hash); EG(ret, err);
	dbg_buf_print("h", hash, hsize);


	/* 2. get a random value k in ]0,q[ */
#ifdef NO_KNOWN_VECTORS
	/* NOTE: when we do not need self tests for known vectors,
	 * we can be strict about random function handler!
	 * This allows us to avoid the corruption of such a pointer.
	 */
	/* Sanity check on the handler before calling it */
	if(ctx->rand != nn_get_random_mod){
#ifdef WITH_SIG_DBIGN
		/* In deterministic BIGN, nevermind! */
		if(key_type != DBIGN)
#endif
		{
			ret = -1;
			goto err;
		}
	}
#endif
	if(ctx->rand != NULL){
		/* Non-deterministic generation, or deterministic with
		 * test vectors.
		 */
		ret = ctx->rand(&k, q);
	}
	else
#if defined(WITH_SIG_DBIGN)
	{
		/* Only applies for DETERMINISTIC BIGN */
		if(key_type != DBIGN){
			ret = -1;
			goto err;
		}
		/* Deterministically generate k as STB 34.101.45 mandates */
		ret = __bign_determinitic_nonce(&k, q, q_bit_len, &(priv_key->x), ctx->adata, ctx->adata_len, hash, hsize);
	}
#else
	{
		/* NULL rand function is not accepted for regular BIGN */
		ret = -1;
		goto err;
	}
#endif
	if (ret) {
		ret = -1;
		goto err;
	}
	dbg_nn_print("k", &k);

#ifdef USE_SIG_BLINDING
	/* Note: if we use blinding, r and e are multiplied by
	 * a random value b in ]0,q[ */
	ret = nn_get_random_mod(&b, q); EG(ret, err);
	/* NOTE: we use Fermat's little theorem inversion for
	 * constant time here. This is possible since q is prime.
	 */
	ret = nn_modinv_fermat(&binv, &b, q); EG(ret, err);

	dbg_nn_print("b", &b);
#endif /* USE_SIG_BLINDING */


	/* 3. Compute W = (W_x,W_y) = kG */
#ifdef USE_SIG_BLINDING
	ret = prj_pt_mul_blind(&kG, &k, G); EG(ret, err);
#else
	ret = prj_pt_mul(&kG, &k, G); EG(ret, err);
#endif /* USE_SIG_BLINDING */
	ret = prj_pt_unique(&kG, &kG); EG(ret, err);

	dbg_nn_print("W_x", &(kG.X.fp_val));
	dbg_nn_print("W_y", &(kG.Y.fp_val));

	/* 4. Compute s0 = <BELT-HASH(OID(H) || <<FE2OS(W_x)> || <FE2OS(W_y)>>2*l || H(X))>l */
	ret = belt_hash_init(&belt_hash_ctx); EG(ret, err);
	ret = bign_get_oid_from_adata(ctx->adata, ctx->adata_len, &oid_ptr, &oid_len); EG(ret, err);
	ret = belt_hash_update(&belt_hash_ctx, oid_ptr, oid_len); EG(ret, err);
	/* Export W_x || W_y as little endian strings */
	ret = local_memset(FE2OS_W, 0, sizeof(FE2OS_W)); EG(ret, err);
	ret = fp_export_to_buf(&FE2OS_W[0], p_len, &(kG.X)); EG(ret, err);
	ret = _reverse_endianness(&FE2OS_W[0], p_len); EG(ret, err);
	ret = fp_export_to_buf(&FE2OS_W[p_len], p_len, &(kG.Y)); EG(ret, err);
	ret = _reverse_endianness(&FE2OS_W[p_len], p_len); EG(ret, err);
	/* Only hash the 2*l bytes of FE2OS(W_x) || FE2OS(W_y) */
	ret = belt_hash_update(&belt_hash_ctx, &FE2OS_W[0], (u32)(2*l)); EG(ret, err);
	/* Append the external hash of the message */
	ret = belt_hash_update(&belt_hash_ctx, hash, hsize); EG(ret, err);
	/* Store our s0 */
	ret = local_memset(hash_belt, 0, sizeof(hash_belt)); EG(ret, err);
	ret = belt_hash_final(&belt_hash_ctx, hash_belt); EG(ret, err);
	ret = local_memset(&sig[0], 0, l); EG(ret, err);
	ret = local_memcpy(&sig[0], &hash_belt[0], LOCAL_MIN(l, BELT_HASH_DIGEST_SIZE)); EG(ret, err);
	dbg_buf_print("s0", &sig[0], LOCAL_MIN(l, BELT_HASH_DIGEST_SIZE));

	/* 5. Now compute s1 = (k - H_bar - (s0_bar + 2**l) * d) mod q */
	/* First import H and s0 as numbers modulo q */
	/* Import H */
	ret = _reverse_endianness(hash, hsize); EG(ret, err);
	ret = nn_init_from_buf(&h, hash, hsize); EG(ret, err);
	ret = nn_mod(&h, &h, q); EG(ret, err);
	/* Import s0_bar */
	ret = local_memcpy(FE2OS_W, &sig[0], l); EG(ret, err);
	ret = _reverse_endianness(FE2OS_W, l); EG(ret, err);
	ret = nn_init_from_buf(&s1, FE2OS_W, l); EG(ret, err);
	ret = nn_mod(&s1, &s1, q); EG(ret, err);
	/* Compute (s0_bar + 2**l) * d */
	ret = nn_init(&tmp, 0); EG(ret, err);
	ret = nn_one(&tmp); EG(ret, err);
	ret = nn_lshift(&tmp, &tmp, (bitcnt_t)(8*l)); EG(ret, err);
	ret = nn_mod(&tmp, &tmp, q); EG(ret, err);
	ret = nn_mod_add(&s1, &s1, &tmp, q); EG(ret, err);
#ifdef USE_SIG_BLINDING
	/* Blind s1 with b */
	ret = nn_mod_mul(&s1, &s1, &b, q); EG(ret, err);

	/* Blind the message hash */
	ret = nn_mod_mul(&h, &h, &b, q); EG(ret, err);

	/* Blind the nonce */
	ret = nn_mod_mul(&k, &k, &b, q); EG(ret, err);
#endif /* USE_SIG_BLINDING */

	ret = nn_mod_mul(&s1, &s1, &(priv_key->x), q); EG(ret, err);
	ret = nn_mod_sub(&s1, &k, &s1, q); EG(ret, err);
	ret = nn_mod_sub(&s1, &s1, &h, q); EG(ret, err);

#ifdef USE_SIG_BLINDING
	/* Unblind s1 */
	ret = nn_mod_mul(&s1, &s1, &binv, q); EG(ret, err);
#endif
	dbg_nn_print("s1", &s1);

	/* Clean hash buffer as we do not need it anymore */
	ret = local_memset(hash, 0, hsize); EG(ret, err);

	/* Now export s1 and reverse its endianness */
	ret = nn_export_to_buf(&sig[l], (u16)BIGN_S1_LEN(q_bit_len), &s1); EG(ret, err);
	ret = _reverse_endianness(&sig[l], (u16)BIGN_S1_LEN(q_bit_len));

err:
	/* Release all the local numbers/points, even on error paths */
	nn_uninit(&k);
	nn_uninit(&h);
	nn_uninit(&tmp);
	nn_uninit(&s1);
	prj_pt_uninit(&kG);
#ifdef USE_SIG_BLINDING
	nn_uninit(&b);
	nn_uninit(&binv);
#endif

	/*
	 * We can now clear data part of the context. This will clear
	 * magic and avoid further reuse of the whole context.
	 */
	if(ctx != NULL){
		IGNORE_RET_VAL(local_memset(&(ctx->sign_data.bign), 0, sizeof(bign_sign_data)));
	}

	/* Clean what remains on the stack */
	PTR_NULLIFY(priv_key);
	PTR_NULLIFY(G);
	PTR_NULLIFY(q);
	PTR_NULLIFY(x);
	PTR_NULLIFY(oid_ptr);
	VAR_ZEROIFY(q_bit_len);
	VAR_ZEROIFY(hsize);
	VAR_ZEROIFY(oid_len);

	return ret;
}
723
724 /*
725 * Generic *internal* BIGN verification functions (init, update and finalize).
726 * Their purpose is to allow passing a specific hash function (along with
727 * its output size) and the random ephemeral key k, so that compliance
728 * tests against test vectors can be made without ugly hack in the code
729 * itself.
730 *
731 * Implementation notes:
732 *
733 * a) The BIGN algorithm makes use of the OID of the external hash function.
734 * We let the upper layer provide us with this in the "adata" field of the
735 * context.
736 */
737
738 #define BIGN_VERIFY_MAGIC ((word_t)(0xceff8344927346abULL))
739 #define BIGN_VERIFY_CHECK_INITIALIZED(A, ret, err) \
740 MUST_HAVE((((void *)(A)) != NULL) && ((A)->magic == BIGN_VERIFY_MAGIC), ret, err)
741
int __bign_verify_init(struct ec_verify_context *ctx, const u8 *sig, u8 siglen,
		       ec_alg_type key_type)
{
	u8 scratch[BYTECEIL(CURVES_MAX_Q_BIT_LEN)];
	nn *s0, *s1;
	u8 *s0_sig;
	nn_src_t q;
	bitcnt_t q_bit_len;
	u8 l;
	int ret, cmp;

	/* The generic verification context must already be initialized */
	ret = sig_verify_check_initialized(ctx); EG(ret, err);

	ret = local_memset(scratch, 0, sizeof(scratch)); EG(ret, err);

	/* Sanity check the public key, hash mapping and signature buffer */
	ret = pub_key_check_initialized_and_type(ctx->pub_key, key_type); EG(ret, err);
	MUST_HAVE((ctx->h != NULL) && (ctx->h->digest_size <= MAX_DIGEST_SIZE) &&
		  (ctx->h->block_size <= MAX_BLOCK_SIZE), ret, err);
	MUST_HAVE((sig != NULL), ret, err);

	/* The additional data must carry the mandatory external hash OID */
	MUST_HAVE((ctx->adata != NULL) && (ctx->adata_len != 0), ret, err);

	/* Handy aliases */
	q = &(ctx->pub_key->params->ec_gen_order);
	q_bit_len = ctx->pub_key->params->ec_gen_order_bitlen;
	s0 = &(ctx->verify_data.bign.s0);
	s1 = &(ctx->verify_data.bign.s1);
	s0_sig = (u8*)(&(ctx->verify_data.bign.s0_sig));

	/* l is inherited from the order size */
	l = (u8)BIGN_S0_LEN(q_bit_len);

	/* The signature must be exactly s0 || s1 for this order */
	MUST_HAVE((siglen == BIGN_SIGLEN(q_bit_len)), ret, err);

	/* Keep a raw copy of s0: it is compared against the recomputed
	 * value during finalize.
	 */
	ret = local_memcpy(s0_sig, sig, l); EG(ret, err);

	/* Import s0, stored little endian in the signature, as a big number */
	ret = local_memcpy(&scratch[0], sig, l); EG(ret, err);
	ret = _reverse_endianness(&scratch[0], l); EG(ret, err);
	ret = nn_init_from_buf(s0, &scratch[0], l); EG(ret, err);
	/* Import s1 the same way */
	ret = local_memcpy(&scratch[0], &sig[l], (u32)BIGN_S1_LEN(q_bit_len)); EG(ret, err);
	ret = _reverse_endianness(&scratch[0], (u16)BIGN_S1_LEN(q_bit_len)); EG(ret, err);
	ret = nn_init_from_buf(s1, &scratch[0], (u8)BIGN_S1_LEN(q_bit_len)); EG(ret, err);
	dbg_nn_print("s0", s0);
	dbg_nn_print("s1", s1);

	/* 1. Reject the signature if s1 >= q */
	ret = nn_cmp(s1, q, &cmp); EG(ret, err);
	MUST_HAVE((cmp < 0), ret, err);

	/* Initialize the external hash context for the message.
	 * Since we call a callback, sanity check our mapping first.
	 */
	ret = hash_mapping_callbacks_sanity_check(ctx->h); EG(ret, err);
	ret = ctx->h->hfunc_init(&(ctx->verify_data.bign.h_ctx)); EG(ret, err);

	ctx->verify_data.bign.magic = BIGN_VERIFY_MAGIC;

err:
	VAR_ZEROIFY(q_bit_len);
	PTR_NULLIFY(q);
	PTR_NULLIFY(s0);
	PTR_NULLIFY(s1);
	PTR_NULLIFY(s0_sig);

	return ret;
}
816
int __bign_verify_update(struct ec_verify_context *ctx,
			 const u8 *chunk, u32 chunklen, ec_alg_type key_type)
{
	int ret;

	/* Verify that both the generic context and its BIGN specific part
	 * have been initialized: this guarantees init() was called before
	 * any update() or finalize().
	 */
	ret = sig_verify_check_initialized(ctx); EG(ret, err);
	BIGN_VERIFY_CHECK_INITIALIZED(&(ctx->verify_data.bign), ret, err);
	/* Sanity check the public key from the context */
	ret = pub_key_check_initialized_and_type(ctx->pub_key, key_type); EG(ret, err);

	/* 2. Feed the current chunk to H(m).
	 * Since we call a callback, sanity check our mapping first.
	 */
	ret = hash_mapping_callbacks_sanity_check(ctx->h); EG(ret, err);
	ret = ctx->h->hfunc_update(&(ctx->verify_data.bign.h_ctx), chunk, chunklen);

err:
	return ret;
}
841
/*
 * Finalize a BIGN/DBIGN signature verification.
 *
 * Consumes the external hash context that was fed through the update()
 * calls and checks the imported (s0, s1) signature against the public
 * key, following the verification steps of STB 34.101.45:
 *
 *   h = H(m)
 *   W = ((s1 + h) mod q) * G + ((s0 + 2**l) mod q) * Y
 *   t = <BELT-HASH(OID(H) || <FE2OS(W_x) || FE2OS(W_y)>_{2l} || h)>_l
 *   accept iff W is not the point at infinity and t == s0
 *
 * ctx:      verification context previously set up by init/update; also
 *           carries the public key and the mandatory adata holding the
 *           external hash function OID.
 * key_type: expected key algorithm type (BIGN or DBIGN), checked against
 *           the public key.
 *
 * Returns 0 when the signature verifies, a non-zero value otherwise.
 * The BIGN part of the verification context is cleared on every exit
 * path, so the context cannot be reused after this call.
 */
int __bign_verify_finalize(struct ec_verify_context *ctx,
			   ec_alg_type key_type)
{
	prj_pt uG, vY;
	prj_pt_src_t G, Y;
	prj_pt_t W;
	u8 hash[MAX_DIGEST_SIZE];
	u8 hash_belt[BELT_HASH_DIGEST_SIZE];
	u8 t[BIGN_S0_LEN(CURVES_MAX_Q_BIT_LEN)];
	u8 FE2OS_W[LOCAL_MAX(2 * BYTECEIL(CURVES_MAX_P_BIT_LEN), 2 * BIGN_S0_LEN(CURVES_MAX_Q_BIT_LEN))];
	bitcnt_t p_bit_len, q_bit_len;
	nn_src_t q;
	nn h, tmp;
	nn *s0, *s1;
	u8 *s0_sig;
	u8 hsize, p_len, l;
	belt_hash_context belt_hash_ctx;
	int ret, iszero, cmp;
	const u8 *oid_ptr = NULL;
	u16 oid_len = 0;

	/* Mark our local nn/point objects as uninitialized so that the
	 * cleanup at err: can safely uninit them on any early exit. */
	h.magic = tmp.magic = WORD(0);
	uG.magic = vY.magic = WORD(0);

	/* NOTE: we reuse uG for W to optimize local variables */
	W = &uG;

	/*
	 * First, verify context has been initialized and public
	 * part too. This guarantees the context is an BIGN
	 * verification one and we do not finalize() before init().
	 */
	ret = sig_verify_check_initialized(ctx); EG(ret, err);
	BIGN_VERIFY_CHECK_INITIALIZED(&(ctx->verify_data.bign), ret, err);
	/* Do some sanity checks on input params */
	ret = pub_key_check_initialized_and_type(ctx->pub_key, key_type); EG(ret, err);

	/* We check that our additional data is not NULL as it must contain
	 * the mandatory external hash OID.
	 */
	MUST_HAVE((ctx->adata != NULL) && (ctx->adata_len != 0), ret, err);

	/* Zero init points */
	ret = local_memset(&uG, 0, sizeof(prj_pt)); EG(ret, err);
	ret = local_memset(&vY, 0, sizeof(prj_pt)); EG(ret, err);

	/* Make things more readable */
	G = &(ctx->pub_key->params->ec_gen);
	Y = &(ctx->pub_key->y);
	q = &(ctx->pub_key->params->ec_gen_order);
	p_bit_len = ctx->pub_key->params->ec_fp.p_bitlen;
	q_bit_len = ctx->pub_key->params->ec_gen_order_bitlen;
	p_len = (u8)BYTECEIL(p_bit_len);
	hsize = ctx->h->digest_size;
	s0 = &(ctx->verify_data.bign.s0);
	s1 = &(ctx->verify_data.bign.s1);
	s0_sig = (u8*)(&(ctx->verify_data.bign.s0_sig));

	/* Sanity check: our local t buffer must be able to mirror the raw
	 * s0 bytes stored in the context, since they are compared below. */
	MUST_HAVE((sizeof(t) == sizeof(ctx->verify_data.bign.s0_sig)), ret, err);

	/* Compute our l that is inherited from q size */
	l = (u8)BIGN_S0_LEN(q_bit_len);

	/* 2. Compute h = H(m) */
	/* Since we call a callback, sanity check our mapping */
	ret = hash_mapping_callbacks_sanity_check(ctx->h); EG(ret, err);
	ret = ctx->h->hfunc_finalize(&(ctx->verify_data.bign.h_ctx), hash); EG(ret, err);
	dbg_buf_print("h = H(m)", hash, hsize);

	/* Import H as a natural number, reducing it mod q.
	 * NOTE(review): BIGN encodings are little-endian while
	 * nn_init_from_buf expects big-endian input, hence the
	 * byte reversal before import. */
	ret = _reverse_endianness(hash, hsize); EG(ret, err);
	ret = nn_init_from_buf(&h, hash, hsize); EG(ret, err);
	ret = nn_mod(&h, &h, q); EG(ret, err);
	/* NOTE: we reverse endianness again of the hash since we will
	 * have to use the original value.
	 */
	ret = _reverse_endianness(hash, hsize); EG(ret, err);

	/* Compute ((s1_bar + h_bar) mod q) */
	ret = nn_mod_add(&h, &h, s1, q); EG(ret, err);
	/* Compute (s0_bar + 2**l) mod q */
	/* NOTE: l is a byte length here, so the shift is by 8*l bits */
	ret = nn_init(&tmp, 0); EG(ret, err);
	ret = nn_one(&tmp); EG(ret, err);
	ret = nn_lshift(&tmp, &tmp, (bitcnt_t)(8*l)); EG(ret, err);
	ret = nn_mod(&tmp, &tmp, q); EG(ret, err);
	ret = nn_mod_add(&tmp, &tmp, s0, q); EG(ret, err);

	/* 3. Compute ((s1_bar + h_bar) mod q) * G + ((s0_bar + 2**l) mod q) * Y. */
	ret = prj_pt_mul(&uG, &h, G); EG(ret, err);
	ret = prj_pt_mul(&vY, &tmp, Y); EG(ret, err);
	ret = prj_pt_add(W, &uG, &vY); EG(ret, err);
	/* 5. If the result is point at infinity, return false. */
	ret = prj_pt_iszero(W, &iszero); EG(ret, err);
	MUST_HAVE((!iszero), ret, err);
	/* Normalize W to unique affine-like representation so that its
	 * coordinates can be serialized below. */
	ret = prj_pt_unique(W, W); EG(ret, err);

	/* 6. Compute t = <BELT-HASH(OID(H) || <<FE2OS(W_x)> || <FE2OS(W_y)>>2*l || H(X))>l */
	ret = belt_hash_init(&belt_hash_ctx); EG(ret, err);
	ret = bign_get_oid_from_adata(ctx->adata, ctx->adata_len, &oid_ptr, &oid_len); EG(ret, err);
	ret = belt_hash_update(&belt_hash_ctx, oid_ptr, oid_len); EG(ret, err);
	/* Serialize W_x || W_y, each reversed to little-endian (FE2OS) */
	ret = local_memset(FE2OS_W, 0, sizeof(FE2OS_W)); EG(ret, err);
	ret = fp_export_to_buf(&FE2OS_W[0], p_len, &(W->X)); EG(ret, err);
	ret = _reverse_endianness(&FE2OS_W[0], p_len); EG(ret, err);
	ret = fp_export_to_buf(&FE2OS_W[p_len], p_len, &(W->Y)); EG(ret, err);
	ret = _reverse_endianness(&FE2OS_W[p_len], p_len); EG(ret, err);
	/* Only hash the 2*l bytes of FE2OS(W_x) || FE2OS(W_y) */
	ret = belt_hash_update(&belt_hash_ctx, &FE2OS_W[0], (u32)(2*l)); EG(ret, err);
	/* Hash the original (non byte-reversed) external hash value */
	ret = belt_hash_update(&belt_hash_ctx, hash, hsize); EG(ret, err);
	/* Store our t: the BELT-HASH output truncated to l bytes */
	ret = local_memset(hash_belt, 0, sizeof(hash_belt)); EG(ret, err);
	ret = belt_hash_final(&belt_hash_ctx, hash_belt); EG(ret, err);
	ret = local_memset(&t[0], 0, l); EG(ret, err);
	ret = local_memcpy(&t[0], &hash_belt[0], LOCAL_MIN(l, BELT_HASH_DIGEST_SIZE)); EG(ret, err);

	/* 10. Accept the signature if and only if t equals s0_sig' */
	/* are_equal sets cmp to non-zero when the two l-byte buffers match:
	 * map match -> 0 (valid signature), mismatch -> -1 (rejected). */
	ret = are_equal(t, s0_sig, l, &cmp); EG(ret, err);
	ret = (cmp == 0) ? -1 : 0;

 err:
	prj_pt_uninit(&uG);
	prj_pt_uninit(&vY);
	nn_uninit(&h);
	nn_uninit(&tmp);

	/*
	 * We can now clear data part of the context. This will clear
	 * magic and avoid further reuse of the whole context.
	 */
	if(ctx != NULL){
		IGNORE_RET_VAL(local_memset(&(ctx->verify_data.bign), 0, sizeof(bign_verify_data)));
	}

	/* Clean what remains on the stack */
	PTR_NULLIFY(G);
	PTR_NULLIFY(Y);
	PTR_NULLIFY(W);
	VAR_ZEROIFY(p_bit_len);
	VAR_ZEROIFY(q_bit_len);
	VAR_ZEROIFY(p_len);
	PTR_NULLIFY(q);
	PTR_NULLIFY(s0);
	PTR_NULLIFY(s1);
	PTR_NULLIFY(s0_sig);
	PTR_NULLIFY(oid_ptr);
	VAR_ZEROIFY(hsize);
	VAR_ZEROIFY(oid_len);

	return ret;
}
994
#else /* defined(WITH_SIG_BIGN) || defined(WITH_SIG_DBIGN) */

/*
 * Dummy definition to avoid the empty translation unit ISO C warning
 */
typedef int dummy;
#endif /* defined(WITH_SIG_BIGN) || defined(WITH_SIG_DBIGN) */
1002