/*
 * Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include <string.h>
#include <openssl/crypto.h>
#include <openssl/err.h>
#include "crypto/modes.h"

#ifndef OPENSSL_NO_OCB

/*
 * Calculate the number of binary trailing zeros in any given number
 */
static u32 ocb_ntz(u64 n)
{
    u32 cnt = 0;

    /*
     * We do a right-to-left simple sequential search. This is surprisingly
     * efficient as the distribution of trailing zeros is not uniform,
     * e.g. the number of possible inputs with no trailing zeros is equal to
     * the number with 1 or more; the number with exactly 1 is equal to the
     * number with 2 or more, etc. Checking the last two bits covers 75% of
     * all numbers. Checking the last three covers 87.5%
     */
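    /*
     * For example: ocb_ntz(1) == 0, ocb_ntz(2) == 1, ocb_ntz(12) == 2 and
     * ocb_ntz(8) == 3. Callers only pass the 1-based block index i, which
     * is never zero, so the loop below always terminates.
     */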
    while (!(n & 1)) {
        n >>= 1;
        cnt++;
    }
    return cnt;
}

/*
 * Shift a block of 16 bytes left by shift bits
 */
static void ocb_block_lshift(const unsigned char *in, size_t shift,
                             unsigned char *out)
{
    int i;
    unsigned char carry = 0, carry_next;

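    /*
     * Callers only use shift values in the range 0..7 (ocb_double() passes 1,
     * CRYPTO_ocb128_setiv() passes bottom % 8), so each output byte is the
     * corresponding input byte shifted left, with the top bits of the
     * following input byte carried into its low bits.
     */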
    for (i = 15; i >= 0; i--) {
        carry_next = in[i] >> (8 - shift);
        out[i] = (in[i] << shift) | carry;
        carry = carry_next;
    }
}

/*
 * Perform a "double" operation as per OCB spec
 */
static void ocb_double(OCB_BLOCK *in, OCB_BLOCK *out)
{
    unsigned char mask;

    /*
     * Calculate the mask based on the most significant bit. There are more
     * efficient ways to do this - but this way is constant time
     */
    mask = in->c[0] & 0x80;
    mask >>= 7;
    mask = (0 - mask) & 0x87;

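    /*
     * This is a multiplication by x in GF(2^128) with the reduction
     * polynomial x^128 + x^7 + x^2 + x + 1: shift the block left one bit
     * and, if a bit was carried out of the top, fold the polynomial's low
     * byte (0x87) back into the bottom byte.
     */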
    ocb_block_lshift(in->c, 1, out->c);

    out->c[15] ^= mask;
}

/*
 * Perform an xor on in1 and in2 - each of len bytes. Store result in out
 */
static void ocb_block_xor(const unsigned char *in1,
                          const unsigned char *in2, size_t len,
                          unsigned char *out)
{
    size_t i;
    for (i = 0; i < len; i++) {
        out[i] = in1[i] ^ in2[i];
    }
}

/*
 * Lookup L_index in our lookup table. If we haven't already got it we need to
 * calculate it
 */
static OCB_BLOCK *ocb_lookup_l(OCB128_CONTEXT *ctx, size_t idx)
{
    size_t l_index = ctx->l_index;

    if (idx <= l_index) {
        return ctx->l + idx;
    }

    /* We don't have it - so calculate it */
    if (idx >= ctx->max_l_index) {
        void *tmp_ptr;
        /*
         * Each additional entry allows processing of almost twice as much
         * data, so in a linear world the table would need to be expanded
         * in smaller and smaller increments. Originally it doubled in size,
         * which was wasteful. Growing it linearly is not formally optimal,
         * but is simpler to implement. We grow the table by the minimal
         * multiple of 4 entries needed to accommodate the index.
         */
        ctx->max_l_index += (idx - ctx->max_l_index + 4) & ~3;
        tmp_ptr = OPENSSL_realloc(ctx->l, ctx->max_l_index * sizeof(OCB_BLOCK));
        if (tmp_ptr == NULL) /* prevent ctx->l from being clobbered */
            return NULL;
        ctx->l = tmp_ptr;
    }
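    /* Extend the chain of doublings: ctx->l[i] holds L_i = double(L_{i-1}) */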
    while (l_index < idx) {
        ocb_double(ctx->l + l_index, ctx->l + l_index + 1);
        l_index++;
    }
    ctx->l_index = l_index;

    return ctx->l + idx;
}

/*
 * Create a new OCB128_CONTEXT
 */
OCB128_CONTEXT *CRYPTO_ocb128_new(void *keyenc, void *keydec,
                                  block128_f encrypt, block128_f decrypt,
                                  ocb128_f stream)
{
    OCB128_CONTEXT *octx;
    int ret;

    if ((octx = OPENSSL_malloc(sizeof(*octx))) != NULL) {
        ret = CRYPTO_ocb128_init(octx, keyenc, keydec, encrypt, decrypt,
                                 stream);
        if (ret)
            return octx;
        OPENSSL_free(octx);
    }

    return NULL;
}
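
/*
 * Illustrative only - a minimal sketch of creating a context with the raw
 * AES block functions from <openssl/aes.h> (the key schedules and variable
 * names here are just examples; passing a NULL stream function selects the
 * generic block-by-block path):
 *
 *     AES_KEY ksenc, ksdec;
 *     OCB128_CONTEXT *octx;
 *
 *     AES_set_encrypt_key(key, 128, &ksenc);
 *     AES_set_decrypt_key(key, 128, &ksdec);
 *     octx = CRYPTO_ocb128_new(&ksenc, &ksdec, (block128_f)AES_encrypt,
 *                              (block128_f)AES_decrypt, NULL);
 */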

/*
 * Initialise an existing OCB128_CONTEXT
 */
int CRYPTO_ocb128_init(OCB128_CONTEXT *ctx, void *keyenc, void *keydec,
                       block128_f encrypt, block128_f decrypt,
                       ocb128_f stream)
{
    memset(ctx, 0, sizeof(*ctx));
    ctx->l_index = 0;
    ctx->max_l_index = 5;
    if ((ctx->l = OPENSSL_malloc(ctx->max_l_index * 16)) == NULL) {
        ERR_raise(ERR_LIB_CRYPTO, ERR_R_MALLOC_FAILURE);
        return 0;
    }

    /*
     * We set both the encryption and decryption key schedules - decryption
     * needs both (it uses the encryption direction for the tag and the
     * final-block Pad). The decryption schedule is not really needed if we
     * are only encrypting, but taking it anyway keeps things simple.
     */
    ctx->encrypt = encrypt;
    ctx->decrypt = decrypt;
    ctx->stream = stream;
    ctx->keyenc = keyenc;
    ctx->keydec = keydec;

    /* L_* = ENCIPHER(K, zeros(128)) */
    ctx->encrypt(ctx->l_star.c, ctx->l_star.c, ctx->keyenc);

    /* L_$ = double(L_*) */
    ocb_double(&ctx->l_star, &ctx->l_dollar);

    /* L_0 = double(L_$) */
    ocb_double(&ctx->l_dollar, ctx->l);

    /* L_{i} = double(L_{i-1}) */
    ocb_double(ctx->l, ctx->l+1);
    ocb_double(ctx->l+1, ctx->l+2);
    ocb_double(ctx->l+2, ctx->l+3);
    ocb_double(ctx->l+3, ctx->l+4);
    ctx->l_index = 4; /* enough to process up to 496 bytes */
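    /*
     * With L_0..L_4 cached, blocks 1..31 can be processed without growing
     * the table (ntz(i) <= 4 for every i <= 31), i.e. 31 * 16 = 496 bytes.
     */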

    return 1;
}

/*
 * Copy an OCB128_CONTEXT object
 */
int CRYPTO_ocb128_copy_ctx(OCB128_CONTEXT *dest, OCB128_CONTEXT *src,
                           void *keyenc, void *keydec)
{
    memcpy(dest, src, sizeof(OCB128_CONTEXT));
    if (keyenc)
        dest->keyenc = keyenc;
    if (keydec)
        dest->keydec = keydec;
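    /*
     * Give dest its own copy of the L table so that src and dest can be
     * used and cleaned up independently afterwards.
     */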
    if (src->l) {
        if ((dest->l = OPENSSL_malloc(src->max_l_index * 16)) == NULL) {
            ERR_raise(ERR_LIB_CRYPTO, ERR_R_MALLOC_FAILURE);
            return 0;
        }
        memcpy(dest->l, src->l, (src->l_index + 1) * 16);
    }
    return 1;
}

/*
 * Set the IV to be used for this operation. Must be 1 - 15 bytes.
 */
int CRYPTO_ocb128_setiv(OCB128_CONTEXT *ctx, const unsigned char *iv,
                        size_t len, size_t taglen)
{
    unsigned char ktop[16], tmp[16], mask;
    unsigned char stretch[24], nonce[16];
    size_t bottom, shift;

    /*
     * The spec says the IV is 120 bits or fewer and allows lengths that are
     * not byte aligned. We do not support non-byte-aligned IVs at this stage
     */
    if ((len > 15) || (len < 1) || (taglen > 16) || (taglen < 1)) {
        return -1;
    }

    /* Reset nonce-dependent variables */
    memset(&ctx->sess, 0, sizeof(ctx->sess));

    /* Nonce = num2str(TAGLEN mod 128,7) || zeros(120-bitlen(N)) || 1 || N */
    nonce[0] = ((taglen * 8) % 128) << 1;
    memset(nonce + 1, 0, 15);
    memcpy(nonce + 16 - len, iv, len);
    nonce[15 - len] |= 1;

    /* Ktop = ENCIPHER(K, Nonce[1..122] || zeros(6)) */
    memcpy(tmp, nonce, 16);
    tmp[15] &= 0xc0;
    ctx->encrypt(tmp, ktop, ctx->keyenc);

    /* Stretch = Ktop || (Ktop[1..64] xor Ktop[9..72]) */
    memcpy(stretch, ktop, 16);
    ocb_block_xor(ktop, ktop + 1, 8, stretch + 16);

    /* bottom = str2num(Nonce[123..128]) */
    bottom = nonce[15] & 0x3f;

    /* Offset_0 = Stretch[1+bottom..128+bottom] */
    shift = bottom % 8;
    ocb_block_lshift(stretch + (bottom / 8), shift, ctx->sess.offset.c);
    mask = 0xff;
    mask <<= 8 - shift;
    ctx->sess.offset.c[15] |=
        (*(stretch + (bottom / 8) + 16) & mask) >> (8 - shift);
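    /*
     * For example (informative), if bottom = 13 then bottom / 8 = 1 and
     * shift = 5, so the offset is built from stretch[1..16] shifted left by
     * five bits, with the top five bits of stretch[17] filling in the low
     * bits of the final byte - i.e. Stretch starting at bit 13 as required.
     */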

    return 1;
}

/*
 * Provide any AAD. This can be called multiple times. Only the final time can
 * have a partial block
 */
int CRYPTO_ocb128_aad(OCB128_CONTEXT *ctx, const unsigned char *aad,
                      size_t len)
{
    u64 i, all_num_blocks;
    size_t num_blocks, last_len;
    OCB_BLOCK tmp;

    /* Calculate the number of blocks of AAD provided now, and so far */
    num_blocks = len / 16;
    all_num_blocks = num_blocks + ctx->sess.blocks_hashed;

    /* Loop through all full blocks of AAD */
    for (i = ctx->sess.blocks_hashed + 1; i <= all_num_blocks; i++) {
        OCB_BLOCK *lookup;

        /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
        lookup = ocb_lookup_l(ctx, ocb_ntz(i));
        if (lookup == NULL)
            return 0;
        ocb_block16_xor(&ctx->sess.offset_aad, lookup, &ctx->sess.offset_aad);

        memcpy(tmp.c, aad, 16);
        aad += 16;

        /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */
        ocb_block16_xor(&ctx->sess.offset_aad, &tmp, &tmp);
        ctx->encrypt(tmp.c, tmp.c, ctx->keyenc);
        ocb_block16_xor(&tmp, &ctx->sess.sum, &ctx->sess.sum);
    }

    /*
     * Check if we have any partial blocks left over. This is only valid in the
     * last call to this function
     */
    last_len = len % 16;

    if (last_len > 0) {
        /* Offset_* = Offset_m xor L_* */
        ocb_block16_xor(&ctx->sess.offset_aad, &ctx->l_star,
                        &ctx->sess.offset_aad);

        /* CipherInput = (A_* || 1 || zeros(127-bitlen(A_*))) xor Offset_* */
        memset(tmp.c, 0, 16);
        memcpy(tmp.c, aad, last_len);
        tmp.c[last_len] = 0x80;
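        /* the 0x80 written above is the single '1' bit of the 10* padding */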
        ocb_block16_xor(&ctx->sess.offset_aad, &tmp, &tmp);

        /* Sum = Sum_m xor ENCIPHER(K, CipherInput) */
        ctx->encrypt(tmp.c, tmp.c, ctx->keyenc);
        ocb_block16_xor(&tmp, &ctx->sess.sum, &ctx->sess.sum);
    }

    ctx->sess.blocks_hashed = all_num_blocks;

    return 1;
}

/*
 * Provide any data to be encrypted. This can be called multiple times. Only
 * the final time can have a partial block
 */
int CRYPTO_ocb128_encrypt(OCB128_CONTEXT *ctx,
                          const unsigned char *in, unsigned char *out,
                          size_t len)
{
    u64 i, all_num_blocks;
    size_t num_blocks, last_len;

    /*
     * Calculate the number of blocks of data to be encrypted provided now, and
     * so far
     */
    num_blocks = len / 16;
    all_num_blocks = num_blocks + ctx->sess.blocks_processed;

    if (num_blocks && all_num_blocks == (size_t)all_num_blocks
        && ctx->stream != NULL) {
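        /*
         * The cast comparison above checks that the running block count
         * still fits in a size_t (u64 may be wider on 32-bit platforms)
         * before the counters are handed to the stream function.
         */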
        size_t max_idx = 0, top = (size_t)all_num_blocks;

        /*
         * See how many L_{i} entries we need to process data at hand
         * and pre-compute missing entries in the table [if any]...
         */
        while (top >>= 1)
            max_idx++;
        if (ocb_lookup_l(ctx, max_idx) == NULL)
            return 0;

        ctx->stream(in, out, num_blocks, ctx->keyenc,
                    (size_t)ctx->sess.blocks_processed + 1, ctx->sess.offset.c,
                    (const unsigned char (*)[16])ctx->l, ctx->sess.checksum.c);
    } else {
        /* Loop through all full blocks to be encrypted */
        for (i = ctx->sess.blocks_processed + 1; i <= all_num_blocks; i++) {
            OCB_BLOCK *lookup;
            OCB_BLOCK tmp;

            /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
            lookup = ocb_lookup_l(ctx, ocb_ntz(i));
            if (lookup == NULL)
                return 0;
            ocb_block16_xor(&ctx->sess.offset, lookup, &ctx->sess.offset);

            memcpy(tmp.c, in, 16);
            in += 16;

            /* Checksum_i = Checksum_{i-1} xor P_i */
            ocb_block16_xor(&tmp, &ctx->sess.checksum, &ctx->sess.checksum);

            /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */
            ocb_block16_xor(&ctx->sess.offset, &tmp, &tmp);
            ctx->encrypt(tmp.c, tmp.c, ctx->keyenc);
            ocb_block16_xor(&ctx->sess.offset, &tmp, &tmp);

            memcpy(out, tmp.c, 16);
            out += 16;
        }
    }

    /*
     * Check if we have any partial blocks left over. This is only valid in the
     * last call to this function
     */
    last_len = len % 16;

    if (last_len > 0) {
        OCB_BLOCK pad;

        /* Offset_* = Offset_m xor L_* */
        ocb_block16_xor(&ctx->sess.offset, &ctx->l_star, &ctx->sess.offset);

        /* Pad = ENCIPHER(K, Offset_*) */
        ctx->encrypt(ctx->sess.offset.c, pad.c, ctx->keyenc);

        /* C_* = P_* xor Pad[1..bitlen(P_*)] */
        ocb_block_xor(in, pad.c, last_len, out);

        /* Checksum_* = Checksum_m xor (P_* || 1 || zeros(127-bitlen(P_*))) */
        memset(pad.c, 0, 16); /* borrow pad */
        memcpy(pad.c, in, last_len);
        pad.c[last_len] = 0x80;
        ocb_block16_xor(&pad, &ctx->sess.checksum, &ctx->sess.checksum);
    }

    ctx->sess.blocks_processed = all_num_blocks;

    return 1;
}

/*
 * Provide any data to be decrypted. This can be called multiple times. Only
 * the final time can have a partial block
 */
int CRYPTO_ocb128_decrypt(OCB128_CONTEXT *ctx,
                          const unsigned char *in, unsigned char *out,
                          size_t len)
{
    u64 i, all_num_blocks;
    size_t num_blocks, last_len;

    /*
     * Calculate the number of blocks of data to be decrypted provided now, and
     * so far
     */
    num_blocks = len / 16;
    all_num_blocks = num_blocks + ctx->sess.blocks_processed;

    if (num_blocks && all_num_blocks == (size_t)all_num_blocks
        && ctx->stream != NULL) {
        size_t max_idx = 0, top = (size_t)all_num_blocks;

        /*
         * See how many L_{i} entries we need to process data at hand
         * and pre-compute missing entries in the table [if any]...
         */
        while (top >>= 1)
            max_idx++;
        if (ocb_lookup_l(ctx, max_idx) == NULL)
            return 0;

        ctx->stream(in, out, num_blocks, ctx->keydec,
                    (size_t)ctx->sess.blocks_processed + 1, ctx->sess.offset.c,
                    (const unsigned char (*)[16])ctx->l, ctx->sess.checksum.c);
    } else {
        OCB_BLOCK tmp;

        /* Loop through all full blocks to be decrypted */
        for (i = ctx->sess.blocks_processed + 1; i <= all_num_blocks; i++) {

            /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
            OCB_BLOCK *lookup = ocb_lookup_l(ctx, ocb_ntz(i));
            if (lookup == NULL)
                return 0;
            ocb_block16_xor(&ctx->sess.offset, lookup, &ctx->sess.offset);

            memcpy(tmp.c, in, 16);
            in += 16;

            /* P_i = Offset_i xor DECIPHER(K, C_i xor Offset_i) */
            ocb_block16_xor(&ctx->sess.offset, &tmp, &tmp);
            ctx->decrypt(tmp.c, tmp.c, ctx->keydec);
            ocb_block16_xor(&ctx->sess.offset, &tmp, &tmp);

            /* Checksum_i = Checksum_{i-1} xor P_i */
            ocb_block16_xor(&tmp, &ctx->sess.checksum, &ctx->sess.checksum);

            memcpy(out, tmp.c, 16);
            out += 16;
        }
    }

    /*
     * Check if we have any partial blocks left over. This is only valid in the
     * last call to this function
     */
    last_len = len % 16;

    if (last_len > 0) {
        OCB_BLOCK pad;

        /* Offset_* = Offset_m xor L_* */
        ocb_block16_xor(&ctx->sess.offset, &ctx->l_star, &ctx->sess.offset);

        /* Pad = ENCIPHER(K, Offset_*) */
        ctx->encrypt(ctx->sess.offset.c, pad.c, ctx->keyenc);
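        /*
         * Note that the Pad is computed with the block cipher's encryption
         * direction and key schedule even on the decrypt path, as the spec
         * defines it as ENCIPHER(K, Offset_*).
         */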

        /* P_* = C_* xor Pad[1..bitlen(C_*)] */
        ocb_block_xor(in, pad.c, last_len, out);

        /* Checksum_* = Checksum_m xor (P_* || 1 || zeros(127-bitlen(P_*))) */
        memset(pad.c, 0, 16); /* borrow pad */
        memcpy(pad.c, out, last_len);
        pad.c[last_len] = 0x80;
        ocb_block16_xor(&pad, &ctx->sess.checksum, &ctx->sess.checksum);
    }

    ctx->sess.blocks_processed = all_num_blocks;

    return 1;
}

static int ocb_finish(OCB128_CONTEXT *ctx, unsigned char *tag, size_t len,
                      int write)
{
    OCB_BLOCK tmp;

    if (len > 16 || len < 1) {
        return -1;
    }

    /*
     * Tag = ENCIPHER(K, Checksum_* xor Offset_* xor L_$) xor HASH(K,A)
     */
    ocb_block16_xor(&ctx->sess.checksum, &ctx->sess.offset, &tmp);
    ocb_block16_xor(&ctx->l_dollar, &tmp, &tmp);
    ctx->encrypt(tmp.c, tmp.c, ctx->keyenc);
    ocb_block16_xor(&tmp, &ctx->sess.sum, &tmp);

    if (write) {
        memcpy(tag, &tmp, len);
        return 1;
    } else {
        return CRYPTO_memcmp(&tmp, tag, len);
    }
}

/*
 * Calculate the tag and verify it against the supplied tag. Returns 0 if
 * the tags match (CRYPTO_memcmp() reports equality as 0).
 */
int CRYPTO_ocb128_finish(OCB128_CONTEXT *ctx, const unsigned char *tag,
                         size_t len)
{
    return ocb_finish(ctx, (unsigned char*)tag, len, 0);
}

/*
 * Retrieve the calculated tag
 */
int CRYPTO_ocb128_tag(OCB128_CONTEXT *ctx, unsigned char *tag, size_t len)
{
    return ocb_finish(ctx, tag, len, 1);
}
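
/*
 * Illustrative only - a typical one-shot encryption flow with a context
 * created as in the sketch after CRYPTO_ocb128_new() above (buffer names
 * and sizes are examples, not part of this API):
 *
 *     unsigned char tag[16];
 *
 *     CRYPTO_ocb128_setiv(octx, iv, ivlen, sizeof(tag));
 *     CRYPTO_ocb128_aad(octx, aad, aadlen);
 *     CRYPTO_ocb128_encrypt(octx, plaintext, ciphertext, ptlen);
 *     CRYPTO_ocb128_tag(octx, tag, sizeof(tag));
 *
 * Decryption is the same except CRYPTO_ocb128_decrypt() is used and the
 * received tag is checked with CRYPTO_ocb128_finish() instead.
 */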

/*
 * Release all resources
 */
void CRYPTO_ocb128_cleanup(OCB128_CONTEXT *ctx)
{
    if (ctx) {
        OPENSSL_clear_free(ctx->l, ctx->max_l_index * 16);
        OPENSSL_cleanse(ctx, sizeof(*ctx));
    }
}

#endif /* OPENSSL_NO_OCB */