/*
 * Copyright 2001-2024 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * This file uses the low level AES functions (which are deprecated for
 * non-internal use) in order to implement the EVP AES ciphers.
 */
#include "internal/deprecated.h"

#include <string.h>
#include <assert.h>
#include <openssl/opensslconf.h>
#include <openssl/crypto.h>
#include <openssl/evp.h>
#include <openssl/err.h>
#include <openssl/aes.h>
#include <openssl/rand.h>
#include <openssl/cmac.h>
#include "crypto/evp.h"
#include "internal/cryptlib.h"
#include "crypto/modes.h"
#include "crypto/siv.h"
#include "crypto/aes_platform.h"
#include "evp_local.h"

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks;
    block128_f block;
    union {
        cbc128_f cbc;
        ctr128_f ctr;
    } stream;
} EVP_AES_KEY;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks;                       /* AES key schedule to use */
    int key_set;                /* Set if key initialised */
    int iv_set;                 /* Set if an iv is set */
    GCM128_CONTEXT gcm;
    unsigned char *iv;          /* Temporary IV store */
    int ivlen;                  /* IV length */
    int taglen;
    int iv_gen;                 /* It is OK to generate IVs */
    int iv_gen_rand;            /* No IV was specified, so generate a rand IV */
    int tls_aad_len;            /* TLS AAD length */
    uint64_t tls_enc_records;   /* Number of TLS records encrypted */
    ctr128_f ctr;
} EVP_AES_GCM_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks1, ks2;                 /* AES key schedules to use */
    XTS128_CONTEXT xts;
    void (*stream) (const unsigned char *in,
                    unsigned char *out, size_t length,
                    const AES_KEY *key1, const AES_KEY *key2,
                    const unsigned char iv[16]);
} EVP_AES_XTS_CTX;

#ifdef FIPS_MODULE
static const int allow_insecure_decrypt = 0;
#else
static const int allow_insecure_decrypt = 1;
#endif
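/*
 * Editor's note, derived from the "(!allow_insecure_decrypt || enc)" checks
 * in the XTS init functions below: in the FIPS module XTS rejects duplicated
 * key halves for both encryption and decryption, while outside FIPS a
 * decryption with duplicated halves is still permitted so that previously
 * written data remains recoverable.
 */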

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks;                       /* AES key schedule to use */
    int key_set;                /* Set if key initialised */
    int iv_set;                 /* Set if an iv is set */
    int tag_set;                /* Set if tag is valid */
    int len_set;                /* Set if message length set */
    int L, M;                   /* L and M parameters from RFC3610 */
    int tls_aad_len;            /* TLS AAD length */
    CCM128_CONTEXT ccm;
    ccm128_f str;
} EVP_AES_CCM_CTX;

#ifndef OPENSSL_NO_OCB
typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ksenc;                    /* AES key schedule to use for encryption */
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ksdec;                    /* AES key schedule to use for decryption */
    int key_set;                /* Set if key initialised */
    int iv_set;                 /* Set if an iv is set */
    OCB128_CONTEXT ocb;
    unsigned char *iv;          /* Temporary IV store */
    unsigned char tag[16];
    unsigned char data_buf[16]; /* Store partial data blocks */
    unsigned char aad_buf[16];  /* Store partial AAD blocks */
    int data_buf_len;
    int aad_buf_len;
    int ivlen;                  /* IV length */
    int taglen;
} EVP_AES_OCB_CTX;
#endif

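/*
 * Editor's note (an inference from the definition below): with a 64-bit
 * size_t this evaluates to (size_t)1 << 60 bytes, i.e. a chunk size chosen
 * so that a bit count of len * 8 cannot overflow a size_t when CFB1 runs
 * in bit-based mode elsewhere in this file.
 */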
#define MAXBITCHUNK ((size_t)1<<(sizeof(size_t)*8-4))

/* increment counter (64-bit int) by 1 */
static void ctr64_inc(unsigned char *counter)
{
    int n = 8;
    unsigned char c;

    do {
        --n;
        c = counter[n];
        ++c;
        counter[n] = c;
        if (c)
            return;
    } while (n);
}
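/*
 * Worked example (illustrative only): counter[0..7] is treated as one
 * big-endian 64-bit integer, so a buffer ending in { ..., 0x00, 0xff, 0xff }
 * becomes { ..., 0x01, 0x00, 0x00 }: each trailing 0xff wraps to 0x00 and
 * the carry stops at the first byte that increments to a non-zero value.
 */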

#if defined(AESNI_CAPABLE)
# if defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || defined(_M_X64)
#  define AES_GCM_ASM2(gctx)      (gctx->gcm.block==(block128_f)aesni_encrypt && \
                                   gctx->gcm.ghash==gcm_ghash_avx)
#  undef AES_GCM_ASM2          /* minor size optimization */
# endif

static int aesni_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                          const unsigned char *iv, int enc)
{
    int ret, mode;
    EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);

    mode = EVP_CIPHER_CTX_get_mode(ctx);
    if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
        && !enc) {
        ret = aesni_set_decrypt_key(key,
                                    EVP_CIPHER_CTX_get_key_length(ctx) * 8,
                                    &dat->ks.ks);
        dat->block = (block128_f) aesni_decrypt;
        dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
            (cbc128_f) aesni_cbc_encrypt : NULL;
    } else {
        ret = aesni_set_encrypt_key(key,
                                    EVP_CIPHER_CTX_get_key_length(ctx) * 8,
                                    &dat->ks.ks);
        dat->block = (block128_f) aesni_encrypt;
        if (mode == EVP_CIPH_CBC_MODE)
            dat->stream.cbc = (cbc128_f) aesni_cbc_encrypt;
        else if (mode == EVP_CIPH_CTR_MODE)
            dat->stream.ctr = (ctr128_f) aesni_ctr32_encrypt_blocks;
        else
            dat->stream.cbc = NULL;
    }

    if (ret < 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_AES_KEY_SETUP_FAILED);
        return 0;
    }

    return 1;
}
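/*-
 * Illustrative EVP-level usage that reaches this init path (a sketch only,
 * with error handling omitted; the buffers and lengths are hypothetical):
 *
 *     EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
 *     EVP_EncryptInit_ex(c, EVP_aes_128_cbc(), NULL, key, iv);
 *     EVP_EncryptUpdate(c, out, &outl, in, inl);
 *     EVP_EncryptFinal_ex(c, out + outl, &tmpl);
 *     EVP_CIPHER_CTX_free(c);
 */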

static int aesni_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len)
{
    aesni_cbc_encrypt(in, out, len, &EVP_C_DATA(EVP_AES_KEY,ctx)->ks.ks,
                      ctx->iv, EVP_CIPHER_CTX_is_encrypting(ctx));

    return 1;
}

static int aesni_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len)
{
    size_t bl = EVP_CIPHER_CTX_get_block_size(ctx);

    if (len < bl)
        return 1;

    aesni_ecb_encrypt(in, out, len, &EVP_C_DATA(EVP_AES_KEY,ctx)->ks.ks,
                      EVP_CIPHER_CTX_is_encrypting(ctx));

    return 1;
}

# define aesni_ofb_cipher aes_ofb_cipher
static int aesni_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

# define aesni_cfb_cipher aes_cfb_cipher
static int aesni_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

# define aesni_cfb8_cipher aes_cfb8_cipher
static int aesni_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aesni_cfb1_cipher aes_cfb1_cipher
static int aesni_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aesni_ctr_cipher aes_ctr_cipher
static int aesni_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

static int aesni_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
    if (!iv && !key)
        return 1;
    if (key) {
        aesni_set_encrypt_key(key, EVP_CIPHER_CTX_get_key_length(ctx) * 8,
                              &gctx->ks.ks);
        CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks, (block128_f) aesni_encrypt);
        gctx->ctr = (ctr128_f) aesni_ctr32_encrypt_blocks;
        /*
         * If we have an iv we can set it directly, otherwise use saved IV.
         */
        if (iv == NULL && gctx->iv_set)
            iv = gctx->iv;
        if (iv) {
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
            gctx->iv_set = 1;
        }
        gctx->key_set = 1;
    } else {
        /* If key set use IV, otherwise copy */
        if (gctx->key_set)
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
        else
            memcpy(gctx->iv, iv, gctx->ivlen);
        gctx->iv_set = 1;
        gctx->iv_gen = 0;
    }
    return 1;
}

# define aesni_gcm_cipher aes_gcm_cipher
static int aesni_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

static int aesni_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);

    if (!iv && !key)
        return 1;

    if (key) {
        /* The key is two half length keys in reality */
        const int bytes = EVP_CIPHER_CTX_get_key_length(ctx) / 2;
        const int bits = bytes * 8;

        /*
         * Verify that the two keys are different.
         *
         * This addresses Rogaway's vulnerability.
         * See comment in aes_xts_init_key() below.
         */
        if ((!allow_insecure_decrypt || enc)
                && CRYPTO_memcmp(key, key + bytes, bytes) == 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_XTS_DUPLICATED_KEYS);
            return 0;
        }

        /* key_len is two AES keys */
        if (enc) {
            aesni_set_encrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aesni_encrypt;
            xctx->stream = aesni_xts_encrypt;
        } else {
            aesni_set_decrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aesni_decrypt;
            xctx->stream = aesni_xts_decrypt;
        }

        aesni_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
        xctx->xts.block2 = (block128_f) aesni_encrypt;

        xctx->xts.key1 = &xctx->ks1;
    }

    if (iv) {
        xctx->xts.key2 = &xctx->ks2;
        memcpy(ctx->iv, iv, 16);
    }

    return 1;
}
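/*
 * Editor's illustration: EVP reports a doubled key length for XTS, so e.g.
 * EVP_aes_256_xts() takes a 64-byte key whose two 32-byte halves feed ks1
 * and ks2 above; the halves must differ for the duplicated-keys check to
 * pass.
 */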

# define aesni_xts_cipher aes_xts_cipher
static int aesni_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

static int aesni_ccm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
    if (!iv && !key)
        return 1;
    if (key) {
        aesni_set_encrypt_key(key, EVP_CIPHER_CTX_get_key_length(ctx) * 8,
                              &cctx->ks.ks);
        CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
                           &cctx->ks, (block128_f) aesni_encrypt);
        cctx->str = enc ? (ccm128_f) aesni_ccm64_encrypt_blocks :
            (ccm128_f) aesni_ccm64_decrypt_blocks;
        cctx->key_set = 1;
    }
    if (iv) {
        memcpy(ctx->iv, iv, 15 - cctx->L);
        cctx->iv_set = 1;
    }
    return 1;
}
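/*
 * RFC 3610 background for the "15 - L" copy above: the CCM nonce always
 * occupies 15 - L bytes of the 16-byte block, so a larger L (a longer
 * message-length field) leaves a correspondingly shorter nonce.
 */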

# define aesni_ccm_cipher aes_ccm_cipher
static int aesni_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

# ifndef OPENSSL_NO_OCB
static int aesni_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);
    if (!iv && !key)
        return 1;
    if (key) {
        do {
            /*
             * We set both the encrypt and decrypt key here because decrypt
             * needs both. We could possibly optimise to remove setting the
             * decrypt for an encryption operation.
             */
            aesni_set_encrypt_key(key, EVP_CIPHER_CTX_get_key_length(ctx) * 8,
                                  &octx->ksenc.ks);
            aesni_set_decrypt_key(key, EVP_CIPHER_CTX_get_key_length(ctx) * 8,
                                  &octx->ksdec.ks);
            if (!CRYPTO_ocb128_init(&octx->ocb,
                                    &octx->ksenc.ks, &octx->ksdec.ks,
                                    (block128_f) aesni_encrypt,
                                    (block128_f) aesni_decrypt,
                                    enc ? aesni_ocb_encrypt
                                        : aesni_ocb_decrypt))
                return 0;
        }
        while (0);

        /*
         * If we have an iv we can set it directly, otherwise use saved IV.
         */
        if (iv == NULL && octx->iv_set)
            iv = octx->iv;
        if (iv) {
            if (CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen)
                != 1)
                return 0;
            octx->iv_set = 1;
        }
        octx->key_set = 1;
    } else {
        /* If key set use IV, otherwise copy */
        if (octx->key_set)
            CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen);
        else
            memcpy(octx->iv, iv, octx->ivlen);
        octx->iv_set = 1;
    }
    return 1;
}

# define aesni_ocb_cipher aes_ocb_cipher
static int aesni_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);
# endif /* OPENSSL_NO_OCB */

# define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
static const EVP_CIPHER aesni_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aesni_init_key, \
        aesni_##mode##_cipher, \
        NULL, \
        sizeof(EVP_AES_KEY), \
        NULL,NULL,NULL,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize, \
        keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aes_init_key, \
        aes_##mode##_cipher, \
        NULL, \
        sizeof(EVP_AES_KEY), \
        NULL,NULL,NULL,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return AESNI_CAPABLE?&aesni_##keylen##_##mode:&aes_##keylen##_##mode; }

# define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags) \
static const EVP_CIPHER aesni_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aesni_##mode##_init_key, \
        aesni_##mode##_cipher, \
        aes_##mode##_cleanup, \
        sizeof(EVP_AES_##MODE##_CTX), \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aes_##mode##_init_key, \
        aes_##mode##_cipher, \
        aes_##mode##_cleanup, \
        sizeof(EVP_AES_##MODE##_CTX), \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return AESNI_CAPABLE?&aesni_##keylen##_##mode:&aes_##keylen##_##mode; }
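/*-
 * Editor's sketch of what one BLOCK_CIPHER_generic expansion provides (the
 * actual invocations live later in this file; the arguments here are
 * illustrative only):
 *
 *     BLOCK_CIPHER_generic(NID_aes, 128, 16, 16, cbc, cbc, CBC, flags)
 *
 * would define the aesni_128_cbc and aes_128_cbc EVP_CIPHER tables plus an
 * EVP_aes_128_cbc() accessor that picks the AES-NI table iff AESNI_CAPABLE.
 */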

#elif defined(SPARC_AES_CAPABLE)

static int aes_t4_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                           const unsigned char *iv, int enc)
{
    int ret, mode, bits;
    EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);

    mode = EVP_CIPHER_CTX_get_mode(ctx);
    bits = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
    if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
        && !enc) {
        ret = 0;
        aes_t4_set_decrypt_key(key, bits, &dat->ks.ks);
        dat->block = (block128_f) aes_t4_decrypt;
        switch (bits) {
        case 128:
            dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
                (cbc128_f) aes128_t4_cbc_decrypt : NULL;
            break;
        case 192:
            dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
                (cbc128_f) aes192_t4_cbc_decrypt : NULL;
            break;
        case 256:
            dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
                (cbc128_f) aes256_t4_cbc_decrypt : NULL;
            break;
        default:
            ret = -1;
        }
    } else {
        ret = 0;
        aes_t4_set_encrypt_key(key, bits, &dat->ks.ks);
        dat->block = (block128_f) aes_t4_encrypt;
        switch (bits) {
        case 128:
            if (mode == EVP_CIPH_CBC_MODE)
                dat->stream.cbc = (cbc128_f) aes128_t4_cbc_encrypt;
            else if (mode == EVP_CIPH_CTR_MODE)
                dat->stream.ctr = (ctr128_f) aes128_t4_ctr32_encrypt;
            else
                dat->stream.cbc = NULL;
            break;
        case 192:
            if (mode == EVP_CIPH_CBC_MODE)
                dat->stream.cbc = (cbc128_f) aes192_t4_cbc_encrypt;
            else if (mode == EVP_CIPH_CTR_MODE)
                dat->stream.ctr = (ctr128_f) aes192_t4_ctr32_encrypt;
            else
                dat->stream.cbc = NULL;
            break;
        case 256:
            if (mode == EVP_CIPH_CBC_MODE)
                dat->stream.cbc = (cbc128_f) aes256_t4_cbc_encrypt;
            else if (mode == EVP_CIPH_CTR_MODE)
                dat->stream.ctr = (ctr128_f) aes256_t4_ctr32_encrypt;
            else
                dat->stream.cbc = NULL;
            break;
        default:
            ret = -1;
        }
    }

    if (ret < 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_AES_KEY_SETUP_FAILED);
        return 0;
    }

    return 1;
}

# define aes_t4_cbc_cipher aes_cbc_cipher
static int aes_t4_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_ecb_cipher aes_ecb_cipher
static int aes_t4_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_ofb_cipher aes_ofb_cipher
static int aes_t4_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_cfb_cipher aes_cfb_cipher
static int aes_t4_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_cfb8_cipher aes_cfb8_cipher
static int aes_t4_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                              const unsigned char *in, size_t len);

# define aes_t4_cfb1_cipher aes_cfb1_cipher
static int aes_t4_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                              const unsigned char *in, size_t len);

# define aes_t4_ctr_cipher aes_ctr_cipher
static int aes_t4_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

static int aes_t4_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
    if (!iv && !key)
        return 1;
    if (key) {
        int bits = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
        aes_t4_set_encrypt_key(key, bits, &gctx->ks.ks);
        CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
                           (block128_f) aes_t4_encrypt);
        switch (bits) {
        case 128:
            gctx->ctr = (ctr128_f) aes128_t4_ctr32_encrypt;
            break;
        case 192:
            gctx->ctr = (ctr128_f) aes192_t4_ctr32_encrypt;
            break;
        case 256:
            gctx->ctr = (ctr128_f) aes256_t4_ctr32_encrypt;
            break;
        default:
            return 0;
        }
        /*
         * If we have an iv we can set it directly, otherwise use saved IV.
         */
        if (iv == NULL && gctx->iv_set)
            iv = gctx->iv;
        if (iv) {
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
            gctx->iv_set = 1;
        }
        gctx->key_set = 1;
    } else {
        /* If key set use IV, otherwise copy */
        if (gctx->key_set)
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
        else
            memcpy(gctx->iv, iv, gctx->ivlen);
        gctx->iv_set = 1;
        gctx->iv_gen = 0;
    }
    return 1;
}

# define aes_t4_gcm_cipher aes_gcm_cipher
static int aes_t4_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

static int aes_t4_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);

    if (!iv && !key)
        return 1;

    if (key) {
        /* The key is two half length keys in reality */
        const int bytes = EVP_CIPHER_CTX_get_key_length(ctx) / 2;
        const int bits = bytes * 8;

        /*
         * Verify that the two keys are different.
         *
         * This addresses Rogaway's vulnerability.
         * See comment in aes_xts_init_key() below.
         */
        if ((!allow_insecure_decrypt || enc)
                && CRYPTO_memcmp(key, key + bytes, bytes) == 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_XTS_DUPLICATED_KEYS);
            return 0;
        }

        xctx->stream = NULL;
        /* key_len is two AES keys */
        if (enc) {
            aes_t4_set_encrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aes_t4_encrypt;
            switch (bits) {
            case 128:
                xctx->stream = aes128_t4_xts_encrypt;
                break;
            case 256:
                xctx->stream = aes256_t4_xts_encrypt;
                break;
            default:
                return 0;
            }
        } else {
            aes_t4_set_decrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aes_t4_decrypt;
            switch (bits) {
            case 128:
                xctx->stream = aes128_t4_xts_decrypt;
                break;
            case 256:
                xctx->stream = aes256_t4_xts_decrypt;
                break;
            default:
                return 0;
            }
        }

        aes_t4_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
        xctx->xts.block2 = (block128_f) aes_t4_encrypt;

        xctx->xts.key1 = &xctx->ks1;
    }

    if (iv) {
        xctx->xts.key2 = &xctx->ks2;
        memcpy(ctx->iv, iv, 16);
    }

    return 1;
}

# define aes_t4_xts_cipher aes_xts_cipher
static int aes_t4_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

static int aes_t4_ccm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
    if (!iv && !key)
        return 1;
    if (key) {
        int bits = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
        aes_t4_set_encrypt_key(key, bits, &cctx->ks.ks);
        CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
                           &cctx->ks, (block128_f) aes_t4_encrypt);
        cctx->str = NULL;
        cctx->key_set = 1;
    }
    if (iv) {
        memcpy(ctx->iv, iv, 15 - cctx->L);
        cctx->iv_set = 1;
    }
    return 1;
}

# define aes_t4_ccm_cipher aes_ccm_cipher
static int aes_t4_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# ifndef OPENSSL_NO_OCB
static int aes_t4_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);
    if (!iv && !key)
        return 1;
    if (key) {
        do {
            /*
             * We set both the encrypt and decrypt key here because decrypt
             * needs both. We could possibly optimise to remove setting the
             * decrypt for an encryption operation.
             */
            aes_t4_set_encrypt_key(key, EVP_CIPHER_CTX_get_key_length(ctx) * 8,
                                   &octx->ksenc.ks);
            aes_t4_set_decrypt_key(key, EVP_CIPHER_CTX_get_key_length(ctx) * 8,
                                   &octx->ksdec.ks);
            if (!CRYPTO_ocb128_init(&octx->ocb,
                                    &octx->ksenc.ks, &octx->ksdec.ks,
                                    (block128_f) aes_t4_encrypt,
                                    (block128_f) aes_t4_decrypt,
                                    NULL))
                return 0;
        }
        while (0);

        /*
         * If we have an iv we can set it directly, otherwise use saved IV.
         */
        if (iv == NULL && octx->iv_set)
            iv = octx->iv;
        if (iv) {
            if (CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen)
                != 1)
                return 0;
            octx->iv_set = 1;
        }
        octx->key_set = 1;
    } else {
        /* If key set use IV, otherwise copy */
        if (octx->key_set)
            CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen);
        else
            memcpy(octx->iv, iv, octx->ivlen);
        octx->iv_set = 1;
    }
    return 1;
}

# define aes_t4_ocb_cipher aes_ocb_cipher
static int aes_t4_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);
# endif /* OPENSSL_NO_OCB */

# ifndef OPENSSL_NO_SIV
#  define aes_t4_siv_init_key aes_siv_init_key
#  define aes_t4_siv_cipher aes_siv_cipher
# endif /* OPENSSL_NO_SIV */

# define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
static const EVP_CIPHER aes_t4_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aes_t4_init_key, \
        aes_t4_##mode##_cipher, \
        NULL, \
        sizeof(EVP_AES_KEY), \
        NULL,NULL,NULL,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize, \
        keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aes_init_key, \
        aes_##mode##_cipher, \
        NULL, \
        sizeof(EVP_AES_KEY), \
        NULL,NULL,NULL,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return SPARC_AES_CAPABLE?&aes_t4_##keylen##_##mode:&aes_##keylen##_##mode; }

# define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags) \
static const EVP_CIPHER aes_t4_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aes_t4_##mode##_init_key, \
        aes_t4_##mode##_cipher, \
        aes_##mode##_cleanup, \
        sizeof(EVP_AES_##MODE##_CTX), \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aes_##mode##_init_key, \
        aes_##mode##_cipher, \
        aes_##mode##_cleanup, \
        sizeof(EVP_AES_##MODE##_CTX), \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return SPARC_AES_CAPABLE?&aes_t4_##keylen##_##mode:&aes_##keylen##_##mode; }

#elif defined(S390X_aes_128_CAPABLE)
/* IBM S390X support */
typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KM-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-06)
         */
        struct {
            unsigned char k[32];
        } param;
        /* KM-AES parameter block - end */
    } km;
    unsigned int fc;
} S390X_AES_ECB_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KMO-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-08)
         */
        struct {
            unsigned char cv[16];
            unsigned char k[32];
        } param;
        /* KMO-AES parameter block - end */
    } kmo;
    unsigned int fc;
} S390X_AES_OFB_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KMF-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-08)
         */
        struct {
            unsigned char cv[16];
            unsigned char k[32];
        } param;
        /* KMF-AES parameter block - end */
    } kmf;
    unsigned int fc;
} S390X_AES_CFB_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KMA-GCM-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-11)
         */
        struct {
            unsigned char reserved[12];
            union {
                unsigned int w;
                unsigned char b[4];
            } cv;
            union {
                unsigned long long g[2];
                unsigned char b[16];
            } t;
            unsigned char h[16];
            unsigned long long taadl;
            unsigned long long tpcl;
            union {
                unsigned long long g[2];
                unsigned int w[4];
            } j0;
            unsigned char k[32];
        } param;
        /* KMA-GCM-AES parameter block - end */
    } kma;
    unsigned int fc;
    int key_set;

    unsigned char *iv;
    int ivlen;
    int iv_set;
    int iv_gen;

    int taglen;

    unsigned char ares[16];
    unsigned char mres[16];
    unsigned char kres[16];
    int areslen;
    int mreslen;
    int kreslen;

    int tls_aad_len;
    uint64_t tls_enc_records;   /* Number of TLS records encrypted */
} S390X_AES_GCM_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * Padding is chosen so that ccm.kmac_param.k overlaps with key.k and
         * ccm.fc with key.k.rounds. Remember that on s390x, an AES_KEY's
         * rounds field is used to store the function code and that the key
         * schedule is not stored (if aes hardware support is detected).
         */
        struct {
            unsigned char pad[16];
            AES_KEY k;
        } key;

        struct {
            /*-
             * KMAC-AES parameter block - begin
             * (see z/Architecture Principles of Operation >= SA22-7832-08)
             */
            struct {
                union {
                    unsigned long long g[2];
                    unsigned char b[16];
                } icv;
                unsigned char k[32];
            } kmac_param;
            /* KMAC-AES parameter block - end */

            union {
                unsigned long long g[2];
                unsigned char b[16];
            } nonce;
            union {
                unsigned long long g[2];
                unsigned char b[16];
            } buf;

            unsigned long long blocks;
            int l;
            int m;
            int tls_aad_len;
            int iv_set;
            int tag_set;
            int len_set;
            int key_set;

            unsigned char pad[140];
            unsigned int fc;
        } ccm;
    } aes;
} S390X_AES_CCM_CTX;

# define s390x_aes_init_key aes_init_key
static int s390x_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc);

# define S390X_AES_CBC_CTX EVP_AES_KEY

# define s390x_aes_cbc_init_key aes_init_key

# define s390x_aes_cbc_cipher aes_cbc_cipher
static int s390x_aes_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len);

static int s390x_aes_ecb_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc)
{
    S390X_AES_ECB_CTX *cctx = EVP_C_DATA(S390X_AES_ECB_CTX, ctx);
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);

    cctx->fc = S390X_AES_FC(keylen);
    if (!enc)
        cctx->fc |= S390X_DECRYPT;

    memcpy(cctx->km.param.k, key, keylen);
    return 1;
}

static int s390x_aes_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_ECB_CTX *cctx = EVP_C_DATA(S390X_AES_ECB_CTX, ctx);

    s390x_km(in, len, out, cctx->fc, &cctx->km.param);
    return 1;
}

static int s390x_aes_ofb_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *ivec, int enc)
{
    S390X_AES_OFB_CTX *cctx = EVP_C_DATA(S390X_AES_OFB_CTX, ctx);
    const unsigned char *iv = ctx->oiv;
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);

    memcpy(cctx->kmo.param.cv, iv, ivlen);
    memcpy(cctx->kmo.param.k, key, keylen);
    cctx->fc = S390X_AES_FC(keylen);
    return 1;
}

static int s390x_aes_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_OFB_CTX *cctx = EVP_C_DATA(S390X_AES_OFB_CTX, ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);
    unsigned char *iv = EVP_CIPHER_CTX_iv_noconst(ctx);
    int n = ctx->num;
    int rem;

    memcpy(cctx->kmo.param.cv, iv, ivlen);
    while (n && len) {
        *out = *in ^ cctx->kmo.param.cv[n];
        n = (n + 1) & 0xf;
        --len;
        ++in;
        ++out;
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kmo(in, len, out, cctx->fc, &cctx->kmo.param);

        out += len;
        in += len;
    }

    if (rem) {
        s390x_km(cctx->kmo.param.cv, 16, cctx->kmo.param.cv, cctx->fc,
                 cctx->kmo.param.k);

        while (rem--) {
            out[n] = in[n] ^ cctx->kmo.param.cv[n];
            ++n;
        }
    }

    memcpy(iv, cctx->kmo.param.cv, ivlen);
    ctx->num = n;
    return 1;
}

static int s390x_aes_cfb_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *ivec, int enc)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const unsigned char *iv = ctx->oiv;
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);

    cctx->fc = S390X_AES_FC(keylen);
    cctx->fc |= 16 << 24;   /* 16 bytes cipher feedback */
    if (!enc)
        cctx->fc |= S390X_DECRYPT;

    memcpy(cctx->kmf.param.cv, iv, ivlen);
    memcpy(cctx->kmf.param.k, key, keylen);
    return 1;
}

static int s390x_aes_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
    const int enc = EVP_CIPHER_CTX_is_encrypting(ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);
    unsigned char *iv = EVP_CIPHER_CTX_iv_noconst(ctx);
    int n = ctx->num;
    int rem;
    unsigned char tmp;

    memcpy(cctx->kmf.param.cv, iv, ivlen);
    while (n && len) {
        tmp = *in;
        *out = cctx->kmf.param.cv[n] ^ tmp;
        cctx->kmf.param.cv[n] = enc ? *out : tmp;
        n = (n + 1) & 0xf;
        --len;
        ++in;
        ++out;
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kmf(in, len, out, cctx->fc, &cctx->kmf.param);

        out += len;
        in += len;
    }

    if (rem) {
        s390x_km(cctx->kmf.param.cv, 16, cctx->kmf.param.cv,
                 S390X_AES_FC(keylen), cctx->kmf.param.k);

        while (rem--) {
            tmp = in[n];
            out[n] = cctx->kmf.param.cv[n] ^ tmp;
            cctx->kmf.param.cv[n] = enc ? out[n] : tmp;
            ++n;
        }
    }

    memcpy(iv, cctx->kmf.param.cv, ivlen);
    ctx->num = n;
    return 1;
}

static int s390x_aes_cfb8_init_key(EVP_CIPHER_CTX *ctx,
                                   const unsigned char *key,
                                   const unsigned char *ivec, int enc)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const unsigned char *iv = ctx->oiv;
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);

    cctx->fc = S390X_AES_FC(keylen);
    cctx->fc |= 1 << 24;   /* 1 byte cipher feedback */
    if (!enc)
        cctx->fc |= S390X_DECRYPT;

    memcpy(cctx->kmf.param.cv, iv, ivlen);
    memcpy(cctx->kmf.param.k, key, keylen);
    return 1;
}

static int s390x_aes_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                 const unsigned char *in, size_t len)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);
    unsigned char *iv = EVP_CIPHER_CTX_iv_noconst(ctx);

    memcpy(cctx->kmf.param.cv, iv, ivlen);
    s390x_kmf(in, len, out, cctx->fc, &cctx->kmf.param);
    memcpy(iv, cctx->kmf.param.cv, ivlen);
    return 1;
}

# define s390x_aes_cfb1_init_key aes_init_key

# define s390x_aes_cfb1_cipher aes_cfb1_cipher
static int s390x_aes_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                 const unsigned char *in, size_t len);

# define S390X_AES_CTR_CTX EVP_AES_KEY

# define s390x_aes_ctr_init_key aes_init_key

# define s390x_aes_ctr_cipher aes_ctr_cipher
static int s390x_aes_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len);

/* iv + padding length for iv lengths != 12 */
# define S390X_gcm_ivpadlen(i) ((((i) + 15) >> 4 << 4) + 16)
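/*-
 * Worked example (illustrative): S390X_gcm_ivpadlen(13)
 * = ((13 + 15) >> 4 << 4) + 16 = 16 + 16 = 32, i.e. the IV zero-padded to a
 * 16-byte boundary plus one final 16-byte block whose last 8 bytes carry the
 * IV length in bits (see the EVP_CTRL_AEAD_SET_IVLEN handling below).
 */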

/*-
 * Process additional authenticated data. Returns 0 on success. Code is
 * big-endian.
 */
static int s390x_aes_gcm_aad(S390X_AES_GCM_CTX *ctx, const unsigned char *aad,
                             size_t len)
{
    unsigned long long alen;
    int n, rem;

    if (ctx->kma.param.tpcl)
        return -2;

    alen = ctx->kma.param.taadl + len;
    if (alen > (U64(1) << 61) || (sizeof(len) == 8 && alen < len))
        return -1;
    ctx->kma.param.taadl = alen;

    n = ctx->areslen;
    if (n) {
        while (n && len) {
            ctx->ares[n] = *aad;
            n = (n + 1) & 0xf;
            ++aad;
            --len;
        }
        /* ctx->ares contains a complete block if offset has wrapped around */
        if (!n) {
            s390x_kma(ctx->ares, 16, NULL, 0, NULL, ctx->fc, &ctx->kma.param);
            ctx->fc |= S390X_KMA_HS;
        }
        ctx->areslen = n;
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kma(aad, len, NULL, 0, NULL, ctx->fc, &ctx->kma.param);
        aad += len;
        ctx->fc |= S390X_KMA_HS;
    }

    if (rem) {
        ctx->areslen = rem;

        do {
            --rem;
            ctx->ares[rem] = aad[rem];
        } while (rem);
    }
    return 0;
}

/*-
 * En/de-crypt plain/cipher-text and authenticate ciphertext. Returns 0 for
 * success. Code is big-endian.
 */
static int s390x_aes_gcm(S390X_AES_GCM_CTX *ctx, const unsigned char *in,
                         unsigned char *out, size_t len)
{
    const unsigned char *inptr;
    unsigned long long mlen;
    union {
        unsigned int w[4];
        unsigned char b[16];
    } buf;
    size_t inlen;
    int n, rem, i;

    mlen = ctx->kma.param.tpcl + len;
    if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
        return -1;
    ctx->kma.param.tpcl = mlen;

    n = ctx->mreslen;
    if (n) {
        inptr = in;
        inlen = len;
        while (n && inlen) {
            ctx->mres[n] = *inptr;
            n = (n + 1) & 0xf;
            ++inptr;
            --inlen;
        }
        /* ctx->mres contains a complete block if offset has wrapped around */
        if (!n) {
            s390x_kma(ctx->ares, ctx->areslen, ctx->mres, 16, buf.b,
                      ctx->fc | S390X_KMA_LAAD, &ctx->kma.param);
            ctx->fc |= S390X_KMA_HS;
            ctx->areslen = 0;

            /* previous call already encrypted/decrypted its remainder,
             * see comment below */
            n = ctx->mreslen;
            while (n) {
                *out = buf.b[n];
                n = (n + 1) & 0xf;
                ++out;
                ++in;
                --len;
            }
            ctx->mreslen = 0;
        }
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kma(ctx->ares, ctx->areslen, in, len, out,
                  ctx->fc | S390X_KMA_LAAD, &ctx->kma.param);
        in += len;
        out += len;
        ctx->fc |= S390X_KMA_HS;
        ctx->areslen = 0;
    }

    /*-
     * If there is a remainder, it has to be saved such that it can be
     * processed by kma later. However, we also have to do the for-now
     * unauthenticated encryption/decryption part here and now...
     */
    if (rem) {
        if (!ctx->mreslen) {
            buf.w[0] = ctx->kma.param.j0.w[0];
            buf.w[1] = ctx->kma.param.j0.w[1];
            buf.w[2] = ctx->kma.param.j0.w[2];
            buf.w[3] = ctx->kma.param.cv.w + 1;
            s390x_km(buf.b, 16, ctx->kres, ctx->fc & 0x1f, &ctx->kma.param.k);
        }

        n = ctx->mreslen;
        for (i = 0; i < rem; i++) {
            ctx->mres[n + i] = in[i];
            out[i] = in[i] ^ ctx->kres[n + i];
        }

        ctx->mreslen += rem;
    }
    return 0;
}

/*-
 * Initialize context structure. Code is big-endian.
 */
static void s390x_aes_gcm_setiv(S390X_AES_GCM_CTX *ctx,
                                const unsigned char *iv)
{
    ctx->kma.param.t.g[0] = 0;
    ctx->kma.param.t.g[1] = 0;
    ctx->kma.param.tpcl = 0;
    ctx->kma.param.taadl = 0;
    ctx->mreslen = 0;
    ctx->areslen = 0;
    ctx->kreslen = 0;

    if (ctx->ivlen == 12) {
        memcpy(&ctx->kma.param.j0, iv, ctx->ivlen);
        ctx->kma.param.j0.w[3] = 1;
        ctx->kma.param.cv.w = 1;
    } else {
        /* ctx->iv has the right size and is already padded. */
        memcpy(ctx->iv, iv, ctx->ivlen);
        s390x_kma(ctx->iv, S390X_gcm_ivpadlen(ctx->ivlen), NULL, 0, NULL,
                  ctx->fc, &ctx->kma.param);
        ctx->fc |= S390X_KMA_HS;

        ctx->kma.param.j0.g[0] = ctx->kma.param.t.g[0];
        ctx->kma.param.j0.g[1] = ctx->kma.param.t.g[1];
        ctx->kma.param.cv.w = ctx->kma.param.j0.w[3];
        ctx->kma.param.t.g[0] = 0;
        ctx->kma.param.t.g[1] = 0;
    }
}
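/*
 * Background (NIST SP 800-38D): for a 96-bit IV the pre-counter block is
 * J0 = IV || 0x00000001, which is the fast path above; for any other IV
 * length, J0 = GHASH over the zero-padded IV and its 64-bit bit length,
 * which the KMA instruction computes in the else branch.
 */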

/*-
 * Performs various operations on the context structure depending on control
 * type. Returns 1 for success, 0 for failure and -1 for unknown control type.
 * Code is big-endian.
 */
static int s390x_aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, c);
    S390X_AES_GCM_CTX *gctx_out;
    EVP_CIPHER_CTX *out;
    unsigned char *buf;
    int ivlen, enc, len;

    switch (type) {
    case EVP_CTRL_INIT:
        ivlen = EVP_CIPHER_get_iv_length(c->cipher);
        gctx->key_set = 0;
        gctx->iv_set = 0;
        gctx->ivlen = ivlen;
        gctx->iv = c->iv;
        gctx->taglen = -1;
        gctx->iv_gen = 0;
        gctx->tls_aad_len = -1;
        return 1;

    case EVP_CTRL_GET_IVLEN:
        *(int *)ptr = gctx->ivlen;
        return 1;

    case EVP_CTRL_AEAD_SET_IVLEN:
        if (arg <= 0)
            return 0;

        if (arg != 12) {
            len = S390X_gcm_ivpadlen(arg);

            /* Allocate memory for iv if needed. */
            if (gctx->ivlen == 12 || len > S390X_gcm_ivpadlen(gctx->ivlen)) {
                if (gctx->iv != c->iv)
                    OPENSSL_free(gctx->iv);

                if ((gctx->iv = OPENSSL_malloc(len)) == NULL) {
                    ERR_raise(ERR_LIB_EVP, ERR_R_MALLOC_FAILURE);
                    return 0;
                }
            }
            /* Add padding. */
            memset(gctx->iv + arg, 0, len - arg - 8);
            *((unsigned long long *)(gctx->iv + len - 8)) = arg << 3;
        }
        gctx->ivlen = arg;
        return 1;

    case EVP_CTRL_AEAD_SET_TAG:
        buf = EVP_CIPHER_CTX_buf_noconst(c);
        enc = EVP_CIPHER_CTX_is_encrypting(c);
        if (arg <= 0 || arg > 16 || enc)
            return 0;

        memcpy(buf, ptr, arg);
        gctx->taglen = arg;
        return 1;

    case EVP_CTRL_AEAD_GET_TAG:
        enc = EVP_CIPHER_CTX_is_encrypting(c);
        if (arg <= 0 || arg > 16 || !enc || gctx->taglen < 0)
            return 0;

        memcpy(ptr, gctx->kma.param.t.b, arg);
        return 1;

    case EVP_CTRL_GCM_SET_IV_FIXED:
        /* Special case: -1 length restores whole iv */
        if (arg == -1) {
            memcpy(gctx->iv, ptr, gctx->ivlen);
            gctx->iv_gen = 1;
            return 1;
        }
        /*
         * Fixed field must be at least 4 bytes and invocation field at least
         * 8.
         */
        if ((arg < 4) || (gctx->ivlen - arg) < 8)
            return 0;

        if (arg)
            memcpy(gctx->iv, ptr, arg);

        enc = EVP_CIPHER_CTX_is_encrypting(c);
        if (enc && RAND_bytes(gctx->iv + arg, gctx->ivlen - arg) <= 0)
            return 0;

        gctx->iv_gen = 1;
        return 1;

    case EVP_CTRL_GCM_IV_GEN:
        if (gctx->iv_gen == 0 || gctx->key_set == 0)
            return 0;

        s390x_aes_gcm_setiv(gctx, gctx->iv);

        if (arg <= 0 || arg > gctx->ivlen)
            arg = gctx->ivlen;

        memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg);
        /*
         * Invocation field will be at least 8 bytes in size and so no need
         * to check wrap around or increment more than last 8 bytes.
         */
        ctr64_inc(gctx->iv + gctx->ivlen - 8);
        gctx->iv_set = 1;
        return 1;

    case EVP_CTRL_GCM_SET_IV_INV:
        enc = EVP_CIPHER_CTX_is_encrypting(c);
        if (gctx->iv_gen == 0 || gctx->key_set == 0 || enc)
            return 0;

        memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg);
        s390x_aes_gcm_setiv(gctx, gctx->iv);
        gctx->iv_set = 1;
        return 1;

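    /*-
     * The EVP_AEAD_TLS1_AAD_LEN (13) bytes handled below follow the TLS
     * record header format:
     * sequence number (8) || type (1) || version (2) || length (2).
     */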
    case EVP_CTRL_AEAD_TLS1_AAD:
        /* Save the aad for later use. */
        if (arg != EVP_AEAD_TLS1_AAD_LEN)
            return 0;

        buf = EVP_CIPHER_CTX_buf_noconst(c);
        memcpy(buf, ptr, arg);
        gctx->tls_aad_len = arg;
        gctx->tls_enc_records = 0;

        len = buf[arg - 2] << 8 | buf[arg - 1];
        /* Correct length for explicit iv. */
        if (len < EVP_GCM_TLS_EXPLICIT_IV_LEN)
            return 0;
        len -= EVP_GCM_TLS_EXPLICIT_IV_LEN;

        /* If decrypting correct for tag too. */
        enc = EVP_CIPHER_CTX_is_encrypting(c);
        if (!enc) {
            if (len < EVP_GCM_TLS_TAG_LEN)
                return 0;
            len -= EVP_GCM_TLS_TAG_LEN;
        }
        buf[arg - 2] = len >> 8;
        buf[arg - 1] = len & 0xff;
        /* Extra padding: tag appended to record. */
        return EVP_GCM_TLS_TAG_LEN;

    case EVP_CTRL_COPY:
        out = ptr;
        gctx_out = EVP_C_DATA(S390X_AES_GCM_CTX, out);

        if (gctx->iv == c->iv) {
            gctx_out->iv = out->iv;
        } else {
            len = S390X_gcm_ivpadlen(gctx->ivlen);

            if ((gctx_out->iv = OPENSSL_malloc(len)) == NULL) {
                ERR_raise(ERR_LIB_EVP, ERR_R_MALLOC_FAILURE);
                return 0;
            }

            memcpy(gctx_out->iv, gctx->iv, len);
        }
        return 1;

    default:
        return -1;
    }
}

/*-
 * Set key and/or iv. Returns 1 on success. Otherwise 0 is returned.
 */
static int s390x_aes_gcm_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
    int keylen;

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        keylen = EVP_CIPHER_CTX_get_key_length(ctx);
        memcpy(&gctx->kma.param.k, key, keylen);

        gctx->fc = S390X_AES_FC(keylen);
        if (!enc)
            gctx->fc |= S390X_DECRYPT;

        if (iv == NULL && gctx->iv_set)
            iv = gctx->iv;

        if (iv != NULL) {
            s390x_aes_gcm_setiv(gctx, iv);
            gctx->iv_set = 1;
        }
        gctx->key_set = 1;
    } else {
        if (gctx->key_set)
            s390x_aes_gcm_setiv(gctx, iv);
        else
            memcpy(gctx->iv, iv, gctx->ivlen);

        gctx->iv_set = 1;
        gctx->iv_gen = 0;
    }
    return 1;
}

/*-
 * En/de-crypt and authenticate TLS packet. Returns the number of bytes written
 * if successful. Otherwise -1 is returned. Code is big-endian.
 */
static int s390x_aes_gcm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                    const unsigned char *in, size_t len)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
    const unsigned char *buf = EVP_CIPHER_CTX_buf_noconst(ctx);
    const int enc = EVP_CIPHER_CTX_is_encrypting(ctx);
    int rv = -1;

    if (out != in || len < (EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN))
        return -1;

    /*
     * Check for too many keys as per FIPS 140-2 IG A.5 "Key/IV Pair Uniqueness
     * Requirements from SP 800-38D". The requirement is for one party to the
     * communication to fail after 2^64 - 1 keys. We do this on the encrypting
     * side only.
     */
    if (ctx->encrypt && ++gctx->tls_enc_records == 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_TOO_MANY_RECORDS);
        goto err;
    }

    if (EVP_CIPHER_CTX_ctrl(ctx, enc ? EVP_CTRL_GCM_IV_GEN
                                     : EVP_CTRL_GCM_SET_IV_INV,
                            EVP_GCM_TLS_EXPLICIT_IV_LEN, out) <= 0)
        goto err;

    in += EVP_GCM_TLS_EXPLICIT_IV_LEN;
    out += EVP_GCM_TLS_EXPLICIT_IV_LEN;
    len -= EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;

    gctx->kma.param.taadl = gctx->tls_aad_len << 3;
    gctx->kma.param.tpcl = len << 3;
    s390x_kma(buf, gctx->tls_aad_len, in, len, out,
              gctx->fc | S390X_KMA_LAAD | S390X_KMA_LPC, &gctx->kma.param);

    if (enc) {
        memcpy(out + len, gctx->kma.param.t.b, EVP_GCM_TLS_TAG_LEN);
        rv = len + EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
    } else {
        if (CRYPTO_memcmp(gctx->kma.param.t.b, in + len,
                          EVP_GCM_TLS_TAG_LEN)) {
            OPENSSL_cleanse(out, len);
            goto err;
        }
        rv = len;
    }
 err:
    gctx->iv_set = 0;
    gctx->tls_aad_len = -1;
    return rv;
}
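/*
 * Resulting TLS record body layout handled above (both directions):
 * explicit IV (8 bytes) || ciphertext || tag (16 bytes); the explicit IV
 * doubles as the GCM invocation field.
 */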

/*-
 * Called from EVP layer to initialize context, process additional
 * authenticated data, en/de-crypt plain/cipher-text and authenticate
 * ciphertext or process a TLS packet, depending on context. Returns bytes
 * written on success. Otherwise -1 is returned. Code is big-endian.
 */
static int s390x_aes_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
    unsigned char *buf, tmp[16];
    int enc;

    if (!gctx->key_set)
        return -1;

    if (gctx->tls_aad_len >= 0)
        return s390x_aes_gcm_tls_cipher(ctx, out, in, len);

    if (!gctx->iv_set)
        return -1;

    if (in != NULL) {
        if (out == NULL) {
            if (s390x_aes_gcm_aad(gctx, in, len))
                return -1;
        } else {
            if (s390x_aes_gcm(gctx, in, out, len))
                return -1;
        }
        return len;
    } else {
        gctx->kma.param.taadl <<= 3;
        gctx->kma.param.tpcl <<= 3;
        s390x_kma(gctx->ares, gctx->areslen, gctx->mres, gctx->mreslen, tmp,
                  gctx->fc | S390X_KMA_LAAD | S390X_KMA_LPC, &gctx->kma.param);
        /* recall that we already did en-/decrypt gctx->mres
         * and returned it to caller... */
        OPENSSL_cleanse(tmp, gctx->mreslen);
        gctx->iv_set = 0;

        enc = EVP_CIPHER_CTX_is_encrypting(ctx);
        if (enc) {
            gctx->taglen = 16;
        } else {
            if (gctx->taglen < 0)
                return -1;

            buf = EVP_CIPHER_CTX_buf_noconst(ctx);
            if (CRYPTO_memcmp(buf, gctx->kma.param.t.b, gctx->taglen))
                return -1;
        }
        return 0;
    }
}

static int s390x_aes_gcm_cleanup(EVP_CIPHER_CTX *c)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, c);

    if (gctx == NULL)
        return 0;

    if (gctx->iv != c->iv)
        OPENSSL_free(gctx->iv);

    OPENSSL_cleanse(gctx, sizeof(*gctx));
    return 1;
}

# define S390X_AES_XTS_CTX EVP_AES_XTS_CTX

# define s390x_aes_xts_init_key aes_xts_init_key
static int s390x_aes_xts_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc);
# define s390x_aes_xts_cipher aes_xts_cipher
static int s390x_aes_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len);
# define s390x_aes_xts_ctrl aes_xts_ctrl
static int s390x_aes_xts_ctrl(EVP_CIPHER_CTX *, int type, int arg, void *ptr);
# define s390x_aes_xts_cleanup aes_xts_cleanup

/*-
 * Set nonce and length fields. Code is big-endian.
 */
static inline void s390x_aes_ccm_setiv(S390X_AES_CCM_CTX *ctx,
                                       const unsigned char *nonce,
                                       size_t mlen)
{
    ctx->aes.ccm.nonce.b[0] &= ~S390X_CCM_AAD_FLAG;
    ctx->aes.ccm.nonce.g[1] = mlen;
    memcpy(ctx->aes.ccm.nonce.b + 1, nonce, 15 - ctx->aes.ccm.l);
}
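/*
 * RFC 3610 B_0 layout produced above: flags (1 byte) || nonce (15 - L bytes)
 * || message length (L bytes, big-endian). Storing mlen into the trailing
 * 8 bytes first and then copying the nonce over the upper ones leaves
 * exactly the low-order L bytes as the length field, since mlen < 2^(8L).
 */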

/*-
 * Process additional authenticated data. Code is big-endian.
 */
static void s390x_aes_ccm_aad(S390X_AES_CCM_CTX *ctx, const unsigned char *aad,
                              size_t alen)
{
    unsigned char *ptr;
    int i, rem;

    if (!alen)
        return;

    ctx->aes.ccm.nonce.b[0] |= S390X_CCM_AAD_FLAG;

    /* Suppress 'type-punned pointer dereference' warning. */
    ptr = ctx->aes.ccm.buf.b;

    if (alen < ((1 << 16) - (1 << 8))) {
        *(uint16_t *)ptr = alen;
        i = 2;
    } else if (sizeof(alen) == 8
               && alen >= (size_t)1 << (32 % (sizeof(alen) * 8))) {
        *(uint16_t *)ptr = 0xffff;
        *(uint64_t *)(ptr + 2) = alen;
        i = 10;
    } else {
        *(uint16_t *)ptr = 0xfffe;
        *(uint32_t *)(ptr + 2) = alen;
        i = 6;
    }

    while (i < 16 && alen) {
        ctx->aes.ccm.buf.b[i] = *aad;
        ++aad;
        --alen;
        ++i;
    }
    while (i < 16) {
        ctx->aes.ccm.buf.b[i] = 0;
        ++i;
    }

    ctx->aes.ccm.kmac_param.icv.g[0] = 0;
    ctx->aes.ccm.kmac_param.icv.g[1] = 0;
    s390x_kmac(ctx->aes.ccm.nonce.b, 32, ctx->aes.ccm.fc,
               &ctx->aes.ccm.kmac_param);
    ctx->aes.ccm.blocks += 2;

    rem = alen & 0xf;
    alen &= ~(size_t)0xf;
    if (alen) {
        s390x_kmac(aad, alen, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
        ctx->aes.ccm.blocks += alen >> 4;
        aad += alen;
    }
    if (rem) {
        for (i = 0; i < rem; i++)
            ctx->aes.ccm.kmac_param.icv.b[i] ^= aad[i];

        s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
                 ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
                 ctx->aes.ccm.kmac_param.k);
        ctx->aes.ccm.blocks++;
    }
}
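/*-
 * RFC 3610 AAD length encoding used above (worked example, illustrative):
 * alen = 20 is encoded as the 2-byte big-endian value 0x0014; lengths of
 * 0xff00 and above get a 0xfffe marker plus 4 length bytes, and lengths of
 * 2^32 and above a 0xffff marker plus 8 length bytes.
 */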

/*-
 * En/de-crypt plain/cipher-text. Compute tag from plaintext. Returns 0 for
 * success.
 */
static int s390x_aes_ccm(S390X_AES_CCM_CTX *ctx, const unsigned char *in,
                         unsigned char *out, size_t len, int enc)
{
    size_t n, rem;
    unsigned int i, l, num;
    unsigned char flags;

    flags = ctx->aes.ccm.nonce.b[0];
    if (!(flags & S390X_CCM_AAD_FLAG)) {
        s390x_km(ctx->aes.ccm.nonce.b, 16, ctx->aes.ccm.kmac_param.icv.b,
                 ctx->aes.ccm.fc, ctx->aes.ccm.kmac_param.k);
        ctx->aes.ccm.blocks++;
    }
    l = flags & 0x7;
    ctx->aes.ccm.nonce.b[0] = l;

    /*-
     * Reconstruct length from encoded length field
     * and initialize it with counter value.
     */
    n = 0;
    for (i = 15 - l; i < 15; i++) {
        n |= ctx->aes.ccm.nonce.b[i];
        ctx->aes.ccm.nonce.b[i] = 0;
        n <<= 8;
    }
    n |= ctx->aes.ccm.nonce.b[15];
    ctx->aes.ccm.nonce.b[15] = 1;

    if (n != len)
        return -1;              /* length mismatch */

    if (enc) {
        /* Two operations per block plus one for tag encryption */
        ctx->aes.ccm.blocks += (((len + 15) >> 4) << 1) + 1;
        if (ctx->aes.ccm.blocks > (1ULL << 61))
            return -2;          /* too much data */
    }

    num = 0;
    rem = len & 0xf;
    len &= ~(size_t)0xf;

    if (enc) {
        /* mac-then-encrypt */
        if (len)
            s390x_kmac(in, len, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
        if (rem) {
            for (i = 0; i < rem; i++)
                ctx->aes.ccm.kmac_param.icv.b[i] ^= in[len + i];

            s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
                     ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
                     ctx->aes.ccm.kmac_param.k);
        }

        CRYPTO_ctr128_encrypt_ctr32(in, out, len + rem, &ctx->aes.key.k,
                                    ctx->aes.ccm.nonce.b, ctx->aes.ccm.buf.b,
                                    &num, (ctr128_f)AES_ctr32_encrypt);
    } else {
        /* decrypt-then-mac */
        CRYPTO_ctr128_encrypt_ctr32(in, out, len + rem, &ctx->aes.key.k,
                                    ctx->aes.ccm.nonce.b, ctx->aes.ccm.buf.b,
                                    &num, (ctr128_f)AES_ctr32_encrypt);

        if (len)
            s390x_kmac(out, len, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
        if (rem) {
            for (i = 0; i < rem; i++)
                ctx->aes.ccm.kmac_param.icv.b[i] ^= out[len + i];

            s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
                     ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
                     ctx->aes.ccm.kmac_param.k);
        }
    }
    /* encrypt tag */
    for (i = 15 - l; i < 16; i++)
        ctx->aes.ccm.nonce.b[i] = 0;

    s390x_km(ctx->aes.ccm.nonce.b, 16, ctx->aes.ccm.buf.b, ctx->aes.ccm.fc,
             ctx->aes.ccm.kmac_param.k);
    ctx->aes.ccm.kmac_param.icv.g[0] ^= ctx->aes.ccm.buf.g[0];
    ctx->aes.ccm.kmac_param.icv.g[1] ^= ctx->aes.ccm.buf.g[1];

    ctx->aes.ccm.nonce.b[0] = flags;    /* restore flags field */
    return 0;
}
1866
1867 /*-
1868 * En/de-crypt and authenticate TLS packet. Returns the number of bytes written
1869 * if successful. Otherwise -1 is returned.
1870 */
s390x_aes_ccm_tls_cipher(EVP_CIPHER_CTX * ctx,unsigned char * out,const unsigned char * in,size_t len)1871 static int s390x_aes_ccm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
1872 const unsigned char *in, size_t len)
1873 {
1874 S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
1875 unsigned char *ivec = ctx->iv;
1876 unsigned char *buf = EVP_CIPHER_CTX_buf_noconst(ctx);
1877 const int enc = EVP_CIPHER_CTX_is_encrypting(ctx);
1878
1879 if (out != in
1880 || len < (EVP_CCM_TLS_EXPLICIT_IV_LEN + (size_t)cctx->aes.ccm.m))
1881 return -1;
1882
1883 if (enc) {
1884 /* Set explicit iv (sequence number). */
1885 memcpy(out, buf, EVP_CCM_TLS_EXPLICIT_IV_LEN);
1886 }
1887
1888 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->aes.ccm.m;
1889 /*-
1890 * Get explicit iv (sequence number). We already have fixed iv
1891 * (server/client_write_iv) here.
1892 */
1893 memcpy(ivec + EVP_CCM_TLS_FIXED_IV_LEN, in, EVP_CCM_TLS_EXPLICIT_IV_LEN);
1894 s390x_aes_ccm_setiv(cctx, ivec, len);
1895
1896 /* Process aad (sequence number|type|version|length) */
1897 s390x_aes_ccm_aad(cctx, buf, cctx->aes.ccm.tls_aad_len);
1898
1899 in += EVP_CCM_TLS_EXPLICIT_IV_LEN;
1900 out += EVP_CCM_TLS_EXPLICIT_IV_LEN;
1901
1902 if (enc) {
1903 if (s390x_aes_ccm(cctx, in, out, len, enc))
1904 return -1;
1905
1906 memcpy(out + len, cctx->aes.ccm.kmac_param.icv.b, cctx->aes.ccm.m);
1907 return len + EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->aes.ccm.m;
1908 } else {
1909 if (!s390x_aes_ccm(cctx, in, out, len, enc)) {
1910 if (!CRYPTO_memcmp(cctx->aes.ccm.kmac_param.icv.b, in + len,
1911 cctx->aes.ccm.m))
1912 return len;
1913 }
1914
1915 OPENSSL_cleanse(out, len);
1916 return -1;
1917 }
1918 }
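/*-
 * For orientation, the record layout handled above (a worked note on the
 * arithmetic, not additional functionality):
 *
 *     explicit_iv (EVP_CCM_TLS_EXPLICIT_IV_LEN = 8 bytes)
 *  || ciphertext  (len bytes, after the adjustment)
 *  || tag         (cctx->aes.ccm.m bytes)
 *
 * so the payload length is the record length minus 8 minus m, and the
 * encrypt-side return value adds both back on.
 */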
1919
1920 /*-
1921 * Set key and flag field and/or iv. Returns 1 if successful. Otherwise 0 is
1922 * returned.
1923 */
1924 static int s390x_aes_ccm_init_key(EVP_CIPHER_CTX *ctx,
1925 const unsigned char *key,
1926 const unsigned char *iv, int enc)
1927 {
1928 S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
1929 int keylen;
1930
1931 if (iv == NULL && key == NULL)
1932 return 1;
1933
1934 if (key != NULL) {
1935 keylen = EVP_CIPHER_CTX_get_key_length(ctx);
1936 cctx->aes.ccm.fc = S390X_AES_FC(keylen);
1937 memcpy(cctx->aes.ccm.kmac_param.k, key, keylen);
1938
1939 /* Store encoded m and l. */
1940 cctx->aes.ccm.nonce.b[0] = ((cctx->aes.ccm.l - 1) & 0x7)
1941 | (((cctx->aes.ccm.m - 2) >> 1) & 0x7) << 3;
1942 memset(cctx->aes.ccm.nonce.b + 1, 0,
1943 sizeof(cctx->aes.ccm.nonce.b) - 1);
1944 cctx->aes.ccm.blocks = 0;
1945
1946 cctx->aes.ccm.key_set = 1;
1947 }
1948
1949 if (iv != NULL) {
1950 memcpy(ctx->iv, iv, 15 - cctx->aes.ccm.l);
1951
1952 cctx->aes.ccm.iv_set = 1;
1953 }
1954
1955 return 1;
1956 }
1957
1958 /*-
1959 * Called from EVP layer to initialize context, process additional
1960 * authenticated data, en/de-crypt plain/cipher-text and authenticate
1961 * plaintext or process a TLS packet, depending on context. Returns bytes
1962 * written on success. Otherwise -1 is returned.
1963 */
1964 static int s390x_aes_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
1965 const unsigned char *in, size_t len)
1966 {
1967 S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
1968 const int enc = EVP_CIPHER_CTX_is_encrypting(ctx);
1969 int rv;
1970 unsigned char *buf;
1971
1972 if (!cctx->aes.ccm.key_set)
1973 return -1;
1974
1975 if (cctx->aes.ccm.tls_aad_len >= 0)
1976 return s390x_aes_ccm_tls_cipher(ctx, out, in, len);
1977
1978 /*-
1979 * Final(): Does not return any data. Recall that ccm is mac-then-encrypt
1980 * so integrity must already be checked at Update(), i.e., before
1981 * potentially corrupted data is output.
1982 */
1983 if (in == NULL && out != NULL)
1984 return 0;
1985
1986 if (!cctx->aes.ccm.iv_set)
1987 return -1;
1988
1989 if (out == NULL) {
1990 /* Update(): Pass message length. */
1991 if (in == NULL) {
1992 s390x_aes_ccm_setiv(cctx, ctx->iv, len);
1993
1994 cctx->aes.ccm.len_set = 1;
1995 return len;
1996 }
1997
1998 /* Update(): Process aad. */
1999 if (!cctx->aes.ccm.len_set && len)
2000 return -1;
2001
2002 s390x_aes_ccm_aad(cctx, in, len);
2003 return len;
2004 }
2005
2006 /* The tag must be set before actually decrypting data */
2007 if (!enc && !cctx->aes.ccm.tag_set)
2008 return -1;
2009
2010 /* Update(): Process message. */
2011
2012 if (!cctx->aes.ccm.len_set) {
2013 /*-
2014 * If the message length was not previously set explicitly via
2015 * Update(), set it now.
2016 */
2017 s390x_aes_ccm_setiv(cctx, ctx->iv, len);
2018
2019 cctx->aes.ccm.len_set = 1;
2020 }
2021
2022 if (enc) {
2023 if (s390x_aes_ccm(cctx, in, out, len, enc))
2024 return -1;
2025
2026 cctx->aes.ccm.tag_set = 1;
2027 return len;
2028 } else {
2029 rv = -1;
2030
2031 if (!s390x_aes_ccm(cctx, in, out, len, enc)) {
2032 buf = EVP_CIPHER_CTX_buf_noconst(ctx);
2033 if (!CRYPTO_memcmp(cctx->aes.ccm.kmac_param.icv.b, buf,
2034 cctx->aes.ccm.m))
2035 rv = len;
2036 }
2037
2038 if (rv == -1)
2039 OPENSSL_cleanse(out, len);
2040
2041 cctx->aes.ccm.iv_set = 0;
2042 cctx->aes.ccm.tag_set = 0;
2043 cctx->aes.ccm.len_set = 0;
2044 return rv;
2045 }
2046 }
2047
2048 /*-
2049 * Performs various operations on the context structure depending on control
2050 * type. Returns 1 for success, 0 for failure and -1 for unknown control type.
2051 * Code is big-endian.
2052 */
2053 static int s390x_aes_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
2054 {
2055 S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, c);
2056 unsigned char *buf;
2057 int enc, len;
2058
2059 switch (type) {
2060 case EVP_CTRL_INIT:
2061 cctx->aes.ccm.key_set = 0;
2062 cctx->aes.ccm.iv_set = 0;
2063 cctx->aes.ccm.l = 8;
2064 cctx->aes.ccm.m = 12;
2065 cctx->aes.ccm.tag_set = 0;
2066 cctx->aes.ccm.len_set = 0;
2067 cctx->aes.ccm.tls_aad_len = -1;
2068 return 1;
2069
2070 case EVP_CTRL_GET_IVLEN:
2071 *(int *)ptr = 15 - cctx->aes.ccm.l;
2072 return 1;
2073
2074 case EVP_CTRL_AEAD_TLS1_AAD:
2075 if (arg != EVP_AEAD_TLS1_AAD_LEN)
2076 return 0;
2077
2078 /* Save the aad for later use. */
2079 buf = EVP_CIPHER_CTX_buf_noconst(c);
2080 memcpy(buf, ptr, arg);
2081 cctx->aes.ccm.tls_aad_len = arg;
2082
2083 len = buf[arg - 2] << 8 | buf[arg - 1];
2084 if (len < EVP_CCM_TLS_EXPLICIT_IV_LEN)
2085 return 0;
2086
2087 /* Correct length for explicit iv. */
2088 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN;
2089
2090 enc = EVP_CIPHER_CTX_is_encrypting(c);
2091 if (!enc) {
2092 if (len < cctx->aes.ccm.m)
2093 return 0;
2094
2095 /* Correct length for tag. */
2096 len -= cctx->aes.ccm.m;
2097 }
2098
2099 buf[arg - 2] = len >> 8;
2100 buf[arg - 1] = len & 0xff;
2101
2102 /* Extra padding: tag appended to record. */
2103 return cctx->aes.ccm.m;
2104
2105 case EVP_CTRL_CCM_SET_IV_FIXED:
2106 if (arg != EVP_CCM_TLS_FIXED_IV_LEN)
2107 return 0;
2108
2109 /* Copy to first part of the iv. */
2110 memcpy(c->iv, ptr, arg);
2111 return 1;
2112
2113 case EVP_CTRL_AEAD_SET_IVLEN:
2114 arg = 15 - arg;
2115 /* fall-through */
2116
2117 case EVP_CTRL_CCM_SET_L:
2118 if (arg < 2 || arg > 8)
2119 return 0;
2120
2121 cctx->aes.ccm.l = arg;
2122 return 1;
2123
2124 case EVP_CTRL_AEAD_SET_TAG:
2125 if ((arg & 1) || arg < 4 || arg > 16)
2126 return 0;
2127
2128 enc = EVP_CIPHER_CTX_is_encrypting(c);
2129 if (enc && ptr)
2130 return 0;
2131
2132 if (ptr) {
2133 cctx->aes.ccm.tag_set = 1;
2134 buf = EVP_CIPHER_CTX_buf_noconst(c);
2135 memcpy(buf, ptr, arg);
2136 }
2137
2138 cctx->aes.ccm.m = arg;
2139 return 1;
2140
2141 case EVP_CTRL_AEAD_GET_TAG:
2142 enc = EVP_CIPHER_CTX_is_encrypting(c);
2143 if (!enc || !cctx->aes.ccm.tag_set)
2144 return 0;
2145
2146 if (arg < cctx->aes.ccm.m)
2147 return 0;
2148
2149 memcpy(ptr, cctx->aes.ccm.kmac_param.icv.b, cctx->aes.ccm.m);
2150 cctx->aes.ccm.tag_set = 0;
2151 cctx->aes.ccm.iv_set = 0;
2152 cctx->aes.ccm.len_set = 0;
2153 return 1;
2154
2155 case EVP_CTRL_COPY:
2156 return 1;
2157
2158 default:
2159 return -1;
2160 }
2161 }
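/*-
 * Editorial sketch: how an application typically reaches the ctrl handler
 * above through the public EVP interface. The helper name is invented for
 * illustration and error handling is omitted.
 */
# if 0
static int ccm_ctrl_usage(const unsigned char *key, const unsigned char *nonce)
{
    EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();

    EVP_EncryptInit_ex(c, EVP_aes_128_ccm(), NULL, NULL, NULL);
    /* 13-byte nonce, i.e. L = 2 (EVP_CTRL_AEAD_SET_IVLEN falls through
     * to EVP_CTRL_CCM_SET_L above). */
    EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_IVLEN, 13, NULL);
    /* Request a 12-byte tag; ptr must be NULL on the encrypt side. */
    EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_TAG, 12, NULL);
    EVP_EncryptInit_ex(c, NULL, NULL, key, nonce);
    EVP_CIPHER_CTX_free(c);
    return 1;
}
# endif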
2162
2163 # define s390x_aes_ccm_cleanup aes_ccm_cleanup
2164
2165 # ifndef OPENSSL_NO_OCB
2166 # define S390X_AES_OCB_CTX EVP_AES_OCB_CTX
2167
2168 # define s390x_aes_ocb_init_key aes_ocb_init_key
2169 static int s390x_aes_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
2170 const unsigned char *iv, int enc);
2171 # define s390x_aes_ocb_cipher aes_ocb_cipher
2172 static int s390x_aes_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2173 const unsigned char *in, size_t len);
2174 # define s390x_aes_ocb_cleanup aes_ocb_cleanup
2175 static int s390x_aes_ocb_cleanup(EVP_CIPHER_CTX *);
2176 # define s390x_aes_ocb_ctrl aes_ocb_ctrl
2177 static int s390x_aes_ocb_ctrl(EVP_CIPHER_CTX *, int type, int arg, void *ptr);
2178 # endif
2179
2180 # ifndef OPENSSL_NO_SIV
2181 # define S390X_AES_SIV_CTX EVP_AES_SIV_CTX
2182
2183 # define s390x_aes_siv_init_key aes_siv_init_key
2184 # define s390x_aes_siv_cipher aes_siv_cipher
2185 # define s390x_aes_siv_cleanup aes_siv_cleanup
2186 # define s390x_aes_siv_ctrl aes_siv_ctrl
2187 # endif
2188
2189 # define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode, \
2190 MODE,flags) \
2191 static const EVP_CIPHER s390x_aes_##keylen##_##mode = { \
2192 nid##_##keylen##_##nmode,blocksize, \
2193 keylen / 8, \
2194 ivlen, \
2195 flags | EVP_CIPH_##MODE##_MODE, \
2196 EVP_ORIG_GLOBAL, \
2197 s390x_aes_##mode##_init_key, \
2198 s390x_aes_##mode##_cipher, \
2199 NULL, \
2200 sizeof(S390X_AES_##MODE##_CTX), \
2201 NULL, \
2202 NULL, \
2203 NULL, \
2204 NULL \
2205 }; \
2206 static const EVP_CIPHER aes_##keylen##_##mode = { \
2207 nid##_##keylen##_##nmode, \
2208 blocksize, \
2209 keylen / 8, \
2210 ivlen, \
2211 flags | EVP_CIPH_##MODE##_MODE, \
2212 EVP_ORIG_GLOBAL, \
2213 aes_init_key, \
2214 aes_##mode##_cipher, \
2215 NULL, \
2216 sizeof(EVP_AES_KEY), \
2217 NULL, \
2218 NULL, \
2219 NULL, \
2220 NULL \
2221 }; \
2222 const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2223 { \
2224 return S390X_aes_##keylen##_##mode##_CAPABLE ? \
2225 &s390x_aes_##keylen##_##mode : &aes_##keylen##_##mode; \
2226 }
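/*-
 * For orientation, one instantiation of the macro above, e.g.
 * BLOCK_CIPHER_generic(NID_aes, 128, 16, 16, cbc, cbc, CBC, flags),
 * expands to roughly the following (whitespace rearranged; the s390x
 * helper names come from defines earlier in this file):
 */
# if 0
static const EVP_CIPHER s390x_aes_128_cbc = {
    NID_aes_128_cbc, 16, 128 / 8, 16,
    flags | EVP_CIPH_CBC_MODE, EVP_ORIG_GLOBAL,
    s390x_aes_cbc_init_key, s390x_aes_cbc_cipher,
    NULL, sizeof(S390X_AES_CBC_CTX),
    NULL, NULL, NULL, NULL
};
static const EVP_CIPHER aes_128_cbc = { /* ... software twin ... */ };
const EVP_CIPHER *EVP_aes_128_cbc(void)
{
    return S390X_aes_128_cbc_CAPABLE ? &s390x_aes_128_cbc : &aes_128_cbc;
}
# endif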
2227
2228 # define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags)\
2229 static const EVP_CIPHER s390x_aes_##keylen##_##mode = { \
2230 nid##_##keylen##_##mode, \
2231 blocksize, \
2232 (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE ? 2 : 1) * keylen / 8, \
2233 ivlen, \
2234 flags | EVP_CIPH_##MODE##_MODE, \
2235 EVP_ORIG_GLOBAL, \
2236 s390x_aes_##mode##_init_key, \
2237 s390x_aes_##mode##_cipher, \
2238 s390x_aes_##mode##_cleanup, \
2239 sizeof(S390X_AES_##MODE##_CTX), \
2240 NULL, \
2241 NULL, \
2242 s390x_aes_##mode##_ctrl, \
2243 NULL \
2244 }; \
2245 static const EVP_CIPHER aes_##keylen##_##mode = { \
2246 nid##_##keylen##_##mode,blocksize, \
2247 (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE ? 2 : 1) * keylen / 8, \
2248 ivlen, \
2249 flags | EVP_CIPH_##MODE##_MODE, \
2250 EVP_ORIG_GLOBAL, \
2251 aes_##mode##_init_key, \
2252 aes_##mode##_cipher, \
2253 aes_##mode##_cleanup, \
2254 sizeof(EVP_AES_##MODE##_CTX), \
2255 NULL, \
2256 NULL, \
2257 aes_##mode##_ctrl, \
2258 NULL \
2259 }; \
2260 const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2261 { \
2262 return S390X_aes_##keylen##_##mode##_CAPABLE ? \
2263 &s390x_aes_##keylen##_##mode : &aes_##keylen##_##mode; \
2264 }
2265
2266 #else
2267
2268 # define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
2269 static const EVP_CIPHER aes_##keylen##_##mode = { \
2270 nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
2271 flags|EVP_CIPH_##MODE##_MODE, \
2272 EVP_ORIG_GLOBAL, \
2273 aes_init_key, \
2274 aes_##mode##_cipher, \
2275 NULL, \
2276 sizeof(EVP_AES_KEY), \
2277 NULL,NULL,NULL,NULL }; \
2278 const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2279 { return &aes_##keylen##_##mode; }
2280
2281 # define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags) \
2282 static const EVP_CIPHER aes_##keylen##_##mode = { \
2283 nid##_##keylen##_##mode,blocksize, \
2284 (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
2285 ivlen, \
2286 flags|EVP_CIPH_##MODE##_MODE, \
2287 EVP_ORIG_GLOBAL, \
2288 aes_##mode##_init_key, \
2289 aes_##mode##_cipher, \
2290 aes_##mode##_cleanup, \
2291 sizeof(EVP_AES_##MODE##_CTX), \
2292 NULL,NULL,aes_##mode##_ctrl,NULL }; \
2293 const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2294 { return &aes_##keylen##_##mode; }
2295
2296 #endif
2297
2298 #define BLOCK_CIPHER_generic_pack(nid,keylen,flags) \
2299 BLOCK_CIPHER_generic(nid,keylen,16,16,cbc,cbc,CBC,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2300 BLOCK_CIPHER_generic(nid,keylen,16,0,ecb,ecb,ECB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2301 BLOCK_CIPHER_generic(nid,keylen,1,16,ofb128,ofb,OFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2302 BLOCK_CIPHER_generic(nid,keylen,1,16,cfb128,cfb,CFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2303 BLOCK_CIPHER_generic(nid,keylen,1,16,cfb1,cfb1,CFB,flags) \
2304 BLOCK_CIPHER_generic(nid,keylen,1,16,cfb8,cfb8,CFB,flags) \
2305 BLOCK_CIPHER_generic(nid,keylen,1,16,ctr,ctr,CTR,flags)
2306
2307 static int aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
2308 const unsigned char *iv, int enc)
2309 {
2310 int ret, mode;
2311 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2312
2313 mode = EVP_CIPHER_CTX_get_mode(ctx);
2314 if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
2315 && !enc) {
2316 #ifdef HWAES_CAPABLE
2317 if (HWAES_CAPABLE) {
2318 ret = HWAES_set_decrypt_key(key,
2319 EVP_CIPHER_CTX_get_key_length(ctx) * 8,
2320 &dat->ks.ks);
2321 dat->block = (block128_f) HWAES_decrypt;
2322 dat->stream.cbc = NULL;
2323 # ifdef HWAES_cbc_encrypt
2324 if (mode == EVP_CIPH_CBC_MODE)
2325 dat->stream.cbc = (cbc128_f) HWAES_cbc_encrypt;
2326 # endif
2327 } else
2328 #endif
2329 #ifdef BSAES_CAPABLE
2330 if (BSAES_CAPABLE && mode == EVP_CIPH_CBC_MODE) {
2331 ret = AES_set_decrypt_key(key,
2332 EVP_CIPHER_CTX_get_key_length(ctx) * 8,
2333 &dat->ks.ks);
2334 dat->block = (block128_f) AES_decrypt;
2335 dat->stream.cbc = (cbc128_f) ossl_bsaes_cbc_encrypt;
2336 } else
2337 #endif
2338 #ifdef VPAES_CAPABLE
2339 if (VPAES_CAPABLE) {
2340 ret = vpaes_set_decrypt_key(key,
2341 EVP_CIPHER_CTX_get_key_length(ctx) * 8,
2342 &dat->ks.ks);
2343 dat->block = (block128_f) vpaes_decrypt;
2344 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2345 (cbc128_f) vpaes_cbc_encrypt : NULL;
2346 } else
2347 #endif
2348 {
2349 ret = AES_set_decrypt_key(key,
2350 EVP_CIPHER_CTX_get_key_length(ctx) * 8,
2351 &dat->ks.ks);
2352 dat->block = (block128_f) AES_decrypt;
2353 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2354 (cbc128_f) AES_cbc_encrypt : NULL;
2355 }
2356 } else
2357 #ifdef HWAES_CAPABLE
2358 if (HWAES_CAPABLE) {
2359 ret = HWAES_set_encrypt_key(key,
2360 EVP_CIPHER_CTX_get_key_length(ctx) * 8,
2361 &dat->ks.ks);
2362 dat->block = (block128_f) HWAES_encrypt;
2363 dat->stream.cbc = NULL;
2364 # ifdef HWAES_cbc_encrypt
2365 if (mode == EVP_CIPH_CBC_MODE)
2366 dat->stream.cbc = (cbc128_f) HWAES_cbc_encrypt;
2367 else
2368 # endif
2369 # ifdef HWAES_ctr32_encrypt_blocks
2370 if (mode == EVP_CIPH_CTR_MODE)
2371 dat->stream.ctr = (ctr128_f) HWAES_ctr32_encrypt_blocks;
2372 else
2373 # endif
2374 (void)0; /* terminate potentially open 'else' */
2375 } else
2376 #endif
2377 #ifdef BSAES_CAPABLE
2378 if (BSAES_CAPABLE && mode == EVP_CIPH_CTR_MODE) {
2379 ret = AES_set_encrypt_key(key, EVP_CIPHER_CTX_get_key_length(ctx) * 8,
2380 &dat->ks.ks);
2381 dat->block = (block128_f) AES_encrypt;
2382 dat->stream.ctr = (ctr128_f) ossl_bsaes_ctr32_encrypt_blocks;
2383 } else
2384 #endif
2385 #ifdef VPAES_CAPABLE
2386 if (VPAES_CAPABLE) {
2387 ret = vpaes_set_encrypt_key(key,
2388 EVP_CIPHER_CTX_get_key_length(ctx) * 8,
2389 &dat->ks.ks);
2390 dat->block = (block128_f) vpaes_encrypt;
2391 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2392 (cbc128_f) vpaes_cbc_encrypt : NULL;
2393 } else
2394 #endif
2395 {
2396 ret = AES_set_encrypt_key(key, EVP_CIPHER_CTX_get_key_length(ctx) * 8,
2397 &dat->ks.ks);
2398 dat->block = (block128_f) AES_encrypt;
2399 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2400 (cbc128_f) AES_cbc_encrypt : NULL;
2401 #ifdef AES_CTR_ASM
2402 if (mode == EVP_CIPH_CTR_MODE)
2403 dat->stream.ctr = (ctr128_f) AES_ctr32_encrypt;
2404 #endif
2405 }
2406
2407 if (ret < 0) {
2408 ERR_raise(ERR_LIB_EVP, EVP_R_AES_KEY_SETUP_FAILED);
2409 return 0;
2410 }
2411
2412 return 1;
2413 }
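/*-
 * Editorial sketch of the dispatch shape used above and in the other
 * init_key routines: each optional accelerator contributes an
 * "if (...) { ... } else" arm under its own #ifdef, and a trailing
 * (void)0 (or a plain block) closes whichever 'else' is left open when
 * arms are compiled out. The capability names below are invented for
 * illustration.
 */
#if 0
static void dispatch_skeleton(void)
{
    do {
# ifdef FAST_IMPL_A
        if (fast_a_available()) {
            use_fast_a();
            break;
        } else
# endif
# ifdef FAST_IMPL_B
        if (fast_b_available()) {
            use_fast_b();
            break;
        } else
# endif
            (void)0;        /* terminate potentially open 'else' */

        use_portable_fallback();
    } while (0);
}
#endif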
2414
2415 static int aes_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2416 const unsigned char *in, size_t len)
2417 {
2418 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2419
2420 if (dat->stream.cbc)
2421 (*dat->stream.cbc) (in, out, len, &dat->ks, ctx->iv,
2422 EVP_CIPHER_CTX_is_encrypting(ctx));
2423 else if (EVP_CIPHER_CTX_is_encrypting(ctx))
2424 CRYPTO_cbc128_encrypt(in, out, len, &dat->ks, ctx->iv,
2425 dat->block);
2426 else
2427 CRYPTO_cbc128_decrypt(in, out, len, &dat->ks,
2428 ctx->iv, dat->block);
2429
2430 return 1;
2431 }
2432
2433 static int aes_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2434 const unsigned char *in, size_t len)
2435 {
2436 size_t bl = EVP_CIPHER_CTX_get_block_size(ctx);
2437 size_t i;
2438 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2439
2440 if (len < bl)
2441 return 1;
2442
2443 for (i = 0, len -= bl; i <= len; i += bl)
2444 (*dat->block) (in + i, out + i, &dat->ks);
2445
2446 return 1;
2447 }
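/*-
 * Worked bounds for the loop above: with len = 40 and bl = 16,
 * 'len -= bl' leaves len == 24, so i takes the values 0 and 16 and the
 * two complete blocks [0,16) and [16,32) are processed; a trailing
 * partial block is never touched (the EVP layer buffers partial blocks
 * before calling this routine). An equivalent but more explicit
 * formulation, as a sketch:
 */
#if 0
static void ecb_blocks_sketch(EVP_AES_KEY *dat, const unsigned char *in,
                              unsigned char *out, size_t len, size_t bl)
{
    size_t i, nblocks = len / bl;

    for (i = 0; i < nblocks; i++)
        (*dat->block) (in + i * bl, out + i * bl, &dat->ks);
}
#endif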
2448
2449 static int aes_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2450 const unsigned char *in, size_t len)
2451 {
2452 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2453
2454 int num = EVP_CIPHER_CTX_get_num(ctx);
2455 CRYPTO_ofb128_encrypt(in, out, len, &dat->ks,
2456 ctx->iv, &num, dat->block);
2457 EVP_CIPHER_CTX_set_num(ctx, num);
2458 return 1;
2459 }
2460
2461 static int aes_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2462 const unsigned char *in, size_t len)
2463 {
2464 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2465
2466 int num = EVP_CIPHER_CTX_get_num(ctx);
2467 CRYPTO_cfb128_encrypt(in, out, len, &dat->ks,
2468 ctx->iv, &num,
2469 EVP_CIPHER_CTX_is_encrypting(ctx), dat->block);
2470 EVP_CIPHER_CTX_set_num(ctx, num);
2471 return 1;
2472 }
2473
2474 static int aes_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2475 const unsigned char *in, size_t len)
2476 {
2477 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2478
2479 int num = EVP_CIPHER_CTX_get_num(ctx);
2480 CRYPTO_cfb128_8_encrypt(in, out, len, &dat->ks,
2481 ctx->iv, &num,
2482 EVP_CIPHER_CTX_is_encrypting(ctx), dat->block);
2483 EVP_CIPHER_CTX_set_num(ctx, num);
2484 return 1;
2485 }
2486
2487 static int aes_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2488 const unsigned char *in, size_t len)
2489 {
2490 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2491
2492 if (EVP_CIPHER_CTX_test_flags(ctx, EVP_CIPH_FLAG_LENGTH_BITS)) {
2493 int num = EVP_CIPHER_CTX_get_num(ctx);
2494 CRYPTO_cfb128_1_encrypt(in, out, len, &dat->ks,
2495 ctx->iv, &num,
2496 EVP_CIPHER_CTX_is_encrypting(ctx), dat->block);
2497 EVP_CIPHER_CTX_set_num(ctx, num);
2498 return 1;
2499 }
2500
2501 while (len >= MAXBITCHUNK) {
2502 int num = EVP_CIPHER_CTX_get_num(ctx);
2503 CRYPTO_cfb128_1_encrypt(in, out, MAXBITCHUNK * 8, &dat->ks,
2504 ctx->iv, &num,
2505 EVP_CIPHER_CTX_is_encrypting(ctx), dat->block);
2506 EVP_CIPHER_CTX_set_num(ctx, num);
2507 len -= MAXBITCHUNK;
2508 out += MAXBITCHUNK;
2509 in += MAXBITCHUNK;
2510 }
2511 if (len) {
2512 int num = EVP_CIPHER_CTX_get_num(ctx);
2513 CRYPTO_cfb128_1_encrypt(in, out, len * 8, &dat->ks,
2514 ctx->iv, &num,
2515 EVP_CIPHER_CTX_is_encrypting(ctx), dat->block);
2516 EVP_CIPHER_CTX_set_num(ctx, num);
2517 }
2518
2519 return 1;
2520 }
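/*-
 * Editorial note on the chunking above: CRYPTO_cfb128_1_encrypt() takes
 * its length argument in bits, so the byte count must be multiplied by 8
 * without overflowing size_t. With
 *     MAXBITCHUNK = (size_t)1 << (sizeof(size_t) * 8 - 4)
 * the largest per-call bit count is
 *     MAXBITCHUNK * 8 = (size_t)1 << (sizeof(size_t) * 8 - 1),
 * which still fits in a size_t (on 64-bit: 2^60 bytes -> 2^63 bits).
 */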
2521
2522 static int aes_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2523 const unsigned char *in, size_t len)
2524 {
2525 int n = EVP_CIPHER_CTX_get_num(ctx);
2526 unsigned int num;
2527 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2528
2529 if (n < 0)
2530 return 0;
2531 num = (unsigned int)n;
2532
2533 if (dat->stream.ctr)
2534 CRYPTO_ctr128_encrypt_ctr32(in, out, len, &dat->ks,
2535 ctx->iv,
2536 EVP_CIPHER_CTX_buf_noconst(ctx),
2537 &num, dat->stream.ctr);
2538 else
2539 CRYPTO_ctr128_encrypt(in, out, len, &dat->ks,
2540 ctx->iv,
2541 EVP_CIPHER_CTX_buf_noconst(ctx), &num,
2542 dat->block);
2543 EVP_CIPHER_CTX_set_num(ctx, num);
2544 return 1;
2545 }
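/*-
 * Editorial sketch: minimal application-level use of the CTR path above
 * (invented helper, error handling omitted). CTR acts as a stream
 * cipher, so a single update handles any length.
 */
#if 0
static int ctr_usage(const unsigned char *key, const unsigned char *iv,
                     const unsigned char *pt, int ptlen, unsigned char *ct)
{
    EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
    int outl = 0;

    EVP_EncryptInit_ex(c, EVP_aes_128_ctr(), NULL, key, iv);
    EVP_EncryptUpdate(c, ct, &outl, pt, ptlen);
    EVP_CIPHER_CTX_free(c);
    return outl;
}
#endif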
2546
2547 BLOCK_CIPHER_generic_pack(NID_aes, 128, 0)
2548 BLOCK_CIPHER_generic_pack(NID_aes, 192, 0)
2549 BLOCK_CIPHER_generic_pack(NID_aes, 256, 0)
2550
2551 static int aes_gcm_cleanup(EVP_CIPHER_CTX *c)
2552 {
2553 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,c);
2554 if (gctx == NULL)
2555 return 0;
2556 OPENSSL_cleanse(&gctx->gcm, sizeof(gctx->gcm));
2557 if (gctx->iv != c->iv)
2558 OPENSSL_free(gctx->iv);
2559 return 1;
2560 }
2561
2562 static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
2563 {
2564 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,c);
2565 switch (type) {
2566 case EVP_CTRL_INIT:
2567 gctx->key_set = 0;
2568 gctx->iv_set = 0;
2569 gctx->ivlen = EVP_CIPHER_get_iv_length(c->cipher);
2570 gctx->iv = c->iv;
2571 gctx->taglen = -1;
2572 gctx->iv_gen = 0;
2573 gctx->tls_aad_len = -1;
2574 return 1;
2575
2576 case EVP_CTRL_GET_IVLEN:
2577 *(int *)ptr = gctx->ivlen;
2578 return 1;
2579
2580 case EVP_CTRL_AEAD_SET_IVLEN:
2581 if (arg <= 0)
2582 return 0;
2583 /* Allocate memory for IV if needed */
2584 if ((arg > EVP_MAX_IV_LENGTH) && (arg > gctx->ivlen)) {
2585 if (gctx->iv != c->iv)
2586 OPENSSL_free(gctx->iv);
2587 if ((gctx->iv = OPENSSL_malloc(arg)) == NULL) {
2588 ERR_raise(ERR_LIB_EVP, ERR_R_MALLOC_FAILURE);
2589 return 0;
2590 }
2591 }
2592 gctx->ivlen = arg;
2593 return 1;
2594
2595 case EVP_CTRL_AEAD_SET_TAG:
2596 if (arg <= 0 || arg > 16 || c->encrypt)
2597 return 0;
2598 memcpy(c->buf, ptr, arg);
2599 gctx->taglen = arg;
2600 return 1;
2601
2602 case EVP_CTRL_AEAD_GET_TAG:
2603 if (arg <= 0 || arg > 16 || !c->encrypt
2604 || gctx->taglen < 0)
2605 return 0;
2606 memcpy(ptr, c->buf, arg);
2607 return 1;
2608
2609 case EVP_CTRL_GCM_SET_IV_FIXED:
2610 /* Special case: -1 length restores whole IV */
2611 if (arg == -1) {
2612 memcpy(gctx->iv, ptr, gctx->ivlen);
2613 gctx->iv_gen = 1;
2614 return 1;
2615 }
2616 /*
2617 * Fixed field must be at least 4 bytes and invocation field at least
2618 * 8.
2619 */
2620 if ((arg < 4) || (gctx->ivlen - arg) < 8)
2621 return 0;
2622 if (arg)
2623 memcpy(gctx->iv, ptr, arg);
2624 if (c->encrypt && RAND_bytes(gctx->iv + arg, gctx->ivlen - arg) <= 0)
2625 return 0;
2626 gctx->iv_gen = 1;
2627 return 1;
2628
2629 case EVP_CTRL_GCM_IV_GEN:
2630 if (gctx->iv_gen == 0 || gctx->key_set == 0)
2631 return 0;
2632 CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
2633 if (arg <= 0 || arg > gctx->ivlen)
2634 arg = gctx->ivlen;
2635 memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg);
2636 /*
2637 * The invocation field will be at least 8 bytes in size, so there is no
2638 * need to check for wraparound or to increment more than the last 8 bytes.
2639 */
2640 ctr64_inc(gctx->iv + gctx->ivlen - 8);
2641 gctx->iv_set = 1;
2642 return 1;
2643
2644 case EVP_CTRL_GCM_SET_IV_INV:
2645 if (gctx->iv_gen == 0 || gctx->key_set == 0 || c->encrypt)
2646 return 0;
2647 memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg);
2648 CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
2649 gctx->iv_set = 1;
2650 return 1;
2651
2652 case EVP_CTRL_AEAD_TLS1_AAD:
2653 /* Save the AAD for later use */
2654 if (arg != EVP_AEAD_TLS1_AAD_LEN)
2655 return 0;
2656 memcpy(c->buf, ptr, arg);
2657 gctx->tls_aad_len = arg;
2658 gctx->tls_enc_records = 0;
2659 {
2660 unsigned int len = c->buf[arg - 2] << 8 | c->buf[arg - 1];
2661 /* Correct length for explicit IV */
2662 if (len < EVP_GCM_TLS_EXPLICIT_IV_LEN)
2663 return 0;
2664 len -= EVP_GCM_TLS_EXPLICIT_IV_LEN;
2665 /* If decrypting correct for tag too */
2666 if (!c->encrypt) {
2667 if (len < EVP_GCM_TLS_TAG_LEN)
2668 return 0;
2669 len -= EVP_GCM_TLS_TAG_LEN;
2670 }
2671 c->buf[arg - 2] = len >> 8;
2672 c->buf[arg - 1] = len & 0xff;
2673 }
2674 /* Extra padding: tag appended to record */
2675 return EVP_GCM_TLS_TAG_LEN;
2676
2677 case EVP_CTRL_COPY:
2678 {
2679 EVP_CIPHER_CTX *out = ptr;
2680 EVP_AES_GCM_CTX *gctx_out = EVP_C_DATA(EVP_AES_GCM_CTX,out);
2681 if (gctx->gcm.key) {
2682 if (gctx->gcm.key != &gctx->ks)
2683 return 0;
2684 gctx_out->gcm.key = &gctx_out->ks;
2685 }
2686 if (gctx->iv == c->iv)
2687 gctx_out->iv = out->iv;
2688 else {
2689 if ((gctx_out->iv = OPENSSL_malloc(gctx->ivlen)) == NULL) {
2690 ERR_raise(ERR_LIB_EVP, ERR_R_MALLOC_FAILURE);
2691 return 0;
2692 }
2693 memcpy(gctx_out->iv, gctx->iv, gctx->ivlen);
2694 }
2695 return 1;
2696 }
2697
2698 default:
2699 return -1;
2700
2701 }
2702 }
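/*-
 * Editorial sketch: the TLS-style split IV implemented by the two ctrls
 * above. A 4-byte fixed field is installed once, then EVP_CTRL_GCM_IV_GEN
 * hands out and increments the 8-byte invocation field per record
 * (invented helper, error handling abbreviated).
 */
#if 0
static int gcm_split_iv_usage(EVP_CIPHER_CTX *c,
                              const unsigned char fixed[4],
                              unsigned char explicit_iv[8])
{
    /* Install the fixed field; the invocation field is randomized on the
     * encrypt side by the RAND_bytes() call in the handler above. */
    if (EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_GCM_SET_IV_FIXED, 4,
                            (void *)fixed) <= 0)
        return 0;
    /* Copy out the current invocation field and bump the counter. */
    return EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_GCM_IV_GEN, 8, explicit_iv) > 0;
}
#endif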
2703
2704 static int aes_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
2705 const unsigned char *iv, int enc)
2706 {
2707 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
2708 if (!iv && !key)
2709 return 1;
2710 if (key) {
2711 do {
2712 #ifdef HWAES_CAPABLE
2713 if (HWAES_CAPABLE) {
2714 HWAES_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
2715 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2716 (block128_f) HWAES_encrypt);
2717 # ifdef HWAES_ctr32_encrypt_blocks
2718 gctx->ctr = (ctr128_f) HWAES_ctr32_encrypt_blocks;
2719 # else
2720 gctx->ctr = NULL;
2721 # endif
2722 break;
2723 } else
2724 #endif
2725 #ifdef BSAES_CAPABLE
2726 if (BSAES_CAPABLE) {
2727 AES_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
2728 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2729 (block128_f) AES_encrypt);
2730 gctx->ctr = (ctr128_f) ossl_bsaes_ctr32_encrypt_blocks;
2731 break;
2732 } else
2733 #endif
2734 #ifdef VPAES_CAPABLE
2735 if (VPAES_CAPABLE) {
2736 vpaes_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
2737 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2738 (block128_f) vpaes_encrypt);
2739 gctx->ctr = NULL;
2740 break;
2741 } else
2742 #endif
2743 (void)0; /* terminate potentially open 'else' */
2744
2745 AES_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
2746 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2747 (block128_f) AES_encrypt);
2748 #ifdef AES_CTR_ASM
2749 gctx->ctr = (ctr128_f) AES_ctr32_encrypt;
2750 #else
2751 gctx->ctr = NULL;
2752 #endif
2753 } while (0);
2754
2755 /*
2756 * If we have an iv we can set it directly, otherwise use the saved IV.
2757 */
2758 if (iv == NULL && gctx->iv_set)
2759 iv = gctx->iv;
2760 if (iv) {
2761 CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
2762 gctx->iv_set = 1;
2763 }
2764 gctx->key_set = 1;
2765 } else {
2766 /* If key set use IV, otherwise copy */
2767 if (gctx->key_set)
2768 CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
2769 else
2770 memcpy(gctx->iv, iv, gctx->ivlen);
2771 gctx->iv_set = 1;
2772 gctx->iv_gen = 0;
2773 }
2774 return 1;
2775 }
2776
2777 /*
2778 * Handle TLS GCM packet format. This consists of the last portion of the IV
2779 * followed by the payload and finally the tag. On encrypt generate IV,
2780 * encrypt payload and write the tag. On verify retrieve IV, decrypt payload
2781 * and verify tag.
2782 */
2783
2784 static int aes_gcm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2785 const unsigned char *in, size_t len)
2786 {
2787 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
2788 int rv = -1;
2789 /* Encrypt/decrypt must be performed in place */
2790 if (out != in
2791 || len < (EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN))
2792 return -1;
2793
2794 /*
2795 * Check for too many records as per FIPS 140-2 IG A.5 "Key/IV Pair Uniqueness
2796 * Requirements from SP 800-38D". The requirement is for one party to the
2797 * communication to fail after 2^64 - 1 records. We do this on the encrypting
2798 * side only.
2799 */
2800 if (ctx->encrypt && ++gctx->tls_enc_records == 0) {
2801 ERR_raise(ERR_LIB_EVP, EVP_R_TOO_MANY_RECORDS);
2802 goto err;
2803 }
2804
2805 /*
2806 * Set IV from start of buffer or generate IV and write to start of
2807 * buffer.
2808 */
2809 if (EVP_CIPHER_CTX_ctrl(ctx, ctx->encrypt ? EVP_CTRL_GCM_IV_GEN
2810 : EVP_CTRL_GCM_SET_IV_INV,
2811 EVP_GCM_TLS_EXPLICIT_IV_LEN, out) <= 0)
2812 goto err;
2813 /* Use saved AAD */
2814 if (CRYPTO_gcm128_aad(&gctx->gcm, ctx->buf, gctx->tls_aad_len))
2815 goto err;
2816 /* Fix buffer and length to point to payload */
2817 in += EVP_GCM_TLS_EXPLICIT_IV_LEN;
2818 out += EVP_GCM_TLS_EXPLICIT_IV_LEN;
2819 len -= EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
2820 if (ctx->encrypt) {
2821 /* Encrypt payload */
2822 if (gctx->ctr) {
2823 size_t bulk = 0;
2824 #if defined(AES_GCM_ASM)
2825 if (len >= 32 && AES_GCM_ASM(gctx)) {
2826 if (CRYPTO_gcm128_encrypt(&gctx->gcm, NULL, NULL, 0))
2827 return -1;
2828
2829 bulk = AES_gcm_encrypt(in, out, len,
2830 gctx->gcm.key,
2831 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2832 gctx->gcm.len.u[1] += bulk;
2833 }
2834 #endif
2835 if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm,
2836 in + bulk,
2837 out + bulk,
2838 len - bulk, gctx->ctr))
2839 goto err;
2840 } else {
2841 size_t bulk = 0;
2842 #if defined(AES_GCM_ASM2)
2843 if (len >= 32 && AES_GCM_ASM2(gctx)) {
2844 if (CRYPTO_gcm128_encrypt(&gctx->gcm, NULL, NULL, 0))
2845 return -1;
2846
2847 bulk = AES_gcm_encrypt(in, out, len,
2848 gctx->gcm.key,
2849 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2850 gctx->gcm.len.u[1] += bulk;
2851 }
2852 #endif
2853 if (CRYPTO_gcm128_encrypt(&gctx->gcm,
2854 in + bulk, out + bulk, len - bulk))
2855 goto err;
2856 }
2857 out += len;
2858 /* Finally write tag */
2859 CRYPTO_gcm128_tag(&gctx->gcm, out, EVP_GCM_TLS_TAG_LEN);
2860 rv = len + EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
2861 } else {
2862 /* Decrypt */
2863 if (gctx->ctr) {
2864 size_t bulk = 0;
2865 #if defined(AES_GCM_ASM)
2866 if (len >= 16 && AES_GCM_ASM(gctx)) {
2867 if (CRYPTO_gcm128_decrypt(&gctx->gcm, NULL, NULL, 0))
2868 return -1;
2869
2870 bulk = AES_gcm_decrypt(in, out, len,
2871 gctx->gcm.key,
2872 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2873 gctx->gcm.len.u[1] += bulk;
2874 }
2875 #endif
2876 if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm,
2877 in + bulk,
2878 out + bulk,
2879 len - bulk, gctx->ctr))
2880 goto err;
2881 } else {
2882 size_t bulk = 0;
2883 #if defined(AES_GCM_ASM2)
2884 if (len >= 16 && AES_GCM_ASM2(gctx)) {
2885 if (CRYPTO_gcm128_decrypt(&gctx->gcm, NULL, NULL, 0))
2886 return -1;
2887
2888 bulk = AES_gcm_decrypt(in, out, len,
2889 gctx->gcm.key,
2890 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2891 gctx->gcm.len.u[1] += bulk;
2892 }
2893 #endif
2894 if (CRYPTO_gcm128_decrypt(&gctx->gcm,
2895 in + bulk, out + bulk, len - bulk))
2896 goto err;
2897 }
2898 /* Retrieve tag */
2899 CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, EVP_GCM_TLS_TAG_LEN);
2900 /* If tag mismatch wipe buffer */
2901 if (CRYPTO_memcmp(ctx->buf, in + len, EVP_GCM_TLS_TAG_LEN)) {
2902 OPENSSL_cleanse(out, len);
2903 goto err;
2904 }
2905 rv = len;
2906 }
2907
2908 err:
2909 gctx->iv_set = 0;
2910 gctx->tls_aad_len = -1;
2911 return rv;
2912 }
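/*-
 * For orientation, the record layout handled above, with
 * EVP_GCM_TLS_EXPLICIT_IV_LEN == 8 and EVP_GCM_TLS_TAG_LEN == 16:
 *
 *     explicit_iv (8) || ciphertext (len - 24) || tag (16)
 *
 * hence the len -= 8 + 16 adjustment, and on encrypt the return value
 * payload + 8 + 16 equals the original record length.
 */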
2913
2914 #ifdef FIPS_MODULE
2915 /*
2916 * See SP800-38D (GCM) Section 8 "Uniqueness Requirement on IVs and Keys"
2917 *
2918 * See also 8.2.2 RBG-based construction.
2919 * Random construction consists of a free field (which can be NULL) and a
2920 * random field which will use a DRBG that can return at least 96 bits of
2921 * entropy strength. (The DRBG must be seeded by the FIPS module).
2922 */
2923 static int aes_gcm_iv_generate(EVP_AES_GCM_CTX *gctx, int offset)
2924 {
2925 int sz = gctx->ivlen - offset;
2926
2927 /* Must be at least 96 bits */
2928 if (sz <= 0 || gctx->ivlen < 12)
2929 return 0;
2930
2931 /* Use DRBG to generate random iv */
2932 if (RAND_bytes(gctx->iv + offset, sz) <= 0)
2933 return 0;
2934 return 1;
2935 }
2936 #endif /* FIPS_MODULE */
2937
2938 static int aes_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2939 const unsigned char *in, size_t len)
2940 {
2941 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
2942
2943 /* If not set up, return error */
2944 if (!gctx->key_set)
2945 return -1;
2946
2947 if (gctx->tls_aad_len >= 0)
2948 return aes_gcm_tls_cipher(ctx, out, in, len);
2949
2950 #ifdef FIPS_MODULE
2951 /*
2952 * FIPS requires generation of AES-GCM IVs inside the FIPS module.
2953 * The IV can still be set externally (the security policy will state that
2954 * this is not FIPS compliant). There are some applications
2955 * where setting the IV externally is the only option available.
2956 */
2957 if (!gctx->iv_set) {
2958 if (!ctx->encrypt || !aes_gcm_iv_generate(gctx, 0))
2959 return -1;
2960 CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
2961 gctx->iv_set = 1;
2962 gctx->iv_gen_rand = 1;
2963 }
2964 #else
2965 if (!gctx->iv_set)
2966 return -1;
2967 #endif /* FIPS_MODULE */
2968
2969 if (in) {
2970 if (out == NULL) {
2971 if (CRYPTO_gcm128_aad(&gctx->gcm, in, len))
2972 return -1;
2973 } else if (ctx->encrypt) {
2974 if (gctx->ctr) {
2975 size_t bulk = 0;
2976 #if defined(AES_GCM_ASM)
2977 if (len >= 32 && AES_GCM_ASM(gctx)) {
2978 size_t res = (16 - gctx->gcm.mres) % 16;
2979
2980 if (CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, res))
2981 return -1;
2982
2983 bulk = AES_gcm_encrypt(in + res,
2984 out + res, len - res,
2985 gctx->gcm.key, gctx->gcm.Yi.c,
2986 gctx->gcm.Xi.u);
2987 gctx->gcm.len.u[1] += bulk;
2988 bulk += res;
2989 }
2990 #endif
2991 if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm,
2992 in + bulk,
2993 out + bulk,
2994 len - bulk, gctx->ctr))
2995 return -1;
2996 } else {
2997 size_t bulk = 0;
2998 #if defined(AES_GCM_ASM2)
2999 if (len >= 32 && AES_GCM_ASM2(gctx)) {
3000 size_t res = (16 - gctx->gcm.mres) % 16;
3001
3002 if (CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, res))
3003 return -1;
3004
3005 bulk = AES_gcm_encrypt(in + res,
3006 out + res, len - res,
3007 gctx->gcm.key, gctx->gcm.Yi.c,
3008 gctx->gcm.Xi.u);
3009 gctx->gcm.len.u[1] += bulk;
3010 bulk += res;
3011 }
3012 #endif
3013 if (CRYPTO_gcm128_encrypt(&gctx->gcm,
3014 in + bulk, out + bulk, len - bulk))
3015 return -1;
3016 }
3017 } else {
3018 if (gctx->ctr) {
3019 size_t bulk = 0;
3020 #if defined(AES_GCM_ASM)
3021 if (len >= 16 && AES_GCM_ASM(gctx)) {
3022 size_t res = (16 - gctx->gcm.mres) % 16;
3023
3024 if (CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, res))
3025 return -1;
3026
3027 bulk = AES_gcm_decrypt(in + res,
3028 out + res, len - res,
3029 gctx->gcm.key,
3030 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
3031 gctx->gcm.len.u[1] += bulk;
3032 bulk += res;
3033 }
3034 #endif
3035 if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm,
3036 in + bulk,
3037 out + bulk,
3038 len - bulk, gctx->ctr))
3039 return -1;
3040 } else {
3041 size_t bulk = 0;
3042 #if defined(AES_GCM_ASM2)
3043 if (len >= 16 && AES_GCM_ASM2(gctx)) {
3044 size_t res = (16 - gctx->gcm.mres) % 16;
3045
3046 if (CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, res))
3047 return -1;
3048
3049 bulk = AES_gcm_decrypt(in + res,
3050 out + res, len - res,
3051 gctx->gcm.key,
3052 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
3053 gctx->gcm.len.u[1] += bulk;
3054 bulk += res;
3055 }
3056 #endif
3057 if (CRYPTO_gcm128_decrypt(&gctx->gcm,
3058 in + bulk, out + bulk, len - bulk))
3059 return -1;
3060 }
3061 }
3062 return len;
3063 } else {
3064 if (!ctx->encrypt) {
3065 if (gctx->taglen < 0)
3066 return -1;
3067 if (CRYPTO_gcm128_finish(&gctx->gcm, ctx->buf, gctx->taglen) != 0)
3068 return -1;
3069 gctx->iv_set = 0;
3070 return 0;
3071 }
3072 CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, 16);
3073 gctx->taglen = 16;
3074 /* Don't reuse the IV */
3075 gctx->iv_set = 0;
3076 return 0;
3077 }
3078
3079 }
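/*-
 * Editorial sketch: the non-TLS calling sequence the branches above
 * implement, seen from the public EVP API (invented helper, error
 * handling omitted). Feeding AAD is the out == NULL case; fetching the
 * tag after finalization is the in == NULL, encrypt case.
 */
#if 0
static int gcm_encrypt_usage(const unsigned char *key, const unsigned char *iv,
                             const unsigned char *aad, int aadlen,
                             const unsigned char *pt, int ptlen,
                             unsigned char *ct, unsigned char tag[16])
{
    EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
    int outl = 0, tmplen = 0;

    EVP_EncryptInit_ex(c, EVP_aes_256_gcm(), NULL, key, iv);
    EVP_EncryptUpdate(c, NULL, &outl, aad, aadlen); /* AAD: out == NULL  */
    EVP_EncryptUpdate(c, ct, &outl, pt, ptlen);     /* encrypt payload   */
    EVP_EncryptFinal_ex(c, ct + outl, &tmplen);     /* finalize; no data */
    EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_GET_TAG, 16, tag);
    EVP_CIPHER_CTX_free(c);
    return outl + tmplen;
}
#endif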
3080
3081 #define CUSTOM_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 \
3082 | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \
3083 | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \
3084 | EVP_CIPH_CUSTOM_COPY | EVP_CIPH_CUSTOM_IV_LENGTH)
3085
3086 BLOCK_CIPHER_custom(NID_aes, 128, 1, 12, gcm, GCM,
3087 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3088 BLOCK_CIPHER_custom(NID_aes, 192, 1, 12, gcm, GCM,
3089 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3090 BLOCK_CIPHER_custom(NID_aes, 256, 1, 12, gcm, GCM,
3091 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3092
3093 static int aes_xts_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
3094 {
3095 EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX, c);
3096
3097 if (type == EVP_CTRL_COPY) {
3098 EVP_CIPHER_CTX *out = ptr;
3099 EVP_AES_XTS_CTX *xctx_out = EVP_C_DATA(EVP_AES_XTS_CTX,out);
3100
3101 if (xctx->xts.key1) {
3102 if (xctx->xts.key1 != &xctx->ks1)
3103 return 0;
3104 xctx_out->xts.key1 = &xctx_out->ks1;
3105 }
3106 if (xctx->xts.key2) {
3107 if (xctx->xts.key2 != &xctx->ks2)
3108 return 0;
3109 xctx_out->xts.key2 = &xctx_out->ks2;
3110 }
3111 return 1;
3112 } else if (type != EVP_CTRL_INIT)
3113 return -1;
3114 /* key1 and key2 are used as an indicator that both key and IV are set */
3115 xctx->xts.key1 = NULL;
3116 xctx->xts.key2 = NULL;
3117 return 1;
3118 }
3119
3120 static int aes_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3121 const unsigned char *iv, int enc)
3122 {
3123 EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);
3124
3125 if (!iv && !key)
3126 return 1;
3127
3128 if (key) {
3129 do {
3130 /* The key is in reality two half-length keys */
3131 const int bytes = EVP_CIPHER_CTX_get_key_length(ctx) / 2;
3132 const int bits = bytes * 8;
3133
3134 /*
3135 * Verify that the two keys are different.
3136 *
3137 * This addresses the vulnerability described in Rogaway's
3138 * September 2004 paper:
3139 *
3140 * "Efficient Instantiations of Tweakable Blockciphers and
3141 * Refinements to Modes OCB and PMAC".
3142 * (http://web.cs.ucdavis.edu/~rogaway/papers/offsets.pdf)
3143 *
3144 * FIPS 140-2 IG A.9 XTS-AES Key Generation Requirements states
3145 * that:
3146 * "The check for Key_1 != Key_2 shall be done at any place
3147 * BEFORE using the keys in the XTS-AES algorithm to process
3148 * data with them."
3149 */
3150 if ((!allow_insecure_decrypt || enc)
3151 && CRYPTO_memcmp(key, key + bytes, bytes) == 0) {
3152 ERR_raise(ERR_LIB_EVP, EVP_R_XTS_DUPLICATED_KEYS);
3153 return 0;
3154 }
3155
3156 #ifdef AES_XTS_ASM
3157 xctx->stream = enc ? AES_xts_encrypt : AES_xts_decrypt;
3158 #else
3159 xctx->stream = NULL;
3160 #endif
3161 /* key_len is two AES keys */
3162 #ifdef HWAES_CAPABLE
3163 if (HWAES_CAPABLE) {
3164 if (enc) {
3165 HWAES_set_encrypt_key(key, bits, &xctx->ks1.ks);
3166 xctx->xts.block1 = (block128_f) HWAES_encrypt;
3167 # ifdef HWAES_xts_encrypt
3168 xctx->stream = HWAES_xts_encrypt;
3169 # endif
3170 } else {
3171 HWAES_set_decrypt_key(key, bits, &xctx->ks1.ks);
3172 xctx->xts.block1 = (block128_f) HWAES_decrypt;
3173 # ifdef HWAES_xts_decrypt
3174 xctx->stream = HWAES_xts_decrypt;
3175 # endif
3176 }
3177
3178 HWAES_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
3179 xctx->xts.block2 = (block128_f) HWAES_encrypt;
3180
3181 xctx->xts.key1 = &xctx->ks1;
3182 break;
3183 } else
3184 #endif
3185 #ifdef BSAES_CAPABLE
3186 if (BSAES_CAPABLE)
3187 xctx->stream = enc ? ossl_bsaes_xts_encrypt : ossl_bsaes_xts_decrypt;
3188 else
3189 #endif
3190 #ifdef VPAES_CAPABLE
3191 if (VPAES_CAPABLE) {
3192 if (enc) {
3193 vpaes_set_encrypt_key(key, bits, &xctx->ks1.ks);
3194 xctx->xts.block1 = (block128_f) vpaes_encrypt;
3195 } else {
3196 vpaes_set_decrypt_key(key, bits, &xctx->ks1.ks);
3197 xctx->xts.block1 = (block128_f) vpaes_decrypt;
3198 }
3199
3200 vpaes_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
3201 xctx->xts.block2 = (block128_f) vpaes_encrypt;
3202
3203 xctx->xts.key1 = &xctx->ks1;
3204 break;
3205 } else
3206 #endif
3207 (void)0; /* terminate potentially open 'else' */
3208
3209 if (enc) {
3210 AES_set_encrypt_key(key, bits, &xctx->ks1.ks);
3211 xctx->xts.block1 = (block128_f) AES_encrypt;
3212 } else {
3213 AES_set_decrypt_key(key, bits, &xctx->ks1.ks);
3214 xctx->xts.block1 = (block128_f) AES_decrypt;
3215 }
3216
3217 AES_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
3218 xctx->xts.block2 = (block128_f) AES_encrypt;
3219
3220 xctx->xts.key1 = &xctx->ks1;
3221 } while (0);
3222 }
3223
3224 if (iv) {
3225 xctx->xts.key2 = &xctx->ks2;
3226 memcpy(ctx->iv, iv, 16);
3227 }
3228
3229 return 1;
3230 }
3231
3232 static int aes_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3233 const unsigned char *in, size_t len)
3234 {
3235 EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);
3236
3237 if (xctx->xts.key1 == NULL
3238 || xctx->xts.key2 == NULL
3239 || out == NULL
3240 || in == NULL
3241 || len < AES_BLOCK_SIZE)
3242 return 0;
3243
3244 /*
3245 * Impose a limit of 2^20 blocks per data unit as specified by
3246 * IEEE Std 1619-2018. The earlier and obsolete IEEE Std 1619-2007
3247 * indicated that this was a SHOULD NOT rather than a MUST NOT.
3248 * NIST SP 800-38E mandates the same limit.
3249 */
3250 if (len > XTS_MAX_BLOCKS_PER_DATA_UNIT * AES_BLOCK_SIZE) {
3251 ERR_raise(ERR_LIB_EVP, EVP_R_XTS_DATA_UNIT_IS_TOO_LARGE);
3252 return 0;
3253 }
3254
3255 if (xctx->stream)
3256 (*xctx->stream) (in, out, len,
3257 xctx->xts.key1, xctx->xts.key2,
3258 ctx->iv);
3259 else if (CRYPTO_xts128_encrypt(&xctx->xts, ctx->iv, in, out, len,
3260 EVP_CIPHER_CTX_is_encrypting(ctx)))
3261 return 0;
3262 return 1;
3263 }
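/*-
 * Worked limit for the length check above: XTS_MAX_BLOCKS_PER_DATA_UNIT
 * is 2^20, so the largest permitted data unit is
 *     2^20 blocks * 16 bytes/block = 2^24 bytes = 16 MiB.
 */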
3264
3265 #define aes_xts_cleanup NULL
3266
3267 #define XTS_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 | EVP_CIPH_CUSTOM_IV \
3268 | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \
3269 | EVP_CIPH_CUSTOM_COPY)
3270
3271 BLOCK_CIPHER_custom(NID_aes, 128, 1, 16, xts, XTS, XTS_FLAGS)
3272 BLOCK_CIPHER_custom(NID_aes, 256, 1, 16, xts, XTS, XTS_FLAGS)
3273
3274 static int aes_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
3275 {
3276 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,c);
3277 switch (type) {
3278 case EVP_CTRL_INIT:
3279 cctx->key_set = 0;
3280 cctx->iv_set = 0;
3281 cctx->L = 8;
3282 cctx->M = 12;
3283 cctx->tag_set = 0;
3284 cctx->len_set = 0;
3285 cctx->tls_aad_len = -1;
3286 return 1;
3287
3288 case EVP_CTRL_GET_IVLEN:
3289 *(int *)ptr = 15 - cctx->L;
3290 return 1;
3291
3292 case EVP_CTRL_AEAD_TLS1_AAD:
3293 /* Save the AAD for later use */
3294 if (arg != EVP_AEAD_TLS1_AAD_LEN)
3295 return 0;
3296 memcpy(EVP_CIPHER_CTX_buf_noconst(c), ptr, arg);
3297 cctx->tls_aad_len = arg;
3298 {
3299 uint16_t len =
3300 EVP_CIPHER_CTX_buf_noconst(c)[arg - 2] << 8
3301 | EVP_CIPHER_CTX_buf_noconst(c)[arg - 1];
3302 /* Correct length for explicit IV */
3303 if (len < EVP_CCM_TLS_EXPLICIT_IV_LEN)
3304 return 0;
3305 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN;
3306 /* If decrypting correct for tag too */
3307 if (!EVP_CIPHER_CTX_is_encrypting(c)) {
3308 if (len < cctx->M)
3309 return 0;
3310 len -= cctx->M;
3311 }
3312 EVP_CIPHER_CTX_buf_noconst(c)[arg - 2] = len >> 8;
3313 EVP_CIPHER_CTX_buf_noconst(c)[arg - 1] = len & 0xff;
3314 }
3315 /* Extra padding: tag appended to record */
3316 return cctx->M;
3317
3318 case EVP_CTRL_CCM_SET_IV_FIXED:
3319 /* Sanity check length */
3320 if (arg != EVP_CCM_TLS_FIXED_IV_LEN)
3321 return 0;
3322 /* Just copy to first part of IV */
3323 memcpy(c->iv, ptr, arg);
3324 return 1;
3325
3326 case EVP_CTRL_AEAD_SET_IVLEN:
3327 arg = 15 - arg;
3328 /* fall through */
3329 case EVP_CTRL_CCM_SET_L:
3330 if (arg < 2 || arg > 8)
3331 return 0;
3332 cctx->L = arg;
3333 return 1;
3334
3335 case EVP_CTRL_AEAD_SET_TAG:
3336 if ((arg & 1) || arg < 4 || arg > 16)
3337 return 0;
3338 if (EVP_CIPHER_CTX_is_encrypting(c) && ptr)
3339 return 0;
3340 if (ptr) {
3341 cctx->tag_set = 1;
3342 memcpy(EVP_CIPHER_CTX_buf_noconst(c), ptr, arg);
3343 }
3344 cctx->M = arg;
3345 return 1;
3346
3347 case EVP_CTRL_AEAD_GET_TAG:
3348 if (!EVP_CIPHER_CTX_is_encrypting(c) || !cctx->tag_set)
3349 return 0;
3350 if (!CRYPTO_ccm128_tag(&cctx->ccm, ptr, (size_t)arg))
3351 return 0;
3352 cctx->tag_set = 0;
3353 cctx->iv_set = 0;
3354 cctx->len_set = 0;
3355 return 1;
3356
3357 case EVP_CTRL_COPY:
3358 {
3359 EVP_CIPHER_CTX *out = ptr;
3360 EVP_AES_CCM_CTX *cctx_out = EVP_C_DATA(EVP_AES_CCM_CTX,out);
3361 if (cctx->ccm.key) {
3362 if (cctx->ccm.key != &cctx->ks)
3363 return 0;
3364 cctx_out->ccm.key = &cctx_out->ks;
3365 }
3366 return 1;
3367 }
3368
3369 default:
3370 return -1;
3371
3372 }
3373 }
3374
3375 static int aes_ccm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3376 const unsigned char *iv, int enc)
3377 {
3378 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
3379 if (!iv && !key)
3380 return 1;
3381 if (key)
3382 do {
3383 #ifdef HWAES_CAPABLE
3384 if (HWAES_CAPABLE) {
3385 HWAES_set_encrypt_key(key,
3386 EVP_CIPHER_CTX_get_key_length(ctx) * 8,
3387 &cctx->ks.ks);
3388
3389 CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
3390 &cctx->ks, (block128_f) HWAES_encrypt);
3391 cctx->str = NULL;
3392 cctx->key_set = 1;
3393 break;
3394 } else
3395 #endif
3396 #ifdef VPAES_CAPABLE
3397 if (VPAES_CAPABLE) {
3398 vpaes_set_encrypt_key(key,
3399 EVP_CIPHER_CTX_get_key_length(ctx) * 8,
3400 &cctx->ks.ks);
3401 CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
3402 &cctx->ks, (block128_f) vpaes_encrypt);
3403 cctx->str = NULL;
3404 cctx->key_set = 1;
3405 break;
3406 }
3407 #endif
3408 AES_set_encrypt_key(key, EVP_CIPHER_CTX_get_key_length(ctx) * 8,
3409 &cctx->ks.ks);
3410 CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
3411 &cctx->ks, (block128_f) AES_encrypt);
3412 cctx->str = NULL;
3413 cctx->key_set = 1;
3414 } while (0);
3415 if (iv) {
3416 memcpy(ctx->iv, iv, 15 - cctx->L);
3417 cctx->iv_set = 1;
3418 }
3419 return 1;
3420 }
3421
3422 static int aes_ccm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3423 const unsigned char *in, size_t len)
3424 {
3425 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
3426 CCM128_CONTEXT *ccm = &cctx->ccm;
3427 /* Encrypt/decrypt must be performed in place */
3428 if (out != in || len < (EVP_CCM_TLS_EXPLICIT_IV_LEN + (size_t)cctx->M))
3429 return -1;
3430 /* If encrypting set explicit IV from sequence number (start of AAD) */
3431 if (EVP_CIPHER_CTX_is_encrypting(ctx))
3432 memcpy(out, EVP_CIPHER_CTX_buf_noconst(ctx),
3433 EVP_CCM_TLS_EXPLICIT_IV_LEN);
3434 /* Get rest of IV from explicit IV */
3435 memcpy(ctx->iv + EVP_CCM_TLS_FIXED_IV_LEN, in,
3436 EVP_CCM_TLS_EXPLICIT_IV_LEN);
3437 /* Correct length value */
3438 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->M;
3439 if (CRYPTO_ccm128_setiv(ccm, ctx->iv, 15 - cctx->L,
3440 len))
3441 return -1;
3442 /* Use saved AAD */
3443 CRYPTO_ccm128_aad(ccm, EVP_CIPHER_CTX_buf_noconst(ctx),
3444 cctx->tls_aad_len);
3445 /* Fix buffer to point to payload */
3446 in += EVP_CCM_TLS_EXPLICIT_IV_LEN;
3447 out += EVP_CCM_TLS_EXPLICIT_IV_LEN;
3448 if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
3449 if (cctx->str ? CRYPTO_ccm128_encrypt_ccm64(ccm, in, out, len,
3450 cctx->str) :
3451 CRYPTO_ccm128_encrypt(ccm, in, out, len))
3452 return -1;
3453 if (!CRYPTO_ccm128_tag(ccm, out + len, cctx->M))
3454 return -1;
3455 return len + EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->M;
3456 } else {
3457 if (cctx->str ? !CRYPTO_ccm128_decrypt_ccm64(ccm, in, out, len,
3458 cctx->str) :
3459 !CRYPTO_ccm128_decrypt(ccm, in, out, len)) {
3460 unsigned char tag[16];
3461 if (CRYPTO_ccm128_tag(ccm, tag, cctx->M)) {
3462 if (!CRYPTO_memcmp(tag, in + len, cctx->M))
3463 return len;
3464 }
3465 }
3466 OPENSSL_cleanse(out, len);
3467 return -1;
3468 }
3469 }
3470
3471 static int aes_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3472 const unsigned char *in, size_t len)
3473 {
3474 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
3475 CCM128_CONTEXT *ccm = &cctx->ccm;
3476 /* If not set up, return error */
3477 if (!cctx->key_set)
3478 return -1;
3479
3480 if (cctx->tls_aad_len >= 0)
3481 return aes_ccm_tls_cipher(ctx, out, in, len);
3482
3483 /* EVP_*Final() doesn't return any data */
3484 if (in == NULL && out != NULL)
3485 return 0;
3486
3487 if (!cctx->iv_set)
3488 return -1;
3489
3490 if (!out) {
3491 if (!in) {
3492 if (CRYPTO_ccm128_setiv(ccm, ctx->iv,
3493 15 - cctx->L, len))
3494 return -1;
3495 cctx->len_set = 1;
3496 return len;
3497 }
3498 /* If have AAD need message length */
3499 if (!cctx->len_set && len)
3500 return -1;
3501 CRYPTO_ccm128_aad(ccm, in, len);
3502 return len;
3503 }
3504
3505 /* The tag must be set before actually decrypting data */
3506 if (!EVP_CIPHER_CTX_is_encrypting(ctx) && !cctx->tag_set)
3507 return -1;
3508
3509 /* If not set length yet do it */
3510 if (!cctx->len_set) {
3511 if (CRYPTO_ccm128_setiv(ccm, ctx->iv, 15 - cctx->L, len))
3512 return -1;
3513 cctx->len_set = 1;
3514 }
3515 if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
3516 if (cctx->str ? CRYPTO_ccm128_encrypt_ccm64(ccm, in, out, len,
3517 cctx->str) :
3518 CRYPTO_ccm128_encrypt(ccm, in, out, len))
3519 return -1;
3520 cctx->tag_set = 1;
3521 return len;
3522 } else {
3523 int rv = -1;
3524 if (cctx->str ? !CRYPTO_ccm128_decrypt_ccm64(ccm, in, out, len,
3525 cctx->str) :
3526 !CRYPTO_ccm128_decrypt(ccm, in, out, len)) {
3527 unsigned char tag[16];
3528 if (CRYPTO_ccm128_tag(ccm, tag, cctx->M)) {
3529 if (!CRYPTO_memcmp(tag, EVP_CIPHER_CTX_buf_noconst(ctx),
3530 cctx->M))
3531 rv = len;
3532 }
3533 }
3534 if (rv == -1)
3535 OPENSSL_cleanse(out, len);
3536 cctx->iv_set = 0;
3537 cctx->tag_set = 0;
3538 cctx->len_set = 0;
3539 return rv;
3540 }
3541 }
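/*-
 * Editorial sketch: the CCM calling sequence the branches above expect.
 * CCM must know the total message length before AAD or data can be
 * processed, so it is supplied in a pre-pass with both buffers NULL, and
 * the message itself goes through in a single update (invented helper,
 * error handling omitted; the tag is then available via
 * EVP_CTRL_AEAD_GET_TAG).
 */
#if 0
static int ccm_encrypt_usage(EVP_CIPHER_CTX *c,
                             const unsigned char *aad, int aadlen,
                             const unsigned char *pt, int ptlen,
                             unsigned char *ct)
{
    int outl = 0;

    /* out == NULL, in == NULL: sets the message length (len_set). */
    EVP_EncryptUpdate(c, NULL, &outl, NULL, ptlen);
    /* out == NULL, in != NULL: feeds the AAD. */
    EVP_EncryptUpdate(c, NULL, &outl, aad, aadlen);
    /* both non-NULL: encrypts and computes the tag. */
    EVP_EncryptUpdate(c, ct, &outl, pt, ptlen);
    return outl;
}
#endif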
3542
3543 #define aes_ccm_cleanup NULL
3544
3545 BLOCK_CIPHER_custom(NID_aes, 128, 1, 12, ccm, CCM,
3546 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3547 BLOCK_CIPHER_custom(NID_aes, 192, 1, 12, ccm, CCM,
3548 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3549 BLOCK_CIPHER_custom(NID_aes, 256, 1, 12, ccm, CCM,
3550 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3551
3552 typedef struct {
3553 union {
3554 OSSL_UNION_ALIGN;
3555 AES_KEY ks;
3556 } ks;
3557 /* Non-NULL when the IV has been set; points at ctx->iv */
3558 unsigned char *iv;
3559 } EVP_AES_WRAP_CTX;
3560
3561 static int aes_wrap_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3562 const unsigned char *iv, int enc)
3563 {
3564 int len;
3565 EVP_AES_WRAP_CTX *wctx = EVP_C_DATA(EVP_AES_WRAP_CTX,ctx);
3566
3567 if (iv == NULL && key == NULL)
3568 return 1;
3569 if (key != NULL) {
3570 if (EVP_CIPHER_CTX_is_encrypting(ctx))
3571 AES_set_encrypt_key(key, EVP_CIPHER_CTX_get_key_length(ctx) * 8,
3572 &wctx->ks.ks);
3573 else
3574 AES_set_decrypt_key(key, EVP_CIPHER_CTX_get_key_length(ctx) * 8,
3575 &wctx->ks.ks);
3576 if (iv == NULL)
3577 wctx->iv = NULL;
3578 }
3579 if (iv != NULL) {
3580 if ((len = EVP_CIPHER_CTX_get_iv_length(ctx)) < 0)
3581 return 0;
3582 memcpy(ctx->iv, iv, len);
3583 wctx->iv = ctx->iv;
3584 }
3585 return 1;
3586 }
3587
3588 static int aes_wrap_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3589 const unsigned char *in, size_t inlen)
3590 {
3591 EVP_AES_WRAP_CTX *wctx = EVP_C_DATA(EVP_AES_WRAP_CTX,ctx);
3592 size_t rv;
3593 /* AES wrap with padding has IV length of 4, without padding 8 */
3594 int pad = EVP_CIPHER_CTX_get_iv_length(ctx) == 4;
3595 /* No final operation so always return zero length */
3596 if (!in)
3597 return 0;
3598 /* Input length must always be non-zero */
3599 if (!inlen)
3600 return -1;
3601 /* If decrypting need at least 16 bytes and multiple of 8 */
3602 if (!EVP_CIPHER_CTX_is_encrypting(ctx) && (inlen < 16 || inlen & 0x7))
3603 return -1;
3604 /* If not padding input must be multiple of 8 */
3605 if (!pad && inlen & 0x7)
3606 return -1;
3607 if (ossl_is_partially_overlapping(out, in, inlen)) {
3608 ERR_raise(ERR_LIB_EVP, EVP_R_PARTIALLY_OVERLAPPING);
3609 return 0;
3610 }
3611 if (!out) {
3612 if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
3613 /* If padding round up to multiple of 8 */
3614 if (pad)
3615 inlen = (inlen + 7) / 8 * 8;
3616 /* 8 byte prefix */
3617 return inlen + 8;
3618 } else {
3619 /*
3620 * If not padding, the output will be exactly 8 bytes smaller than the
3621 * input. If padding, it will be at least 8 bytes smaller, but we don't
3622 * know by how much.
3623 */
3624 return inlen - 8;
3625 }
3626 }
3627 if (pad) {
3628 if (EVP_CIPHER_CTX_is_encrypting(ctx))
3629 rv = CRYPTO_128_wrap_pad(&wctx->ks.ks, wctx->iv,
3630 out, in, inlen,
3631 (block128_f) AES_encrypt);
3632 else
3633 rv = CRYPTO_128_unwrap_pad(&wctx->ks.ks, wctx->iv,
3634 out, in, inlen,
3635 (block128_f) AES_decrypt);
3636 } else {
3637 if (EVP_CIPHER_CTX_is_encrypting(ctx))
3638 rv = CRYPTO_128_wrap(&wctx->ks.ks, wctx->iv,
3639 out, in, inlen, (block128_f) AES_encrypt);
3640 else
3641 rv = CRYPTO_128_unwrap(&wctx->ks.ks, wctx->iv,
3642 out, in, inlen, (block128_f) AES_decrypt);
3643 }
3644 return rv ? (int)rv : -1;
3645 }
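/*-
 * Output sizes implemented above, per RFC 3394 (key wrap) and RFC 5649
 * (key wrap with padding): wrapping prepends an 8-byte integrity block,
 * so for an n-byte input (rounded up to a multiple of 8 when padding):
 *     wrapped length   = n + 8
 *     unwrapped length = n - 8   (an upper bound in the padded case)
 */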
3646
3647 #define WRAP_FLAGS (EVP_CIPH_WRAP_MODE \
3648 | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \
3649 | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_FLAG_DEFAULT_ASN1)
3650
3651 static const EVP_CIPHER aes_128_wrap = {
3652 NID_id_aes128_wrap,
3653 8, 16, 8, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3654 aes_wrap_init_key, aes_wrap_cipher,
3655 NULL,
3656 sizeof(EVP_AES_WRAP_CTX),
3657 NULL, NULL, NULL, NULL
3658 };
3659
3660 const EVP_CIPHER *EVP_aes_128_wrap(void)
3661 {
3662 return &aes_128_wrap;
3663 }
3664
3665 static const EVP_CIPHER aes_192_wrap = {
3666 NID_id_aes192_wrap,
3667 8, 24, 8, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3668 aes_wrap_init_key, aes_wrap_cipher,
3669 NULL,
3670 sizeof(EVP_AES_WRAP_CTX),
3671 NULL, NULL, NULL, NULL
3672 };
3673
3674 const EVP_CIPHER *EVP_aes_192_wrap(void)
3675 {
3676 return &aes_192_wrap;
3677 }
3678
3679 static const EVP_CIPHER aes_256_wrap = {
3680 NID_id_aes256_wrap,
3681 8, 32, 8, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3682 aes_wrap_init_key, aes_wrap_cipher,
3683 NULL,
3684 sizeof(EVP_AES_WRAP_CTX),
3685 NULL, NULL, NULL, NULL
3686 };
3687
3688 const EVP_CIPHER *EVP_aes_256_wrap(void)
3689 {
3690 return &aes_256_wrap;
3691 }
3692
3693 static const EVP_CIPHER aes_128_wrap_pad = {
3694 NID_id_aes128_wrap_pad,
3695 8, 16, 4, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3696 aes_wrap_init_key, aes_wrap_cipher,
3697 NULL,
3698 sizeof(EVP_AES_WRAP_CTX),
3699 NULL, NULL, NULL, NULL
3700 };
3701
3702 const EVP_CIPHER *EVP_aes_128_wrap_pad(void)
3703 {
3704 return &aes_128_wrap_pad;
3705 }
3706
3707 static const EVP_CIPHER aes_192_wrap_pad = {
3708 NID_id_aes192_wrap_pad,
3709 8, 24, 4, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3710 aes_wrap_init_key, aes_wrap_cipher,
3711 NULL,
3712 sizeof(EVP_AES_WRAP_CTX),
3713 NULL, NULL, NULL, NULL
3714 };
3715
3716 const EVP_CIPHER *EVP_aes_192_wrap_pad(void)
3717 {
3718 return &aes_192_wrap_pad;
3719 }
3720
3721 static const EVP_CIPHER aes_256_wrap_pad = {
3722 NID_id_aes256_wrap_pad,
3723 8, 32, 4, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3724 aes_wrap_init_key, aes_wrap_cipher,
3725 NULL,
3726 sizeof(EVP_AES_WRAP_CTX),
3727 NULL, NULL, NULL, NULL
3728 };
3729
3730 const EVP_CIPHER *EVP_aes_256_wrap_pad(void)
3731 {
3732 return &aes_256_wrap_pad;
3733 }
3734
3735 #ifndef OPENSSL_NO_OCB
3736 static int aes_ocb_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
3737 {
3738 EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,c);
3739 EVP_CIPHER_CTX *newc;
3740 EVP_AES_OCB_CTX *new_octx;
3741
3742 switch (type) {
3743 case EVP_CTRL_INIT:
3744 octx->key_set = 0;
3745 octx->iv_set = 0;
3746 octx->ivlen = EVP_CIPHER_get_iv_length(c->cipher);
3747 octx->iv = c->iv;
3748 octx->taglen = 16;
3749 octx->data_buf_len = 0;
3750 octx->aad_buf_len = 0;
3751 return 1;
3752
3753 case EVP_CTRL_GET_IVLEN:
3754 *(int *)ptr = octx->ivlen;
3755 return 1;
3756
3757 case EVP_CTRL_AEAD_SET_IVLEN:
3758 /* IV len must be 1 to 15 */
3759 if (arg <= 0 || arg > 15)
3760 return 0;
3761
3762 octx->ivlen = arg;
3763 return 1;
3764
3765 case EVP_CTRL_AEAD_SET_TAG:
3766 if (ptr == NULL) {
3767 /* Tag len must be 0 to 16 */
3768 if (arg < 0 || arg > 16)
3769 return 0;
3770
3771 octx->taglen = arg;
3772 return 1;
3773 }
3774 if (arg != octx->taglen || EVP_CIPHER_CTX_is_encrypting(c))
3775 return 0;
3776 memcpy(octx->tag, ptr, arg);
3777 return 1;
3778
3779 case EVP_CTRL_AEAD_GET_TAG:
3780 if (arg != octx->taglen || !EVP_CIPHER_CTX_is_encrypting(c))
3781 return 0;
3782
3783 memcpy(ptr, octx->tag, arg);
3784 return 1;
3785
3786 case EVP_CTRL_COPY:
3787 newc = (EVP_CIPHER_CTX *)ptr;
3788 new_octx = EVP_C_DATA(EVP_AES_OCB_CTX,newc);
3789 return CRYPTO_ocb128_copy_ctx(&new_octx->ocb, &octx->ocb,
3790 &new_octx->ksenc.ks,
3791 &new_octx->ksdec.ks);
3792
3793 default:
3794 return -1;
3795
3796 }
3797 }
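
/*
 * Usage sketch (not compiled): the controls above are normally reached
 * through EVP_CIPHER_CTX_ctrl(). For an OCB decryption the caller selects
 * any non-default nonce length before supplying the IV, and hands over the
 * expected tag before finalisation; SET_TAG with a non-NULL pointer only
 * succeeds when decrypting and when arg matches the stored tag length
 * (16 by default). The key/iv/tag buffers are hypothetical.
 */
#if 0
EVP_DecryptInit_ex(c, EVP_aes_128_ocb(), NULL, NULL, NULL);
EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_IVLEN, 15, NULL); /* 1..15 bytes */
EVP_DecryptInit_ex(c, NULL, NULL, key, iv);
EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_TAG, 16, tag);
#endif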

static int aes_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                            const unsigned char *iv, int enc)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);

    if (!iv && !key)
        return 1;
    if (key) {
        do {
            /*
             * We set both the encrypt and decrypt key here because decrypt
             * needs both. We could possibly optimise to remove setting the
             * decrypt key for an encryption operation.
             */
# ifdef HWAES_CAPABLE
            if (HWAES_CAPABLE) {
                HWAES_set_encrypt_key(key, EVP_CIPHER_CTX_get_key_length(ctx) * 8,
                                      &octx->ksenc.ks);
                HWAES_set_decrypt_key(key, EVP_CIPHER_CTX_get_key_length(ctx) * 8,
                                      &octx->ksdec.ks);
                if (!CRYPTO_ocb128_init(&octx->ocb,
                                        &octx->ksenc.ks, &octx->ksdec.ks,
                                        (block128_f) HWAES_encrypt,
                                        (block128_f) HWAES_decrypt,
                                        enc ? HWAES_ocb_encrypt
                                            : HWAES_ocb_decrypt))
                    return 0;
                break;
            }
# endif
# ifdef VPAES_CAPABLE
            if (VPAES_CAPABLE) {
                vpaes_set_encrypt_key(key,
                                      EVP_CIPHER_CTX_get_key_length(ctx) * 8,
                                      &octx->ksenc.ks);
                vpaes_set_decrypt_key(key,
                                      EVP_CIPHER_CTX_get_key_length(ctx) * 8,
                                      &octx->ksdec.ks);
                if (!CRYPTO_ocb128_init(&octx->ocb,
                                        &octx->ksenc.ks, &octx->ksdec.ks,
                                        (block128_f) vpaes_encrypt,
                                        (block128_f) vpaes_decrypt,
                                        NULL))
                    return 0;
                break;
            }
# endif
            AES_set_encrypt_key(key, EVP_CIPHER_CTX_get_key_length(ctx) * 8,
                                &octx->ksenc.ks);
            AES_set_decrypt_key(key, EVP_CIPHER_CTX_get_key_length(ctx) * 8,
                                &octx->ksdec.ks);
            if (!CRYPTO_ocb128_init(&octx->ocb,
                                    &octx->ksenc.ks, &octx->ksdec.ks,
                                    (block128_f) AES_encrypt,
                                    (block128_f) AES_decrypt,
                                    NULL))
                return 0;
        } while (0);

        /*
         * If we have an IV we can set it directly, otherwise use the saved
         * IV.
         */
        if (iv == NULL && octx->iv_set)
            iv = octx->iv;
        if (iv) {
            if (CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen)
                != 1)
                return 0;
            octx->iv_set = 1;
        }
        octx->key_set = 1;
    } else {
        /* If the key is already set use the IV directly, otherwise save a copy */
        if (octx->key_set)
            CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen);
        else
            memcpy(octx->iv, iv, octx->ivlen);
        octx->iv_set = 1;
    }
    return 1;
}
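
/*
 * Usage sketch (not compiled): as the branches above show, the key and IV
 * may arrive in separate init calls in either order; whichever arrives
 * first is cached until the other is available. The key/iv buffers are
 * hypothetical.
 */
#if 0
EVP_EncryptInit_ex(c, EVP_aes_128_ocb(), NULL, key, NULL); /* key only */
EVP_EncryptInit_ex(c, NULL, NULL, NULL, iv);               /* IV later */
#endif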

static int aes_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                          const unsigned char *in, size_t len)
{
    unsigned char *buf;
    int *buf_len;
    int written_len = 0;
    size_t trailing_len;
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);

    /* If the IV or key has not been set then return an error */
    if (!octx->iv_set)
        return -1;

    if (!octx->key_set)
        return -1;

    if (in != NULL) {
        /*
         * We need to ensure we only pass full blocks to the low level OCB
         * routines. We do it here rather than in EVP_EncryptUpdate/
         * EVP_DecryptUpdate because we need to pass full blocks of AAD too,
         * and those routines don't support that.
         */

        /* Are we dealing with AAD or normal data here? */
        if (out == NULL) {
            buf = octx->aad_buf;
            buf_len = &(octx->aad_buf_len);
        } else {
            buf = octx->data_buf;
            buf_len = &(octx->data_buf_len);

            if (ossl_is_partially_overlapping(out + *buf_len, in, len)) {
                ERR_raise(ERR_LIB_EVP, EVP_R_PARTIALLY_OVERLAPPING);
                return 0;
            }
        }

        /*
         * If we've got a partially filled buffer from a previous call then
         * use that data first.
         */
        if (*buf_len > 0) {
            unsigned int remaining;

            remaining = AES_BLOCK_SIZE - (*buf_len);
            if (remaining > len) {
                memcpy(buf + (*buf_len), in, len);
                *(buf_len) += len;
                return 0;
            }
            memcpy(buf + (*buf_len), in, remaining);

            /* If we get here we've filled the buffer, so process it */
            len -= remaining;
            in += remaining;
            if (out == NULL) {
                if (!CRYPTO_ocb128_aad(&octx->ocb, buf, AES_BLOCK_SIZE))
                    return -1;
            } else if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
                if (!CRYPTO_ocb128_encrypt(&octx->ocb, buf, out,
                                           AES_BLOCK_SIZE))
                    return -1;
            } else {
                if (!CRYPTO_ocb128_decrypt(&octx->ocb, buf, out,
                                           AES_BLOCK_SIZE))
                    return -1;
            }
            written_len = AES_BLOCK_SIZE;
            *buf_len = 0;
            if (out != NULL)
                out += AES_BLOCK_SIZE;
        }

        /* Do we have a partial block to handle at the end? */
        trailing_len = len % AES_BLOCK_SIZE;

        /* If we've got some full blocks to handle, then process these first */
        if (len != trailing_len) {
            if (out == NULL) {
                if (!CRYPTO_ocb128_aad(&octx->ocb, in, len - trailing_len))
                    return -1;
            } else if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
                if (!CRYPTO_ocb128_encrypt(&octx->ocb, in, out,
                                           len - trailing_len))
                    return -1;
            } else {
                if (!CRYPTO_ocb128_decrypt(&octx->ocb, in, out,
                                           len - trailing_len))
                    return -1;
            }
            written_len += len - trailing_len;
            in += len - trailing_len;
        }

        /* Handle any trailing partial block */
        if (trailing_len > 0) {
            memcpy(buf, in, trailing_len);
            *buf_len = trailing_len;
        }

        return written_len;
    } else {
        /*
         * First of all empty the buffer of any partial block that we might
         * have been provided - both for data and AAD.
         */
        if (octx->data_buf_len > 0) {
            if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
                if (!CRYPTO_ocb128_encrypt(&octx->ocb, octx->data_buf, out,
                                           octx->data_buf_len))
                    return -1;
            } else {
                if (!CRYPTO_ocb128_decrypt(&octx->ocb, octx->data_buf, out,
                                           octx->data_buf_len))
                    return -1;
            }
            written_len = octx->data_buf_len;
            octx->data_buf_len = 0;
        }
        if (octx->aad_buf_len > 0) {
            if (!CRYPTO_ocb128_aad(&octx->ocb, octx->aad_buf,
                                   octx->aad_buf_len))
                return -1;
            octx->aad_buf_len = 0;
        }
        /* If decrypting then verify the tag */
        if (!EVP_CIPHER_CTX_is_encrypting(ctx)) {
            if (octx->taglen < 0)
                return -1;
            if (CRYPTO_ocb128_finish(&octx->ocb,
                                     octx->tag, octx->taglen) != 0)
                return -1;
            octx->iv_set = 0;
            return written_len;
        }
        /* If encrypting then just get the tag */
        if (CRYPTO_ocb128_tag(&octx->ocb, octx->tag, 16) != 1)
            return -1;
        /* Don't reuse the IV */
        octx->iv_set = 0;
        return written_len;
    }
}
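
/*
 * Usage sketch (not compiled): AAD is fed in by calling EVP_EncryptUpdate()
 * with a NULL output pointer, which lands in the out == NULL branch above.
 * Updates need not be block aligned - partial blocks are buffered in
 * data_buf/aad_buf until a full block is available. The aad/pt/ct/tag
 * buffers and their lengths are hypothetical.
 */
#if 0
EVP_EncryptUpdate(c, NULL, &outl, aad, aadlen);         /* AAD pass */
EVP_EncryptUpdate(c, ct, &outl, pt, ptlen);             /* any length */
EVP_EncryptFinal_ex(c, ct + outl, &tmpl);               /* flush, compute tag */
EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_GET_TAG, 16, tag);
#endif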

static int aes_ocb_cleanup(EVP_CIPHER_CTX *c)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,c);

    CRYPTO_ocb128_cleanup(&octx->ocb);
    return 1;
}

BLOCK_CIPHER_custom(NID_aes, 128, 16, 12, ocb, OCB,
                    EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
BLOCK_CIPHER_custom(NID_aes, 192, 16, 12, ocb, OCB,
                    EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
BLOCK_CIPHER_custom(NID_aes, 256, 16, 12, ocb, OCB,
                    EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
#endif                          /* OPENSSL_NO_OCB */