// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 * Copyright 2016-2019, 2023, 2025 NXP
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
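
/*
 * Illustration only, not part of the driver: a minimal sketch of how a job
 * descriptor like the one above is assembled with the desc_constr.h helpers
 * used throughout this file (addresses and lengths are placeholders):
 *
 *	u32 desc[CAAM_DESC_BYTES_MAX / CAAM_CMD_SZ];
 *
 *	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_in_ptr(desc, src_dma, in_len, 0);
 *	append_seq_out_ptr(desc, dst_dma, out_len, 0);
 */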

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamalg_desc.h"
#include <linux/unaligned.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/engine.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
#include <keys/trusted-type.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/key-type.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <soc/fsl/caam-blob.h>

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)

#define CHACHAPOLY_DESC_JOB_IO_LEN	(AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN_MIN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
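
/*
 * Worked example, assuming CAAM_CMD_SZ = 4 (one 32-bit command word) and a
 * 64-word descriptor buffer: CAAM_DESC_BYTES_MAX = 256, so a shared
 * descriptor may use at most (256 - DESC_JOB_IO_LEN_MIN) bytes, i.e.
 * DESC_MAX_USED_LEN command words, leaving room for the job descriptor's
 * I/O commands.
 */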

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
	bool nodkp;
};

struct caam_aead_alg {
	struct aead_engine_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

struct caam_skcipher_alg {
	struct skcipher_engine_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t key_dma;
	u8 protected_key[CAAM_MAX_KEY_SIZE];
	dma_addr_t protected_key_dma;
	enum dma_data_direction dir;
	struct device *jrdev;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	bool xts_key_fallback;
	bool is_blob;
	struct crypto_skcipher *fallback;
};

struct caam_skcipher_req_ctx {
	struct skcipher_edesc *edesc;
	struct skcipher_request fallback_req;
};

struct caam_aead_req_ctx {
	struct aead_edesc *edesc;
};

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
			ctx->adata.keylen_pad;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
		ctx->adata.key_inline = true;
		ctx->adata.key_virt = ctx->key;
	} else {
		ctx->adata.key_inline = false;
		ctx->adata.key_dma = ctx->key_dma;
	}

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
				    ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
		ctx->adata.key_inline = true;
		ctx->adata.key_virt = ctx->key;
	} else {
		ctx->adata.key_inline = false;
		ctx->adata.key_dma = ctx->key_dma;
	}

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
				    ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg,
						 aead.base);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 ctx1_iv_off = 0;
	u32 *desc, *nonce = NULL;
	u32 inl_mask;
	unsigned int data_len[2];
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->cdata.keylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm>
	 * would result in invalid opcodes (last bytes of user key) in
	 * the resulting descriptor. Use DKP<ptr,imm> instead => both
	 * virtual and dma key addresses are needed.
	 */
	ctx->adata.key_virt = ctx->key;
	ctx->adata.key_dma = ctx->key_dma;

	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
			       false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

skip_enc:
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, alg->caam.geniv, is_rfc3686,
			       nonce, ctx1_iv_off, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
				  ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

skip_givenc:
	return 0;
}
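
/*
 * Illustration of the ctx->key layout used above: the DKP split (auth) key,
 * padded to adata.keylen_pad, is followed by the cipher key, so the cdata
 * addresses are derived by simply offsetting the adata ones:
 *
 *	ctx->key:  +------------------------------+------------+
 *	           | split auth key (keylen_pad)  | cipher key |
 *	           +------------------------------+------------+
 *	           ^ adata.key_virt/key_dma       ^ cdata.key_virt/key_dma
 */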

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}
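
/*
 * Illustration only: the crypto API reaches the setauthsize callbacks in
 * this file via crypto_aead_setauthsize(), e.g. (error handling omitted):
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
 *	crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE);
 */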

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
	int err;

	err = crypto_gcm_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
	int err;

	err = crypto_rfc4106_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);

	if (authsize != 16)
		return -EINVAL;

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int chachapoly_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, true, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, false, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int chachapoly_setauthsize(struct crypto_aead *aead,
				  unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);

	if (authsize != POLY1305_DIGEST_SIZE)
		return -EINVAL;

	ctx->authsize = authsize;
	return chachapoly_set_sh_desc(aead);
}

static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
			     unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;

	if (keylen != CHACHA_KEY_SIZE + saltlen)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->cdata.key_virt = ctx->key;
	ctx->cdata.keylen = keylen - saltlen;

	return chachapoly_set_sh_desc(aead);
}
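
/*
 * For example: rfc7539(chacha20,poly1305) has ivsize == CHACHAPOLY_IV_SIZE,
 * so saltlen is 0 and the key is exactly the 32-byte ChaCha20 key, while
 * rfc7539esp(chacha20,poly1305) has an 8-byte IV, so the 32-byte key must be
 * followed by a 4-byte salt.
 */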

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.keylen = keys.authkeylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
			goto badkey;

		memcpy(ctx->key, keys.authkey, keys.authkeylen);
		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
		       keys.enckeylen);
		dma_sync_single_for_device(jrdev, ctx->key_dma,
					   ctx->adata.keylen_pad +
					   keys.enckeylen, ctx->dir);
		goto skip_split_key;
	}

	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* append encryption key to auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);

	print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

skip_split_key:
	ctx->cdata.keylen = keys.enckeylen;
	memzero_explicit(&keys, sizeof(keys));
	return aead_set_sh_desc(aead);
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
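
/*
 * Illustration only: crypto_authenc_extractkeys() above parses the generic
 * authenc key blob defined by crypto/authenc.c, an rtattr
 * (CRYPTO_AUTHENC_KEYA_PARAM) carrying the big-endian enckeylen, followed
 * by the raw keys:
 *
 *	{ rtattr hdr | __be32 enckeylen } || authkey || enckey
 *
 * keys.authkey and keys.enckey end up pointing into this blob.
 */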

static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		return err;

	err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
	      aead_setkey(aead, key, keylen);

	memzero_explicit(&keys, sizeof(keys));
	return err;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	return gcm_set_sh_desc(aead);
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	int err;

	err = aes_check_keylen(keylen - 4);
	if (err)
		return err;

	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);
	return rfc4106_set_sh_desc(aead);
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	int err;

	err = aes_check_keylen(keylen - 4);
	if (err)
		return err;

	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);
	return rfc4543_set_sh_desc(aead);
}
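
/*
 * For example, a 20-byte rfc4106/rfc4543 key is a 16-byte AES-128 key
 * followed by the 4-byte salt, which is why the functions above validate
 * keylen - 4 and program the descriptors with cdata.keylen = keylen - 4.
 */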

static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen, const u32 ctx1_iv_off)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
			     skcipher.base);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 *desc;
	const bool is_rfc3686 = alg->caam.rfc3686;

	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	/* Here keylen is the actual key length */
	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;
	/* Here the protected key length equals the plain key length */
	ctx->cdata.plain_keylen = keylen;
	ctx->cdata.key_cmd_opt = 0;

	/* skcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/* skcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}
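
/*
 * Illustration only: a typical caller path into the setkey handlers below,
 * e.g. for "cbc(aes)" (error handling omitted):
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 */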

static int paes_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key,
				unsigned int keylen)
{
	struct caam_pkey_info *pkey_info = (struct caam_pkey_info *)key;
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct device *jrdev = ctx->jrdev;
	int err;

	ctx->cdata.key_inline = false;

	keylen = keylen - CAAM_PKEY_HEADER;

	/* Retrieve the length of the plain key */
	ctx->cdata.plain_keylen = pkey_info->plain_key_sz;

	/* Retrieve the length of the blob */
	ctx->cdata.keylen = keylen;

	/* Retrieve the address of the blob */
	ctx->cdata.key_virt = pkey_info->key_buf;

	/* Validate key length for AES algorithms */
	err = aes_check_keylen(ctx->cdata.plain_keylen);
	if (err) {
		dev_err(jrdev, "bad key length\n");
		return err;
	}

	/* set command option */
	ctx->cdata.key_cmd_opt |= KEY_ENC;

	/* check if the Protected Key is a CCM key */
	if (pkey_info->key_enc_algo == CAAM_ENC_ALGO_CCM)
		ctx->cdata.key_cmd_opt |= KEY_EKT;

	memcpy(ctx->key, ctx->cdata.key_virt, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
	ctx->cdata.key_dma = ctx->key_dma;

	if (pkey_info->key_enc_algo == CAAM_ENC_ALGO_CCM)
		ctx->protected_key_dma = dma_map_single(jrdev, ctx->protected_key,
							ctx->cdata.plain_keylen +
							CAAM_CCM_OVERHEAD,
							DMA_FROM_DEVICE);
	else
		ctx->protected_key_dma = dma_map_single(jrdev, ctx->protected_key,
							ctx->cdata.plain_keylen,
							DMA_FROM_DEVICE);

	ctx->cdata.protected_key_dma = ctx->protected_key_dma;
	ctx->is_blob = true;

	return 0;
}
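
/*
 * Illustration only, inferred from the accesses above: the protected-key
 * material handed to paes setkey is a struct caam_pkey_info header of
 * CAAM_PKEY_HEADER bytes (carrying plain_key_sz and key_enc_algo), followed
 * by the key blob itself in key_buf:
 *
 *	+-----------------------+--------------------------+
 *	| caam_pkey_info header | key blob (keylen bytes)  |
 *	+-----------------------+--------------------------+
 */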

static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
				   const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
	keylen -= CTR_RFC3686_NONCE_SIZE;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	ctx1_iv_off = 16;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;
	int err;

	err = xts_verify_key(skcipher, key, keylen);
	if (err) {
		dev_dbg(jrdev, "key size mismatch\n");
		return err;
	}

	if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
		ctx->xts_key_fallback = true;

	if (ctrlpriv->era <= 8 || ctx->xts_key_fallback) {
		err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
		if (err)
			return err;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts_skcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/* xts_skcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}
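
/*
 * To summarize the fallback policy above: the software fallback tfm is keyed
 * whenever the hardware may not be able to process an XTS request itself,
 * i.e. on CAAM era <= 8, or when the key is neither 2 x AES-128 nor
 * 2 x AES-256 (ctx->xts_key_fallback).
 */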

/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input s/w scatterlist
 * @dst_nents: number of segments in output s/w scatterlist
 * @mapped_src_nents: number of segments in input h/w link table
 * @mapped_dst_nents: number of segments in output h/w link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @bklog: stored to determine if the request needs backlog
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	int mapped_src_nents;
	int mapped_dst_nents;
	int sec4_sg_bytes;
	bool bklog;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};

/*
 * skcipher_edesc - s/w-extended skcipher descriptor
 * @src_nents: number of segments in input s/w scatterlist
 * @dst_nents: number of segments in output s/w scatterlist
 * @mapped_src_nents: number of segments in input h/w link table
 * @mapped_dst_nents: number of segments in output h/w link table
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @bklog: stored to determine if the request needs backlog
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *	     and IV
 */
struct skcipher_edesc {
	int src_nents;
	int dst_nents;
	int mapped_src_nents;
	int mapped_dst_nents;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	bool bklog;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};
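
/*
 * Illustration only: skcipher_edesc is carved out of a single kzalloc'ed
 * region (see skcipher_edesc_alloc() and skcipher_edesc_iv() below), with
 * the IV copied to the end so it lives in DMAable, cache-aligned memory:
 *
 *	+-------+-----------+------------------+-----+----+
 *	| edesc | hw_desc[] | sec4_sg link tbl | pad | IV |
 *	+-------+-----------+------------------+-----+----+
 *	                                             ^ aligned to
 *	                                               dma_get_cache_alignment()
 */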

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_BIDIRECTIONAL);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents, 0, 0,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err,
			    void *context)
{
	struct aead_request *req = context;
	struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct aead_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = rctx->edesc;
	has_bklog = edesc->bklog;

	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	kfree(edesc);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		aead_request_complete(req, ecode);
	else
		crypto_finalize_aead_request(jrp->engine, req, ecode);
}

static inline u8 *skcipher_edesc_iv(struct skcipher_edesc *edesc)
{
	return PTR_ALIGN((u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
			 dma_get_cache_alignment());
}

static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
				void *context)
{
	struct skcipher_request *req = context;
	struct skcipher_edesc *edesc;
	struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = rctx->edesc;
	has_bklog = edesc->bklog;
	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	skcipher_unmap(jrdev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (ivsize && !ecode) {
		memcpy(req->iv, skcipher_edesc_iv(edesc), ivsize);

		print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
				     ivsize, 1);
	}

	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	kfree(edesc);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		skcipher_request_complete(req, ecode);
	else
		crypto_finalize_skcipher_request(jrp->engine, req, ecode);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(struct aead_request *req,
			  struct aead_edesc *edesc,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	dma_addr_t ptr;
	u32 *sh_desc;

	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = edesc->mapped_src_nents ? sg_dma_address(req->src) :
						    0;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
			  in_options);

	dst_dma = src_dma;
	out_options = in_options;

	if (unlikely(req->src != req->dst)) {
		if (!edesc->mapped_dst_nents) {
			dst_dma = 0;
			out_options = 0;
		} else if (edesc->mapped_dst_nents == 1) {
			dst_dma = sg_dma_address(req->dst);
			out_options = 0;
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	if (encrypt)
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen - authsize,
				   out_options);
}

static void init_gcm_job(struct aead_request *req,
			 struct aead_edesc *edesc,
			 bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc = edesc->hw_desc;
	bool generic_gcm = (ivsize == GCM_AES_IV_SIZE);
	unsigned int last;

	init_aead_job(req, edesc, all_contig, encrypt);
	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);

	/* BUG This should not be specific to generic GCM. */
	last = 0;
	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
		last = FIFOLD_TYPE_LAST1;

	/* Read GCM IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | GCM_AES_IV_SIZE | last);
	/* Append Salt */
	if (!generic_gcm)
		append_data(desc, ctx->key + ctx->cdata.keylen, 4);
	/* Append IV */
	append_data(desc, req->iv, ivsize);
	/* End of blank commands */
}

static void init_chachapoly_job(struct aead_request *req,
				struct aead_edesc *edesc, bool all_contig,
				bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int assoclen = req->assoclen;
	u32 *desc = edesc->hw_desc;
	u32 ctx_iv_off = 4;

	init_aead_job(req, edesc, all_contig, encrypt);

	if (ivsize != CHACHAPOLY_IV_SIZE) {
		/* IPsec specific: CONTEXT1[223:128] = {NONCE, IV} */
		ctx_iv_off += 4;

		/*
		 * The associated data comes already with the IV but we need
		 * to skip it when we authenticate or encrypt...
		 */
		assoclen -= ivsize;
	}

	append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen);

	/*
	 * For IPsec load the IV further in the same register.
	 * For RFC7539 simply load the 12 bytes nonce in a single operation
	 */
	append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB |
			   LDST_SRCDST_BYTE_CONTEXT |
			   ctx_iv_off << LDST_OFFSET_SHIFT);
}

static void init_authenc_job(struct aead_request *req,
			     struct aead_edesc *edesc,
			     bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg,
						 aead.base);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	u32 *desc = edesc->hw_desc;
	u32 ivoffset = 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ivoffset = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;

	init_aead_job(req, edesc, all_contig, encrypt);

	/*
	 * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
	 * having DPOVRD as destination.
	 */
	if (ctrlpriv->era < 3)
		append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
	else
		append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);

	if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
		append_load_as_imm(desc, req->iv, ivsize,
				   LDST_CLASS_1_CCB |
				   LDST_SRCDST_BYTE_CONTEXT |
				   (ivoffset << LDST_OFFSET_SHIFT));
}

/*
 * Fill in skcipher job descriptor
 */
static void init_skcipher_job(struct skcipher_request *req,
			      struct skcipher_edesc *edesc,
			      const bool encrypt)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct device *jrdev = ctx->jrdev;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 *desc = !ctx->is_blob ? edesc->hw_desc :
		    (u32 *)((u8 *)edesc->hw_desc + CAAM_DESC_BYTES_MAX);
	dma_addr_t desc_dma;
	u32 *sh_desc;
	u32 in_options = 0, out_options = 0;
	dma_addr_t src_dma, dst_dma, ptr;
	int len, sec4_sg_index = 0;

	print_hex_dump_debug("presciv@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	dev_dbg(jrdev, "asked=%d, cryptlen=%d\n",
		(int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen);

	caam_dump_sg("src    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		     edesc->src_nents > 1 ? 100 : req->cryptlen, 1);

	if (ivsize || edesc->mapped_src_nents > 1) {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index = edesc->mapped_src_nents + !!ivsize;
		in_options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
	}

	if (likely(req->src == req->dst)) {
		dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry);
		out_options = in_options;
	} else if (!ivsize && edesc->mapped_dst_nents == 1) {
		dst_dma = sg_dma_address(req->dst);
	} else {
		dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
			  sizeof(struct sec4_sg_entry);
		out_options = LDST_SGF;
	}

	if (ctx->is_blob) {
		cnstr_desc_skcipher_enc_dec(desc, &ctx->cdata,
					    src_dma, dst_dma, req->cryptlen + ivsize,
					    in_options, out_options,
					    ivsize, encrypt);

		desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE);

		cnstr_desc_protected_blob_decap(edesc->hw_desc, &ctx->cdata, desc_dma);
	} else {
		sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
		ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;

		len = desc_len(sh_desc);
		init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
		append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options);

		append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
	}
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	int src_len, dst_len = 0;
	struct aead_edesc *edesc;
	int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	if (unlikely(req->dst != req->src)) {
		src_len = req->assoclen + req->cryptlen;
		dst_len = src_len + (encrypt ? authsize : (-authsize));

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, dst_len);
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				dst_len);
			return ERR_PTR(dst_nents);
		}
	} else {
		src_len = req->assoclen + req->cryptlen +
			  (encrypt ? authsize : 0);

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			return ERR_PTR(src_nents);
		}
	}

	if (likely(req->src == req->dst)) {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* Cover also the case of null (zero length) input data */
		if (src_nents) {
			mapped_src_nents = dma_map_sg(jrdev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(jrdev, "unable to map source\n");
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		/* Cover also the case of null (zero length) output data */
		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(jrdev, req->dst,
						      dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(jrdev, "unable to map destination\n");
				dma_unmap_sg(jrdev, req->src, src_nents,
					     DMA_TO_DEVICE);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	}

	/*
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries.
	 */
	sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
	if (mapped_dst_nents > 1)
		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, flags);
	if (!edesc) {
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->mapped_src_nents = mapped_src_nents;
	edesc->mapped_dst_nents = mapped_dst_nents;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;

	rctx->edesc = edesc;

	*all_contig_ptr = !(mapped_src_nents > 1);

	sec4_sg_index = 0;
	if (mapped_src_nents > 1) {
		sg_to_sec4_sg_last(req->src, src_len,
				   edesc->sec4_sg + sec4_sg_index, 0);
		sec4_sg_index += mapped_src_nents;
	}
	if (mapped_dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, dst_len,
				   edesc->sec4_sg + sec4_sg_index, 0);

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;
}
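
/*
 * Worked example for the S/G sizing in aead_edesc_alloc(), assuming
 * pad_sg_nents() rounds up to a multiple of 4 (the HW reads 4 S/G entries
 * at a time): with mapped_src_nents = 3 and mapped_dst_nents = 2,
 * sec4_sg_len = 3 + pad_sg_nents(2) = 3 + 4 = 7 entries, i.e.
 * sec4_sg_bytes = 7 * sizeof(struct sec4_sg_entry).
 */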

static int aead_enqueue_req(struct device *jrdev, struct aead_request *req)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
	struct aead_edesc *edesc = rctx->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	/*
	 * Only backlog requests are sent to crypto-engine since the others
	 * can be handled by CAAM, if free, especially since JR has up to 1024
	 * entries (more than the 10 entries from crypto-engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_aead_request_to_engine(jrpriv->engine,
							     req);
	else
		ret = caam_jr_enqueue(jrdev, desc, aead_crypt_done, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		aead_unmap(jrdev, edesc, req);
		kfree(rctx->edesc);
	}

	return ret;
}
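
/*
 * Illustration only: whether a request takes the crypto-engine (backlog)
 * path above is decided by the caller when setting up the request, e.g.:
 *
 *	aead_request_set_callback(req,
 *				  CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				  CRYPTO_TFM_REQ_MAY_SLEEP,
 *				  done_cb, cb_data);
 *
 * where done_cb/cb_data are placeholders for the caller's completion hook.
 */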

static inline int chachapoly_crypt(struct aead_request *req, bool encrypt)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;

	edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
				 encrypt);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	desc = edesc->hw_desc;

	init_chachapoly_job(req, edesc, all_contig, encrypt);
	print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return aead_enqueue_req(jrdev, req);
}

static int chachapoly_encrypt(struct aead_request *req)
{
	return chachapoly_crypt(req, true);
}

static int chachapoly_decrypt(struct aead_request *req)
{
	return chachapoly_crypt(req, false);
}

static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, encrypt);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, encrypt);

	print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
			     desc_bytes(edesc->hw_desc), 1);

	return aead_enqueue_req(jrdev, req);
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_crypt(req, true);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_crypt(req, false);
}
aead_do_one_req(struct crypto_engine * engine,void * areq)1620 static int aead_do_one_req(struct crypto_engine *engine, void *areq)
1621 {
1622 struct aead_request *req = aead_request_cast(areq);
1623 struct caam_ctx *ctx = crypto_aead_ctx_dma(crypto_aead_reqtfm(req));
1624 struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
1625 u32 *desc = rctx->edesc->hw_desc;
1626 int ret;
1627
1628 rctx->edesc->bklog = true;
1629
1630 ret = caam_jr_enqueue(ctx->jrdev, desc, aead_crypt_done, req);
1631
1632 if (ret == -ENOSPC && engine->retry_support)
1633 return ret;
1634
1635 if (ret != -EINPROGRESS) {
1636 aead_unmap(ctx->jrdev, rctx->edesc, req);
1637 kfree(rctx->edesc);
1638 } else {
1639 ret = 0;
1640 }
1641
1642 return ret;
1643 }

static inline int gcm_crypt(struct aead_request *req, bool encrypt)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig,
				 encrypt);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, encrypt);

	print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
			     desc_bytes(edesc->hw_desc), 1);

	return aead_enqueue_req(jrdev, req);
}

static int gcm_encrypt(struct aead_request *req)
{
	return gcm_crypt(req, true);
}

static int gcm_decrypt(struct aead_request *req)
{
	return gcm_crypt(req, false);
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_encrypt(req);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_decrypt(req);
}
1688
1689 /*
1690 * allocate and map the skcipher extended descriptor for skcipher
1691 */
skcipher_edesc_alloc(struct skcipher_request * req,int desc_bytes)1692 static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
1693 int desc_bytes)
1694 {
1695 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1696 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1697 struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
1698 struct device *jrdev = ctx->jrdev;
1699 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1700 GFP_KERNEL : GFP_ATOMIC;
1701 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1702 struct skcipher_edesc *edesc;
1703 dma_addr_t iv_dma = 0;
1704 u8 *iv;
1705 int ivsize = crypto_skcipher_ivsize(skcipher);
1706 int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
1707 unsigned int aligned_size;
1708
1709 src_nents = sg_nents_for_len(req->src, req->cryptlen);
1710 if (unlikely(src_nents < 0)) {
1711 dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
1712 req->cryptlen);
1713 return ERR_PTR(src_nents);
1714 }
1715
1716 if (req->dst != req->src) {
1717 dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
1718 if (unlikely(dst_nents < 0)) {
1719 dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
1720 req->cryptlen);
1721 return ERR_PTR(dst_nents);
1722 }
1723 }
1724
1725 if (likely(req->src == req->dst)) {
1726 mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
1727 DMA_BIDIRECTIONAL);
1728 if (unlikely(!mapped_src_nents)) {
1729 dev_err(jrdev, "unable to map source\n");
1730 return ERR_PTR(-ENOMEM);
1731 }
1732 } else {
1733 mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
1734 DMA_TO_DEVICE);
1735 if (unlikely(!mapped_src_nents)) {
1736 dev_err(jrdev, "unable to map source\n");
1737 return ERR_PTR(-ENOMEM);
1738 }
1739 mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
1740 DMA_FROM_DEVICE);
1741 if (unlikely(!mapped_dst_nents)) {
1742 dev_err(jrdev, "unable to map destination\n");
1743 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1744 return ERR_PTR(-ENOMEM);
1745 }
1746 }
1747
1748 if (!ivsize && mapped_src_nents == 1)
1749 sec4_sg_ents = 0; // no need for an input hw s/g table
1750 else
1751 sec4_sg_ents = mapped_src_nents + !!ivsize;
1752 dst_sg_idx = sec4_sg_ents;
1753
1754 /*
1755 * Input, output HW S/G tables: [IV, src][dst, IV]
1756 * IV entries point to the same buffer
1757 * If src == dst, S/G entries are reused (S/G tables overlap)
1758 *
1759 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
1760 * the end of the table by allocating more S/G entries. Logic:
1761 * if (output S/G)
1762 *	pad output S/G, if needed
1763 * else if (input S/G)
1764 *	pad input S/G, if needed
1765 */
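	/*
	 * Worked example (illustrative values, not mandated by the HW):
	 * with ivsize = 16, src != dst, mapped_src_nents = 2 and
	 * mapped_dst_nents = 3, the input table is [IV, src0, src1]
	 * (3 entries, so dst_sg_idx = 3) and the output table is
	 * [dst0, dst1, dst2, IV]; only the last table needs padding,
	 * since only reads past the final entry can overrun the
	 * allocation, so pad_sg_nents() rounds it from 4 up to the next
	 * burst multiple and sec4_sg_ents becomes 3 + pad_sg_nents(4).
	 */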
1766 if (ivsize || mapped_dst_nents > 1) {
1767 if (req->src == req->dst)
1768 sec4_sg_ents = !!ivsize + pad_sg_nents(sec4_sg_ents);
1769 else
1770 sec4_sg_ents += pad_sg_nents(mapped_dst_nents +
1771 !!ivsize);
1772 } else {
1773 sec4_sg_ents = pad_sg_nents(sec4_sg_ents);
1774 }
1775
1776 sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
1777
1778 /*
1779 * allocate space for base edesc and hw desc commands, link tables, IV
1780 */
1781 aligned_size = sizeof(*edesc) + desc_bytes + sec4_sg_bytes;
1782 aligned_size = ALIGN(aligned_size, dma_get_cache_alignment());
1783 aligned_size += ~(ARCH_KMALLOC_MINALIGN - 1) &
1784 (dma_get_cache_alignment() - 1);
1785 aligned_size += ALIGN(ivsize, dma_get_cache_alignment());
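	/*
	 * Sizing sketch (assuming a 64-byte cache line purely for
	 * illustration): the edesc + descriptor + S/G footprint is first
	 * rounded up to the cache alignment, headroom is added so the IV
	 * can be realigned after a kmalloc() that only guarantees
	 * ARCH_KMALLOC_MINALIGN, and a cache-aligned IV slot is appended;
	 * even ivsize = 16 then occupies a full 64-byte slot, keeping the
	 * DMA-mapped IV off any cache line the CPU still writes.
	 */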
1786 edesc = kzalloc(aligned_size, flags);
1787 if (!edesc) {
1788 dev_err(jrdev, "could not allocate extended descriptor\n");
1789 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
1790 0, 0, 0);
1791 return ERR_PTR(-ENOMEM);
1792 }
1793
1794 edesc->src_nents = src_nents;
1795 edesc->dst_nents = dst_nents;
1796 edesc->mapped_src_nents = mapped_src_nents;
1797 edesc->mapped_dst_nents = mapped_dst_nents;
1798 edesc->sec4_sg_bytes = sec4_sg_bytes;
1799 edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
1800 desc_bytes);
1801 rctx->edesc = edesc;
1802
1803 /* Make sure IV is located in a DMAable area */
1804 if (ivsize) {
1805 iv = skcipher_edesc_iv(edesc);
1806 memcpy(iv, req->iv, ivsize);
1807
1808 iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL);
1809 if (dma_mapping_error(jrdev, iv_dma)) {
1810 dev_err(jrdev, "unable to map IV\n");
1811 caam_unmap(jrdev, req->src, req->dst, src_nents,
1812 dst_nents, 0, 0, 0, 0);
1813 kfree(edesc);
1814 return ERR_PTR(-ENOMEM);
1815 }
1816
1817 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
1818 }
1819 if (dst_sg_idx)
1820 sg_to_sec4_sg(req->src, req->cryptlen, edesc->sec4_sg +
1821 !!ivsize, 0);
1822
1823 if (req->src != req->dst && (ivsize || mapped_dst_nents > 1))
1824 sg_to_sec4_sg(req->dst, req->cryptlen, edesc->sec4_sg +
1825 dst_sg_idx, 0);
1826
1827 if (ivsize)
1828 dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx +
1829 mapped_dst_nents, iv_dma, ivsize, 0);
1830
1831 if (ivsize || mapped_dst_nents > 1)
1832 sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx +
1833 mapped_dst_nents - 1 + !!ivsize);
1834
1835 if (sec4_sg_bytes) {
1836 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1837 sec4_sg_bytes,
1838 DMA_TO_DEVICE);
1839 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1840 dev_err(jrdev, "unable to map S/G table\n");
1841 caam_unmap(jrdev, req->src, req->dst, src_nents,
1842 dst_nents, iv_dma, ivsize, 0, 0);
1843 kfree(edesc);
1844 return ERR_PTR(-ENOMEM);
1845 }
1846 }
1847
1848 edesc->iv_dma = iv_dma;
1849
1850 print_hex_dump_debug("skcipher sec4_sg@" __stringify(__LINE__)": ",
1851 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1852 sec4_sg_bytes, 1);
1853
1854 return edesc;
1855 }
1856
1857 static int skcipher_do_one_req(struct crypto_engine *engine, void *areq)
1858 {
1859 struct skcipher_request *req = skcipher_request_cast(areq);
1860 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(crypto_skcipher_reqtfm(req));
1861 struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
1862 u32 *desc = rctx->edesc->hw_desc;
1863 int ret;
1864
1865 rctx->edesc->bklog = true;
1866
1867 ret = caam_jr_enqueue(ctx->jrdev, desc, skcipher_crypt_done, req);
1868
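	/*
	 * If the JR is full and the engine supports retries, hand -ENOSPC
	 * back to crypto-engine so it requeues the request instead of
	 * completing it with an error; the edesc is kept alive for the
	 * retry.
	 */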
1869 if (ret == -ENOSPC && engine->retry_support)
1870 return ret;
1871
1872 if (ret != -EINPROGRESS) {
1873 skcipher_unmap(ctx->jrdev, rctx->edesc, req);
1874 kfree(rctx->edesc);
1875 } else {
1876 ret = 0;
1877 }
1878
1879 return ret;
1880 }
1881
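/*
 * True when the upper half of the XTS IV (the high 64 bits of the sector
 * index) is non-zero; era <= 8 hardware cannot process such tweaks, so
 * skcipher_crypt() below steers those requests to the software fallback.
 */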
1882 static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
1883 {
1884 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1885 unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
1886
1887 return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
1888 }
1889
1890 static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
1891 {
1892 struct skcipher_edesc *edesc;
1893 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1894 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1895 struct device *jrdev = ctx->jrdev;
1896 struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
1897 struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
1898 u32 *desc;
1899 int ret = 0;
1900 int len;
1901
1902 /*
1903 * XTS is expected to return an error even for input length = 0
1904 * Note that the case of input length < block size is caught by the
1905 * HW offload, which returns an error.
1906 */
1907 if (!req->cryptlen && !ctx->fallback)
1908 return 0;
1909
1910 if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) ||
1911 ctx->xts_key_fallback)) {
1912 struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
1913
1914 skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
1915 skcipher_request_set_callback(&rctx->fallback_req,
1916 req->base.flags,
1917 req->base.complete,
1918 req->base.data);
1919 skcipher_request_set_crypt(&rctx->fallback_req, req->src,
1920 req->dst, req->cryptlen, req->iv);
1921
1922 return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
1923 crypto_skcipher_decrypt(&rctx->fallback_req);
1924 }
1925
1926 len = DESC_JOB_IO_LEN * CAAM_CMD_SZ;
1927 if (ctx->is_blob)
1928 len += CAAM_DESC_BYTES_MAX;
1929
1930 /* allocate extended descriptor */
1931 edesc = skcipher_edesc_alloc(req, len);
1932 if (IS_ERR(edesc))
1933 return PTR_ERR(edesc);
1934
1935 /* Create and submit job descriptor */
1936 init_skcipher_job(req, edesc, encrypt);
1937
1938 print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
1939 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1940 desc_bytes(edesc->hw_desc), 1);
1941
1942 desc = edesc->hw_desc;
1943 /*
1944 * Only backlog requests are sent to crypto-engine since the others
1945 * can be handled by CAAM, if free, especially since JR has up to 1024
1946 * entries (more than the 10 entries from crypto-engine).
1947 */
1948 if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
1949 ret = crypto_transfer_skcipher_request_to_engine(jrpriv->engine,
1950 req);
1951 else
1952 ret = caam_jr_enqueue(jrdev, desc, skcipher_crypt_done, req);
1953
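	/*
	 * -EINPROGRESS means the JR accepted the job and -EBUSY means
	 * crypto-engine backlogged it; both complete asynchronously via
	 * skcipher_crypt_done(). Anything else is a failure, so the
	 * descriptor resources are reclaimed here.
	 */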
1954 if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
1955 skcipher_unmap(jrdev, edesc, req);
1956 kfree(edesc);
1957 }
1958
1959 return ret;
1960 }
1961
1962 static int skcipher_encrypt(struct skcipher_request *req)
1963 {
1964 return skcipher_crypt(req, true);
1965 }
1966
1967 static int skcipher_decrypt(struct skcipher_request *req)
1968 {
1969 return skcipher_crypt(req, false);
1970 }
1971
1972 static struct caam_skcipher_alg driver_algs[] = {
1973 {
1974 .skcipher.base = {
1975 .base = {
1976 .cra_name = "cbc(paes)",
1977 .cra_driver_name = "cbc-paes-caam",
1978 .cra_blocksize = AES_BLOCK_SIZE,
1979 },
1980 .setkey = paes_skcipher_setkey,
1981 .encrypt = skcipher_encrypt,
1982 .decrypt = skcipher_decrypt,
1983 .min_keysize = AES_MIN_KEY_SIZE + CAAM_BLOB_OVERHEAD +
1984 CAAM_PKEY_HEADER,
1985 .max_keysize = AES_MAX_KEY_SIZE + CAAM_BLOB_OVERHEAD +
1986 CAAM_PKEY_HEADER,
1987 .ivsize = AES_BLOCK_SIZE,
1988 },
1989 .skcipher.op = {
1990 .do_one_request = skcipher_do_one_req,
1991 },
1992 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1993 },
1994 {
1995 .skcipher.base = {
1996 .base = {
1997 .cra_name = "cbc(aes)",
1998 .cra_driver_name = "cbc-aes-caam",
1999 .cra_blocksize = AES_BLOCK_SIZE,
2000 },
2001 .setkey = aes_skcipher_setkey,
2002 .encrypt = skcipher_encrypt,
2003 .decrypt = skcipher_decrypt,
2004 .min_keysize = AES_MIN_KEY_SIZE,
2005 .max_keysize = AES_MAX_KEY_SIZE,
2006 .ivsize = AES_BLOCK_SIZE,
2007 },
2008 .skcipher.op = {
2009 .do_one_request = skcipher_do_one_req,
2010 },
2011 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2012 },
2013 {
2014 .skcipher.base = {
2015 .base = {
2016 .cra_name = "cbc(des3_ede)",
2017 .cra_driver_name = "cbc-3des-caam",
2018 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2019 },
2020 .setkey = des3_skcipher_setkey,
2021 .encrypt = skcipher_encrypt,
2022 .decrypt = skcipher_decrypt,
2023 .min_keysize = DES3_EDE_KEY_SIZE,
2024 .max_keysize = DES3_EDE_KEY_SIZE,
2025 .ivsize = DES3_EDE_BLOCK_SIZE,
2026 },
2027 .skcipher.op = {
2028 .do_one_request = skcipher_do_one_req,
2029 },
2030 .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2031 },
2032 {
2033 .skcipher.base = {
2034 .base = {
2035 .cra_name = "cbc(des)",
2036 .cra_driver_name = "cbc-des-caam",
2037 .cra_blocksize = DES_BLOCK_SIZE,
2038 },
2039 .setkey = des_skcipher_setkey,
2040 .encrypt = skcipher_encrypt,
2041 .decrypt = skcipher_decrypt,
2042 .min_keysize = DES_KEY_SIZE,
2043 .max_keysize = DES_KEY_SIZE,
2044 .ivsize = DES_BLOCK_SIZE,
2045 },
2046 .skcipher.op = {
2047 .do_one_request = skcipher_do_one_req,
2048 },
2049 .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2050 },
2051 {
2052 .skcipher.base = {
2053 .base = {
2054 .cra_name = "ctr(aes)",
2055 .cra_driver_name = "ctr-aes-caam",
2056 .cra_blocksize = 1,
2057 },
2058 .setkey = ctr_skcipher_setkey,
2059 .encrypt = skcipher_encrypt,
2060 .decrypt = skcipher_decrypt,
2061 .min_keysize = AES_MIN_KEY_SIZE,
2062 .max_keysize = AES_MAX_KEY_SIZE,
2063 .ivsize = AES_BLOCK_SIZE,
2064 .chunksize = AES_BLOCK_SIZE,
2065 },
2066 .skcipher.op = {
2067 .do_one_request = skcipher_do_one_req,
2068 },
2069 .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
2070 OP_ALG_AAI_CTR_MOD128,
2071 },
2072 {
2073 .skcipher.base = {
2074 .base = {
2075 .cra_name = "rfc3686(ctr(aes))",
2076 .cra_driver_name = "rfc3686-ctr-aes-caam",
2077 .cra_blocksize = 1,
2078 },
2079 .setkey = rfc3686_skcipher_setkey,
2080 .encrypt = skcipher_encrypt,
2081 .decrypt = skcipher_decrypt,
2082 .min_keysize = AES_MIN_KEY_SIZE +
2083 CTR_RFC3686_NONCE_SIZE,
2084 .max_keysize = AES_MAX_KEY_SIZE +
2085 CTR_RFC3686_NONCE_SIZE,
2086 .ivsize = CTR_RFC3686_IV_SIZE,
2087 .chunksize = AES_BLOCK_SIZE,
2088 },
2089 .skcipher.op = {
2090 .do_one_request = skcipher_do_one_req,
2091 },
2092 .caam = {
2093 .class1_alg_type = OP_ALG_ALGSEL_AES |
2094 OP_ALG_AAI_CTR_MOD128,
2095 .rfc3686 = true,
2096 },
2097 },
2098 {
2099 .skcipher.base = {
2100 .base = {
2101 .cra_name = "xts(aes)",
2102 .cra_driver_name = "xts-aes-caam",
2103 .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
2104 .cra_blocksize = AES_BLOCK_SIZE,
2105 },
2106 .setkey = xts_skcipher_setkey,
2107 .encrypt = skcipher_encrypt,
2108 .decrypt = skcipher_decrypt,
2109 .min_keysize = 2 * AES_MIN_KEY_SIZE,
2110 .max_keysize = 2 * AES_MAX_KEY_SIZE,
2111 .ivsize = AES_BLOCK_SIZE,
2112 },
2113 .skcipher.op = {
2114 .do_one_request = skcipher_do_one_req,
2115 },
2116 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
2117 },
2118 {
2119 .skcipher.base = {
2120 .base = {
2121 .cra_name = "ecb(des)",
2122 .cra_driver_name = "ecb-des-caam",
2123 .cra_blocksize = DES_BLOCK_SIZE,
2124 },
2125 .setkey = des_skcipher_setkey,
2126 .encrypt = skcipher_encrypt,
2127 .decrypt = skcipher_decrypt,
2128 .min_keysize = DES_KEY_SIZE,
2129 .max_keysize = DES_KEY_SIZE,
2130 },
2131 .skcipher.op = {
2132 .do_one_request = skcipher_do_one_req,
2133 },
2134 .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_ECB,
2135 },
2136 {
2137 .skcipher.base = {
2138 .base = {
2139 .cra_name = "ecb(aes)",
2140 .cra_driver_name = "ecb-aes-caam",
2141 .cra_blocksize = AES_BLOCK_SIZE,
2142 },
2143 .setkey = aes_skcipher_setkey,
2144 .encrypt = skcipher_encrypt,
2145 .decrypt = skcipher_decrypt,
2146 .min_keysize = AES_MIN_KEY_SIZE,
2147 .max_keysize = AES_MAX_KEY_SIZE,
2148 },
2149 .skcipher.op = {
2150 .do_one_request = skcipher_do_one_req,
2151 },
2152 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB,
2153 },
2154 {
2155 .skcipher.base = {
2156 .base = {
2157 .cra_name = "ecb(des3_ede)",
2158 .cra_driver_name = "ecb-des3-caam",
2159 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2160 },
2161 .setkey = des3_skcipher_setkey,
2162 .encrypt = skcipher_encrypt,
2163 .decrypt = skcipher_decrypt,
2164 .min_keysize = DES3_EDE_KEY_SIZE,
2165 .max_keysize = DES3_EDE_KEY_SIZE,
2166 },
2167 .skcipher.op = {
2168 .do_one_request = skcipher_do_one_req,
2169 },
2170 .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB,
2171 },
2172 };
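
/*
 * Usage sketch (hypothetical caller, not part of this driver): once an
 * entry above is registered, a kernel user reaches it through the generic
 * skcipher API, landing on the CAAM backend whenever CAAM_CRA_PRIORITY
 * wins the priority-based lookup:
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
 *	crypto_skcipher_encrypt(req);	-- async: may return -EINPROGRESS
 */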
2173
2174 static struct caam_aead_alg driver_aeads[] = {
2175 {
2176 .aead.base = {
2177 .base = {
2178 .cra_name = "rfc4106(gcm(aes))",
2179 .cra_driver_name = "rfc4106-gcm-aes-caam",
2180 .cra_blocksize = 1,
2181 },
2182 .setkey = rfc4106_setkey,
2183 .setauthsize = rfc4106_setauthsize,
2184 .encrypt = ipsec_gcm_encrypt,
2185 .decrypt = ipsec_gcm_decrypt,
2186 .ivsize = GCM_RFC4106_IV_SIZE,
2187 .maxauthsize = AES_BLOCK_SIZE,
2188 },
2189 .aead.op = {
2190 .do_one_request = aead_do_one_req,
2191 },
2192 .caam = {
2193 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
2194 .nodkp = true,
2195 },
2196 },
2197 {
2198 .aead.base = {
2199 .base = {
2200 .cra_name = "rfc4543(gcm(aes))",
2201 .cra_driver_name = "rfc4543-gcm-aes-caam",
2202 .cra_blocksize = 1,
2203 },
2204 .setkey = rfc4543_setkey,
2205 .setauthsize = rfc4543_setauthsize,
2206 .encrypt = ipsec_gcm_encrypt,
2207 .decrypt = ipsec_gcm_decrypt,
2208 .ivsize = GCM_RFC4543_IV_SIZE,
2209 .maxauthsize = AES_BLOCK_SIZE,
2210 },
2211 .aead.op = {
2212 .do_one_request = aead_do_one_req,
2213 },
2214 .caam = {
2215 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
2216 .nodkp = true,
2217 },
2218 },
2219 /* Galois Counter Mode */
2220 {
2221 .aead.base = {
2222 .base = {
2223 .cra_name = "gcm(aes)",
2224 .cra_driver_name = "gcm-aes-caam",
2225 .cra_blocksize = 1,
2226 },
2227 .setkey = gcm_setkey,
2228 .setauthsize = gcm_setauthsize,
2229 .encrypt = gcm_encrypt,
2230 .decrypt = gcm_decrypt,
2231 .ivsize = GCM_AES_IV_SIZE,
2232 .maxauthsize = AES_BLOCK_SIZE,
2233 },
2234 .aead.op = {
2235 .do_one_request = aead_do_one_req,
2236 },
2237 .caam = {
2238 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
2239 .nodkp = true,
2240 },
2241 },
2242 /* single-pass ipsec_esp descriptor */
2243 {
2244 .aead.base = {
2245 .base = {
2246 .cra_name = "authenc(hmac(md5),"
2247 "ecb(cipher_null))",
2248 .cra_driver_name = "authenc-hmac-md5-"
2249 "ecb-cipher_null-caam",
2250 .cra_blocksize = NULL_BLOCK_SIZE,
2251 },
2252 .setkey = aead_setkey,
2253 .setauthsize = aead_setauthsize,
2254 .encrypt = aead_encrypt,
2255 .decrypt = aead_decrypt,
2256 .ivsize = NULL_IV_SIZE,
2257 .maxauthsize = MD5_DIGEST_SIZE,
2258 },
2259 .aead.op = {
2260 .do_one_request = aead_do_one_req,
2261 },
2262 .caam = {
2263 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2264 OP_ALG_AAI_HMAC_PRECOMP,
2265 },
2266 },
2267 {
2268 .aead.base = {
2269 .base = {
2270 .cra_name = "authenc(hmac(sha1),"
2271 "ecb(cipher_null))",
2272 .cra_driver_name = "authenc-hmac-sha1-"
2273 "ecb-cipher_null-caam",
2274 .cra_blocksize = NULL_BLOCK_SIZE,
2275 },
2276 .setkey = aead_setkey,
2277 .setauthsize = aead_setauthsize,
2278 .encrypt = aead_encrypt,
2279 .decrypt = aead_decrypt,
2280 .ivsize = NULL_IV_SIZE,
2281 .maxauthsize = SHA1_DIGEST_SIZE,
2282 },
2283 .aead.op = {
2284 .do_one_request = aead_do_one_req,
2285 },
2286 .caam = {
2287 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2288 OP_ALG_AAI_HMAC_PRECOMP,
2289 },
2290 },
2291 {
2292 .aead.base = {
2293 .base = {
2294 .cra_name = "authenc(hmac(sha224),"
2295 "ecb(cipher_null))",
2296 .cra_driver_name = "authenc-hmac-sha224-"
2297 "ecb-cipher_null-caam",
2298 .cra_blocksize = NULL_BLOCK_SIZE,
2299 },
2300 .setkey = aead_setkey,
2301 .setauthsize = aead_setauthsize,
2302 .encrypt = aead_encrypt,
2303 .decrypt = aead_decrypt,
2304 .ivsize = NULL_IV_SIZE,
2305 .maxauthsize = SHA224_DIGEST_SIZE,
2306 },
2307 .aead.op = {
2308 .do_one_request = aead_do_one_req,
2309 },
2310 .caam = {
2311 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2312 OP_ALG_AAI_HMAC_PRECOMP,
2313 },
2314 },
2315 {
2316 .aead.base = {
2317 .base = {
2318 .cra_name = "authenc(hmac(sha256),"
2319 "ecb(cipher_null))",
2320 .cra_driver_name = "authenc-hmac-sha256-"
2321 "ecb-cipher_null-caam",
2322 .cra_blocksize = NULL_BLOCK_SIZE,
2323 },
2324 .setkey = aead_setkey,
2325 .setauthsize = aead_setauthsize,
2326 .encrypt = aead_encrypt,
2327 .decrypt = aead_decrypt,
2328 .ivsize = NULL_IV_SIZE,
2329 .maxauthsize = SHA256_DIGEST_SIZE,
2330 },
2331 .aead.op = {
2332 .do_one_request = aead_do_one_req,
2333 },
2334 .caam = {
2335 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2336 OP_ALG_AAI_HMAC_PRECOMP,
2337 },
2338 },
2339 {
2340 .aead.base = {
2341 .base = {
2342 .cra_name = "authenc(hmac(sha384),"
2343 "ecb(cipher_null))",
2344 .cra_driver_name = "authenc-hmac-sha384-"
2345 "ecb-cipher_null-caam",
2346 .cra_blocksize = NULL_BLOCK_SIZE,
2347 },
2348 .setkey = aead_setkey,
2349 .setauthsize = aead_setauthsize,
2350 .encrypt = aead_encrypt,
2351 .decrypt = aead_decrypt,
2352 .ivsize = NULL_IV_SIZE,
2353 .maxauthsize = SHA384_DIGEST_SIZE,
2354 },
2355 .aead.op = {
2356 .do_one_request = aead_do_one_req,
2357 },
2358 .caam = {
2359 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2360 OP_ALG_AAI_HMAC_PRECOMP,
2361 },
2362 },
2363 {
2364 .aead.base = {
2365 .base = {
2366 .cra_name = "authenc(hmac(sha512),"
2367 "ecb(cipher_null))",
2368 .cra_driver_name = "authenc-hmac-sha512-"
2369 "ecb-cipher_null-caam",
2370 .cra_blocksize = NULL_BLOCK_SIZE,
2371 },
2372 .setkey = aead_setkey,
2373 .setauthsize = aead_setauthsize,
2374 .encrypt = aead_encrypt,
2375 .decrypt = aead_decrypt,
2376 .ivsize = NULL_IV_SIZE,
2377 .maxauthsize = SHA512_DIGEST_SIZE,
2378 },
2379 .aead.op = {
2380 .do_one_request = aead_do_one_req,
2381 },
2382 .caam = {
2383 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2384 OP_ALG_AAI_HMAC_PRECOMP,
2385 },
2386 },
2387 {
2388 .aead.base = {
2389 .base = {
2390 .cra_name = "authenc(hmac(md5),cbc(aes))",
2391 .cra_driver_name = "authenc-hmac-md5-"
2392 "cbc-aes-caam",
2393 .cra_blocksize = AES_BLOCK_SIZE,
2394 },
2395 .setkey = aead_setkey,
2396 .setauthsize = aead_setauthsize,
2397 .encrypt = aead_encrypt,
2398 .decrypt = aead_decrypt,
2399 .ivsize = AES_BLOCK_SIZE,
2400 .maxauthsize = MD5_DIGEST_SIZE,
2401 },
2402 .aead.op = {
2403 .do_one_request = aead_do_one_req,
2404 },
2405 .caam = {
2406 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2407 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2408 OP_ALG_AAI_HMAC_PRECOMP,
2409 },
2410 },
2411 {
2412 .aead.base = {
2413 .base = {
2414 .cra_name = "echainiv(authenc(hmac(md5),"
2415 "cbc(aes)))",
2416 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2417 "cbc-aes-caam",
2418 .cra_blocksize = AES_BLOCK_SIZE,
2419 },
2420 .setkey = aead_setkey,
2421 .setauthsize = aead_setauthsize,
2422 .encrypt = aead_encrypt,
2423 .decrypt = aead_decrypt,
2424 .ivsize = AES_BLOCK_SIZE,
2425 .maxauthsize = MD5_DIGEST_SIZE,
2426 },
2427 .aead.op = {
2428 .do_one_request = aead_do_one_req,
2429 },
2430 .caam = {
2431 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2432 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2433 OP_ALG_AAI_HMAC_PRECOMP,
2434 .geniv = true,
2435 },
2436 },
2437 {
2438 .aead.base = {
2439 .base = {
2440 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2441 .cra_driver_name = "authenc-hmac-sha1-"
2442 "cbc-aes-caam",
2443 .cra_blocksize = AES_BLOCK_SIZE,
2444 },
2445 .setkey = aead_setkey,
2446 .setauthsize = aead_setauthsize,
2447 .encrypt = aead_encrypt,
2448 .decrypt = aead_decrypt,
2449 .ivsize = AES_BLOCK_SIZE,
2450 .maxauthsize = SHA1_DIGEST_SIZE,
2451 },
2452 .aead.op = {
2453 .do_one_request = aead_do_one_req,
2454 },
2455 .caam = {
2456 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2457 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2458 OP_ALG_AAI_HMAC_PRECOMP,
2459 },
2460 },
2461 {
2462 .aead.base = {
2463 .base = {
2464 .cra_name = "echainiv(authenc(hmac(sha1),"
2465 "cbc(aes)))",
2466 .cra_driver_name = "echainiv-authenc-"
2467 "hmac-sha1-cbc-aes-caam",
2468 .cra_blocksize = AES_BLOCK_SIZE,
2469 },
2470 .setkey = aead_setkey,
2471 .setauthsize = aead_setauthsize,
2472 .encrypt = aead_encrypt,
2473 .decrypt = aead_decrypt,
2474 .ivsize = AES_BLOCK_SIZE,
2475 .maxauthsize = SHA1_DIGEST_SIZE,
2476 },
2477 .aead.op = {
2478 .do_one_request = aead_do_one_req,
2479 },
2480 .caam = {
2481 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2482 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2483 OP_ALG_AAI_HMAC_PRECOMP,
2484 .geniv = true,
2485 },
2486 },
2487 {
2488 .aead.base = {
2489 .base = {
2490 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2491 .cra_driver_name = "authenc-hmac-sha224-"
2492 "cbc-aes-caam",
2493 .cra_blocksize = AES_BLOCK_SIZE,
2494 },
2495 .setkey = aead_setkey,
2496 .setauthsize = aead_setauthsize,
2497 .encrypt = aead_encrypt,
2498 .decrypt = aead_decrypt,
2499 .ivsize = AES_BLOCK_SIZE,
2500 .maxauthsize = SHA224_DIGEST_SIZE,
2501 },
2502 .aead.op = {
2503 .do_one_request = aead_do_one_req,
2504 },
2505 .caam = {
2506 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2507 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2508 OP_ALG_AAI_HMAC_PRECOMP,
2509 },
2510 },
2511 {
2512 .aead.base = {
2513 .base = {
2514 .cra_name = "echainiv(authenc(hmac(sha224),"
2515 "cbc(aes)))",
2516 .cra_driver_name = "echainiv-authenc-"
2517 "hmac-sha224-cbc-aes-caam",
2518 .cra_blocksize = AES_BLOCK_SIZE,
2519 },
2520 .setkey = aead_setkey,
2521 .setauthsize = aead_setauthsize,
2522 .encrypt = aead_encrypt,
2523 .decrypt = aead_decrypt,
2524 .ivsize = AES_BLOCK_SIZE,
2525 .maxauthsize = SHA224_DIGEST_SIZE,
2526 },
2527 .aead.op = {
2528 .do_one_request = aead_do_one_req,
2529 },
2530 .caam = {
2531 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2532 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2533 OP_ALG_AAI_HMAC_PRECOMP,
2534 .geniv = true,
2535 },
2536 },
2537 {
2538 .aead.base = {
2539 .base = {
2540 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2541 .cra_driver_name = "authenc-hmac-sha256-"
2542 "cbc-aes-caam",
2543 .cra_blocksize = AES_BLOCK_SIZE,
2544 },
2545 .setkey = aead_setkey,
2546 .setauthsize = aead_setauthsize,
2547 .encrypt = aead_encrypt,
2548 .decrypt = aead_decrypt,
2549 .ivsize = AES_BLOCK_SIZE,
2550 .maxauthsize = SHA256_DIGEST_SIZE,
2551 },
2552 .aead.op = {
2553 .do_one_request = aead_do_one_req,
2554 },
2555 .caam = {
2556 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2557 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2558 OP_ALG_AAI_HMAC_PRECOMP,
2559 },
2560 },
2561 {
2562 .aead.base = {
2563 .base = {
2564 .cra_name = "echainiv(authenc(hmac(sha256),"
2565 "cbc(aes)))",
2566 .cra_driver_name = "echainiv-authenc-"
2567 "hmac-sha256-cbc-aes-caam",
2568 .cra_blocksize = AES_BLOCK_SIZE,
2569 },
2570 .setkey = aead_setkey,
2571 .setauthsize = aead_setauthsize,
2572 .encrypt = aead_encrypt,
2573 .decrypt = aead_decrypt,
2574 .ivsize = AES_BLOCK_SIZE,
2575 .maxauthsize = SHA256_DIGEST_SIZE,
2576 },
2577 .aead.op = {
2578 .do_one_request = aead_do_one_req,
2579 },
2580 .caam = {
2581 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2582 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2583 OP_ALG_AAI_HMAC_PRECOMP,
2584 .geniv = true,
2585 },
2586 },
2587 {
2588 .aead.base = {
2589 .base = {
2590 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2591 .cra_driver_name = "authenc-hmac-sha384-"
2592 "cbc-aes-caam",
2593 .cra_blocksize = AES_BLOCK_SIZE,
2594 },
2595 .setkey = aead_setkey,
2596 .setauthsize = aead_setauthsize,
2597 .encrypt = aead_encrypt,
2598 .decrypt = aead_decrypt,
2599 .ivsize = AES_BLOCK_SIZE,
2600 .maxauthsize = SHA384_DIGEST_SIZE,
2601 },
2602 .aead.op = {
2603 .do_one_request = aead_do_one_req,
2604 },
2605 .caam = {
2606 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2607 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2608 OP_ALG_AAI_HMAC_PRECOMP,
2609 },
2610 },
2611 {
2612 .aead.base = {
2613 .base = {
2614 .cra_name = "echainiv(authenc(hmac(sha384),"
2615 "cbc(aes)))",
2616 .cra_driver_name = "echainiv-authenc-"
2617 "hmac-sha384-cbc-aes-caam",
2618 .cra_blocksize = AES_BLOCK_SIZE,
2619 },
2620 .setkey = aead_setkey,
2621 .setauthsize = aead_setauthsize,
2622 .encrypt = aead_encrypt,
2623 .decrypt = aead_decrypt,
2624 .ivsize = AES_BLOCK_SIZE,
2625 .maxauthsize = SHA384_DIGEST_SIZE,
2626 },
2627 .aead.op = {
2628 .do_one_request = aead_do_one_req,
2629 },
2630 .caam = {
2631 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2632 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2633 OP_ALG_AAI_HMAC_PRECOMP,
2634 .geniv = true,
2635 },
2636 },
2637 {
2638 .aead.base = {
2639 .base = {
2640 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2641 .cra_driver_name = "authenc-hmac-sha512-"
2642 "cbc-aes-caam",
2643 .cra_blocksize = AES_BLOCK_SIZE,
2644 },
2645 .setkey = aead_setkey,
2646 .setauthsize = aead_setauthsize,
2647 .encrypt = aead_encrypt,
2648 .decrypt = aead_decrypt,
2649 .ivsize = AES_BLOCK_SIZE,
2650 .maxauthsize = SHA512_DIGEST_SIZE,
2651 },
2652 .aead.op = {
2653 .do_one_request = aead_do_one_req,
2654 },
2655 .caam = {
2656 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2657 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2658 OP_ALG_AAI_HMAC_PRECOMP,
2659 },
2660 },
2661 {
2662 .aead.base = {
2663 .base = {
2664 .cra_name = "echainiv(authenc(hmac(sha512),"
2665 "cbc(aes)))",
2666 .cra_driver_name = "echainiv-authenc-"
2667 "hmac-sha512-cbc-aes-caam",
2668 .cra_blocksize = AES_BLOCK_SIZE,
2669 },
2670 .setkey = aead_setkey,
2671 .setauthsize = aead_setauthsize,
2672 .encrypt = aead_encrypt,
2673 .decrypt = aead_decrypt,
2674 .ivsize = AES_BLOCK_SIZE,
2675 .maxauthsize = SHA512_DIGEST_SIZE,
2676 },
2677 .aead.op = {
2678 .do_one_request = aead_do_one_req,
2679 },
2680 .caam = {
2681 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2682 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2683 OP_ALG_AAI_HMAC_PRECOMP,
2684 .geniv = true,
2685 },
2686 },
2687 {
2688 .aead.base = {
2689 .base = {
2690 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2691 .cra_driver_name = "authenc-hmac-md5-"
2692 "cbc-des3_ede-caam",
2693 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2694 },
2695 .setkey = des3_aead_setkey,
2696 .setauthsize = aead_setauthsize,
2697 .encrypt = aead_encrypt,
2698 .decrypt = aead_decrypt,
2699 .ivsize = DES3_EDE_BLOCK_SIZE,
2700 .maxauthsize = MD5_DIGEST_SIZE,
2701 },
2702 .aead.op = {
2703 .do_one_request = aead_do_one_req,
2704 },
2705 .caam = {
2706 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2707 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2708 OP_ALG_AAI_HMAC_PRECOMP,
2709 }
2710 },
2711 {
2712 .aead.base = {
2713 .base = {
2714 .cra_name = "echainiv(authenc(hmac(md5),"
2715 "cbc(des3_ede)))",
2716 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2717 "cbc-des3_ede-caam",
2718 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2719 },
2720 .setkey = des3_aead_setkey,
2721 .setauthsize = aead_setauthsize,
2722 .encrypt = aead_encrypt,
2723 .decrypt = aead_decrypt,
2724 .ivsize = DES3_EDE_BLOCK_SIZE,
2725 .maxauthsize = MD5_DIGEST_SIZE,
2726 },
2727 .aead.op = {
2728 .do_one_request = aead_do_one_req,
2729 },
2730 .caam = {
2731 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2732 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2733 OP_ALG_AAI_HMAC_PRECOMP,
2734 .geniv = true,
2735 }
2736 },
2737 {
2738 .aead.base = {
2739 .base = {
2740 .cra_name = "authenc(hmac(sha1),"
2741 "cbc(des3_ede))",
2742 .cra_driver_name = "authenc-hmac-sha1-"
2743 "cbc-des3_ede-caam",
2744 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2745 },
2746 .setkey = des3_aead_setkey,
2747 .setauthsize = aead_setauthsize,
2748 .encrypt = aead_encrypt,
2749 .decrypt = aead_decrypt,
2750 .ivsize = DES3_EDE_BLOCK_SIZE,
2751 .maxauthsize = SHA1_DIGEST_SIZE,
2752 },
2753 .aead.op = {
2754 .do_one_request = aead_do_one_req,
2755 },
2756 .caam = {
2757 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2758 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2759 OP_ALG_AAI_HMAC_PRECOMP,
2760 },
2761 },
2762 {
2763 .aead.base = {
2764 .base = {
2765 .cra_name = "echainiv(authenc(hmac(sha1),"
2766 "cbc(des3_ede)))",
2767 .cra_driver_name = "echainiv-authenc-"
2768 "hmac-sha1-"
2769 "cbc-des3_ede-caam",
2770 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2771 },
2772 .setkey = des3_aead_setkey,
2773 .setauthsize = aead_setauthsize,
2774 .encrypt = aead_encrypt,
2775 .decrypt = aead_decrypt,
2776 .ivsize = DES3_EDE_BLOCK_SIZE,
2777 .maxauthsize = SHA1_DIGEST_SIZE,
2778 },
2779 .aead.op = {
2780 .do_one_request = aead_do_one_req,
2781 },
2782 .caam = {
2783 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2784 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2785 OP_ALG_AAI_HMAC_PRECOMP,
2786 .geniv = true,
2787 },
2788 },
2789 {
2790 .aead.base = {
2791 .base = {
2792 .cra_name = "authenc(hmac(sha224),"
2793 "cbc(des3_ede))",
2794 .cra_driver_name = "authenc-hmac-sha224-"
2795 "cbc-des3_ede-caam",
2796 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2797 },
2798 .setkey = des3_aead_setkey,
2799 .setauthsize = aead_setauthsize,
2800 .encrypt = aead_encrypt,
2801 .decrypt = aead_decrypt,
2802 .ivsize = DES3_EDE_BLOCK_SIZE,
2803 .maxauthsize = SHA224_DIGEST_SIZE,
2804 },
2805 .aead.op = {
2806 .do_one_request = aead_do_one_req,
2807 },
2808 .caam = {
2809 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2810 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2811 OP_ALG_AAI_HMAC_PRECOMP,
2812 },
2813 },
2814 {
2815 .aead.base = {
2816 .base = {
2817 .cra_name = "echainiv(authenc(hmac(sha224),"
2818 "cbc(des3_ede)))",
2819 .cra_driver_name = "echainiv-authenc-"
2820 "hmac-sha224-"
2821 "cbc-des3_ede-caam",
2822 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2823 },
2824 .setkey = des3_aead_setkey,
2825 .setauthsize = aead_setauthsize,
2826 .encrypt = aead_encrypt,
2827 .decrypt = aead_decrypt,
2828 .ivsize = DES3_EDE_BLOCK_SIZE,
2829 .maxauthsize = SHA224_DIGEST_SIZE,
2830 },
2831 .aead.op = {
2832 .do_one_request = aead_do_one_req,
2833 },
2834 .caam = {
2835 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2836 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2837 OP_ALG_AAI_HMAC_PRECOMP,
2838 .geniv = true,
2839 },
2840 },
2841 {
2842 .aead.base = {
2843 .base = {
2844 .cra_name = "authenc(hmac(sha256),"
2845 "cbc(des3_ede))",
2846 .cra_driver_name = "authenc-hmac-sha256-"
2847 "cbc-des3_ede-caam",
2848 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2849 },
2850 .setkey = des3_aead_setkey,
2851 .setauthsize = aead_setauthsize,
2852 .encrypt = aead_encrypt,
2853 .decrypt = aead_decrypt,
2854 .ivsize = DES3_EDE_BLOCK_SIZE,
2855 .maxauthsize = SHA256_DIGEST_SIZE,
2856 },
2857 .aead.op = {
2858 .do_one_request = aead_do_one_req,
2859 },
2860 .caam = {
2861 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2862 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2863 OP_ALG_AAI_HMAC_PRECOMP,
2864 },
2865 },
2866 {
2867 .aead.base = {
2868 .base = {
2869 .cra_name = "echainiv(authenc(hmac(sha256),"
2870 "cbc(des3_ede)))",
2871 .cra_driver_name = "echainiv-authenc-"
2872 "hmac-sha256-"
2873 "cbc-des3_ede-caam",
2874 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2875 },
2876 .setkey = des3_aead_setkey,
2877 .setauthsize = aead_setauthsize,
2878 .encrypt = aead_encrypt,
2879 .decrypt = aead_decrypt,
2880 .ivsize = DES3_EDE_BLOCK_SIZE,
2881 .maxauthsize = SHA256_DIGEST_SIZE,
2882 },
2883 .aead.op = {
2884 .do_one_request = aead_do_one_req,
2885 },
2886 .caam = {
2887 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2888 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2889 OP_ALG_AAI_HMAC_PRECOMP,
2890 .geniv = true,
2891 },
2892 },
2893 {
2894 .aead.base = {
2895 .base = {
2896 .cra_name = "authenc(hmac(sha384),"
2897 "cbc(des3_ede))",
2898 .cra_driver_name = "authenc-hmac-sha384-"
2899 "cbc-des3_ede-caam",
2900 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2901 },
2902 .setkey = des3_aead_setkey,
2903 .setauthsize = aead_setauthsize,
2904 .encrypt = aead_encrypt,
2905 .decrypt = aead_decrypt,
2906 .ivsize = DES3_EDE_BLOCK_SIZE,
2907 .maxauthsize = SHA384_DIGEST_SIZE,
2908 },
2909 .aead.op = {
2910 .do_one_request = aead_do_one_req,
2911 },
2912 .caam = {
2913 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2914 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2915 OP_ALG_AAI_HMAC_PRECOMP,
2916 },
2917 },
2918 {
2919 .aead.base = {
2920 .base = {
2921 .cra_name = "echainiv(authenc(hmac(sha384),"
2922 "cbc(des3_ede)))",
2923 .cra_driver_name = "echainiv-authenc-"
2924 "hmac-sha384-"
2925 "cbc-des3_ede-caam",
2926 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2927 },
2928 .setkey = des3_aead_setkey,
2929 .setauthsize = aead_setauthsize,
2930 .encrypt = aead_encrypt,
2931 .decrypt = aead_decrypt,
2932 .ivsize = DES3_EDE_BLOCK_SIZE,
2933 .maxauthsize = SHA384_DIGEST_SIZE,
2934 },
2935 .aead.op = {
2936 .do_one_request = aead_do_one_req,
2937 },
2938 .caam = {
2939 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2940 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2941 OP_ALG_AAI_HMAC_PRECOMP,
2942 .geniv = true,
2943 },
2944 },
2945 {
2946 .aead.base = {
2947 .base = {
2948 .cra_name = "authenc(hmac(sha512),"
2949 "cbc(des3_ede))",
2950 .cra_driver_name = "authenc-hmac-sha512-"
2951 "cbc-des3_ede-caam",
2952 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2953 },
2954 .setkey = des3_aead_setkey,
2955 .setauthsize = aead_setauthsize,
2956 .encrypt = aead_encrypt,
2957 .decrypt = aead_decrypt,
2958 .ivsize = DES3_EDE_BLOCK_SIZE,
2959 .maxauthsize = SHA512_DIGEST_SIZE,
2960 },
2961 .aead.op = {
2962 .do_one_request = aead_do_one_req,
2963 },
2964 .caam = {
2965 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2966 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2967 OP_ALG_AAI_HMAC_PRECOMP,
2968 },
2969 },
2970 {
2971 .aead.base = {
2972 .base = {
2973 .cra_name = "echainiv(authenc(hmac(sha512),"
2974 "cbc(des3_ede)))",
2975 .cra_driver_name = "echainiv-authenc-"
2976 "hmac-sha512-"
2977 "cbc-des3_ede-caam",
2978 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2979 },
2980 .setkey = des3_aead_setkey,
2981 .setauthsize = aead_setauthsize,
2982 .encrypt = aead_encrypt,
2983 .decrypt = aead_decrypt,
2984 .ivsize = DES3_EDE_BLOCK_SIZE,
2985 .maxauthsize = SHA512_DIGEST_SIZE,
2986 },
2987 .aead.op = {
2988 .do_one_request = aead_do_one_req,
2989 },
2990 .caam = {
2991 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2992 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2993 OP_ALG_AAI_HMAC_PRECOMP,
2994 .geniv = true,
2995 },
2996 },
2997 {
2998 .aead.base = {
2999 .base = {
3000 .cra_name = "authenc(hmac(md5),cbc(des))",
3001 .cra_driver_name = "authenc-hmac-md5-"
3002 "cbc-des-caam",
3003 .cra_blocksize = DES_BLOCK_SIZE,
3004 },
3005 .setkey = aead_setkey,
3006 .setauthsize = aead_setauthsize,
3007 .encrypt = aead_encrypt,
3008 .decrypt = aead_decrypt,
3009 .ivsize = DES_BLOCK_SIZE,
3010 .maxauthsize = MD5_DIGEST_SIZE,
3011 },
3012 .aead.op = {
3013 .do_one_request = aead_do_one_req,
3014 },
3015 .caam = {
3016 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3017 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3018 OP_ALG_AAI_HMAC_PRECOMP,
3019 },
3020 },
3021 {
3022 .aead.base = {
3023 .base = {
3024 .cra_name = "echainiv(authenc(hmac(md5),"
3025 "cbc(des)))",
3026 .cra_driver_name = "echainiv-authenc-hmac-md5-"
3027 "cbc-des-caam",
3028 .cra_blocksize = DES_BLOCK_SIZE,
3029 },
3030 .setkey = aead_setkey,
3031 .setauthsize = aead_setauthsize,
3032 .encrypt = aead_encrypt,
3033 .decrypt = aead_decrypt,
3034 .ivsize = DES_BLOCK_SIZE,
3035 .maxauthsize = MD5_DIGEST_SIZE,
3036 },
3037 .aead.op = {
3038 .do_one_request = aead_do_one_req,
3039 },
3040 .caam = {
3041 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3042 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3043 OP_ALG_AAI_HMAC_PRECOMP,
3044 .geniv = true,
3045 },
3046 },
3047 {
3048 .aead.base = {
3049 .base = {
3050 .cra_name = "authenc(hmac(sha1),cbc(des))",
3051 .cra_driver_name = "authenc-hmac-sha1-"
3052 "cbc-des-caam",
3053 .cra_blocksize = DES_BLOCK_SIZE,
3054 },
3055 .setkey = aead_setkey,
3056 .setauthsize = aead_setauthsize,
3057 .encrypt = aead_encrypt,
3058 .decrypt = aead_decrypt,
3059 .ivsize = DES_BLOCK_SIZE,
3060 .maxauthsize = SHA1_DIGEST_SIZE,
3061 },
3062 .aead.op = {
3063 .do_one_request = aead_do_one_req,
3064 },
3065 .caam = {
3066 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3067 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3068 OP_ALG_AAI_HMAC_PRECOMP,
3069 },
3070 },
3071 {
3072 .aead.base = {
3073 .base = {
3074 .cra_name = "echainiv(authenc(hmac(sha1),"
3075 "cbc(des)))",
3076 .cra_driver_name = "echainiv-authenc-"
3077 "hmac-sha1-cbc-des-caam",
3078 .cra_blocksize = DES_BLOCK_SIZE,
3079 },
3080 .setkey = aead_setkey,
3081 .setauthsize = aead_setauthsize,
3082 .encrypt = aead_encrypt,
3083 .decrypt = aead_decrypt,
3084 .ivsize = DES_BLOCK_SIZE,
3085 .maxauthsize = SHA1_DIGEST_SIZE,
3086 },
3087 .aead.op = {
3088 .do_one_request = aead_do_one_req,
3089 },
3090 .caam = {
3091 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3092 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3093 OP_ALG_AAI_HMAC_PRECOMP,
3094 .geniv = true,
3095 },
3096 },
3097 {
3098 .aead.base = {
3099 .base = {
3100 .cra_name = "authenc(hmac(sha224),cbc(des))",
3101 .cra_driver_name = "authenc-hmac-sha224-"
3102 "cbc-des-caam",
3103 .cra_blocksize = DES_BLOCK_SIZE,
3104 },
3105 .setkey = aead_setkey,
3106 .setauthsize = aead_setauthsize,
3107 .encrypt = aead_encrypt,
3108 .decrypt = aead_decrypt,
3109 .ivsize = DES_BLOCK_SIZE,
3110 .maxauthsize = SHA224_DIGEST_SIZE,
3111 },
3112 .aead.op = {
3113 .do_one_request = aead_do_one_req,
3114 },
3115 .caam = {
3116 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3117 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3118 OP_ALG_AAI_HMAC_PRECOMP,
3119 },
3120 },
3121 {
3122 .aead.base = {
3123 .base = {
3124 .cra_name = "echainiv(authenc(hmac(sha224),"
3125 "cbc(des)))",
3126 .cra_driver_name = "echainiv-authenc-"
3127 "hmac-sha224-cbc-des-caam",
3128 .cra_blocksize = DES_BLOCK_SIZE,
3129 },
3130 .setkey = aead_setkey,
3131 .setauthsize = aead_setauthsize,
3132 .encrypt = aead_encrypt,
3133 .decrypt = aead_decrypt,
3134 .ivsize = DES_BLOCK_SIZE,
3135 .maxauthsize = SHA224_DIGEST_SIZE,
3136 },
3137 .aead.op = {
3138 .do_one_request = aead_do_one_req,
3139 },
3140 .caam = {
3141 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3142 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3143 OP_ALG_AAI_HMAC_PRECOMP,
3144 .geniv = true,
3145 },
3146 },
3147 {
3148 .aead.base = {
3149 .base = {
3150 .cra_name = "authenc(hmac(sha256),cbc(des))",
3151 .cra_driver_name = "authenc-hmac-sha256-"
3152 "cbc-des-caam",
3153 .cra_blocksize = DES_BLOCK_SIZE,
3154 },
3155 .setkey = aead_setkey,
3156 .setauthsize = aead_setauthsize,
3157 .encrypt = aead_encrypt,
3158 .decrypt = aead_decrypt,
3159 .ivsize = DES_BLOCK_SIZE,
3160 .maxauthsize = SHA256_DIGEST_SIZE,
3161 },
3162 .aead.op = {
3163 .do_one_request = aead_do_one_req,
3164 },
3165 .caam = {
3166 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3167 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3168 OP_ALG_AAI_HMAC_PRECOMP,
3169 },
3170 },
3171 {
3172 .aead.base = {
3173 .base = {
3174 .cra_name = "echainiv(authenc(hmac(sha256),"
3175 "cbc(des)))",
3176 .cra_driver_name = "echainiv-authenc-"
3177 "hmac-sha256-cbc-des-caam",
3178 .cra_blocksize = DES_BLOCK_SIZE,
3179 },
3180 .setkey = aead_setkey,
3181 .setauthsize = aead_setauthsize,
3182 .encrypt = aead_encrypt,
3183 .decrypt = aead_decrypt,
3184 .ivsize = DES_BLOCK_SIZE,
3185 .maxauthsize = SHA256_DIGEST_SIZE,
3186 },
3187 .aead.op = {
3188 .do_one_request = aead_do_one_req,
3189 },
3190 .caam = {
3191 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3192 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3193 OP_ALG_AAI_HMAC_PRECOMP,
3194 .geniv = true,
3195 },
3196 },
3197 {
3198 .aead.base = {
3199 .base = {
3200 .cra_name = "authenc(hmac(sha384),cbc(des))",
3201 .cra_driver_name = "authenc-hmac-sha384-"
3202 "cbc-des-caam",
3203 .cra_blocksize = DES_BLOCK_SIZE,
3204 },
3205 .setkey = aead_setkey,
3206 .setauthsize = aead_setauthsize,
3207 .encrypt = aead_encrypt,
3208 .decrypt = aead_decrypt,
3209 .ivsize = DES_BLOCK_SIZE,
3210 .maxauthsize = SHA384_DIGEST_SIZE,
3211 },
3212 .aead.op = {
3213 .do_one_request = aead_do_one_req,
3214 },
3215 .caam = {
3216 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3217 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3218 OP_ALG_AAI_HMAC_PRECOMP,
3219 },
3220 },
3221 {
3222 .aead.base = {
3223 .base = {
3224 .cra_name = "echainiv(authenc(hmac(sha384),"
3225 "cbc(des)))",
3226 .cra_driver_name = "echainiv-authenc-"
3227 "hmac-sha384-cbc-des-caam",
3228 .cra_blocksize = DES_BLOCK_SIZE,
3229 },
3230 .setkey = aead_setkey,
3231 .setauthsize = aead_setauthsize,
3232 .encrypt = aead_encrypt,
3233 .decrypt = aead_decrypt,
3234 .ivsize = DES_BLOCK_SIZE,
3235 .maxauthsize = SHA384_DIGEST_SIZE,
3236 },
3237 .aead.op = {
3238 .do_one_request = aead_do_one_req,
3239 },
3240 .caam = {
3241 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3242 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3243 OP_ALG_AAI_HMAC_PRECOMP,
3244 .geniv = true,
3245 },
3246 },
3247 {
3248 .aead.base = {
3249 .base = {
3250 .cra_name = "authenc(hmac(sha512),cbc(des))",
3251 .cra_driver_name = "authenc-hmac-sha512-"
3252 "cbc-des-caam",
3253 .cra_blocksize = DES_BLOCK_SIZE,
3254 },
3255 .setkey = aead_setkey,
3256 .setauthsize = aead_setauthsize,
3257 .encrypt = aead_encrypt,
3258 .decrypt = aead_decrypt,
3259 .ivsize = DES_BLOCK_SIZE,
3260 .maxauthsize = SHA512_DIGEST_SIZE,
3261 },
3262 .aead.op = {
3263 .do_one_request = aead_do_one_req,
3264 },
3265 .caam = {
3266 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3267 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3268 OP_ALG_AAI_HMAC_PRECOMP,
3269 },
3270 },
3271 {
3272 .aead.base = {
3273 .base = {
3274 .cra_name = "echainiv(authenc(hmac(sha512),"
3275 "cbc(des)))",
3276 .cra_driver_name = "echainiv-authenc-"
3277 "hmac-sha512-cbc-des-caam",
3278 .cra_blocksize = DES_BLOCK_SIZE,
3279 },
3280 .setkey = aead_setkey,
3281 .setauthsize = aead_setauthsize,
3282 .encrypt = aead_encrypt,
3283 .decrypt = aead_decrypt,
3284 .ivsize = DES_BLOCK_SIZE,
3285 .maxauthsize = SHA512_DIGEST_SIZE,
3286 },
3287 .aead.op = {
3288 .do_one_request = aead_do_one_req,
3289 },
3290 .caam = {
3291 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3292 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3293 OP_ALG_AAI_HMAC_PRECOMP,
3294 .geniv = true,
3295 },
3296 },
3297 {
3298 .aead.base = {
3299 .base = {
3300 .cra_name = "authenc(hmac(md5),"
3301 "rfc3686(ctr(aes)))",
3302 .cra_driver_name = "authenc-hmac-md5-"
3303 "rfc3686-ctr-aes-caam",
3304 .cra_blocksize = 1,
3305 },
3306 .setkey = aead_setkey,
3307 .setauthsize = aead_setauthsize,
3308 .encrypt = aead_encrypt,
3309 .decrypt = aead_decrypt,
3310 .ivsize = CTR_RFC3686_IV_SIZE,
3311 .maxauthsize = MD5_DIGEST_SIZE,
3312 },
3313 .aead.op = {
3314 .do_one_request = aead_do_one_req,
3315 },
3316 .caam = {
3317 .class1_alg_type = OP_ALG_ALGSEL_AES |
3318 OP_ALG_AAI_CTR_MOD128,
3319 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3320 OP_ALG_AAI_HMAC_PRECOMP,
3321 .rfc3686 = true,
3322 },
3323 },
3324 {
3325 .aead.base = {
3326 .base = {
3327 .cra_name = "seqiv(authenc("
3328 "hmac(md5),rfc3686(ctr(aes))))",
3329 .cra_driver_name = "seqiv-authenc-hmac-md5-"
3330 "rfc3686-ctr-aes-caam",
3331 .cra_blocksize = 1,
3332 },
3333 .setkey = aead_setkey,
3334 .setauthsize = aead_setauthsize,
3335 .encrypt = aead_encrypt,
3336 .decrypt = aead_decrypt,
3337 .ivsize = CTR_RFC3686_IV_SIZE,
3338 .maxauthsize = MD5_DIGEST_SIZE,
3339 },
3340 .aead.op = {
3341 .do_one_request = aead_do_one_req,
3342 },
3343 .caam = {
3344 .class1_alg_type = OP_ALG_ALGSEL_AES |
3345 OP_ALG_AAI_CTR_MOD128,
3346 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3347 OP_ALG_AAI_HMAC_PRECOMP,
3348 .rfc3686 = true,
3349 .geniv = true,
3350 },
3351 },
3352 {
3353 .aead.base = {
3354 .base = {
3355 .cra_name = "authenc(hmac(sha1),"
3356 "rfc3686(ctr(aes)))",
3357 .cra_driver_name = "authenc-hmac-sha1-"
3358 "rfc3686-ctr-aes-caam",
3359 .cra_blocksize = 1,
3360 },
3361 .setkey = aead_setkey,
3362 .setauthsize = aead_setauthsize,
3363 .encrypt = aead_encrypt,
3364 .decrypt = aead_decrypt,
3365 .ivsize = CTR_RFC3686_IV_SIZE,
3366 .maxauthsize = SHA1_DIGEST_SIZE,
3367 },
3368 .aead.op = {
3369 .do_one_request = aead_do_one_req,
3370 },
3371 .caam = {
3372 .class1_alg_type = OP_ALG_ALGSEL_AES |
3373 OP_ALG_AAI_CTR_MOD128,
3374 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3375 OP_ALG_AAI_HMAC_PRECOMP,
3376 .rfc3686 = true,
3377 },
3378 },
3379 {
3380 .aead.base = {
3381 .base = {
3382 .cra_name = "seqiv(authenc("
3383 "hmac(sha1),rfc3686(ctr(aes))))",
3384 .cra_driver_name = "seqiv-authenc-hmac-sha1-"
3385 "rfc3686-ctr-aes-caam",
3386 .cra_blocksize = 1,
3387 },
3388 .setkey = aead_setkey,
3389 .setauthsize = aead_setauthsize,
3390 .encrypt = aead_encrypt,
3391 .decrypt = aead_decrypt,
3392 .ivsize = CTR_RFC3686_IV_SIZE,
3393 .maxauthsize = SHA1_DIGEST_SIZE,
3394 },
3395 .aead.op = {
3396 .do_one_request = aead_do_one_req,
3397 },
3398 .caam = {
3399 .class1_alg_type = OP_ALG_ALGSEL_AES |
3400 OP_ALG_AAI_CTR_MOD128,
3401 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3402 OP_ALG_AAI_HMAC_PRECOMP,
3403 .rfc3686 = true,
3404 .geniv = true,
3405 },
3406 },
3407 {
3408 .aead.base = {
3409 .base = {
3410 .cra_name = "authenc(hmac(sha224),"
3411 "rfc3686(ctr(aes)))",
3412 .cra_driver_name = "authenc-hmac-sha224-"
3413 "rfc3686-ctr-aes-caam",
3414 .cra_blocksize = 1,
3415 },
3416 .setkey = aead_setkey,
3417 .setauthsize = aead_setauthsize,
3418 .encrypt = aead_encrypt,
3419 .decrypt = aead_decrypt,
3420 .ivsize = CTR_RFC3686_IV_SIZE,
3421 .maxauthsize = SHA224_DIGEST_SIZE,
3422 },
3423 .aead.op = {
3424 .do_one_request = aead_do_one_req,
3425 },
3426 .caam = {
3427 .class1_alg_type = OP_ALG_ALGSEL_AES |
3428 OP_ALG_AAI_CTR_MOD128,
3429 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3430 OP_ALG_AAI_HMAC_PRECOMP,
3431 .rfc3686 = true,
3432 },
3433 },
3434 {
3435 .aead.base = {
3436 .base = {
3437 .cra_name = "seqiv(authenc("
3438 "hmac(sha224),rfc3686(ctr(aes))))",
3439 .cra_driver_name = "seqiv-authenc-hmac-sha224-"
3440 "rfc3686-ctr-aes-caam",
3441 .cra_blocksize = 1,
3442 },
3443 .setkey = aead_setkey,
3444 .setauthsize = aead_setauthsize,
3445 .encrypt = aead_encrypt,
3446 .decrypt = aead_decrypt,
3447 .ivsize = CTR_RFC3686_IV_SIZE,
3448 .maxauthsize = SHA224_DIGEST_SIZE,
3449 },
3450 .aead.op = {
3451 .do_one_request = aead_do_one_req,
3452 },
3453 .caam = {
3454 .class1_alg_type = OP_ALG_ALGSEL_AES |
3455 OP_ALG_AAI_CTR_MOD128,
3456 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3457 OP_ALG_AAI_HMAC_PRECOMP,
3458 .rfc3686 = true,
3459 .geniv = true,
3460 },
3461 },
3462 {
3463 .aead.base = {
3464 .base = {
3465 .cra_name = "authenc(hmac(sha256),"
3466 "rfc3686(ctr(aes)))",
3467 .cra_driver_name = "authenc-hmac-sha256-"
3468 "rfc3686-ctr-aes-caam",
3469 .cra_blocksize = 1,
3470 },
3471 .setkey = aead_setkey,
3472 .setauthsize = aead_setauthsize,
3473 .encrypt = aead_encrypt,
3474 .decrypt = aead_decrypt,
3475 .ivsize = CTR_RFC3686_IV_SIZE,
3476 .maxauthsize = SHA256_DIGEST_SIZE,
3477 },
3478 .aead.op = {
3479 .do_one_request = aead_do_one_req,
3480 },
3481 .caam = {
3482 .class1_alg_type = OP_ALG_ALGSEL_AES |
3483 OP_ALG_AAI_CTR_MOD128,
3484 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3485 OP_ALG_AAI_HMAC_PRECOMP,
3486 .rfc3686 = true,
3487 },
3488 },
3489 {
3490 .aead.base = {
3491 .base = {
3492 .cra_name = "seqiv(authenc(hmac(sha256),"
3493 "rfc3686(ctr(aes))))",
3494 .cra_driver_name = "seqiv-authenc-hmac-sha256-"
3495 "rfc3686-ctr-aes-caam",
3496 .cra_blocksize = 1,
3497 },
3498 .setkey = aead_setkey,
3499 .setauthsize = aead_setauthsize,
3500 .encrypt = aead_encrypt,
3501 .decrypt = aead_decrypt,
3502 .ivsize = CTR_RFC3686_IV_SIZE,
3503 .maxauthsize = SHA256_DIGEST_SIZE,
3504 },
3505 .aead.op = {
3506 .do_one_request = aead_do_one_req,
3507 },
3508 .caam = {
3509 .class1_alg_type = OP_ALG_ALGSEL_AES |
3510 OP_ALG_AAI_CTR_MOD128,
3511 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3512 OP_ALG_AAI_HMAC_PRECOMP,
3513 .rfc3686 = true,
3514 .geniv = true,
3515 },
3516 },
3517 {
3518 .aead.base = {
3519 .base = {
3520 .cra_name = "authenc(hmac(sha384),"
3521 "rfc3686(ctr(aes)))",
3522 .cra_driver_name = "authenc-hmac-sha384-"
3523 "rfc3686-ctr-aes-caam",
3524 .cra_blocksize = 1,
3525 },
3526 .setkey = aead_setkey,
3527 .setauthsize = aead_setauthsize,
3528 .encrypt = aead_encrypt,
3529 .decrypt = aead_decrypt,
3530 .ivsize = CTR_RFC3686_IV_SIZE,
3531 .maxauthsize = SHA384_DIGEST_SIZE,
3532 },
3533 .aead.op = {
3534 .do_one_request = aead_do_one_req,
3535 },
3536 .caam = {
3537 .class1_alg_type = OP_ALG_ALGSEL_AES |
3538 OP_ALG_AAI_CTR_MOD128,
3539 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3540 OP_ALG_AAI_HMAC_PRECOMP,
3541 .rfc3686 = true,
3542 },
3543 },
3544 {
3545 .aead.base = {
3546 .base = {
3547 .cra_name = "seqiv(authenc(hmac(sha384),"
3548 "rfc3686(ctr(aes))))",
3549 .cra_driver_name = "seqiv-authenc-hmac-sha384-"
3550 "rfc3686-ctr-aes-caam",
3551 .cra_blocksize = 1,
3552 },
3553 .setkey = aead_setkey,
3554 .setauthsize = aead_setauthsize,
3555 .encrypt = aead_encrypt,
3556 .decrypt = aead_decrypt,
3557 .ivsize = CTR_RFC3686_IV_SIZE,
3558 .maxauthsize = SHA384_DIGEST_SIZE,
3559 },
3560 .aead.op = {
3561 .do_one_request = aead_do_one_req,
3562 },
3563 .caam = {
3564 .class1_alg_type = OP_ALG_ALGSEL_AES |
3565 OP_ALG_AAI_CTR_MOD128,
3566 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3567 OP_ALG_AAI_HMAC_PRECOMP,
3568 .rfc3686 = true,
3569 .geniv = true,
3570 },
3571 },
3572 {
3573 .aead.base = {
3574 .base = {
3575 .cra_name = "authenc(hmac(sha512),"
3576 "rfc3686(ctr(aes)))",
3577 .cra_driver_name = "authenc-hmac-sha512-"
3578 "rfc3686-ctr-aes-caam",
3579 .cra_blocksize = 1,
3580 },
3581 .setkey = aead_setkey,
3582 .setauthsize = aead_setauthsize,
3583 .encrypt = aead_encrypt,
3584 .decrypt = aead_decrypt,
3585 .ivsize = CTR_RFC3686_IV_SIZE,
3586 .maxauthsize = SHA512_DIGEST_SIZE,
3587 },
3588 .aead.op = {
3589 .do_one_request = aead_do_one_req,
3590 },
3591 .caam = {
3592 .class1_alg_type = OP_ALG_ALGSEL_AES |
3593 OP_ALG_AAI_CTR_MOD128,
3594 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3595 OP_ALG_AAI_HMAC_PRECOMP,
3596 .rfc3686 = true,
3597 },
3598 },
3599 {
3600 .aead.base = {
3601 .base = {
3602 .cra_name = "seqiv(authenc(hmac(sha512),"
3603 "rfc3686(ctr(aes))))",
3604 .cra_driver_name = "seqiv-authenc-hmac-sha512-"
3605 "rfc3686-ctr-aes-caam",
3606 .cra_blocksize = 1,
3607 },
3608 .setkey = aead_setkey,
3609 .setauthsize = aead_setauthsize,
3610 .encrypt = aead_encrypt,
3611 .decrypt = aead_decrypt,
3612 .ivsize = CTR_RFC3686_IV_SIZE,
3613 .maxauthsize = SHA512_DIGEST_SIZE,
3614 },
3615 .aead.op = {
3616 .do_one_request = aead_do_one_req,
3617 },
3618 .caam = {
3619 .class1_alg_type = OP_ALG_ALGSEL_AES |
3620 OP_ALG_AAI_CTR_MOD128,
3621 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3622 OP_ALG_AAI_HMAC_PRECOMP,
3623 .rfc3686 = true,
3624 .geniv = true,
3625 },
3626 },
3627 {
3628 .aead.base = {
3629 .base = {
3630 .cra_name = "rfc7539(chacha20,poly1305)",
3631 .cra_driver_name = "rfc7539-chacha20-poly1305-"
3632 "caam",
3633 .cra_blocksize = 1,
3634 },
3635 .setkey = chachapoly_setkey,
3636 .setauthsize = chachapoly_setauthsize,
3637 .encrypt = chachapoly_encrypt,
3638 .decrypt = chachapoly_decrypt,
3639 .ivsize = CHACHAPOLY_IV_SIZE,
3640 .maxauthsize = POLY1305_DIGEST_SIZE,
3641 },
3642 .aead.op = {
3643 .do_one_request = aead_do_one_req,
3644 },
3645 .caam = {
3646 .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
3647 OP_ALG_AAI_AEAD,
3648 .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
3649 OP_ALG_AAI_AEAD,
3650 .nodkp = true,
3651 },
3652 },
3653 {
3654 .aead.base = {
3655 .base = {
3656 .cra_name = "rfc7539esp(chacha20,poly1305)",
3657 .cra_driver_name = "rfc7539esp-chacha20-"
3658 "poly1305-caam",
3659 .cra_blocksize = 1,
3660 },
3661 .setkey = chachapoly_setkey,
3662 .setauthsize = chachapoly_setauthsize,
3663 .encrypt = chachapoly_encrypt,
3664 .decrypt = chachapoly_decrypt,
3665 .ivsize = 8,
3666 .maxauthsize = POLY1305_DIGEST_SIZE,
3667 },
3668 .aead.op = {
3669 .do_one_request = aead_do_one_req,
3670 },
3671 .caam = {
3672 .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
3673 OP_ALG_AAI_AEAD,
3674 .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
3675 OP_ALG_AAI_AEAD,
3676 .nodkp = true,
3677 },
3678 },
3679 };
3680
3681 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
3682 bool uses_dkp)
3683 {
3684 dma_addr_t dma_addr;
3685 struct caam_drv_private *priv;
3686 const size_t sh_desc_enc_offset = offsetof(struct caam_ctx,
3687 sh_desc_enc);
3688
3689 ctx->jrdev = caam_jr_alloc();
3690 if (IS_ERR(ctx->jrdev)) {
3691 pr_err("Job Ring Device allocation for transform failed\n");
3692 return PTR_ERR(ctx->jrdev);
3693 }
3694
3695 priv = dev_get_drvdata(ctx->jrdev->parent);
3696 if (priv->era >= 6 && uses_dkp)
3697 ctx->dir = DMA_BIDIRECTIONAL;
3698 else
3699 ctx->dir = DMA_TO_DEVICE;
3700
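	/*
	 * sh_desc_enc, sh_desc_dec and key are laid out back to back in
	 * struct caam_ctx, so a single mapping that ends at the
	 * sh_desc_enc_dma member covers all three; the individual DMA
	 * addresses below are then derived by member offset.
	 */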
3701 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
3702 offsetof(struct caam_ctx,
3703 sh_desc_enc_dma) -
3704 sh_desc_enc_offset,
3705 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
3706 if (dma_mapping_error(ctx->jrdev, dma_addr)) {
3707 dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
3708 caam_jr_free(ctx->jrdev);
3709 return -ENOMEM;
3710 }
3711
3712 ctx->sh_desc_enc_dma = dma_addr;
3713 ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
3714 sh_desc_dec) -
3715 sh_desc_enc_offset;
3716 ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key) -
3717 sh_desc_enc_offset;
3718
3719 /* copy descriptor header template value */
3720 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
3721 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
3722
3723 return 0;
3724 }
3725
3726 static int caam_cra_init(struct crypto_skcipher *tfm)
3727 {
3728 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
3729 struct caam_skcipher_alg *caam_alg =
3730 container_of(alg, typeof(*caam_alg), skcipher.base);
3731 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
3732 u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
3733 int ret = 0;
3734
3735 if (alg_aai == OP_ALG_AAI_XTS) {
3736 const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
3737 struct crypto_skcipher *fallback;
3738
3739 fallback = crypto_alloc_skcipher(tfm_name, 0,
3740 CRYPTO_ALG_NEED_FALLBACK);
3741 if (IS_ERR(fallback)) {
3742 pr_err("Failed to allocate %s fallback: %ld\n",
3743 tfm_name, PTR_ERR(fallback));
3744 return PTR_ERR(fallback);
3745 }
3746
3747 ctx->fallback = fallback;
3748 crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
3749 crypto_skcipher_reqsize(fallback));
3750 } else {
3751 crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx));
3752 }
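	/*
	 * When XTS may be punted to software, the request context embeds
	 * the fallback's own request, so the reqsize must account for
	 * both.
	 */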
3753
3754 ret = caam_init_common(ctx, &caam_alg->caam, false);
3755 if (ret && ctx->fallback)
3756 crypto_free_skcipher(ctx->fallback);
3757
3758 return ret;
3759 }
3760
3761 static int caam_aead_init(struct crypto_aead *tfm)
3762 {
3763 struct aead_alg *alg = crypto_aead_alg(tfm);
3764 struct caam_aead_alg *caam_alg =
3765 container_of(alg, struct caam_aead_alg, aead.base);
3766 struct caam_ctx *ctx = crypto_aead_ctx_dma(tfm);
3767
3768 crypto_aead_set_reqsize(tfm, sizeof(struct caam_aead_req_ctx));
3769
3770 return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
3771 }
3772
3773 static void caam_exit_common(struct caam_ctx *ctx)
3774 {
3775 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
3776 offsetof(struct caam_ctx, sh_desc_enc_dma) -
3777 offsetof(struct caam_ctx, sh_desc_enc),
3778 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
3779 caam_jr_free(ctx->jrdev);
3780 }
3781
3782 static void caam_cra_exit(struct crypto_skcipher *tfm)
3783 {
3784 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
3785
3786 if (ctx->fallback)
3787 crypto_free_skcipher(ctx->fallback);
3788 caam_exit_common(ctx);
3789 }
3790
3791 static void caam_aead_exit(struct crypto_aead *tfm)
3792 {
3793 caam_exit_common(crypto_aead_ctx_dma(tfm));
3794 }
3795
3796 void caam_algapi_exit(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_engine_unregister_aead(&t_alg->aead);
	}

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;

		if (t_alg->registered)
			crypto_engine_unregister_skcipher(&t_alg->skcipher);
	}
}

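/*
 * Fill in the driver-owned fields of an skcipher template: module owner,
 * priority, DMA-padded context size and the init/exit hooks. Flags are
 * OR-ed in so that flags preset in the template (such as
 * CRYPTO_ALG_NEED_FALLBACK) are preserved.
 */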
static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
	struct skcipher_alg *alg = &t_alg->skcipher.base;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
	alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
				CRYPTO_ALG_KERN_DRIVER_ONLY);

	alg->init = caam_cra_init;
	alg->exit = caam_cra_exit;
}

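/*
 * Same for AEAD templates, except that the flags are assigned outright
 * rather than OR-ed in.
 */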
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead.base;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
			      CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}

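/*
 * Detect which CHAs (crypto hardware accelerators) this part instantiates
 * and register only the algorithms the hardware can back.
 */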
int caam_algapi_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	int i = 0, err = 0;
	u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false, gcm_support;

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
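	/*
	 * Era < 10 parts report CHA presence and versions through the job
	 * ring's performance monitor registers; era 10+ parts provide
	 * dedicated per-CHA version registers instead.
	 */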
	if (priv->era < 10) {
		struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon;
		u32 cha_vid, cha_inst, aes_rn;

		cha_vid = rd_reg32(&perfmon->cha_id_ls);
		aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
		md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

		cha_inst = rd_reg32(&perfmon->cha_num_ls);
		des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
			   CHA_ID_LS_DES_SHIFT;
		aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
		md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		ccha_inst = 0;
		ptha_inst = 0;

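		/*
		 * On these parts, GCM is unsupported only on low-power AES
		 * blocks prior to revision 8.
		 */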
		aes_rn = rd_reg32(&perfmon->cha_rev_ls) & CHA_ID_LS_AES_MASK;
		gcm_support = !(aes_vid == CHA_VER_VID_AES_LP && aes_rn < 8);
	} else {
		struct version_regs __iomem *vreg = &priv->jr[0]->vreg;
		u32 aesa, mdha;

		aesa = rd_reg32(&vreg->aesa);
		mdha = rd_reg32(&vreg->mdha);

		aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;

		des_inst = rd_reg32(&vreg->desa) & CHA_VER_NUM_MASK;
		aes_inst = aesa & CHA_VER_NUM_MASK;
		md_inst = mdha & CHA_VER_NUM_MASK;
		ccha_inst = rd_reg32(&vreg->ccha) & CHA_VER_NUM_MASK;
		ptha_inst = rd_reg32(&vreg->ptha) & CHA_VER_NUM_MASK;

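		/* Era 10+ parts advertise GCM support via an AESA misc bit */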
		gcm_support = aesa & CHA_VER_MISC_AES_GCM;
	}

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;
		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES modes not available
		 * on LP devices.
		 */
		if (aes_vid == CHA_VER_VID_AES_LP &&
		    (t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) ==
		    OP_ALG_AAI_XTS)
			continue;

		caam_skcipher_alg_init(t_alg);

		err = crypto_engine_register_skcipher(&t_alg->skcipher);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->skcipher.base.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/* Skip CHACHA20 algorithms if not supported by device */
		if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && !ccha_inst)
			continue;

		/* Skip POLY1305 algorithms if not supported by device */
		if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
			continue;

		/* Skip GCM algorithms if not supported by device */
		if (c1_alg_sel == OP_ALG_ALGSEL_AES &&
		    alg_aai == OP_ALG_AAI_GCM && !gcm_support)
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (is_mdha(c2_alg_sel) &&
		    (!md_inst || t_alg->aead.base.maxauthsize > md_limit))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_engine_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}