// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019, 2023 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |         |--->| (operation) |
 *       .              |         |    | (load ctx2) |
 *       .              |         |    ---------------
 * ---------------      |         |
 * | JobDesc #3  |------|         |
 * | *(packet 3) |                |
 * ---------------                |
 *       .                        |
 *       .                        |
 * ---------------                |
 * | JobDesc #4  |----------------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
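/*
 * An illustrative sketch (not an actual code path in this file): with the
 * desc_constr.h helpers used throughout this driver, a job descriptor of
 * the shape above is assembled roughly as
 *
 *	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, dst_len, 0);
 *	append_seq_in_ptr(desc, src_dma, src_len, 0);
 *
 * where desc, sh_desc_dma, dst_dma and src_dma are placeholder names;
 * compare ahash_edesc_alloc() and the ahash_*_ctx() functions below.
 */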

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"
#include <crypto/internal/engine.h>
#include <crypto/internal/hash.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
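/*
 * Example: for SHA-256 the running context is HASH_MSG_LEN +
 * SHA256_DIGEST_SIZE = 8 + 32 = 40 bytes; see the runninglen[] table in
 * caam_hash_cra_init() below.
 */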

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	enum dma_data_direction dir;
	enum dma_data_direction key_dir;
	struct device *jrdev;
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	int ctx_dma_len;
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen;
	int next_buflen;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req) ____cacheline_aligned;
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	struct ahash_edesc *edesc;
	void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err,
			      void *context);
};
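/*
 * The update/final/finup pointers above form a small per-request state
 * machine: ahash_init() points them at the *_first/*_no_ctx variants, and
 * once a running context lives in caam_ctx, ahash_update_first() or
 * ahash_update_no_ctx() switches them over to the *_ctx variants.
 */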

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};

static inline bool is_cmac_aes(u32 algtype)
{
	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
}

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = state->buflen;

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
			     ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}

static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* key is immediate data for INIT and INITFINAL states */
	ctx->adata.key_virt = ctx->key;

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
	return 0;
}

static int acmac_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}

/* Digest the key if it is too large to fit inside a block (keylen > blocksize) */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
			   u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t key_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	init_job_desc(desc, 0);

	key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(jrdev, key_dma)) {
		dev_err(jrdev, "unable to map key memory\n");
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, key_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, key_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (ret == -EINPROGRESS) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;

		print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, key,
				     digestsize, 1);
	}
	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

	dev_dbg(jrdev, "keylen %d\n", keylen);

	if (keylen > blocksize) {
		unsigned int aligned_len =
			ALIGN(keylen, dma_get_cache_alignment());

		if (aligned_len < keylen)
			return -EOVERFLOW;

		hashed_key = kmalloc(aligned_len, GFP_KERNEL);
		if (!hashed_key)
			return -ENOMEM;
		memcpy(hashed_key, key, keylen);
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);

		/*
		 * In case |user key| > |derived key|, using DKP<imm,imm>
		 * would result in invalid opcodes (last bytes of user key) in
		 * the resulting descriptor. Use DKP<ptr,imm> instead => both
		 * virtual and dma key addresses are needed.
		 */
		if (keylen > ctx->adata.keylen_pad)
			dma_sync_single_for_device(ctx->jrdev,
						   ctx->adata.key_dma,
						   ctx->adata.keylen_pad,
						   DMA_TO_DEVICE);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
bad_free_key:
	kfree(hashed_key);
	return -EINVAL;
}

static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *jrdev = ctx->jrdev;

	if (keylen != AES_KEYSIZE_128)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
				   DMA_TO_DEVICE);
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);

	return axcbc_set_sh_desc(ahash);
}

static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	/* key is immediate data for all cmac shared descriptors */
	ctx->adata.key_virt = key;
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	return acmac_set_sh_desc(ahash);
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @bklog: stored to determine if the request needs backlog
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	bool bklog;
	u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[];
};
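/*
 * Worked example with hypothetical numbers: a request needing four S/G
 * entries allocates sizeof(struct ahash_edesc) +
 * pad_sg_nents(4) * sizeof(struct sec4_sg_entry), so that sec4_sg[] lands
 * directly after the job descriptor (see ahash_edesc_alloc() below).
 */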

static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, state->buflen,
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len, u32 flag)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
		state->ctx_dma = 0;
	}
	ahash_unmap(dev, edesc, req, dst_len);
}

static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
				  void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	has_bklog = edesc->bklog;

	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
	memcpy(req->result, state->caam_ctx, digestsize);
	kfree(edesc);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		ahash_request_complete(req, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
				     void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	has_bklog = edesc->bklog;
	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
	kfree(edesc);

	scatterwalk_map_and_copy(state->buf, req->src,
				 req->nbytes - state->next_buflen,
				 state->next_buflen, 0);
	state->buflen = state->next_buflen;

	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
			     state->buflen, 1);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     digestsize, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		ahash_request_complete(req, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	struct ahash_edesc *edesc;

	sg_num = pad_sg_nents(sg_num);
	edesc = kzalloc_flex(*edesc, sec4_sg, sg_num, flags);
	if (!edesc)
		return NULL;

	state->edesc = edesc;

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}

static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) *
				      pad_sg_nents(first_sg + nents);

		sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}

static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(crypto_ahash_reqtfm(req));
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc = state->edesc->hw_desc;
	int ret;

	state->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);

	if (ret == -ENOSPC && engine->retry_support)
		return ret;

	if (ret != -EINPROGRESS) {
		ahash_unmap(jrdev, state->edesc, req, 0);
		kfree(state->edesc);
	} else {
		ret = 0;
	}

	return ret;
}

static int ahash_enqueue_req(struct device *jrdev,
			     void (*cbk)(struct device *jrdev, u32 *desc,
					 u32 err, void *context),
			     struct ahash_request *req,
			     int dst_len, enum dma_data_direction dir)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	state->ahash_op_done = cbk;

	/*
	 * Only backlog requests are sent to crypto-engine since the others
	 * can be handled by CAAM, if free, especially since JR has up to 1024
	 * entries (more than the 10 entries from crypto-engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_hash_request_to_engine(jrpriv->engine,
							     req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
		kfree(edesc);
	}

	return ret;
}
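/*
 * Dispatch summary for ahash_enqueue_req(): requests flagged
 * CRYPTO_TFM_REQ_MAY_BACKLOG travel through crypto-engine and reach the
 * job ring via ahash_do_one_req(); all other requests are enqueued on the
 * job ring directly.
 */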

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}
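	/*
	 * Concrete example of the rule above: with blocksize 16 and
	 * in_len 32, to_hash drops from 32 to 16 and the final 16 bytes
	 * stay in the internal buffer, so final/finup still has a full
	 * block to process.
	 */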

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
					  ctx->sh_desc_update_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents)
			sg_to_sec4_sg_last(req->src, src_len,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
		else
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_bi, req,
					ctx->ctx_len, DMA_BIDIRECTIONAL);
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
				  ctx->sh_desc_fin_dma);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);
	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);
unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);
unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req, digestsize,
				 DMA_FROM_DEVICE);
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int buflen = state->buflen;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	if (buflen) {
		state->buf_dma = dma_map_single(jrdev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, state->buf_dma)) {
			dev_err(jrdev, "unable to map src\n");
			goto unmap;
		}

		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req,
				 digestsize, DMA_FROM_DEVICE);
unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		pad_nents = pad_sg_nents(1 + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, pad_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
					ctx->ctx_len, DMA_TO_DEVICE);
		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
			return ret;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req,
				 digestsize, DMA_FROM_DEVICE);
unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int to_hash;
	int blocksize = crypto_ahash_blocksize(ahash);
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (blocksize - 1);
	to_hash = req->nbytes - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
					ctx->ctx_len, DMA_TO_DEVICE);
		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
			return ret;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(buf, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->ctx_dma_len = 0;
	state->buf_dma = 0;
	state->buflen = 0;
	state->next_buflen = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_export_state *export = out;
	u8 *buf = state->buf;
	int len = state->buflen;

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	}, {
		.hmac_name = "xcbc(aes)",
		.hmac_driver_name = "xcbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = axcbc_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
	}, {
		.hmac_name = "cmac(aes)",
		.hmac_driver_name = "cmac-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = acmac_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
	},
};

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	bool is_hmac;
	struct ahash_engine_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg.base);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx,
						      sh_desc_update);
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);

	if (is_xcbc_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		/* CAAM derives and writes the xcbc subkeys back into ctx->key */
		ctx->key_dir = DMA_BIDIRECTIONAL;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 48;
	} else if (is_cmac_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_NONE;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 32;
	} else {
		if (priv->era >= 6) {
			/* era >= 6 supports DKP: CAAM writes the descriptor back */
			ctx->dir = DMA_BIDIRECTIONAL;
			ctx->key_dir = caam_hash->is_hmac ? DMA_TO_DEVICE : DMA_NONE;
		} else {
			ctx->dir = DMA_TO_DEVICE;
			ctx->key_dir = DMA_NONE;
		}
		ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
		ctx->ctx_len = runninglen[(ctx->adata.algtype &
					   OP_ALG_ALGSEL_SUBMASK) >>
					  OP_ALG_ALGSEL_SHIFT];
	}

	if (ctx->key_dir != DMA_NONE) {
		ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
							  ARRAY_SIZE(ctx->key),
							  ctx->key_dir,
							  DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
			dev_err(ctx->jrdev, "unable to map key\n");
			caam_jr_free(ctx->jrdev);
			return -ENOMEM;
		}
	}

	/* map all four shared descriptors with a single contiguous mapping */
	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx, key) -
					sh_desc_update_offset,
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");

		if (ctx->key_dir != DMA_NONE)
			dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
					       ARRAY_SIZE(ctx->key),
					       ctx->key_dir,
					       DMA_ATTR_SKIP_CPU_SYNC);

		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first) -
					sh_desc_update_offset;
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin) -
					sh_desc_update_offset;
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest) -
					sh_desc_update_offset;

	crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));

	/*
	 * For keyed hash algorithms shared descriptors
	 * will be created later in setkey() callback
	 */
	return caam_hash->is_hmac ? 0 : ahash_set_sh_desc(ahash);
}

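/* Undo caam_hash_cra_init(): unmap descriptors/key, release the job ring */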
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx, key) -
			       offsetof(struct caam_hash_ctx, sh_desc_update),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ctx->key_dir != DMA_NONE)
		dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
				       ARRAY_SIZE(ctx->key), ctx->key_dir,
				       DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

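/* Unregister and free every algorithm added to hash_list at init time */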
void caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	/* nothing to do if caam_algapi_hash_init() never initialized the list */
	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_engine_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

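/*
 * Instantiate one algorithm from a driver_hash template, either as the
 * keyed variant (hmac_name/hmac_driver_name) or, for MDHA hashes, as the
 * unkeyed variant (name/driver_name) with no setkey operation.
 */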
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->ahash_alg.base = template->template_ahash;
	halg = &t_alg->ahash_alg.base;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
		t_alg->is_hmac = true;
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		/* unkeyed hashes must not expose a setkey operation */
		halg->setkey = NULL;
		t_alg->is_hmac = false;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding();
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;

	t_alg->alg_type = template->alg_type;
	t_alg->ahash_alg.op.do_one_request = ahash_do_one_req;

	return t_alg;
}

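/*
 * Probe the MD (hashing) block and register each supported template:
 * the hmac version always, plus the unkeyed version for non-AES MACs.
 * LP256-era MD blocks are limited to digests up to SHA-256 size.
 */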
int caam_algapi_hash_init(struct device *ctrldev)
{
	int i = 0, err = 0;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 md_inst, md_vid;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	if (priv->era < 10) {
		struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon;

		md_vid = (rd_reg32(&perfmon->cha_id_ls) &
			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		md_inst = (rd_reg32(&perfmon->cha_num_ls) &
			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 mdha = rd_reg32(&priv->jr[0]->vreg.mdha);

		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!md_inst)
		return 0;

	/* Limit digest size based on LP256 */
	if (md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (is_mdha(alg->alg_type) &&
		    alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
			continue;
		}

		err = crypto_engine_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.base.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* AES-based MACs (xcbc, cmac) have no unkeyed variant */
		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
			continue;

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_engine_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.base.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}