// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2015-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2019 NXP
 */

#include "compat.h"
#include "regs.h"
#include "caamalg_qi2.h"
#include "dpseci_cmd.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "sg_sw_qm2.h"
#include "key_gen.h"
#include "caamalg_desc.h"
#include "caamhash_desc.h"
#include "dpseci-debugfs.h"
#include <linux/dma-mapping.h>
#include <linux/fsl/mc.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/string_choices.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
#include <crypto/xts.h>
#include <linux/unaligned.h>

#define CAAM_CRA_PRIORITY	2000

/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce and max split key size */
#define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
				 SHA512_DIGEST_SIZE * 2)

/*
 * This is a cache of buffers, from which the users of the CAAM QI driver
 * can allocate short buffers. It is faster than kmalloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *	 being processed. This could be added by the dpaa2-eth driver, but it
 *	 would pose a problem for userspace application processing, which
 *	 cannot know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe; no additional locking is needed here.
 */
static struct kmem_cache *qi_cache;

struct caam_alg_entry {
	struct device *dev;
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
	bool nodkp;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};

/**
 * struct caam_ctx - per-session context
 * @flc: Flow Contexts array
 * @key: [authentication key], encryption key
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @key_dma: I/O virtual address of the key
 * @dir: DMA direction for mapping key and Flow Contexts
 * @dev: dpseci device
 * @adata: authentication algorithm details
 * @cdata: encryption algorithm details
 * @authsize: authentication tag (a.k.a. ICV / MAC) size
 * @xts_key_fallback: true if fallback tfm needs to be used due
 *		      to unsupported xts key lengths
 * @fallback: xts fallback tfm
 */
struct caam_ctx {
	struct caam_flc flc[NUM_OP];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t flc_dma[NUM_OP];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *dev;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	bool xts_key_fallback;
	struct crypto_skcipher *fallback;
};

static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
				     dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
				   iova_addr;

	return phys_to_virt(phys_addr);
}

/*
 * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
 *
 * Allocate data on the hotpath. Instead of using kzalloc, one can use the
 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
 * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
 * hosting 16 SG entries.
 *
 * @flags - flags that would be used for the equivalent kmalloc(..) call
 *
 * Returns a pointer to a retrieved buffer on success or NULL on failure.
 */
static inline void *qi_cache_zalloc(gfp_t flags)
{
	return kmem_cache_zalloc(qi_cache, flags);
}

/*
 * qi_cache_free - Free buffers allocated from the CAAM-QI cache
 *
 * @obj - buffer previously allocated by qi_cache_zalloc
 *
 * No checking is done; the call is a passthrough to kmem_cache_free(...)
 */
static inline void qi_cache_free(void *obj)
{
	kmem_cache_free(qi_cache, obj);
}

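/* Retrieve the driver's per-request context from a generic async request */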
static struct caam_request *to_caam_req(struct crypto_async_request *areq)
{
	switch (crypto_tfm_alg_type(areq->tfm)) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return skcipher_request_ctx_dma(skcipher_request_cast(areq));
	case CRYPTO_ALG_TYPE_AEAD:
		return aead_request_ctx_dma(
			container_of(areq, struct aead_request, base));
	case CRYPTO_ALG_TYPE_AHASH:
		return ahash_request_ctx_dma(ahash_request_cast(areq));
	default:
		return ERR_PTR(-EINVAL);
	}
}

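/*
 * caam_unmap - DMA-unmap everything that was mapped for a request: the
 * source/destination S/G lists, the IV buffer and the HW S/G table.
 */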
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);

	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	u32 *desc;
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
	 * in invalid opcodes (last bytes of user key) in the resulting
	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
	 * addresses are needed.
	 */
	ctx->adata.key_virt = ctx->key;
	ctx->adata.key_dma = ctx->key_dma;

	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
						 DESC_QI_AEAD_ENC_LEN) +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

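	/*
	 * desc_inline_query() sets bit i of inl_mask when data_len[i] fits
	 * inline in the shared descriptor: bit 0 - split authentication key,
	 * bit 1 - encryption key (see the assignments below).
	 */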
	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;

	if (alg->caam.geniv)
		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
					  ivsize, ctx->authsize, is_rfc3686,
					  nonce, ctx1_iv_off, true,
					  priv->sec_attr.era);
	else
		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
				       ivsize, ctx->authsize, is_rfc3686, nonce,
				       ctx1_iv_off, true, priv->sec_attr.era);

	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->adata.keylen = keys.authkeylen;
	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
					      OP_ALG_ALGSEL_MASK);

	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

	ctx->cdata.keylen = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_set_sh_desc(aead);
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto out;

	err = -EINVAL;
	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
		goto out;

	err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
	      aead_setkey(aead, key, keylen);

out:
	memzero_explicit(&keys, sizeof(keys));
	return err;
}

static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_request *req_ctx = aead_request_ctx_dma(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	int src_len, dst_len = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct dpaa2_sg_entry *sg_table;

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (unlikely(req->dst != req->src)) {
		src_len = req->assoclen + req->cryptlen;
		dst_len = src_len + (encrypt ? authsize : (-authsize));

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, dst_len);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				dst_len);
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
						      DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(dev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(dev, "unable to map destination\n");
				dma_unmap_sg(dev, req->src, src_nents,
					     DMA_TO_DEVICE);
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	} else {
		src_len = req->assoclen + req->cryptlen +
			  (encrypt ? authsize : 0);

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries. Logic:
	 * if (src != dst && output S/G)
	 *	pad output S/G, if needed
	 * else if (src == dst && S/G)
	 *	overlapping S/Gs; pad one of them
	 * else if (input S/G) ...
	 *	pad input S/G, if needed
	 */
	qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
	if (mapped_dst_nents > 1)
		qm_sg_nents += pad_sg_nents(mapped_dst_nents);
	else if ((req->src == req->dst) && (mapped_src_nents > 1))
		qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
				  1 + !!ivsize +
				  pad_sg_nents(mapped_src_nents));
	else
		qm_sg_nents = pad_sg_nents(qm_sg_nents);

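	/*
	 * The IV, if any, lives right after the padded S/G table, in the
	 * same qi_cache buffer - hence the single-allocation size check below.
	 */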
	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_nents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_nents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, iv_dma)) {
			dev_err(dev, "unable to map IV\n");
			caam_unmap(dev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, DMA_NONE, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;

	if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	    OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
		/*
		 * The associated data comes already with the IV but we need
		 * to skip it when we authenticate or encrypt...
		 */
		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
	else
		edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
		dev_err(dev, "unable to map assoclen\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);

	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

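	/*
	 * in_len accounts for the 4-byte assoclen entry at the head of the
	 * input S/G table, plus the IV and the actual data.
	 */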
	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
	dpaa2_fl_set_len(in_fle, in_len);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1) {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
		} else {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
					  (1 + !!ivsize) * sizeof(*sg_table));
		}
	} else if (!mapped_dst_nents) {
		/*
		 * crypto engine requires the output entry to be present when
		 * "frame list" FD is used.
		 * Since engine does not support FMT=2'b11 (unused entry type),
		 * leaving out_fle zeroized is the best option.
		 */
		goto skip_out_fle;
	} else if (mapped_dst_nents == 1) {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
	} else {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
				  sizeof(*sg_table));
	}

	dpaa2_fl_set_len(out_fle, out_len);

skip_out_fle:
	return edesc;
}

static int chachapoly_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	u32 *desc;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, true, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, false, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int chachapoly_setauthsize(struct crypto_aead *aead,
				  unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);

	if (authsize != POLY1305_DIGEST_SIZE)
		return -EINVAL;

	ctx->authsize = authsize;
	return chachapoly_set_sh_desc(aead);
}

static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
			     unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;

	if (keylen != CHACHA_KEY_SIZE + saltlen)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->cdata.key_virt = ctx->key;
	ctx->cdata.keylen = keylen - saltlen;

	return chachapoly_set_sh_desc(aead);
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
	int err;

	err = crypto_gcm_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen);
	if (ret)
		return ret;
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	return gcm_set_sh_desc(aead);
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
	int err;

	err = crypto_rfc4106_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4106_set_sh_desc(aead);
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);

	if (authsize != 16)
		return -EINVAL;

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4543_set_sh_desc(aead);
}

static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen, const u32 ctx1_iv_off)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher),
			     struct caam_skcipher_alg, skcipher);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 *desc;
	const bool is_rfc3686 = alg->caam.rfc3686;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
				   const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
	keylen -= CTR_RFC3686_NONCE_SIZE;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	ctx1_iv_off = 16;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
				    const u8 *key, unsigned int keylen)
{
	if (keylen != CHACHA_KEY_SIZE)
		return -EINVAL;

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	u32 *desc;
	int err;

	err = xts_verify_key(skcipher, key, keylen);
	if (err) {
		dev_dbg(dev, "key size mismatch\n");
		return err;
	}

	if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
		ctx->xts_key_fallback = true;

	if (priv->sec_attr.era <= 8 || ctx->xts_key_fallback) {
		err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
		if (err)
			return err;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts_skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* xts_skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_request *req_ctx = skcipher_request_ctx_dma(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct dpaa2_sg_entry *sg_table;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->dst != req->src)) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(dev, "unable to map destination\n");
			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	/*
	 * Input, output HW S/G tables: [IV, src][dst, IV]
	 * IV entries point to the same buffer
	 * If src == dst, S/G entries are reused (S/G tables overlap)
	 *
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries.
	 */
	if (req->src != req->dst)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
	else
		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);

	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	memcpy(iv, req->iv, ivsize);

	iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, iv_dma)) {
		dev_err(dev, "unable to map IV\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);

	if (req->src != req->dst)
		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);

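	/*
	 * Trailing IV entry of the output S/G table: HW writes the output IV
	 * (copied back to req->iv in the completion callbacks) here.
	 */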
	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
			 ivsize, 0);

	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
	dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);

	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);

	dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);

	if (req->src == req->dst)
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
				  sizeof(*sg_table));
	else
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
				  sizeof(*sg_table));

	return edesc;
}

static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
}

static void aead_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}

static void aead_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct caam_request *caam_req = aead_request_ctx_dma(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = aead_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct caam_request *caam_req = aead_request_ctx_dma(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = aead_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
}

static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}

static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}

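/*
 * Returns true if the second half of the 16-byte XTS IV, i.e. the upper
 * 64 bits of the sector index, is non-zero.
 */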
static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

	return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
}

static int skcipher_encrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx_dma(req);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	int ret;

	/*
	 * XTS is expected to return an error even for input length = 0
	 * Note that the case input length < block size will be caught during
	 * HW offloading and return an error.
	 */
	if (!req->cryptlen && !ctx->fallback)
		return 0;

	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
			      ctx->xts_key_fallback)) {
		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&caam_req->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		return crypto_skcipher_encrypt(&caam_req->fallback_req);
	}

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = skcipher_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int skcipher_decrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx_dma(req);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	int ret;

	/*
	 * XTS is expected to return an error even for input length = 0
	 * Note that the case input length < block size will be caught during
	 * HW offloading and return an error.
	 */
	if (!req->cryptlen && !ctx->fallback)
		return 0;

	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
			      ctx->xts_key_fallback)) {
		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&caam_req->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		return crypto_skcipher_decrypt(&caam_req->fallback_req);
	}

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = skcipher_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			 bool uses_dkp)
{
	dma_addr_t dma_addr;
	int i;

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	ctx->dev = caam->dev;
	ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
					offsetof(struct caam_ctx, flc_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma_addr)) {
		dev_err(ctx->dev, "unable to map key, shared descriptors\n");
		return -ENOMEM;
	}

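	/*
	 * flc[] and key were mapped above as one contiguous region; derive
	 * the per-object DMA addresses from the region's base address.
	 */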
1599 for (i = 0; i < NUM_OP; i++)
1600 ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
1601 ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
1602
1603 return 0;
1604 }
1605
caam_cra_init_skcipher(struct crypto_skcipher * tfm)1606 static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
1607 {
1608 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1609 struct caam_skcipher_alg *caam_alg =
1610 container_of(alg, typeof(*caam_alg), skcipher);
1611 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
1612 u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
1613 int ret = 0;
1614
1615 if (alg_aai == OP_ALG_AAI_XTS) {
1616 const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
1617 struct crypto_skcipher *fallback;
1618
1619 fallback = crypto_alloc_skcipher(tfm_name, 0,
1620 CRYPTO_ALG_NEED_FALLBACK);
1621 if (IS_ERR(fallback)) {
1622 dev_err(caam_alg->caam.dev,
1623 "Failed to allocate %s fallback: %ld\n",
1624 tfm_name, PTR_ERR(fallback));
1625 return PTR_ERR(fallback);
1626 }
1627
1628 ctx->fallback = fallback;
1629 crypto_skcipher_set_reqsize_dma(
1630 tfm, sizeof(struct caam_request) +
1631 crypto_skcipher_reqsize(fallback));
1632 } else {
1633 crypto_skcipher_set_reqsize_dma(tfm,
1634 sizeof(struct caam_request));
1635 }
1636
1637 ret = caam_cra_init(ctx, &caam_alg->caam, false);
1638 if (ret && ctx->fallback)
1639 crypto_free_skcipher(ctx->fallback);
1640
1641 return ret;
1642 }
1643
caam_cra_init_aead(struct crypto_aead * tfm)1644 static int caam_cra_init_aead(struct crypto_aead *tfm)
1645 {
1646 struct aead_alg *alg = crypto_aead_alg(tfm);
1647 struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
1648 aead);
1649
1650 crypto_aead_set_reqsize_dma(tfm, sizeof(struct caam_request));
1651 return caam_cra_init(crypto_aead_ctx_dma(tfm), &caam_alg->caam,
1652 !caam_alg->caam.nodkp);
1653 }
1654
caam_exit_common(struct caam_ctx * ctx)1655 static void caam_exit_common(struct caam_ctx *ctx)
1656 {
1657 dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
1658 offsetof(struct caam_ctx, flc_dma), ctx->dir,
1659 DMA_ATTR_SKIP_CPU_SYNC);
1660 }
1661
caam_cra_exit(struct crypto_skcipher * tfm)1662 static void caam_cra_exit(struct crypto_skcipher *tfm)
1663 {
1664 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
1665
1666 if (ctx->fallback)
1667 crypto_free_skcipher(ctx->fallback);
1668 caam_exit_common(ctx);
1669 }
1670
caam_cra_exit_aead(struct crypto_aead * tfm)1671 static void caam_cra_exit_aead(struct crypto_aead *tfm)
1672 {
1673 caam_exit_common(crypto_aead_ctx_dma(tfm));
1674 }
1675
1676 static struct caam_skcipher_alg driver_algs[] = {
1677 {
1678 .skcipher = {
1679 .base = {
1680 .cra_name = "cbc(aes)",
1681 .cra_driver_name = "cbc-aes-caam-qi2",
1682 .cra_blocksize = AES_BLOCK_SIZE,
1683 },
1684 .setkey = aes_skcipher_setkey,
1685 .encrypt = skcipher_encrypt,
1686 .decrypt = skcipher_decrypt,
1687 .min_keysize = AES_MIN_KEY_SIZE,
1688 .max_keysize = AES_MAX_KEY_SIZE,
1689 .ivsize = AES_BLOCK_SIZE,
1690 },
1691 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1692 },
1693 {
1694 .skcipher = {
1695 .base = {
1696 .cra_name = "cbc(des3_ede)",
1697 .cra_driver_name = "cbc-3des-caam-qi2",
1698 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1699 },
1700 .setkey = des3_skcipher_setkey,
1701 .encrypt = skcipher_encrypt,
1702 .decrypt = skcipher_decrypt,
1703 .min_keysize = DES3_EDE_KEY_SIZE,
1704 .max_keysize = DES3_EDE_KEY_SIZE,
1705 .ivsize = DES3_EDE_BLOCK_SIZE,
1706 },
1707 .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1708 },
1709 {
1710 .skcipher = {
1711 .base = {
1712 .cra_name = "cbc(des)",
1713 .cra_driver_name = "cbc-des-caam-qi2",
1714 .cra_blocksize = DES_BLOCK_SIZE,
1715 },
1716 .setkey = des_skcipher_setkey,
1717 .encrypt = skcipher_encrypt,
1718 .decrypt = skcipher_decrypt,
1719 .min_keysize = DES_KEY_SIZE,
1720 .max_keysize = DES_KEY_SIZE,
1721 .ivsize = DES_BLOCK_SIZE,
1722 },
1723 .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1724 },
1725 {
1726 .skcipher = {
1727 .base = {
1728 .cra_name = "ctr(aes)",
1729 .cra_driver_name = "ctr-aes-caam-qi2",
1730 .cra_blocksize = 1,
1731 },
1732 .setkey = ctr_skcipher_setkey,
1733 .encrypt = skcipher_encrypt,
1734 .decrypt = skcipher_decrypt,
1735 .min_keysize = AES_MIN_KEY_SIZE,
1736 .max_keysize = AES_MAX_KEY_SIZE,
1737 .ivsize = AES_BLOCK_SIZE,
1738 .chunksize = AES_BLOCK_SIZE,
1739 },
1740 .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1741 OP_ALG_AAI_CTR_MOD128,
1742 },
1743 {
1744 .skcipher = {
1745 .base = {
1746 .cra_name = "rfc3686(ctr(aes))",
1747 .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
1748 .cra_blocksize = 1,
1749 },
1750 .setkey = rfc3686_skcipher_setkey,
1751 .encrypt = skcipher_encrypt,
1752 .decrypt = skcipher_decrypt,
1753 .min_keysize = AES_MIN_KEY_SIZE +
1754 CTR_RFC3686_NONCE_SIZE,
1755 .max_keysize = AES_MAX_KEY_SIZE +
1756 CTR_RFC3686_NONCE_SIZE,
1757 .ivsize = CTR_RFC3686_IV_SIZE,
1758 .chunksize = AES_BLOCK_SIZE,
1759 },
1760 .caam = {
1761 .class1_alg_type = OP_ALG_ALGSEL_AES |
1762 OP_ALG_AAI_CTR_MOD128,
1763 .rfc3686 = true,
1764 },
1765 },
1766 {
1767 .skcipher = {
1768 .base = {
1769 .cra_name = "xts(aes)",
1770 .cra_driver_name = "xts-aes-caam-qi2",
1771 .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
1772 .cra_blocksize = AES_BLOCK_SIZE,
1773 },
1774 .setkey = xts_skcipher_setkey,
1775 .encrypt = skcipher_encrypt,
1776 .decrypt = skcipher_decrypt,
1777 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1778 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1779 .ivsize = AES_BLOCK_SIZE,
1780 },
1781 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1782 },
1783 {
1784 .skcipher = {
1785 .base = {
1786 .cra_name = "chacha20",
1787 .cra_driver_name = "chacha20-caam-qi2",
1788 .cra_blocksize = 1,
1789 },
1790 .setkey = chacha20_skcipher_setkey,
1791 .encrypt = skcipher_encrypt,
1792 .decrypt = skcipher_decrypt,
1793 .min_keysize = CHACHA_KEY_SIZE,
1794 .max_keysize = CHACHA_KEY_SIZE,
1795 .ivsize = CHACHA_IV_SIZE,
1796 },
1797 .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
1798 },
1799 };
1800
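/*
 * AEAD algorithm templates, completed at registration time by
 * caam_aead_alg_init().
 */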
1801 static struct caam_aead_alg driver_aeads[] = {
1802 {
1803 .aead = {
1804 .base = {
1805 .cra_name = "rfc4106(gcm(aes))",
1806 .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1807 .cra_blocksize = 1,
1808 },
1809 .setkey = rfc4106_setkey,
1810 .setauthsize = rfc4106_setauthsize,
1811 .encrypt = ipsec_gcm_encrypt,
1812 .decrypt = ipsec_gcm_decrypt,
1813 .ivsize = 8,
1814 .maxauthsize = AES_BLOCK_SIZE,
1815 },
1816 .caam = {
1817 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1818 .nodkp = true,
1819 },
1820 },
1821 {
1822 .aead = {
1823 .base = {
1824 .cra_name = "rfc4543(gcm(aes))",
1825 .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1826 .cra_blocksize = 1,
1827 },
1828 .setkey = rfc4543_setkey,
1829 .setauthsize = rfc4543_setauthsize,
1830 .encrypt = ipsec_gcm_encrypt,
1831 .decrypt = ipsec_gcm_decrypt,
1832 .ivsize = 8,
1833 .maxauthsize = AES_BLOCK_SIZE,
1834 },
1835 .caam = {
1836 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1837 .nodkp = true,
1838 },
1839 },
1840 /* Galois Counter Mode */
1841 {
1842 .aead = {
1843 .base = {
1844 .cra_name = "gcm(aes)",
1845 .cra_driver_name = "gcm-aes-caam-qi2",
1846 .cra_blocksize = 1,
1847 },
1848 .setkey = gcm_setkey,
1849 .setauthsize = gcm_setauthsize,
1850 .encrypt = aead_encrypt,
1851 .decrypt = aead_decrypt,
1852 .ivsize = 12,
1853 .maxauthsize = AES_BLOCK_SIZE,
1854 },
1855 .caam = {
1856 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1857 .nodkp = true,
1858 }
1859 },
1860 /* single-pass ipsec_esp descriptor */
1861 {
1862 .aead = {
1863 .base = {
1864 .cra_name = "authenc(hmac(md5),cbc(aes))",
1865 .cra_driver_name = "authenc-hmac-md5-"
1866 "cbc-aes-caam-qi2",
1867 .cra_blocksize = AES_BLOCK_SIZE,
1868 },
1869 .setkey = aead_setkey,
1870 .setauthsize = aead_setauthsize,
1871 .encrypt = aead_encrypt,
1872 .decrypt = aead_decrypt,
1873 .ivsize = AES_BLOCK_SIZE,
1874 .maxauthsize = MD5_DIGEST_SIZE,
1875 },
1876 .caam = {
1877 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1878 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1879 OP_ALG_AAI_HMAC_PRECOMP,
1880 }
1881 },
1882 {
1883 .aead = {
1884 .base = {
1885 .cra_name = "echainiv(authenc(hmac(md5),"
1886 "cbc(aes)))",
1887 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1888 "cbc-aes-caam-qi2",
1889 .cra_blocksize = AES_BLOCK_SIZE,
1890 },
1891 .setkey = aead_setkey,
1892 .setauthsize = aead_setauthsize,
1893 .encrypt = aead_encrypt,
1894 .decrypt = aead_decrypt,
1895 .ivsize = AES_BLOCK_SIZE,
1896 .maxauthsize = MD5_DIGEST_SIZE,
1897 },
1898 .caam = {
1899 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1900 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1901 OP_ALG_AAI_HMAC_PRECOMP,
1902 .geniv = true,
1903 }
1904 },
1905 {
1906 .aead = {
1907 .base = {
1908 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1909 .cra_driver_name = "authenc-hmac-sha1-"
1910 "cbc-aes-caam-qi2",
1911 .cra_blocksize = AES_BLOCK_SIZE,
1912 },
1913 .setkey = aead_setkey,
1914 .setauthsize = aead_setauthsize,
1915 .encrypt = aead_encrypt,
1916 .decrypt = aead_decrypt,
1917 .ivsize = AES_BLOCK_SIZE,
1918 .maxauthsize = SHA1_DIGEST_SIZE,
1919 },
1920 .caam = {
1921 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1922 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1923 OP_ALG_AAI_HMAC_PRECOMP,
1924 }
1925 },
1926 {
1927 .aead = {
1928 .base = {
1929 .cra_name = "echainiv(authenc(hmac(sha1),"
1930 "cbc(aes)))",
1931 .cra_driver_name = "echainiv-authenc-"
1932 "hmac-sha1-cbc-aes-caam-qi2",
1933 .cra_blocksize = AES_BLOCK_SIZE,
1934 },
1935 .setkey = aead_setkey,
1936 .setauthsize = aead_setauthsize,
1937 .encrypt = aead_encrypt,
1938 .decrypt = aead_decrypt,
1939 .ivsize = AES_BLOCK_SIZE,
1940 .maxauthsize = SHA1_DIGEST_SIZE,
1941 },
1942 .caam = {
1943 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1944 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1945 OP_ALG_AAI_HMAC_PRECOMP,
1946 .geniv = true,
1947 },
1948 },
1949 {
1950 .aead = {
1951 .base = {
1952 .cra_name = "authenc(hmac(sha224),cbc(aes))",
1953 .cra_driver_name = "authenc-hmac-sha224-"
1954 "cbc-aes-caam-qi2",
1955 .cra_blocksize = AES_BLOCK_SIZE,
1956 },
1957 .setkey = aead_setkey,
1958 .setauthsize = aead_setauthsize,
1959 .encrypt = aead_encrypt,
1960 .decrypt = aead_decrypt,
1961 .ivsize = AES_BLOCK_SIZE,
1962 .maxauthsize = SHA224_DIGEST_SIZE,
1963 },
1964 .caam = {
1965 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1966 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1967 OP_ALG_AAI_HMAC_PRECOMP,
1968 }
1969 },
1970 {
1971 .aead = {
1972 .base = {
1973 .cra_name = "echainiv(authenc(hmac(sha224),"
1974 "cbc(aes)))",
1975 .cra_driver_name = "echainiv-authenc-"
1976 "hmac-sha224-cbc-aes-caam-qi2",
1977 .cra_blocksize = AES_BLOCK_SIZE,
1978 },
1979 .setkey = aead_setkey,
1980 .setauthsize = aead_setauthsize,
1981 .encrypt = aead_encrypt,
1982 .decrypt = aead_decrypt,
1983 .ivsize = AES_BLOCK_SIZE,
1984 .maxauthsize = SHA224_DIGEST_SIZE,
1985 },
1986 .caam = {
1987 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1988 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1989 OP_ALG_AAI_HMAC_PRECOMP,
1990 .geniv = true,
1991 }
1992 },
1993 {
1994 .aead = {
1995 .base = {
1996 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1997 .cra_driver_name = "authenc-hmac-sha256-"
1998 "cbc-aes-caam-qi2",
1999 .cra_blocksize = AES_BLOCK_SIZE,
2000 },
2001 .setkey = aead_setkey,
2002 .setauthsize = aead_setauthsize,
2003 .encrypt = aead_encrypt,
2004 .decrypt = aead_decrypt,
2005 .ivsize = AES_BLOCK_SIZE,
2006 .maxauthsize = SHA256_DIGEST_SIZE,
2007 },
2008 .caam = {
2009 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2010 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2011 OP_ALG_AAI_HMAC_PRECOMP,
2012 }
2013 },
2014 {
2015 .aead = {
2016 .base = {
2017 .cra_name = "echainiv(authenc(hmac(sha256),"
2018 "cbc(aes)))",
2019 .cra_driver_name = "echainiv-authenc-"
2020 "hmac-sha256-cbc-aes-"
2021 "caam-qi2",
2022 .cra_blocksize = AES_BLOCK_SIZE,
2023 },
2024 .setkey = aead_setkey,
2025 .setauthsize = aead_setauthsize,
2026 .encrypt = aead_encrypt,
2027 .decrypt = aead_decrypt,
2028 .ivsize = AES_BLOCK_SIZE,
2029 .maxauthsize = SHA256_DIGEST_SIZE,
2030 },
2031 .caam = {
2032 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2033 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2034 OP_ALG_AAI_HMAC_PRECOMP,
2035 .geniv = true,
2036 }
2037 },
2038 {
2039 .aead = {
2040 .base = {
2041 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2042 .cra_driver_name = "authenc-hmac-sha384-"
2043 "cbc-aes-caam-qi2",
2044 .cra_blocksize = AES_BLOCK_SIZE,
2045 },
2046 .setkey = aead_setkey,
2047 .setauthsize = aead_setauthsize,
2048 .encrypt = aead_encrypt,
2049 .decrypt = aead_decrypt,
2050 .ivsize = AES_BLOCK_SIZE,
2051 .maxauthsize = SHA384_DIGEST_SIZE,
2052 },
2053 .caam = {
2054 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2055 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2056 OP_ALG_AAI_HMAC_PRECOMP,
2057 }
2058 },
2059 {
2060 .aead = {
2061 .base = {
2062 .cra_name = "echainiv(authenc(hmac(sha384),"
2063 "cbc(aes)))",
2064 .cra_driver_name = "echainiv-authenc-"
2065 "hmac-sha384-cbc-aes-"
2066 "caam-qi2",
2067 .cra_blocksize = AES_BLOCK_SIZE,
2068 },
2069 .setkey = aead_setkey,
2070 .setauthsize = aead_setauthsize,
2071 .encrypt = aead_encrypt,
2072 .decrypt = aead_decrypt,
2073 .ivsize = AES_BLOCK_SIZE,
2074 .maxauthsize = SHA384_DIGEST_SIZE,
2075 },
2076 .caam = {
2077 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2078 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2079 OP_ALG_AAI_HMAC_PRECOMP,
2080 .geniv = true,
2081 }
2082 },
2083 {
2084 .aead = {
2085 .base = {
2086 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2087 .cra_driver_name = "authenc-hmac-sha512-"
2088 "cbc-aes-caam-qi2",
2089 .cra_blocksize = AES_BLOCK_SIZE,
2090 },
2091 .setkey = aead_setkey,
2092 .setauthsize = aead_setauthsize,
2093 .encrypt = aead_encrypt,
2094 .decrypt = aead_decrypt,
2095 .ivsize = AES_BLOCK_SIZE,
2096 .maxauthsize = SHA512_DIGEST_SIZE,
2097 },
2098 .caam = {
2099 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2100 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2101 OP_ALG_AAI_HMAC_PRECOMP,
2102 }
2103 },
2104 {
2105 .aead = {
2106 .base = {
2107 .cra_name = "echainiv(authenc(hmac(sha512),"
2108 "cbc(aes)))",
2109 .cra_driver_name = "echainiv-authenc-"
2110 "hmac-sha512-cbc-aes-"
2111 "caam-qi2",
2112 .cra_blocksize = AES_BLOCK_SIZE,
2113 },
2114 .setkey = aead_setkey,
2115 .setauthsize = aead_setauthsize,
2116 .encrypt = aead_encrypt,
2117 .decrypt = aead_decrypt,
2118 .ivsize = AES_BLOCK_SIZE,
2119 .maxauthsize = SHA512_DIGEST_SIZE,
2120 },
2121 .caam = {
2122 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2123 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2124 OP_ALG_AAI_HMAC_PRECOMP,
2125 .geniv = true,
2126 }
2127 },
2128 {
2129 .aead = {
2130 .base = {
2131 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2132 .cra_driver_name = "authenc-hmac-md5-"
2133 "cbc-des3_ede-caam-qi2",
2134 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2135 },
2136 .setkey = des3_aead_setkey,
2137 .setauthsize = aead_setauthsize,
2138 .encrypt = aead_encrypt,
2139 .decrypt = aead_decrypt,
2140 .ivsize = DES3_EDE_BLOCK_SIZE,
2141 .maxauthsize = MD5_DIGEST_SIZE,
2142 },
2143 .caam = {
2144 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2145 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2146 OP_ALG_AAI_HMAC_PRECOMP,
2147 }
2148 },
2149 {
2150 .aead = {
2151 .base = {
2152 .cra_name = "echainiv(authenc(hmac(md5),"
2153 "cbc(des3_ede)))",
2154 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2155 "cbc-des3_ede-caam-qi2",
2156 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2157 },
2158 .setkey = des3_aead_setkey,
2159 .setauthsize = aead_setauthsize,
2160 .encrypt = aead_encrypt,
2161 .decrypt = aead_decrypt,
2162 .ivsize = DES3_EDE_BLOCK_SIZE,
2163 .maxauthsize = MD5_DIGEST_SIZE,
2164 },
2165 .caam = {
2166 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2167 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2168 OP_ALG_AAI_HMAC_PRECOMP,
2169 .geniv = true,
2170 }
2171 },
2172 {
2173 .aead = {
2174 .base = {
2175 .cra_name = "authenc(hmac(sha1),"
2176 "cbc(des3_ede))",
2177 .cra_driver_name = "authenc-hmac-sha1-"
2178 "cbc-des3_ede-caam-qi2",
2179 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2180 },
2181 .setkey = des3_aead_setkey,
2182 .setauthsize = aead_setauthsize,
2183 .encrypt = aead_encrypt,
2184 .decrypt = aead_decrypt,
2185 .ivsize = DES3_EDE_BLOCK_SIZE,
2186 .maxauthsize = SHA1_DIGEST_SIZE,
2187 },
2188 .caam = {
2189 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2190 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2191 OP_ALG_AAI_HMAC_PRECOMP,
2192 },
2193 },
2194 {
2195 .aead = {
2196 .base = {
2197 .cra_name = "echainiv(authenc(hmac(sha1),"
2198 "cbc(des3_ede)))",
2199 .cra_driver_name = "echainiv-authenc-"
2200 "hmac-sha1-"
2201 "cbc-des3_ede-caam-qi2",
2202 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2203 },
2204 .setkey = des3_aead_setkey,
2205 .setauthsize = aead_setauthsize,
2206 .encrypt = aead_encrypt,
2207 .decrypt = aead_decrypt,
2208 .ivsize = DES3_EDE_BLOCK_SIZE,
2209 .maxauthsize = SHA1_DIGEST_SIZE,
2210 },
2211 .caam = {
2212 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2213 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2214 OP_ALG_AAI_HMAC_PRECOMP,
2215 .geniv = true,
2216 }
2217 },
2218 {
2219 .aead = {
2220 .base = {
2221 .cra_name = "authenc(hmac(sha224),"
2222 "cbc(des3_ede))",
2223 .cra_driver_name = "authenc-hmac-sha224-"
2224 "cbc-des3_ede-caam-qi2",
2225 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2226 },
2227 .setkey = des3_aead_setkey,
2228 .setauthsize = aead_setauthsize,
2229 .encrypt = aead_encrypt,
2230 .decrypt = aead_decrypt,
2231 .ivsize = DES3_EDE_BLOCK_SIZE,
2232 .maxauthsize = SHA224_DIGEST_SIZE,
2233 },
2234 .caam = {
2235 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2236 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2237 OP_ALG_AAI_HMAC_PRECOMP,
2238 },
2239 },
2240 {
2241 .aead = {
2242 .base = {
2243 .cra_name = "echainiv(authenc(hmac(sha224),"
2244 "cbc(des3_ede)))",
2245 .cra_driver_name = "echainiv-authenc-"
2246 "hmac-sha224-"
2247 "cbc-des3_ede-caam-qi2",
2248 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2249 },
2250 .setkey = des3_aead_setkey,
2251 .setauthsize = aead_setauthsize,
2252 .encrypt = aead_encrypt,
2253 .decrypt = aead_decrypt,
2254 .ivsize = DES3_EDE_BLOCK_SIZE,
2255 .maxauthsize = SHA224_DIGEST_SIZE,
2256 },
2257 .caam = {
2258 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2259 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2260 OP_ALG_AAI_HMAC_PRECOMP,
2261 .geniv = true,
2262 }
2263 },
2264 {
2265 .aead = {
2266 .base = {
2267 .cra_name = "authenc(hmac(sha256),"
2268 "cbc(des3_ede))",
2269 .cra_driver_name = "authenc-hmac-sha256-"
2270 "cbc-des3_ede-caam-qi2",
2271 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2272 },
2273 .setkey = des3_aead_setkey,
2274 .setauthsize = aead_setauthsize,
2275 .encrypt = aead_encrypt,
2276 .decrypt = aead_decrypt,
2277 .ivsize = DES3_EDE_BLOCK_SIZE,
2278 .maxauthsize = SHA256_DIGEST_SIZE,
2279 },
2280 .caam = {
2281 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2282 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2283 OP_ALG_AAI_HMAC_PRECOMP,
2284 },
2285 },
2286 {
2287 .aead = {
2288 .base = {
2289 .cra_name = "echainiv(authenc(hmac(sha256),"
2290 "cbc(des3_ede)))",
2291 .cra_driver_name = "echainiv-authenc-"
2292 "hmac-sha256-"
2293 "cbc-des3_ede-caam-qi2",
2294 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2295 },
2296 .setkey = des3_aead_setkey,
2297 .setauthsize = aead_setauthsize,
2298 .encrypt = aead_encrypt,
2299 .decrypt = aead_decrypt,
2300 .ivsize = DES3_EDE_BLOCK_SIZE,
2301 .maxauthsize = SHA256_DIGEST_SIZE,
2302 },
2303 .caam = {
2304 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2305 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2306 OP_ALG_AAI_HMAC_PRECOMP,
2307 .geniv = true,
2308 }
2309 },
2310 {
2311 .aead = {
2312 .base = {
2313 .cra_name = "authenc(hmac(sha384),"
2314 "cbc(des3_ede))",
2315 .cra_driver_name = "authenc-hmac-sha384-"
2316 "cbc-des3_ede-caam-qi2",
2317 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2318 },
2319 .setkey = des3_aead_setkey,
2320 .setauthsize = aead_setauthsize,
2321 .encrypt = aead_encrypt,
2322 .decrypt = aead_decrypt,
2323 .ivsize = DES3_EDE_BLOCK_SIZE,
2324 .maxauthsize = SHA384_DIGEST_SIZE,
2325 },
2326 .caam = {
2327 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2328 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2329 OP_ALG_AAI_HMAC_PRECOMP,
2330 },
2331 },
2332 {
2333 .aead = {
2334 .base = {
2335 .cra_name = "echainiv(authenc(hmac(sha384),"
2336 "cbc(des3_ede)))",
2337 .cra_driver_name = "echainiv-authenc-"
2338 "hmac-sha384-"
2339 "cbc-des3_ede-caam-qi2",
2340 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2341 },
2342 .setkey = des3_aead_setkey,
2343 .setauthsize = aead_setauthsize,
2344 .encrypt = aead_encrypt,
2345 .decrypt = aead_decrypt,
2346 .ivsize = DES3_EDE_BLOCK_SIZE,
2347 .maxauthsize = SHA384_DIGEST_SIZE,
2348 },
2349 .caam = {
2350 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2351 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2352 OP_ALG_AAI_HMAC_PRECOMP,
2353 .geniv = true,
2354 }
2355 },
2356 {
2357 .aead = {
2358 .base = {
2359 .cra_name = "authenc(hmac(sha512),"
2360 "cbc(des3_ede))",
2361 .cra_driver_name = "authenc-hmac-sha512-"
2362 "cbc-des3_ede-caam-qi2",
2363 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2364 },
2365 .setkey = des3_aead_setkey,
2366 .setauthsize = aead_setauthsize,
2367 .encrypt = aead_encrypt,
2368 .decrypt = aead_decrypt,
2369 .ivsize = DES3_EDE_BLOCK_SIZE,
2370 .maxauthsize = SHA512_DIGEST_SIZE,
2371 },
2372 .caam = {
2373 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2374 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2375 OP_ALG_AAI_HMAC_PRECOMP,
2376 },
2377 },
2378 {
2379 .aead = {
2380 .base = {
2381 .cra_name = "echainiv(authenc(hmac(sha512),"
2382 "cbc(des3_ede)))",
2383 .cra_driver_name = "echainiv-authenc-"
2384 "hmac-sha512-"
2385 "cbc-des3_ede-caam-qi2",
2386 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2387 },
2388 .setkey = des3_aead_setkey,
2389 .setauthsize = aead_setauthsize,
2390 .encrypt = aead_encrypt,
2391 .decrypt = aead_decrypt,
2392 .ivsize = DES3_EDE_BLOCK_SIZE,
2393 .maxauthsize = SHA512_DIGEST_SIZE,
2394 },
2395 .caam = {
2396 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2397 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2398 OP_ALG_AAI_HMAC_PRECOMP,
2399 .geniv = true,
2400 }
2401 },
2402 {
2403 .aead = {
2404 .base = {
2405 .cra_name = "authenc(hmac(md5),cbc(des))",
2406 .cra_driver_name = "authenc-hmac-md5-"
2407 "cbc-des-caam-qi2",
2408 .cra_blocksize = DES_BLOCK_SIZE,
2409 },
2410 .setkey = aead_setkey,
2411 .setauthsize = aead_setauthsize,
2412 .encrypt = aead_encrypt,
2413 .decrypt = aead_decrypt,
2414 .ivsize = DES_BLOCK_SIZE,
2415 .maxauthsize = MD5_DIGEST_SIZE,
2416 },
2417 .caam = {
2418 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2419 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2420 OP_ALG_AAI_HMAC_PRECOMP,
2421 },
2422 },
2423 {
2424 .aead = {
2425 .base = {
2426 .cra_name = "echainiv(authenc(hmac(md5),"
2427 "cbc(des)))",
2428 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2429 "cbc-des-caam-qi2",
2430 .cra_blocksize = DES_BLOCK_SIZE,
2431 },
2432 .setkey = aead_setkey,
2433 .setauthsize = aead_setauthsize,
2434 .encrypt = aead_encrypt,
2435 .decrypt = aead_decrypt,
2436 .ivsize = DES_BLOCK_SIZE,
2437 .maxauthsize = MD5_DIGEST_SIZE,
2438 },
2439 .caam = {
2440 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2441 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2442 OP_ALG_AAI_HMAC_PRECOMP,
2443 .geniv = true,
2444 }
2445 },
2446 {
2447 .aead = {
2448 .base = {
2449 .cra_name = "authenc(hmac(sha1),cbc(des))",
2450 .cra_driver_name = "authenc-hmac-sha1-"
2451 "cbc-des-caam-qi2",
2452 .cra_blocksize = DES_BLOCK_SIZE,
2453 },
2454 .setkey = aead_setkey,
2455 .setauthsize = aead_setauthsize,
2456 .encrypt = aead_encrypt,
2457 .decrypt = aead_decrypt,
2458 .ivsize = DES_BLOCK_SIZE,
2459 .maxauthsize = SHA1_DIGEST_SIZE,
2460 },
2461 .caam = {
2462 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2463 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2464 OP_ALG_AAI_HMAC_PRECOMP,
2465 },
2466 },
2467 {
2468 .aead = {
2469 .base = {
2470 .cra_name = "echainiv(authenc(hmac(sha1),"
2471 "cbc(des)))",
2472 .cra_driver_name = "echainiv-authenc-"
2473 "hmac-sha1-cbc-des-caam-qi2",
2474 .cra_blocksize = DES_BLOCK_SIZE,
2475 },
2476 .setkey = aead_setkey,
2477 .setauthsize = aead_setauthsize,
2478 .encrypt = aead_encrypt,
2479 .decrypt = aead_decrypt,
2480 .ivsize = DES_BLOCK_SIZE,
2481 .maxauthsize = SHA1_DIGEST_SIZE,
2482 },
2483 .caam = {
2484 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2485 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2486 OP_ALG_AAI_HMAC_PRECOMP,
2487 .geniv = true,
2488 }
2489 },
2490 {
2491 .aead = {
2492 .base = {
2493 .cra_name = "authenc(hmac(sha224),cbc(des))",
2494 .cra_driver_name = "authenc-hmac-sha224-"
2495 "cbc-des-caam-qi2",
2496 .cra_blocksize = DES_BLOCK_SIZE,
2497 },
2498 .setkey = aead_setkey,
2499 .setauthsize = aead_setauthsize,
2500 .encrypt = aead_encrypt,
2501 .decrypt = aead_decrypt,
2502 .ivsize = DES_BLOCK_SIZE,
2503 .maxauthsize = SHA224_DIGEST_SIZE,
2504 },
2505 .caam = {
2506 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2507 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2508 OP_ALG_AAI_HMAC_PRECOMP,
2509 },
2510 },
2511 {
2512 .aead = {
2513 .base = {
2514 .cra_name = "echainiv(authenc(hmac(sha224),"
2515 "cbc(des)))",
2516 .cra_driver_name = "echainiv-authenc-"
2517 "hmac-sha224-cbc-des-"
2518 "caam-qi2",
2519 .cra_blocksize = DES_BLOCK_SIZE,
2520 },
2521 .setkey = aead_setkey,
2522 .setauthsize = aead_setauthsize,
2523 .encrypt = aead_encrypt,
2524 .decrypt = aead_decrypt,
2525 .ivsize = DES_BLOCK_SIZE,
2526 .maxauthsize = SHA224_DIGEST_SIZE,
2527 },
2528 .caam = {
2529 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2530 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2531 OP_ALG_AAI_HMAC_PRECOMP,
2532 .geniv = true,
2533 }
2534 },
2535 {
2536 .aead = {
2537 .base = {
2538 .cra_name = "authenc(hmac(sha256),cbc(des))",
2539 .cra_driver_name = "authenc-hmac-sha256-"
2540 "cbc-des-caam-qi2",
2541 .cra_blocksize = DES_BLOCK_SIZE,
2542 },
2543 .setkey = aead_setkey,
2544 .setauthsize = aead_setauthsize,
2545 .encrypt = aead_encrypt,
2546 .decrypt = aead_decrypt,
2547 .ivsize = DES_BLOCK_SIZE,
2548 .maxauthsize = SHA256_DIGEST_SIZE,
2549 },
2550 .caam = {
2551 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2552 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2553 OP_ALG_AAI_HMAC_PRECOMP,
2554 },
2555 },
2556 {
2557 .aead = {
2558 .base = {
2559 .cra_name = "echainiv(authenc(hmac(sha256),"
2560 "cbc(des)))",
2561 .cra_driver_name = "echainiv-authenc-"
2562 "hmac-sha256-cbc-des-"
2563 "caam-qi2",
2564 .cra_blocksize = DES_BLOCK_SIZE,
2565 },
2566 .setkey = aead_setkey,
2567 .setauthsize = aead_setauthsize,
2568 .encrypt = aead_encrypt,
2569 .decrypt = aead_decrypt,
2570 .ivsize = DES_BLOCK_SIZE,
2571 .maxauthsize = SHA256_DIGEST_SIZE,
2572 },
2573 .caam = {
2574 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2575 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2576 OP_ALG_AAI_HMAC_PRECOMP,
2577 .geniv = true,
2578 },
2579 },
2580 {
2581 .aead = {
2582 .base = {
2583 .cra_name = "authenc(hmac(sha384),cbc(des))",
2584 .cra_driver_name = "authenc-hmac-sha384-"
2585 "cbc-des-caam-qi2",
2586 .cra_blocksize = DES_BLOCK_SIZE,
2587 },
2588 .setkey = aead_setkey,
2589 .setauthsize = aead_setauthsize,
2590 .encrypt = aead_encrypt,
2591 .decrypt = aead_decrypt,
2592 .ivsize = DES_BLOCK_SIZE,
2593 .maxauthsize = SHA384_DIGEST_SIZE,
2594 },
2595 .caam = {
2596 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2597 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2598 OP_ALG_AAI_HMAC_PRECOMP,
2599 },
2600 },
2601 {
2602 .aead = {
2603 .base = {
2604 .cra_name = "echainiv(authenc(hmac(sha384),"
2605 "cbc(des)))",
2606 .cra_driver_name = "echainiv-authenc-"
2607 "hmac-sha384-cbc-des-"
2608 "caam-qi2",
2609 .cra_blocksize = DES_BLOCK_SIZE,
2610 },
2611 .setkey = aead_setkey,
2612 .setauthsize = aead_setauthsize,
2613 .encrypt = aead_encrypt,
2614 .decrypt = aead_decrypt,
2615 .ivsize = DES_BLOCK_SIZE,
2616 .maxauthsize = SHA384_DIGEST_SIZE,
2617 },
2618 .caam = {
2619 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2620 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2621 OP_ALG_AAI_HMAC_PRECOMP,
2622 .geniv = true,
2623 }
2624 },
2625 {
2626 .aead = {
2627 .base = {
2628 .cra_name = "authenc(hmac(sha512),cbc(des))",
2629 .cra_driver_name = "authenc-hmac-sha512-"
2630 "cbc-des-caam-qi2",
2631 .cra_blocksize = DES_BLOCK_SIZE,
2632 },
2633 .setkey = aead_setkey,
2634 .setauthsize = aead_setauthsize,
2635 .encrypt = aead_encrypt,
2636 .decrypt = aead_decrypt,
2637 .ivsize = DES_BLOCK_SIZE,
2638 .maxauthsize = SHA512_DIGEST_SIZE,
2639 },
2640 .caam = {
2641 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2642 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2643 OP_ALG_AAI_HMAC_PRECOMP,
2644 }
2645 },
2646 {
2647 .aead = {
2648 .base = {
2649 .cra_name = "echainiv(authenc(hmac(sha512),"
2650 "cbc(des)))",
2651 .cra_driver_name = "echainiv-authenc-"
2652 "hmac-sha512-cbc-des-"
2653 "caam-qi2",
2654 .cra_blocksize = DES_BLOCK_SIZE,
2655 },
2656 .setkey = aead_setkey,
2657 .setauthsize = aead_setauthsize,
2658 .encrypt = aead_encrypt,
2659 .decrypt = aead_decrypt,
2660 .ivsize = DES_BLOCK_SIZE,
2661 .maxauthsize = SHA512_DIGEST_SIZE,
2662 },
2663 .caam = {
2664 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2665 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2666 OP_ALG_AAI_HMAC_PRECOMP,
2667 .geniv = true,
2668 }
2669 },
2670 {
2671 .aead = {
2672 .base = {
2673 .cra_name = "authenc(hmac(md5),"
2674 "rfc3686(ctr(aes)))",
2675 .cra_driver_name = "authenc-hmac-md5-"
2676 "rfc3686-ctr-aes-caam-qi2",
2677 .cra_blocksize = 1,
2678 },
2679 .setkey = aead_setkey,
2680 .setauthsize = aead_setauthsize,
2681 .encrypt = aead_encrypt,
2682 .decrypt = aead_decrypt,
2683 .ivsize = CTR_RFC3686_IV_SIZE,
2684 .maxauthsize = MD5_DIGEST_SIZE,
2685 },
2686 .caam = {
2687 .class1_alg_type = OP_ALG_ALGSEL_AES |
2688 OP_ALG_AAI_CTR_MOD128,
2689 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2690 OP_ALG_AAI_HMAC_PRECOMP,
2691 .rfc3686 = true,
2692 },
2693 },
2694 {
2695 .aead = {
2696 .base = {
2697 .cra_name = "seqiv(authenc("
2698 "hmac(md5),rfc3686(ctr(aes))))",
2699 .cra_driver_name = "seqiv-authenc-hmac-md5-"
2700 "rfc3686-ctr-aes-caam-qi2",
2701 .cra_blocksize = 1,
2702 },
2703 .setkey = aead_setkey,
2704 .setauthsize = aead_setauthsize,
2705 .encrypt = aead_encrypt,
2706 .decrypt = aead_decrypt,
2707 .ivsize = CTR_RFC3686_IV_SIZE,
2708 .maxauthsize = MD5_DIGEST_SIZE,
2709 },
2710 .caam = {
2711 .class1_alg_type = OP_ALG_ALGSEL_AES |
2712 OP_ALG_AAI_CTR_MOD128,
2713 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2714 OP_ALG_AAI_HMAC_PRECOMP,
2715 .rfc3686 = true,
2716 .geniv = true,
2717 },
2718 },
2719 {
2720 .aead = {
2721 .base = {
2722 .cra_name = "authenc(hmac(sha1),"
2723 "rfc3686(ctr(aes)))",
2724 .cra_driver_name = "authenc-hmac-sha1-"
2725 "rfc3686-ctr-aes-caam-qi2",
2726 .cra_blocksize = 1,
2727 },
2728 .setkey = aead_setkey,
2729 .setauthsize = aead_setauthsize,
2730 .encrypt = aead_encrypt,
2731 .decrypt = aead_decrypt,
2732 .ivsize = CTR_RFC3686_IV_SIZE,
2733 .maxauthsize = SHA1_DIGEST_SIZE,
2734 },
2735 .caam = {
2736 .class1_alg_type = OP_ALG_ALGSEL_AES |
2737 OP_ALG_AAI_CTR_MOD128,
2738 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2739 OP_ALG_AAI_HMAC_PRECOMP,
2740 .rfc3686 = true,
2741 },
2742 },
2743 {
2744 .aead = {
2745 .base = {
2746 .cra_name = "seqiv(authenc("
2747 "hmac(sha1),rfc3686(ctr(aes))))",
2748 .cra_driver_name = "seqiv-authenc-hmac-sha1-"
2749 "rfc3686-ctr-aes-caam-qi2",
2750 .cra_blocksize = 1,
2751 },
2752 .setkey = aead_setkey,
2753 .setauthsize = aead_setauthsize,
2754 .encrypt = aead_encrypt,
2755 .decrypt = aead_decrypt,
2756 .ivsize = CTR_RFC3686_IV_SIZE,
2757 .maxauthsize = SHA1_DIGEST_SIZE,
2758 },
2759 .caam = {
2760 .class1_alg_type = OP_ALG_ALGSEL_AES |
2761 OP_ALG_AAI_CTR_MOD128,
2762 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2763 OP_ALG_AAI_HMAC_PRECOMP,
2764 .rfc3686 = true,
2765 .geniv = true,
2766 },
2767 },
2768 {
2769 .aead = {
2770 .base = {
2771 .cra_name = "authenc(hmac(sha224),"
2772 "rfc3686(ctr(aes)))",
2773 .cra_driver_name = "authenc-hmac-sha224-"
2774 "rfc3686-ctr-aes-caam-qi2",
2775 .cra_blocksize = 1,
2776 },
2777 .setkey = aead_setkey,
2778 .setauthsize = aead_setauthsize,
2779 .encrypt = aead_encrypt,
2780 .decrypt = aead_decrypt,
2781 .ivsize = CTR_RFC3686_IV_SIZE,
2782 .maxauthsize = SHA224_DIGEST_SIZE,
2783 },
2784 .caam = {
2785 .class1_alg_type = OP_ALG_ALGSEL_AES |
2786 OP_ALG_AAI_CTR_MOD128,
2787 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2788 OP_ALG_AAI_HMAC_PRECOMP,
2789 .rfc3686 = true,
2790 },
2791 },
2792 {
2793 .aead = {
2794 .base = {
2795 .cra_name = "seqiv(authenc("
2796 "hmac(sha224),rfc3686(ctr(aes))))",
2797 .cra_driver_name = "seqiv-authenc-hmac-sha224-"
2798 "rfc3686-ctr-aes-caam-qi2",
2799 .cra_blocksize = 1,
2800 },
2801 .setkey = aead_setkey,
2802 .setauthsize = aead_setauthsize,
2803 .encrypt = aead_encrypt,
2804 .decrypt = aead_decrypt,
2805 .ivsize = CTR_RFC3686_IV_SIZE,
2806 .maxauthsize = SHA224_DIGEST_SIZE,
2807 },
2808 .caam = {
2809 .class1_alg_type = OP_ALG_ALGSEL_AES |
2810 OP_ALG_AAI_CTR_MOD128,
2811 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2812 OP_ALG_AAI_HMAC_PRECOMP,
2813 .rfc3686 = true,
2814 .geniv = true,
2815 },
2816 },
2817 {
2818 .aead = {
2819 .base = {
2820 .cra_name = "authenc(hmac(sha256),"
2821 "rfc3686(ctr(aes)))",
2822 .cra_driver_name = "authenc-hmac-sha256-"
2823 "rfc3686-ctr-aes-caam-qi2",
2824 .cra_blocksize = 1,
2825 },
2826 .setkey = aead_setkey,
2827 .setauthsize = aead_setauthsize,
2828 .encrypt = aead_encrypt,
2829 .decrypt = aead_decrypt,
2830 .ivsize = CTR_RFC3686_IV_SIZE,
2831 .maxauthsize = SHA256_DIGEST_SIZE,
2832 },
2833 .caam = {
2834 .class1_alg_type = OP_ALG_ALGSEL_AES |
2835 OP_ALG_AAI_CTR_MOD128,
2836 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2837 OP_ALG_AAI_HMAC_PRECOMP,
2838 .rfc3686 = true,
2839 },
2840 },
2841 {
2842 .aead = {
2843 .base = {
2844 .cra_name = "seqiv(authenc(hmac(sha256),"
2845 "rfc3686(ctr(aes))))",
2846 .cra_driver_name = "seqiv-authenc-hmac-sha256-"
2847 "rfc3686-ctr-aes-caam-qi2",
2848 .cra_blocksize = 1,
2849 },
2850 .setkey = aead_setkey,
2851 .setauthsize = aead_setauthsize,
2852 .encrypt = aead_encrypt,
2853 .decrypt = aead_decrypt,
2854 .ivsize = CTR_RFC3686_IV_SIZE,
2855 .maxauthsize = SHA256_DIGEST_SIZE,
2856 },
2857 .caam = {
2858 .class1_alg_type = OP_ALG_ALGSEL_AES |
2859 OP_ALG_AAI_CTR_MOD128,
2860 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2861 OP_ALG_AAI_HMAC_PRECOMP,
2862 .rfc3686 = true,
2863 .geniv = true,
2864 },
2865 },
2866 {
2867 .aead = {
2868 .base = {
2869 .cra_name = "authenc(hmac(sha384),"
2870 "rfc3686(ctr(aes)))",
2871 .cra_driver_name = "authenc-hmac-sha384-"
2872 "rfc3686-ctr-aes-caam-qi2",
2873 .cra_blocksize = 1,
2874 },
2875 .setkey = aead_setkey,
2876 .setauthsize = aead_setauthsize,
2877 .encrypt = aead_encrypt,
2878 .decrypt = aead_decrypt,
2879 .ivsize = CTR_RFC3686_IV_SIZE,
2880 .maxauthsize = SHA384_DIGEST_SIZE,
2881 },
2882 .caam = {
2883 .class1_alg_type = OP_ALG_ALGSEL_AES |
2884 OP_ALG_AAI_CTR_MOD128,
2885 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2886 OP_ALG_AAI_HMAC_PRECOMP,
2887 .rfc3686 = true,
2888 },
2889 },
2890 {
2891 .aead = {
2892 .base = {
2893 .cra_name = "seqiv(authenc(hmac(sha384),"
2894 "rfc3686(ctr(aes))))",
2895 .cra_driver_name = "seqiv-authenc-hmac-sha384-"
2896 "rfc3686-ctr-aes-caam-qi2",
2897 .cra_blocksize = 1,
2898 },
2899 .setkey = aead_setkey,
2900 .setauthsize = aead_setauthsize,
2901 .encrypt = aead_encrypt,
2902 .decrypt = aead_decrypt,
2903 .ivsize = CTR_RFC3686_IV_SIZE,
2904 .maxauthsize = SHA384_DIGEST_SIZE,
2905 },
2906 .caam = {
2907 .class1_alg_type = OP_ALG_ALGSEL_AES |
2908 OP_ALG_AAI_CTR_MOD128,
2909 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2910 OP_ALG_AAI_HMAC_PRECOMP,
2911 .rfc3686 = true,
2912 .geniv = true,
2913 },
2914 },
2915 {
2916 .aead = {
2917 .base = {
2918 .cra_name = "rfc7539(chacha20,poly1305)",
2919 .cra_driver_name = "rfc7539-chacha20-poly1305-"
2920 "caam-qi2",
2921 .cra_blocksize = 1,
2922 },
2923 .setkey = chachapoly_setkey,
2924 .setauthsize = chachapoly_setauthsize,
2925 .encrypt = aead_encrypt,
2926 .decrypt = aead_decrypt,
2927 .ivsize = CHACHAPOLY_IV_SIZE,
2928 .maxauthsize = POLY1305_DIGEST_SIZE,
2929 },
2930 .caam = {
2931 .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2932 OP_ALG_AAI_AEAD,
2933 .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2934 OP_ALG_AAI_AEAD,
2935 .nodkp = true,
2936 },
2937 },
2938 {
2939 .aead = {
2940 .base = {
2941 .cra_name = "rfc7539esp(chacha20,poly1305)",
2942 .cra_driver_name = "rfc7539esp-chacha20-"
2943 "poly1305-caam-qi2",
2944 .cra_blocksize = 1,
2945 },
2946 .setkey = chachapoly_setkey,
2947 .setauthsize = chachapoly_setauthsize,
2948 .encrypt = aead_encrypt,
2949 .decrypt = aead_decrypt,
2950 .ivsize = 8,
2951 .maxauthsize = POLY1305_DIGEST_SIZE,
2952 },
2953 .caam = {
2954 .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2955 OP_ALG_AAI_AEAD,
2956 .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2957 OP_ALG_AAI_AEAD,
2958 .nodkp = true,
2959 },
2960 },
2961 {
2962 .aead = {
2963 .base = {
2964 .cra_name = "authenc(hmac(sha512),"
2965 "rfc3686(ctr(aes)))",
2966 .cra_driver_name = "authenc-hmac-sha512-"
2967 "rfc3686-ctr-aes-caam-qi2",
2968 .cra_blocksize = 1,
2969 },
2970 .setkey = aead_setkey,
2971 .setauthsize = aead_setauthsize,
2972 .encrypt = aead_encrypt,
2973 .decrypt = aead_decrypt,
2974 .ivsize = CTR_RFC3686_IV_SIZE,
2975 .maxauthsize = SHA512_DIGEST_SIZE,
2976 },
2977 .caam = {
2978 .class1_alg_type = OP_ALG_ALGSEL_AES |
2979 OP_ALG_AAI_CTR_MOD128,
2980 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2981 OP_ALG_AAI_HMAC_PRECOMP,
2982 .rfc3686 = true,
2983 },
2984 },
2985 {
2986 .aead = {
2987 .base = {
2988 .cra_name = "seqiv(authenc(hmac(sha512),"
2989 "rfc3686(ctr(aes))))",
2990 .cra_driver_name = "seqiv-authenc-hmac-sha512-"
2991 "rfc3686-ctr-aes-caam-qi2",
2992 .cra_blocksize = 1,
2993 },
2994 .setkey = aead_setkey,
2995 .setauthsize = aead_setauthsize,
2996 .encrypt = aead_encrypt,
2997 .decrypt = aead_decrypt,
2998 .ivsize = CTR_RFC3686_IV_SIZE,
2999 .maxauthsize = SHA512_DIGEST_SIZE,
3000 },
3001 .caam = {
3002 .class1_alg_type = OP_ALG_ALGSEL_AES |
3003 OP_ALG_AAI_CTR_MOD128,
3004 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3005 OP_ALG_AAI_HMAC_PRECOMP,
3006 .rfc3686 = true,
3007 .geniv = true,
3008 },
3009 },
3010 };
3011
3012 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
3013 {
3014 struct skcipher_alg *alg = &t_alg->skcipher;
3015
3016 alg->base.cra_module = THIS_MODULE;
3017 alg->base.cra_priority = CAAM_CRA_PRIORITY;
3018 alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
3019 alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3020 CRYPTO_ALG_KERN_DRIVER_ONLY);
3021
3022 alg->init = caam_cra_init_skcipher;
3023 alg->exit = caam_cra_exit;
3024 }
3025
3026 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
3027 {
3028 struct aead_alg *alg = &t_alg->aead;
3029
3030 alg->base.cra_module = THIS_MODULE;
3031 alg->base.cra_priority = CAAM_CRA_PRIORITY;
3032 alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
3033 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3034 CRYPTO_ALG_KERN_DRIVER_ONLY;
3035
3036 alg->init = caam_cra_init_aead;
3037 alg->exit = caam_cra_exit_aead;
3038 }
3039
3040 /* max hash key is max split key size */
3041 #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
3042
3043 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
3044
3045 /* caam context sizes for hashes: running digest + 8 */
3046 #define HASH_MSG_LEN 8
3047 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
3048
3049 enum hash_optype {
3050 UPDATE = 0,
3051 UPDATE_FIRST,
3052 FINALIZE,
3053 DIGEST,
3054 HASH_NUM_OP
3055 };
3056
3057 /**
3058 * struct caam_hash_ctx - ahash per-session context
3059 * @flc: Flow Contexts array
3060 * @key: authentication key
3061 * @flc_dma: I/O virtual addresses of the Flow Contexts
3062 * @dev: dpseci device
3063 * @ctx_len: size of Context Register
3064 * @adata: hashing algorithm details
3065 */
3066 struct caam_hash_ctx {
3067 struct caam_flc flc[HASH_NUM_OP];
3068 u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3069 dma_addr_t flc_dma[HASH_NUM_OP];
3070 struct device *dev;
3071 int ctx_len;
3072 struct alginfo adata;
3073 };
3074
3075 /* ahash state */
3076 struct caam_hash_state {
3077 struct caam_request caam_req;
3078 dma_addr_t buf_dma;
3079 dma_addr_t ctx_dma;
3080 int ctx_dma_len;
3081 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3082 int buflen;
3083 int next_buflen;
3084 u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
3085 int (*update)(struct ahash_request *req);
3086 int (*final)(struct ahash_request *req);
3087 int (*finup)(struct ahash_request *req);
3088 };
3089
3090 struct caam_export_state {
3091 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
3092 u8 caam_ctx[MAX_CTX_LEN];
3093 int buflen;
3094 int (*update)(struct ahash_request *req);
3095 int (*final)(struct ahash_request *req);
3096 int (*finup)(struct ahash_request *req);
3097 };
3098
3099 /* Map current buffer in state (if length > 0) and put it in link table */
3100 static inline int buf_map_to_qm_sg(struct device *dev,
3101 struct dpaa2_sg_entry *qm_sg,
3102 struct caam_hash_state *state)
3103 {
3104 int buflen = state->buflen;
3105
3106 if (!buflen)
3107 return 0;
3108
3109 state->buf_dma = dma_map_single(dev, state->buf, buflen,
3110 DMA_TO_DEVICE);
3111 if (dma_mapping_error(dev, state->buf_dma)) {
3112 dev_err(dev, "unable to map buf\n");
3113 state->buf_dma = 0;
3114 return -ENOMEM;
3115 }
3116
3117 dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
3118
3119 return 0;
3120 }
3121
3122 /* Map state->caam_ctx, and add it to link table */
3123 static inline int ctx_map_to_qm_sg(struct device *dev,
3124 struct caam_hash_state *state, int ctx_len,
3125 struct dpaa2_sg_entry *qm_sg, u32 flag)
3126 {
3127 state->ctx_dma_len = ctx_len;
3128 state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
3129 if (dma_mapping_error(dev, state->ctx_dma)) {
3130 dev_err(dev, "unable to map ctx\n");
3131 state->ctx_dma = 0;
3132 return -ENOMEM;
3133 }
3134
3135 dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
3136
3137 return 0;
3138 }
3139
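/*
 * (Re)build the four ahash shared descriptors - update, update_first,
 * finalize, digest - and sync each one to its device-visible Flow Context.
 */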
3140 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
3141 {
3142 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3143 int digestsize = crypto_ahash_digestsize(ahash);
3144 struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
3145 struct caam_flc *flc;
3146 u32 *desc;
3147
3148 /* ahash_update shared descriptor */
3149 flc = &ctx->flc[UPDATE];
3150 desc = flc->sh_desc;
3151 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
3152 ctx->ctx_len, true, priv->sec_attr.era);
3153 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3154 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
3155 desc_bytes(desc), DMA_BIDIRECTIONAL);
3156 print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
3157 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3158 1);
3159
3160 /* ahash_update_first shared descriptor */
3161 flc = &ctx->flc[UPDATE_FIRST];
3162 desc = flc->sh_desc;
3163 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
3164 ctx->ctx_len, false, priv->sec_attr.era);
3165 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3166 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
3167 desc_bytes(desc), DMA_BIDIRECTIONAL);
3168 print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
3169 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3170 1);
3171
3172 /* ahash_final shared descriptor */
3173 flc = &ctx->flc[FINALIZE];
3174 desc = flc->sh_desc;
3175 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
3176 ctx->ctx_len, true, priv->sec_attr.era);
3177 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3178 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
3179 desc_bytes(desc), DMA_BIDIRECTIONAL);
3180 print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
3181 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3182 1);
3183
3184 /* ahash_digest shared descriptor */
3185 flc = &ctx->flc[DIGEST];
3186 desc = flc->sh_desc;
3187 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
3188 ctx->ctx_len, false, priv->sec_attr.era);
3189 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3190 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
3191 desc_bytes(desc), DMA_BIDIRECTIONAL);
3192 print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
3193 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3194 1);
3195
3196 return 0;
3197 }
3198
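/* Completion context for the synchronous key-digest job issued below */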
3199 struct split_key_sh_result {
3200 struct completion completion;
3201 int err;
3202 struct device *dev;
3203 };
3204
3205 static void split_key_sh_done(void *cbk_ctx, u32 err)
3206 {
3207 struct split_key_sh_result *res = cbk_ctx;
3208
3209 dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3210
3211 res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
3212 complete(&res->completion);
3213 }
3214
3215 /* Digest the key down to digest size when it exceeds the block size */
3216 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
3217 u32 digestsize)
3218 {
3219 struct caam_request *req_ctx;
3220 u32 *desc;
3221 struct split_key_sh_result result;
3222 dma_addr_t key_dma;
3223 struct caam_flc *flc;
3224 dma_addr_t flc_dma;
3225 int ret = -ENOMEM;
3226 struct dpaa2_fl_entry *in_fle, *out_fle;
3227
3228 req_ctx = kzalloc_obj(*req_ctx);
3229 if (!req_ctx)
3230 return -ENOMEM;
3231
3232 in_fle = &req_ctx->fd_flt[1];
3233 out_fle = &req_ctx->fd_flt[0];
3234
3235 flc = kzalloc_obj(*flc);
3236 if (!flc)
3237 goto err_flc;
3238
3239 key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
3240 if (dma_mapping_error(ctx->dev, key_dma)) {
3241 dev_err(ctx->dev, "unable to map key memory\n");
3242 goto err_key_dma;
3243 }
3244
3245 desc = flc->sh_desc;
3246
3247 init_sh_desc(desc, 0);
3248
3249 /* descriptor to perform unkeyed hash on key_in */
3250 append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
3251 OP_ALG_AS_INITFINAL);
3252 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
3253 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
3254 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
3255 LDST_SRCDST_BYTE_CONTEXT);
3256
3257 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3258 flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
3259 desc_bytes(desc), DMA_TO_DEVICE);
3260 if (dma_mapping_error(ctx->dev, flc_dma)) {
3261 dev_err(ctx->dev, "unable to map shared descriptor\n");
3262 goto err_flc_dma;
3263 }
3264
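	/*
	 * Both frame list entries point at the same buffer: the key is read
	 * in, hashed, and overwritten in place by the resulting digest.
	 */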
3265 dpaa2_fl_set_final(in_fle, true);
3266 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3267 dpaa2_fl_set_addr(in_fle, key_dma);
3268 dpaa2_fl_set_len(in_fle, *keylen);
3269 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3270 dpaa2_fl_set_addr(out_fle, key_dma);
3271 dpaa2_fl_set_len(out_fle, digestsize);
3272
3273 print_hex_dump_devel("key_in@" __stringify(__LINE__)": ",
3274 DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
3275 print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
3276 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3277 1);
3278
3279 result.err = 0;
3280 init_completion(&result.completion);
3281 result.dev = ctx->dev;
3282
3283 req_ctx->flc = flc;
3284 req_ctx->flc_dma = flc_dma;
3285 req_ctx->cbk = split_key_sh_done;
3286 req_ctx->ctx = &result;
3287
3288 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3289 if (ret == -EINPROGRESS) {
3290 /* in progress */
3291 wait_for_completion(&result.completion);
3292 ret = result.err;
3293 print_hex_dump_devel("digested key@" __stringify(__LINE__)": ",
3294 DUMP_PREFIX_ADDRESS, 16, 4, key,
3295 digestsize, 1);
3296 }
3297
3298 dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
3299 DMA_TO_DEVICE);
3300 err_flc_dma:
3301 dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
3302 err_key_dma:
3303 kfree(flc);
3304 err_flc:
3305 kfree(req_ctx);
3306
3307 *keylen = digestsize;
3308
3309 return ret;
3310 }
3311
3312 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
3313 unsigned int keylen)
3314 {
3315 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3316 unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
3317 unsigned int digestsize = crypto_ahash_digestsize(ahash);
3318 int ret;
3319 u8 *hashed_key = NULL;
3320
3321 dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
3322
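	/*
	 * Per HMAC convention, keys longer than the block size are first
	 * hashed down to digest size; the bounce buffer is padded to the
	 * cache alignment so it can be safely DMA-mapped.
	 */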
3323 if (keylen > blocksize) {
3324 unsigned int aligned_len =
3325 ALIGN(keylen, dma_get_cache_alignment());
3326
3327 if (aligned_len < keylen)
3328 return -EOVERFLOW;
3329
3330 hashed_key = kmalloc(aligned_len, GFP_KERNEL);
3331 if (!hashed_key)
3332 return -ENOMEM;
3333 memcpy(hashed_key, key, keylen);
3334 ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
3335 if (ret)
3336 goto bad_free_key;
3337 key = hashed_key;
3338 }
3339
3340 ctx->adata.keylen = keylen;
3341 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3342 OP_ALG_ALGSEL_MASK);
3343 if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
3344 goto bad_free_key;
3345
3346 ctx->adata.key_virt = key;
3347 ctx->adata.key_inline = true;
3348
3349 /*
3350 * In case |user key| > |derived key|, using DKP<imm,imm> would result
3351 * in invalid opcodes (last bytes of user key) in the resulting
3352 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
3353 * addresses are needed.
3354 */
3355 if (keylen > ctx->adata.keylen_pad) {
3356 memcpy(ctx->key, key, keylen);
3357 dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
3358 ctx->adata.keylen_pad,
3359 DMA_TO_DEVICE);
3360 }
3361
3362 ret = ahash_set_sh_desc(ahash);
3363 kfree(hashed_key);
3364 return ret;
3365 bad_free_key:
3366 kfree(hashed_key);
3367 return -EINVAL;
3368 }
3369
3370 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3371 struct ahash_request *req)
3372 {
3373 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3374
3375 if (edesc->src_nents)
3376 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3377
3378 if (edesc->qm_sg_bytes)
3379 dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3380 DMA_TO_DEVICE);
3381
3382 if (state->buf_dma) {
3383 dma_unmap_single(dev, state->buf_dma, state->buflen,
3384 DMA_TO_DEVICE);
3385 state->buf_dma = 0;
3386 }
3387 }
3388
3389 static inline void ahash_unmap_ctx(struct device *dev,
3390 struct ahash_edesc *edesc,
3391 struct ahash_request *req, u32 flag)
3392 {
3393 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3394
3395 if (state->ctx_dma) {
3396 dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
3397 state->ctx_dma = 0;
3398 }
3399 ahash_unmap(dev, edesc, req);
3400 }
3401
3402 static void ahash_done(void *cbk_ctx, u32 status)
3403 {
3404 struct crypto_async_request *areq = cbk_ctx;
3405 struct ahash_request *req = ahash_request_cast(areq);
3406 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3407 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3408 struct ahash_edesc *edesc = state->caam_req.edesc;
3409 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3410 int digestsize = crypto_ahash_digestsize(ahash);
3411 int ecode = 0;
3412
3413 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3414
3415 if (unlikely(status))
3416 ecode = caam_qi2_strstatus(ctx->dev, status);
3417
3418 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3419 memcpy(req->result, state->caam_ctx, digestsize);
3420 qi_cache_free(edesc);
3421
3422 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3423 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3424 ctx->ctx_len, 1);
3425
3426 ahash_request_complete(req, ecode);
3427 }
3428
3429 static void ahash_done_bi(void *cbk_ctx, u32 status)
3430 {
3431 struct crypto_async_request *areq = cbk_ctx;
3432 struct ahash_request *req = ahash_request_cast(areq);
3433 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3434 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3435 struct ahash_edesc *edesc = state->caam_req.edesc;
3436 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3437 int ecode = 0;
3438
3439 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3440
3441 if (unlikely(status))
3442 ecode = caam_qi2_strstatus(ctx->dev, status);
3443
3444 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3445 qi_cache_free(edesc);
3446
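	/* Stash the unhashed trailing bytes of this request so the next
	 * update/final call can prepend them to its input.
	 */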
3447 scatterwalk_map_and_copy(state->buf, req->src,
3448 req->nbytes - state->next_buflen,
3449 state->next_buflen, 0);
3450 state->buflen = state->next_buflen;
3451
3452 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3453 DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3454 state->buflen, 1);
3455
3456 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3457 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3458 ctx->ctx_len, 1);
3459 if (req->result)
3460 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3461 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3462 crypto_ahash_digestsize(ahash), 1);
3463
3464 ahash_request_complete(req, ecode);
3465 }
3466
3467 static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3468 {
3469 struct crypto_async_request *areq = cbk_ctx;
3470 struct ahash_request *req = ahash_request_cast(areq);
3471 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3472 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3473 struct ahash_edesc *edesc = state->caam_req.edesc;
3474 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3475 int digestsize = crypto_ahash_digestsize(ahash);
3476 int ecode = 0;
3477
3478 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3479
3480 if (unlikely(status))
3481 ecode = caam_qi2_strstatus(ctx->dev, status);
3482
3483 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3484 memcpy(req->result, state->caam_ctx, digestsize);
3485 qi_cache_free(edesc);
3486
3487 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3488 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3489 ctx->ctx_len, 1);
3490
3491 ahash_request_complete(req, ecode);
3492 }
3493
3494 static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3495 {
3496 struct crypto_async_request *areq = cbk_ctx;
3497 struct ahash_request *req = ahash_request_cast(areq);
3498 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3499 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3500 struct ahash_edesc *edesc = state->caam_req.edesc;
3501 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3502 int ecode = 0;
3503
3504 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3505
3506 if (unlikely(status))
3507 ecode = caam_qi2_strstatus(ctx->dev, status);
3508
3509 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3510 qi_cache_free(edesc);
3511
3512 scatterwalk_map_and_copy(state->buf, req->src,
3513 req->nbytes - state->next_buflen,
3514 state->next_buflen, 0);
3515 state->buflen = state->next_buflen;
3516
3517 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3518 DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3519 state->buflen, 1);
3520
3521 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3522 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3523 ctx->ctx_len, 1);
3524 if (req->result)
3525 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3526 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3527 crypto_ahash_digestsize(ahash), 1);
3528
3529 ahash_request_complete(req, ecode);
3530 }
3531
3532 static int ahash_update_ctx(struct ahash_request *req)
3533 {
3534 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3535 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3536 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3537 struct caam_request *req_ctx = &state->caam_req;
3538 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3539 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3540 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3541 GFP_KERNEL : GFP_ATOMIC;
3542 u8 *buf = state->buf;
3543 int *buflen = &state->buflen;
3544 int *next_buflen = &state->next_buflen;
3545 int in_len = *buflen + req->nbytes, to_hash;
3546 int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
3547 struct ahash_edesc *edesc;
3548 int ret = 0;
3549
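	/*
	 * Only whole blocks are sent to the engine; the remainder (the block
	 * size is a power of two, hence the mask) is carried in state->buf
	 * until the next update/final call.
	 */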
3550 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3551 to_hash = in_len - *next_buflen;
3552
3553 if (to_hash) {
3554 struct dpaa2_sg_entry *sg_table;
3555 int src_len = req->nbytes - *next_buflen;
3556
3557 src_nents = sg_nents_for_len(req->src, src_len);
3558 if (src_nents < 0) {
3559 dev_err(ctx->dev, "Invalid number of src SG.\n");
3560 return src_nents;
3561 }
3562
3563 if (src_nents) {
3564 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3565 DMA_TO_DEVICE);
3566 if (!mapped_nents) {
3567 dev_err(ctx->dev, "unable to DMA map source\n");
3568 return -ENOMEM;
3569 }
3570 } else {
3571 mapped_nents = 0;
3572 }
3573
3574 /* allocate space for base edesc and link tables */
3575 edesc = qi_cache_zalloc(flags);
3576 if (!edesc) {
3577 dma_unmap_sg(ctx->dev, req->src, src_nents,
3578 DMA_TO_DEVICE);
3579 return -ENOMEM;
3580 }
3581
3582 edesc->src_nents = src_nents;
3583 qm_sg_src_index = 1 + (*buflen ? 1 : 0);
3584 qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3585 sizeof(*sg_table);
3586 sg_table = &edesc->sgt[0];

		ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
				       DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents) {
			sg_to_qm_sg_last(req->src, src_len,
					 sg_table + qm_sg_src_index, 0);
		} else {
			dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
					   true);
		}

		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
						  qm_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
			dev_err(ctx->dev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}
		edesc->qm_sg_bytes = qm_sg_bytes;

		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
		dpaa2_fl_set_final(in_fle, true);
		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
		dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
		dpaa2_fl_set_len(out_fle, ctx->ctx_len);

		req_ctx->flc = &ctx->flc[UPDATE];
		req_ctx->flc_dma = ctx->flc_dma[UPDATE];
		req_ctx->cbk = ahash_done_bi;
		req_ctx->ctx = &req->base;
		req_ctx->edesc = edesc;

		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
		if (ret != -EINPROGRESS &&
		    !(ret == -EBUSY &&
		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			goto unmap_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);
	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int buflen = state->buflen;
	int qm_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	struct dpaa2_sg_entry *sg_table;
	int ret;

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(flags);
	if (!edesc)
		return -ENOMEM;

	qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
	sg_table = &edesc->sgt[0];

	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
			       DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
	if (ret)
		goto unmap_ctx;

	dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);

	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
		dev_err(ctx->dev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}
	edesc->qm_sg_bytes = qm_sg_bytes;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[FINALIZE];
	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
	req_ctx->cbk = ahash_done_ctx_src;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;

	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);
	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int buflen = state->buflen;
	int qm_sg_bytes, qm_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	struct dpaa2_sg_entry *sg_table;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(ctx->dev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(ctx->dev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(flags);
	if (!edesc) {
		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;
	qm_sg_src_index = 1 + (buflen ? 1 : 0);
	qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
		      sizeof(*sg_table);
	sg_table = &edesc->sgt[0];

	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
			       DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);

	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
		dev_err(ctx->dev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}
	edesc->qm_sg_bytes = qm_sg_bytes;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[FINALIZE];
	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
	req_ctx->cbk = ahash_done_ctx_src;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;

	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);
	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = -ENOMEM;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(ctx->dev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(ctx->dev, "unable to map source for DMA\n");
			return ret;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(flags);
	if (!edesc) {
		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
		return ret;
	}

	edesc->src_nents = src_nents;
	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));

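	/*
	 * Format choice: a single mapped segment is passed to the engine
	 * directly as a "single" frame list entry, while multiple segments
	 * require a QM S/G table in DMA-able memory for the entry to
	 * reference.
	 */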
	if (mapped_nents > 1) {
		int qm_sg_bytes;
		struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];

		qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
		sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
						  qm_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
			dev_err(ctx->dev, "unable to map S/G table\n");
			goto unmap;
		}
		edesc->qm_sg_bytes = qm_sg_bytes;
		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	} else {
		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
	}

	state->ctx_dma_len = digestsize;
	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
		dev_err(ctx->dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		goto unmap;
	}

	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_len(in_fle, req->nbytes);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[DIGEST];
	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
	req_ctx->cbk = ahash_done;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);
	return ret;
}

static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int buflen = state->buflen;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = -ENOMEM;

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(flags);
	if (!edesc)
		return ret;

	if (buflen) {
		state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, state->buf_dma)) {
			dev_err(ctx->dev, "unable to map src\n");
			goto unmap;
		}
	}

	state->ctx_dma_len = digestsize;
	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
		dev_err(ctx->dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		goto unmap;
	}

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	/*
	 * crypto engine requires the input entry to be present when
	 * "frame list" FD is used.
	 * Since engine does not support FMT=2'b11 (unused entry type), leaving
	 * in_fle zeroized (except for "Final" flag) is the best option.
	 */
	if (buflen) {
		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(in_fle, state->buf_dma);
		dpaa2_fl_set_len(in_fle, buflen);
	}
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[DIGEST];
	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
	req_ctx->cbk = ahash_done;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;

	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);
	return ret;
}

static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	int qm_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		struct dpaa2_sg_entry *sg_table;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(ctx->dev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/* allocate space for base edesc and link tables */
		edesc = qi_cache_zalloc(flags);
		if (!edesc) {
			dma_unmap_sg(ctx->dev, req->src, src_nents,
				     DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
			      sizeof(*sg_table);
		sg_table = &edesc->sgt[0];

		ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
		if (ret)
			goto unmap_ctx;

		sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);

		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
						  qm_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
			dev_err(ctx->dev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}
		edesc->qm_sg_bytes = qm_sg_bytes;

		state->ctx_dma_len = ctx->ctx_len;
		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
						ctx->ctx_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
			dev_err(ctx->dev, "unable to map ctx\n");
			state->ctx_dma = 0;
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
		dpaa2_fl_set_final(in_fle, true);
		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
		dpaa2_fl_set_len(in_fle, to_hash);
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
		dpaa2_fl_set_len(out_fle, ctx->ctx_len);

		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
		req_ctx->cbk = ahash_done_ctx_dst;
		req_ctx->ctx = &req->base;
		req_ctx->edesc = edesc;

		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
		if (ret != -EINPROGRESS &&
		    !(ret == -EBUSY &&
		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			goto unmap_ctx;

		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
	qi_cache_free(edesc);
	return ret;
}

static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int buflen = state->buflen;
	int qm_sg_bytes, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	struct dpaa2_sg_entry *sg_table;
	int ret = -ENOMEM;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(ctx->dev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(ctx->dev, "unable to DMA map source\n");
			return ret;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(flags);
	if (!edesc) {
		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
		return ret;
	}

	edesc->src_nents = src_nents;
	qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
	sg_table = &edesc->sgt[0];

	ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
	if (ret)
		goto unmap;

	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);

	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
		dev_err(ctx->dev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap;
	}
	edesc->qm_sg_bytes = qm_sg_bytes;

	state->ctx_dma_len = digestsize;
	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
		dev_err(ctx->dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		ret = -ENOMEM;
		goto unmap;
	}

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[DIGEST];
	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
	req_ctx->cbk = ahash_done;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		goto unmap;

	return ret;
unmap:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);
	return ret;
}

static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int to_hash;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		struct dpaa2_sg_entry *sg_table;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(ctx->dev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/* allocate space for base edesc and link tables */
		edesc = qi_cache_zalloc(flags);
		if (!edesc) {
			dma_unmap_sg(ctx->dev, req->src, src_nents,
				     DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		sg_table = &edesc->sgt[0];

		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
		dpaa2_fl_set_final(in_fle, true);
		dpaa2_fl_set_len(in_fle, to_hash);

		if (mapped_nents > 1) {
			int qm_sg_bytes;

			sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
			qm_sg_bytes = pad_sg_nents(mapped_nents) *
				      sizeof(*sg_table);
			edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
							  qm_sg_bytes,
							  DMA_TO_DEVICE);
			if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
				dev_err(ctx->dev, "unable to map S/G table\n");
				ret = -ENOMEM;
				goto unmap_ctx;
			}
			edesc->qm_sg_bytes = qm_sg_bytes;
			dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
			dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
		} else {
			dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
			dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
		}

		state->ctx_dma_len = ctx->ctx_len;
		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
						ctx->ctx_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
			dev_err(ctx->dev, "unable to map ctx\n");
			state->ctx_dma = 0;
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
		dpaa2_fl_set_len(out_fle, ctx->ctx_len);

		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
		req_ctx->cbk = ahash_done_ctx_dst;
		req_ctx->ctx = &req->base;
		req_ctx->edesc = edesc;

		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
		if (ret != -EINPROGRESS &&
		    !(ret == -EBUSY && req->base.flags &
		      CRYPTO_TFM_REQ_MAY_BACKLOG))
			goto unmap_ctx;

		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(buf, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
	qi_cache_free(edesc);
	return ret;
}

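/*
 * finup() before any data has been sent to the engine is equivalent to a
 * one-shot digest: there is no intermediate context to restore, so the
 * request can be handed to ahash_digest() directly.
 */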
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->ctx_dma_len = 0;
	state->buf_dma = 0;
	state->buflen = 0;
	state->next_buflen = 0;

	return 0;
}

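/*
 * update()/finup()/final() dispatch through function pointers kept in the
 * per-request state: ahash_init() starts a request on the *_first
 * handlers, which switch it to the *_ctx handlers once an intermediate
 * context lives in CAAM, or to the *_no_ctx handlers while data is still
 * only buffered on the CPU side.
 */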
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_export_state *export = out;
	u8 *buf = state->buf;
	int len = state->buflen;

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
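
/*
 * Note: export()/import() serialize only the software-visible part of the
 * hash state - the running context snapshot, the buffered partial block
 * and the three dispatch pointers. Per-request DMA state is deliberately
 * not part of the exported blob.
 */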

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam-qi2",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam-qi2",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam-qi2",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam-qi2",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam-qi2",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam-qi2",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam-qi2",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam-qi2",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam-qi2",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam-qi2",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam-qi2",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam-qi2",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	}
};

struct caam_hash_alg {
	struct list_head entry;
	struct device *dev;
	int alg_type;
	bool is_hmac;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;
	int i;

	ctx->dev = caam_hash->dev;

	if (caam_hash->is_hmac) {
		ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
							  ARRAY_SIZE(ctx->key),
							  DMA_TO_DEVICE,
							  DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
			dev_err(ctx->dev, "unable to map key\n");
			return -ENOMEM;
		}
	}

	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
					DMA_BIDIRECTIONAL,
					DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma_addr)) {
		dev_err(ctx->dev, "unable to map shared descriptors\n");
		if (ctx->adata.key_dma)
			dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
					       ARRAY_SIZE(ctx->key),
					       DMA_TO_DEVICE,
					       DMA_ATTR_SKIP_CPU_SYNC);
		return -ENOMEM;
	}

	for (i = 0; i < HASH_NUM_OP; i++)
		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);

	/* copy descriptor header template value */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

	ctx->ctx_len = runninglen[(ctx->adata.algtype &
				   OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];
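	/*
	 * The MDHA ALGSEL codes for MD5/SHA-1/224/256/384/512 are
	 * consecutive, so the masked, shifted algorithm selector doubles as
	 * an index into runninglen[]; SHA-256, for instance, ends up with a
	 * running context of HASH_MSG_LEN + SHA256_DIGEST_SIZE bytes.
	 */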

	crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));

	/*
	 * For keyed hash algorithms shared descriptors
	 * will be created later in setkey() callback
	 */
	return caam_hash->is_hmac ? 0 : ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);

	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
			       DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
	if (ctx->adata.key_dma)
		dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
				       ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
				       DMA_ATTR_SKIP_CPU_SYNC);
}

static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
					     struct caam_hash_template *template,
					     bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc_obj(*t_alg);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		strscpy(alg->cra_name, template->hmac_name);
		strscpy(alg->cra_driver_name, template->hmac_driver_name);
		t_alg->is_hmac = true;
	} else {
		strscpy(alg->cra_name, template->name);
		strscpy(alg->cra_driver_name, template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
		t_alg->is_hmac = false;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding();
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;

	t_alg->alg_type = template->alg_type;
	t_alg->dev = dev;

	return t_alg;
}

static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;

	ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
	napi_schedule_irqoff(&ppriv->napi);
}

static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct dpaa2_io_notification_ctx *nctx;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err, i = 0, cpu;

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		ppriv->priv = priv;
		nctx = &ppriv->nctx;
		nctx->is_cdan = 0;
		nctx->id = ppriv->rsp_fqid;
		nctx->desired_cpu = cpu;
		nctx->cb = dpaa2_caam_fqdan_cb;

		/* Register notification callbacks */
		ppriv->dpio = dpaa2_io_service_select(cpu);
		err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
		if (unlikely(err)) {
			dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
			nctx->cb = NULL;
			/*
			 * If no affine DPIO for this core, there's probably
			 * none available for next cores either. Signal we want
			 * to retry later, in case the DPIO devices weren't
			 * probed yet.
			 */
			err = -EPROBE_DEFER;
			goto err;
		}

		ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
						     dev);
		if (unlikely(!ppriv->store)) {
			dev_err(dev, "dpaa2_io_store_create() failed\n");
			err = -ENOMEM;
			goto err;
		}

		if (++i == priv->num_pairs)
			break;
	}

	return 0;

err:
	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		if (!ppriv->nctx.cb)
			break;
		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
	}

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		if (!ppriv->store)
			break;
		dpaa2_io_store_destroy(ppriv->store);
	}

	return err;
}

static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int i = 0, cpu;

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
					    priv->dev);
		dpaa2_io_store_destroy(ppriv->store);

		if (++i == priv->num_pairs)
			return;
	}
}

static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
{
	struct dpseci_rx_queue_cfg rx_queue_cfg;
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err = 0, i = 0, cpu;

	/* Configure Rx queues */
	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);

		rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
				       DPSECI_QUEUE_OPT_USER_CTX;
		rx_queue_cfg.order_preservation_en = 0;
		rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
		/*
		 * Rx priority (WQ) doesn't really matter, since we use
		 * pull mode, i.e. volatile dequeues from specific FQs
		 */
		rx_queue_cfg.dest_cfg.priority = 0;
		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;

		err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &rx_queue_cfg);
		if (err) {
			dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
				err);
			return err;
		}

		if (++i == priv->num_pairs)
			break;
	}

	return err;
}

static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;

	if (!priv->cscn_mem)
		return;

	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
	kfree(priv->cscn_mem);
}

static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int i, err;

	if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
		err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
		if (err)
			dev_err(dev, "dpseci_reset() failed\n");
	}

	for_each_cpu(i, priv->clean_mask) {
		ppriv = per_cpu_ptr(priv->ppriv, i);
		free_netdev(ppriv->net_dev);
	}
	free_cpumask_var(priv->clean_mask);

	dpaa2_dpseci_congestion_free(priv);
	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
}

static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
				  const struct dpaa2_fd *fd)
{
	struct caam_request *req;
	u32 fd_err;

	if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
		dev_err(priv->dev, "Only Frame List FD format is supported!\n");
		return;
	}

	fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
	if (unlikely(fd_err))
		dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);

	/*
	 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
	 * in FD[ERR] or FD[FRC].
	 */
	req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
	dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
			 DMA_BIDIRECTIONAL);
	req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
}

static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
{
	int err;

	/* Retry while portal is busy */
	do {
		err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
					       ppriv->store);
	} while (err == -EBUSY);

	if (unlikely(err))
		dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n",
			err);

	return err;
}

static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
{
	struct dpaa2_dq *dq;
	int cleaned = 0, is_last;

	do {
		dq = dpaa2_io_store_next(ppriv->store, &is_last);
		if (unlikely(!dq)) {
			if (unlikely(!is_last)) {
				dev_dbg(ppriv->priv->dev,
					"FQ %d returned no valid frames\n",
					ppriv->rsp_fqid);
				/*
				 * MUST retry until we get some sort of
				 * valid response token (be it "empty dequeue"
				 * or a valid frame).
				 */
				continue;
			}
			break;
		}

		/* Process FD */
		dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
		cleaned++;
	} while (!is_last);

	return cleaned;
}

static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;
	struct dpaa2_caam_priv *priv;
	int err, cleaned = 0, store_cleaned;

	ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
	priv = ppriv->priv;

	if (unlikely(dpaa2_caam_pull_fq(ppriv)))
		return 0;

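	/*
	 * Each pull returns at most DPAA2_CAAM_STORE_SIZE frames, so bail
	 * out once the remaining budget could no longer absorb a full
	 * store; this keeps 'cleaned' from overshooting the NAPI budget.
	 */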
	do {
		store_cleaned = dpaa2_caam_store_consume(ppriv);
		cleaned += store_cleaned;

		if (store_cleaned == 0 ||
		    cleaned > budget - DPAA2_CAAM_STORE_SIZE)
			break;

		/* Try to dequeue some more */
		err = dpaa2_caam_pull_fq(ppriv);
		if (unlikely(err))
			break;
	} while (1);

	if (cleaned < budget) {
		napi_complete_done(napi, cleaned);
		err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
		if (unlikely(err))
			dev_err(priv->dev, "Notification rearm failed: %d\n",
				err);
	}

	return cleaned;
}

static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
					 u16 token)
{
	struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
	struct device *dev = priv->dev;
	unsigned int alignmask;
	int err;

	/*
	 * Congestion group feature supported starting with DPSECI API v5.1
	 * and only when object has been created with this capability.
	 */
	if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
	    !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
		return 0;

	alignmask = DPAA2_CSCN_ALIGN - 1;
	alignmask |= dma_get_cache_alignment() - 1;
	priv->cscn_mem = kzalloc(ALIGN(DPAA2_CSCN_SIZE, alignmask + 1),
				 GFP_KERNEL);
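	/*
	 * Illustrative: with a 64-byte cache line, alignmask becomes at
	 * least 0x3f and the CSCN area is rounded up to a cache-line
	 * multiple, so the DMA_FROM_DEVICE mapping below cannot share a
	 * cache line with unrelated data.
	 */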
	if (!priv->cscn_mem)
		return -ENOMEM;

	priv->cscn_dma = dma_map_single(dev, priv->cscn_mem,
					DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, priv->cscn_dma)) {
		dev_err(dev, "Error mapping CSCN memory area\n");
		err = -ENOMEM;
		goto err_dma_map;
	}

	cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
	cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
	cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
	cong_notif_cfg.message_ctx = (uintptr_t)priv;
	cong_notif_cfg.message_iova = priv->cscn_dma;
	cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
					   DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
					   DPSECI_CGN_MODE_COHERENT_WRITE;

	err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
						 &cong_notif_cfg);
	if (err) {
		dev_err(dev, "dpseci_set_congestion_notification failed\n");
		goto err_set_cong;
	}

	return 0;

err_set_cong:
	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
err_dma_map:
	kfree(priv->cscn_mem);

	return err;
}

static void free_dpaa2_pcpu_netdev(struct dpaa2_caam_priv *priv, const cpumask_t *cpus)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int i;

	for_each_cpu(i, cpus) {
		ppriv = per_cpu_ptr(priv->ppriv, i);
		free_netdev(ppriv->net_dev);
	}
}

static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
{
	struct device *dev = &ls_dev->dev;
	struct dpaa2_caam_priv *priv;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err, cpu;
	u8 i;

	err = -ENOMEM;
	priv = dev_get_drvdata(dev);

	if (!zalloc_cpumask_var(&priv->clean_mask, GFP_KERNEL))
		goto err_cpumask;

	priv->dev = dev;
	priv->dpsec_id = ls_dev->obj_desc.id;

	/* Get a handle for the DPSECI this interface is associated with */
	err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpseci_open() failed: %d\n", err);
		goto err_open;
	}

	err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
				     &priv->minor_ver);
	if (err) {
		dev_err(dev, "dpseci_get_api_version() failed\n");
		goto err_get_vers;
	}

	dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);

	if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
		err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
		if (err) {
			dev_err(dev, "dpseci_reset() failed\n");
			goto err_get_vers;
		}
	}

	err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
				    &priv->dpseci_attr);
	if (err) {
		dev_err(dev, "dpseci_get_attributes() failed\n");
		goto err_get_vers;
	}

	err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
				  &priv->sec_attr);
	if (err) {
		dev_err(dev, "dpseci_get_sec_attr() failed\n");
		goto err_get_vers;
	}

	err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "setup_congestion() failed\n");
		goto err_get_vers;
	}

	priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
			      priv->dpseci_attr.num_tx_queues);
	if (priv->num_pairs > num_online_cpus()) {
		dev_warn(dev, "%d queues won't be used\n",
			 priv->num_pairs - num_online_cpus());
		priv->num_pairs = num_online_cpus();
	}

	for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
		err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &priv->rx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpseci_get_rx_queue() failed\n");
			goto err_get_rx_queue;
		}
	}

	for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
		err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &priv->tx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpseci_get_tx_queue() failed\n");
			goto err_get_rx_queue;
		}
	}

	i = 0;
	for_each_online_cpu(cpu) {
		u8 j;

		j = i % priv->num_pairs;

		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		ppriv->req_fqid = priv->tx_queue_attr[j].fqid;

		/*
		 * Allow all cores to enqueue, while only some of them
		 * will take part in dequeuing.
		 */
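		/*
		 * Illustrative: with eight online cores and num_pairs = 2,
		 * every core is assigned a request FQ (reusing the two
		 * pairs round-robin), but only the first two cores set up
		 * a response queue, store and NAPI instance below.
		 */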
		if (++i > priv->num_pairs)
			continue;

		ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
		ppriv->prio = j;

		dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
			priv->rx_queue_attr[j].fqid,
			priv->tx_queue_attr[j].fqid);

		ppriv->net_dev = alloc_netdev_dummy(0);
		if (!ppriv->net_dev) {
			err = -ENOMEM;
			goto err_alloc_netdev;
		}
		cpumask_set_cpu(cpu, priv->clean_mask);
		ppriv->net_dev->dev = *dev;

		netif_napi_add_tx_weight(ppriv->net_dev, &ppriv->napi,
					 dpaa2_dpseci_poll,
					 DPAA2_CAAM_NAPI_WEIGHT);
	}

	return 0;

err_alloc_netdev:
	free_dpaa2_pcpu_netdev(priv, priv->clean_mask);
err_get_rx_queue:
	dpaa2_dpseci_congestion_free(priv);
err_get_vers:
	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
err_open:
	free_cpumask_var(priv->clean_mask);
err_cpumask:
	return err;
}

static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int i;

	for (i = 0; i < priv->num_pairs; i++) {
		ppriv = per_cpu_ptr(priv->ppriv, i);
		napi_enable(&ppriv->napi);
	}

	return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
}

static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	int i, err = 0, enabled;

	err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpseci_disable() failed\n");
		return err;
	}

	err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
	if (err) {
		dev_err(dev, "dpseci_is_enabled() failed\n");
		return err;
	}

	dev_dbg(dev, "disable: %s\n", str_false_true(enabled));

	for (i = 0; i < priv->num_pairs; i++) {
		ppriv = per_cpu_ptr(priv->ppriv, i);
		napi_disable(&ppriv->napi);
		netif_napi_del(&ppriv->napi);
	}

	return 0;
}

static struct list_head hash_list;

static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
{
	struct device *dev;
	struct dpaa2_caam_priv *priv;
	int i, err = 0;
	bool registered = false;

	/*
	 * There is no way to get CAAM endianness - there is no direct register
	 * space access and MC f/w does not provide this attribute.
	 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
	 * property.
	 */
	caam_little_end = true;

	caam_imx = false;

	dev = &dpseci_dev->dev;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);

	priv->domain = iommu_get_domain_for_dev(dev);

	qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
				     0, 0, NULL);
	if (!qi_cache) {
		dev_err(dev, "Can't allocate SEC cache\n");
		return -ENOMEM;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
	if (err) {
		dev_err(dev, "dma_set_mask_and_coherent() failed\n");
		goto err_dma_mask;
	}

	/* Obtain a MC portal */
	err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");

		goto err_dma_mask;
	}

	priv->ppriv = alloc_percpu(*priv->ppriv);
	if (!priv->ppriv) {
		dev_err(dev, "alloc_percpu() failed\n");
		err = -ENOMEM;
		goto err_alloc_ppriv;
	}

	/* DPSECI initialization */
	err = dpaa2_dpseci_setup(dpseci_dev);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_setup() failed\n");
		goto err_dpseci_setup;
	}

	/* DPIO */
	err = dpaa2_dpseci_dpio_setup(priv);
	if (err) {
		dev_err_probe(dev, err, "dpaa2_dpseci_dpio_setup() failed\n");
		goto err_dpio_setup;
	}

	/* DPSECI binding to DPIO */
	err = dpaa2_dpseci_bind(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_bind() failed\n");
		goto err_bind;
	}

	/* DPSECI enable */
	err = dpaa2_dpseci_enable(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_enable() failed\n");
		goto err_bind;
	}

	dpaa2_dpseci_debugfs_init(priv);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;
		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!priv->sec_attr.des_acc_num &&
		    (alg_sel == OP_ALG_ALGSEL_3DES ||
		     alg_sel == OP_ALG_ALGSEL_DES))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!priv->sec_attr.aes_acc_num &&
		    alg_sel == OP_ALG_ALGSEL_AES)
			continue;

		/* Skip CHACHA20 algorithms if not supported by device */
		if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
		    !priv->sec_attr.ccha_acc_num)
			continue;

		t_alg->caam.dev = dev;
		caam_skcipher_alg_init(t_alg);

		err = crypto_register_skcipher(&t_alg->skcipher);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->skcipher.base.cra_driver_name, err);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!priv->sec_attr.des_acc_num &&
		    (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
		     c1_alg_sel == OP_ALG_ALGSEL_DES))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!priv->sec_attr.aes_acc_num &&
		    c1_alg_sel == OP_ALG_ALGSEL_AES)
			continue;

		/* Skip CHACHA20 algorithms if not supported by device */
		if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
		    !priv->sec_attr.ccha_acc_num)
			continue;

		/* Skip POLY1305 algorithms if not supported by device */
		if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
		    !priv->sec_attr.ptha_acc_num)
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD not supported by device.
		 */
		if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
		    !priv->sec_attr.md_acc_num)
			continue;

		t_alg->caam.dev = dev;
		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->aead.base.cra_driver_name, err);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}
	if (registered)
		dev_info(dev, "algorithms registered in /proc/crypto\n");

	/* register hash algorithms the device supports */
	INIT_LIST_HEAD(&hash_list);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!priv->sec_attr.md_acc_num)
		return 0;

	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* register hmac version */
		t_alg = caam_hash_alloc(dev, alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(dev, "%s hash alg allocation failed: %d\n",
				 alg->hmac_driver_name, err);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name,
				 err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}

		/* register unkeyed version */
		t_alg = caam_hash_alloc(dev, alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(dev, "%s alg allocation failed: %d\n",
				 alg->driver_name, err);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name,
				 err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}
	}
	if (!list_empty(&hash_list))
		dev_info(dev, "hash algorithms registered in /proc/crypto\n");

	return err;

err_bind:
	dpaa2_dpseci_dpio_free(priv);
err_dpio_setup:
	dpaa2_dpseci_free(priv);
err_dpseci_setup:
	free_percpu(priv->ppriv);
err_alloc_ppriv:
	fsl_mc_portal_free(priv->mc_io);
err_dma_mask:
	kmem_cache_destroy(qi_cache);

	return err;
}

static void __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
{
	struct device *dev;
	struct dpaa2_caam_priv *priv;
	int i;

	dev = &ls_dev->dev;
	priv = dev_get_drvdata(dev);

	dpaa2_dpseci_debugfs_exit(priv);

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;

		if (t_alg->registered)
			crypto_unregister_skcipher(&t_alg->skcipher);
	}

	if (hash_list.next) {
		struct caam_hash_alg *t_hash_alg, *p;

		list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
			list_del(&t_hash_alg->entry);
			kfree(t_hash_alg);
		}
	}

	dpaa2_dpseci_disable(priv);
	dpaa2_dpseci_dpio_free(priv);
	dpaa2_dpseci_free(priv);
	free_percpu(priv->ppriv);
	fsl_mc_portal_free(priv->mc_io);
	kmem_cache_destroy(qi_cache);
}

int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
{
	struct dpaa2_fd fd;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err = 0, i;

	if (IS_ERR(req))
		return PTR_ERR(req);

	if (priv->cscn_mem) {
		dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
					DPAA2_CSCN_SIZE,
					DMA_FROM_DEVICE);
		if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) {
			dev_dbg_ratelimited(dev, "Dropping request\n");
			return -EBUSY;
		}
	}

	dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);

	req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, req->fd_flt_dma)) {
		dev_err(dev, "DMA mapping error for QI enqueue request\n");
		/* Nothing was mapped, so don't fall through to the unmap */
		return -EIO;
	}

	memset(&fd, 0, sizeof(fd));
	dpaa2_fd_set_format(&fd, dpaa2_fd_list);
	dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
	dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
	dpaa2_fd_set_flc(&fd, req->flc_dma);

	ppriv = raw_cpu_ptr(priv->ppriv);
	for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
		err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
						  &fd);
		if (err != -EBUSY)
			break;

		cpu_relax();
	}
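	/*
	 * The retry bound (twice the number of Tx queues) is a heuristic:
	 * -EBUSY from the QBMan portal is normally transient, so spin
	 * briefly rather than failing the request outright.
	 */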

	if (unlikely(err)) {
		dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
		goto err_out;
	}

	return -EINPROGRESS;

err_out:
	dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
			 DMA_BIDIRECTIONAL);
	return -EIO;
}
EXPORT_SYMBOL(dpaa2_caam_enqueue);
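
/*
 * Usage sketch for dpaa2_caam_enqueue() (mirrors the callers above): fill
 * req->fd_flt[0]/[1] with the output/input frame list entries, set
 * req->flc, req->flc_dma, req->cbk, req->ctx and req->edesc, then enqueue.
 * -EINPROGRESS means the frame was accepted and the callback will run from
 * the response path; -EBUSY under congestion may be backlogged by callers
 * that allow CRYPTO_TFM_REQ_MAY_BACKLOG.
 */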

static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpseci",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table);

static struct fsl_mc_driver dpaa2_caam_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_caam_probe,
	.remove = dpaa2_caam_remove,
	.match_id_table = dpaa2_caam_match_id_table
};

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");

module_fsl_mc_driver(dpaa2_caam_driver);
