// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2015-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2019 NXP
 */

#include "compat.h"
#include "regs.h"
#include "caamalg_qi2.h"
#include "dpseci_cmd.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "sg_sw_qm2.h"
#include "key_gen.h"
#include "caamalg_desc.h"
#include "caamhash_desc.h"
#include "dpseci-debugfs.h"
#include <linux/dma-mapping.h>
#include <linux/fsl/mc.h>
#include <linux/kernel.h>
#include <linux/string_choices.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
#include <crypto/xts.h>
#include <linux/unaligned.h>

#define CAAM_CRA_PRIORITY	2000

/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce size and max split key size */
#define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
				 SHA512_DIGEST_SIZE * 2)

/*
 * This is a cache of buffers, from which the users of the CAAM QI driver
 * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *       being processed. This can be added by the dpaa2-eth driver. This would
 *       pose a problem for userspace application processing, which cannot
 *       know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
 */
static struct kmem_cache *qi_cache;

struct caam_alg_entry {
	struct device *dev;
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
	bool nodkp;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};

/**
 * struct caam_ctx - per-session context
 * @flc: Flow Contexts array
 * @key: [authentication key], encryption key
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @key_dma: I/O virtual address of the key
 * @dir: DMA direction for mapping key and Flow Contexts
 * @dev: dpseci device
 * @adata: authentication algorithm details
 * @cdata: encryption algorithm details
 * @authsize: authentication tag (a.k.a. ICV / MAC) size
 * @xts_key_fallback: true if fallback tfm needs to be used due
 *		      to unsupported xts key lengths
 * @fallback: xts fallback tfm
 */
struct caam_ctx {
	struct caam_flc flc[NUM_OP];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t flc_dma[NUM_OP];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *dev;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	bool xts_key_fallback;
	struct crypto_skcipher *fallback;
};

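/*
 * dpaa2_caam_iova_to_virt - translate an I/O virtual address received from
 * the DPSECI object into a CPU virtual address, going through the IOMMU
 * domain when one is attached, or treating the IOVA as a physical address
 * otherwise.
 */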
static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
				     dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
				   iova_addr;

	return phys_to_virt(phys_addr);
}

/*
 * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
 *
 * Allocate data on the hotpath. Instead of using kzalloc, one can use the
 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
 * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
 * hosting 16 SG entries.
 *
 * @flags - flags that would be used for the equivalent kmalloc(..) call
 *
 * Returns a pointer to a retrieved buffer on success or NULL on failure.
 */
static inline void *qi_cache_zalloc(gfp_t flags)
{
	return kmem_cache_zalloc(qi_cache, flags);
}

/*
 * qi_cache_free - Frees buffers allocated from CAAM-QI cache
 *
 * @obj - buffer previously allocated by qi_cache_zalloc
 *
 * No checking is being done, the call is a passthrough call to
 * kmem_cache_free(...)
 */
static inline void qi_cache_free(void *obj)
{
	kmem_cache_free(qi_cache, obj);
}

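/*
 * to_caam_req - retrieve the caam_request stored in the crypto API request
 * context, based on the type of transform the request belongs to.
 */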
static struct caam_request *to_caam_req(struct crypto_async_request *areq)
{
	switch (crypto_tfm_alg_type(areq->tfm)) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return skcipher_request_ctx_dma(skcipher_request_cast(areq));
	case CRYPTO_ALG_TYPE_AEAD:
		return aead_request_ctx_dma(
			container_of(areq, struct aead_request, base));
	case CRYPTO_ALG_TYPE_AHASH:
		return ahash_request_ctx_dma(ahash_request_cast(areq));
	default:
		return ERR_PTR(-EINVAL);
	}
}

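/*
 * caam_unmap - DMA-unmap the resources used by a request: the source and
 * destination scatterlists, the IV buffer and the hardware S/G table.
 */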
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);

	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

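/*
 * aead_set_sh_desc - (re)build the AEAD encrypt and decrypt shared
 * descriptors and sync them to the device. Called whenever the key or the
 * authentication tag size changes; a no-op until both have been set.
 */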
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	u32 *desc;
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
	 * in invalid opcodes (last bytes of user key) in the resulting
	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
	 * addresses are needed.
	 */
	ctx->adata.key_virt = ctx->key;
	ctx->adata.key_dma = ctx->key_dma;

	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
						 DESC_QI_AEAD_ENC_LEN) +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;

	if (alg->caam.geniv)
		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
					  ivsize, ctx->authsize, is_rfc3686,
					  nonce, ctx1_iv_off, true,
					  priv->sec_attr.era);
	else
		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
				       ivsize, ctx->authsize, is_rfc3686, nonce,
				       ctx1_iv_off, true, priv->sec_attr.era);

	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

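/*
 * aead_setkey - split the user-supplied authenc() key into its
 * authentication and encryption parts, copy both into the session key
 * buffer and rebuild the shared descriptors.
 */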
static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->adata.keylen = keys.authkeylen;
	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
					      OP_ALG_ALGSEL_MASK);

	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

	ctx->cdata.keylen = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_set_sh_desc(aead);
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto out;

	err = -EINVAL;
	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
		goto out;

	err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
	      aead_setkey(aead, key, keylen);

out:
	memzero_explicit(&keys, sizeof(keys));
	return err;
}

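/*
 * aead_edesc_alloc - allocate and fill the extended descriptor for an AEAD
 * request: map the source/destination scatterlists and the IV, build the
 * hardware S/G table and set up the input/output frame list entries.
 */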
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_request *req_ctx = aead_request_ctx_dma(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	int src_len, dst_len = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct dpaa2_sg_entry *sg_table;

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (unlikely(req->dst != req->src)) {
		src_len = req->assoclen + req->cryptlen;
		dst_len = src_len + (encrypt ? authsize : (-authsize));

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, dst_len);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				dst_len);
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
						      DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(dev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(dev, "unable to map destination\n");
				dma_unmap_sg(dev, req->src, src_nents,
					     DMA_TO_DEVICE);
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	} else {
		src_len = req->assoclen + req->cryptlen +
			  (encrypt ? authsize : 0);

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries. Logic:
	 * if (src != dst && output S/G)
	 *      pad output S/G, if needed
	 * else if (src == dst && S/G)
	 *      overlapping S/Gs; pad one of them
	 * else if (input S/G) ...
	 *      pad input S/G, if needed
	 */
	qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
	if (mapped_dst_nents > 1)
		qm_sg_nents += pad_sg_nents(mapped_dst_nents);
	else if ((req->src == req->dst) && (mapped_src_nents > 1))
		qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
				  1 + !!ivsize +
				  pad_sg_nents(mapped_src_nents));
	else
		qm_sg_nents = pad_sg_nents(qm_sg_nents);

	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_nents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_nents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, iv_dma)) {
			dev_err(dev, "unable to map IV\n");
			caam_unmap(dev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, DMA_NONE, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;

	if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	    OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
		/*
		 * The associated data comes already with the IV but we need
		 * to skip it when we authenticate or encrypt...
		 */
		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
	else
		edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
		dev_err(dev, "unable to map assoclen\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);

	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
	dpaa2_fl_set_len(in_fle, in_len);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1) {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
		} else {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
					  (1 + !!ivsize) * sizeof(*sg_table));
		}
	} else if (!mapped_dst_nents) {
		/*
		 * crypto engine requires the output entry to be present when
		 * "frame list" FD is used.
		 * Since engine does not support FMT=2'b11 (unused entry type),
		 * leaving out_fle zeroized is the best option.
		 */
		goto skip_out_fle;
	} else if (mapped_dst_nents == 1) {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
	} else {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
				  sizeof(*sg_table));
	}

	dpaa2_fl_set_len(out_fle, out_len);

skip_out_fle:
	return edesc;
}

static int chachapoly_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	u32 *desc;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, true, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, false, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int chachapoly_setauthsize(struct crypto_aead *aead,
				  unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);

	if (authsize != POLY1305_DIGEST_SIZE)
		return -EINVAL;

	ctx->authsize = authsize;
	return chachapoly_set_sh_desc(aead);
}

static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
			     unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;

	if (keylen != CHACHA_KEY_SIZE + saltlen)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->cdata.key_virt = ctx->key;
	ctx->cdata.keylen = keylen - saltlen;

	return chachapoly_set_sh_desc(aead);
}

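/*
 * gcm_set_sh_desc - (re)build the GCM encrypt and decrypt shared
 * descriptors, inlining the key when the descriptor fits into the 64-word
 * descriptor buffer and referencing it by DMA address otherwise.
 */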
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
	int err;

	err = crypto_gcm_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	return gcm_set_sh_desc(aead);
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
	int err;

	err = crypto_rfc4106_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4106_set_sh_desc(aead);
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);

	if (authsize != 16)
		return -EINVAL;

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4543_set_sh_desc(aead);
}

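/*
 * skcipher_setkey - common skcipher key setup: record the key (inlined in
 * the descriptor) and build the encrypt/decrypt shared descriptors.
 * @ctx1_iv_off is the offset at which the IV is loaded in the CONTEXT1
 * register (non-zero for CTR and RFC3686 modes).
 */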
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen, const u32 ctx1_iv_off)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher),
			     struct caam_skcipher_alg, skcipher);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 *desc;
	const bool is_rfc3686 = alg->caam.rfc3686;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
				   const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
	keylen -= CTR_RFC3686_NONCE_SIZE;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	ctx1_iv_off = 16;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
				    const u8 *key, unsigned int keylen)
{
	if (keylen != CHACHA_KEY_SIZE)
		return -EINVAL;

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

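/*
 * xts_skcipher_setkey - verify the XTS key and build the encrypt/decrypt
 * shared descriptors. The key is also programmed into the software
 * fallback tfm whenever it may be needed, i.e. for key lengths the
 * hardware does not support or on SEC era <= 8 parts.
 */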
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	u32 *desc;
	int err;

	err = xts_verify_key(skcipher, key, keylen);
	if (err) {
		dev_dbg(dev, "key size mismatch\n");
		return err;
	}

	if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
		ctx->xts_key_fallback = true;

	if (priv->sec_attr.era <= 8 || ctx->xts_key_fallback) {
		err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
		if (err)
			return err;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts_skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* xts_skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

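/*
 * skcipher_edesc_alloc - allocate and fill the extended descriptor for an
 * skcipher request: map the scatterlists and the IV, build the
 * [IV, src][dst, IV] hardware S/G tables and set up the frame list entries.
 */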
static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_request *req_ctx = skcipher_request_ctx_dma(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct dpaa2_sg_entry *sg_table;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->dst != req->src)) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(dev, "unable to map destination\n");
			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	/*
	 * Input, output HW S/G tables: [IV, src][dst, IV]
	 * IV entries point to the same buffer
	 * If src == dst, S/G entries are reused (S/G tables overlap)
	 *
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries.
	 */
	if (req->src != req->dst)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
	else
		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);

	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	memcpy(iv, req->iv, ivsize);

	iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, iv_dma)) {
		dev_err(dev, "unable to map IV\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);

	if (req->src != req->dst)
		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);

	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
			 ivsize, 0);

	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
	dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);

	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);

	dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);

	if (req->src == req->dst)
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
				  sizeof(*sg_table));
	else
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
				  sizeof(*sg_table));

	return edesc;
}

static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
}

static void aead_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}

static void aead_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct caam_request *caam_req = aead_request_ctx_dma(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = aead_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct caam_request *caam_req = aead_request_ctx_dma(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = aead_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
}

static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}

static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}

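/*
 * xts_skcipher_ivsize - return true if the second half of the XTS IV is
 * non-zero; used to decide whether the request must go to the fallback tfm
 * on SEC era <= 8 hardware.
 */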
static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

	return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
}

static int skcipher_encrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx_dma(req);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	int ret;

	/*
	 * XTS is expected to return an error even for input length = 0
	 * Note that the case input length < block size will be caught during
	 * HW offloading and return an error.
	 */
	if (!req->cryptlen && !ctx->fallback)
		return 0;

	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
			      ctx->xts_key_fallback)) {
		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&caam_req->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		return crypto_skcipher_encrypt(&caam_req->fallback_req);
	}

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = skcipher_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int skcipher_decrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx_dma(req);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	int ret;

	/*
	 * XTS is expected to return an error even for input length = 0
	 * Note that the case input length < block size will be caught during
	 * HW offloading and return an error.
	 */
	if (!req->cryptlen && !ctx->fallback)
		return 0;

	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
			      ctx->xts_key_fallback)) {
		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&caam_req->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		return crypto_skcipher_decrypt(&caam_req->fallback_req);
	}

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = skcipher_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

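/*
 * caam_cra_init - common per-session setup: record the algorithm types,
 * DMA-map the flow contexts together with the key buffer and compute the
 * per-operation DMA addresses.
 */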
static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			 bool uses_dkp)
{
	dma_addr_t dma_addr;
	int i;

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	ctx->dev = caam->dev;
	ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
					offsetof(struct caam_ctx, flc_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma_addr)) {
		dev_err(ctx->dev, "unable to map key, shared descriptors\n");
		return -ENOMEM;
	}

	for (i = 0; i < NUM_OP; i++)
		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
	ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);

	return 0;
}

static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct caam_skcipher_alg *caam_alg =
		container_of(alg, typeof(*caam_alg), skcipher);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
	int ret = 0;

	if (alg_aai == OP_ALG_AAI_XTS) {
		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
		struct crypto_skcipher *fallback;

		fallback = crypto_alloc_skcipher(tfm_name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(fallback)) {
			dev_err(caam_alg->caam.dev,
				"Failed to allocate %s fallback: %ld\n",
				tfm_name, PTR_ERR(fallback));
			return PTR_ERR(fallback);
		}

		ctx->fallback = fallback;
		crypto_skcipher_set_reqsize_dma(
			tfm, sizeof(struct caam_request) +
			     crypto_skcipher_reqsize(fallback));
	} else {
		crypto_skcipher_set_reqsize_dma(tfm,
						sizeof(struct caam_request));
	}

	ret = caam_cra_init(ctx, &caam_alg->caam, false);
	if (ret && ctx->fallback)
		crypto_free_skcipher(ctx->fallback);

	return ret;
}

static int caam_cra_init_aead(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);

	crypto_aead_set_reqsize_dma(tfm, sizeof(struct caam_request));
	return caam_cra_init(crypto_aead_ctx_dma(tfm), &caam_alg->caam,
			     !caam_alg->caam.nodkp);
}

static void caam_exit_common(struct caam_ctx *ctx)
{
	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
			       offsetof(struct caam_ctx, flc_dma), ctx->dir,
			       DMA_ATTR_SKIP_CPU_SYNC);
}

static void caam_cra_exit(struct crypto_skcipher *tfm)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

	if (ctx->fallback)
		crypto_free_skcipher(ctx->fallback);
	caam_exit_common(ctx);
}

static void caam_cra_exit_aead(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx_dma(tfm));
}

1675 static struct caam_skcipher_alg driver_algs[] = {
1676 {
1677 .skcipher = {
1678 .base = {
1679 .cra_name = "cbc(aes)",
1680 .cra_driver_name = "cbc-aes-caam-qi2",
1681 .cra_blocksize = AES_BLOCK_SIZE,
1682 },
1683 .setkey = aes_skcipher_setkey,
1684 .encrypt = skcipher_encrypt,
1685 .decrypt = skcipher_decrypt,
1686 .min_keysize = AES_MIN_KEY_SIZE,
1687 .max_keysize = AES_MAX_KEY_SIZE,
1688 .ivsize = AES_BLOCK_SIZE,
1689 },
1690 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1691 },
1692 {
1693 .skcipher = {
1694 .base = {
1695 .cra_name = "cbc(des3_ede)",
1696 .cra_driver_name = "cbc-3des-caam-qi2",
1697 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1698 },
1699 .setkey = des3_skcipher_setkey,
1700 .encrypt = skcipher_encrypt,
1701 .decrypt = skcipher_decrypt,
1702 .min_keysize = DES3_EDE_KEY_SIZE,
1703 .max_keysize = DES3_EDE_KEY_SIZE,
1704 .ivsize = DES3_EDE_BLOCK_SIZE,
1705 },
1706 .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1707 },
1708 {
1709 .skcipher = {
1710 .base = {
1711 .cra_name = "cbc(des)",
1712 .cra_driver_name = "cbc-des-caam-qi2",
1713 .cra_blocksize = DES_BLOCK_SIZE,
1714 },
1715 .setkey = des_skcipher_setkey,
1716 .encrypt = skcipher_encrypt,
1717 .decrypt = skcipher_decrypt,
1718 .min_keysize = DES_KEY_SIZE,
1719 .max_keysize = DES_KEY_SIZE,
1720 .ivsize = DES_BLOCK_SIZE,
1721 },
1722 .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1723 },
1724 {
1725 .skcipher = {
1726 .base = {
1727 .cra_name = "ctr(aes)",
1728 .cra_driver_name = "ctr-aes-caam-qi2",
1729 .cra_blocksize = 1,
1730 },
1731 .setkey = ctr_skcipher_setkey,
1732 .encrypt = skcipher_encrypt,
1733 .decrypt = skcipher_decrypt,
1734 .min_keysize = AES_MIN_KEY_SIZE,
1735 .max_keysize = AES_MAX_KEY_SIZE,
1736 .ivsize = AES_BLOCK_SIZE,
1737 .chunksize = AES_BLOCK_SIZE,
1738 },
1739 .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1740 OP_ALG_AAI_CTR_MOD128,
1741 },
1742 {
1743 .skcipher = {
1744 .base = {
1745 .cra_name = "rfc3686(ctr(aes))",
1746 .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
1747 .cra_blocksize = 1,
1748 },
1749 .setkey = rfc3686_skcipher_setkey,
1750 .encrypt = skcipher_encrypt,
1751 .decrypt = skcipher_decrypt,
1752 .min_keysize = AES_MIN_KEY_SIZE +
1753 CTR_RFC3686_NONCE_SIZE,
1754 .max_keysize = AES_MAX_KEY_SIZE +
1755 CTR_RFC3686_NONCE_SIZE,
1756 .ivsize = CTR_RFC3686_IV_SIZE,
1757 .chunksize = AES_BLOCK_SIZE,
1758 },
1759 .caam = {
1760 .class1_alg_type = OP_ALG_ALGSEL_AES |
1761 OP_ALG_AAI_CTR_MOD128,
1762 .rfc3686 = true,
1763 },
1764 },
1765 {
1766 .skcipher = {
1767 .base = {
1768 .cra_name = "xts(aes)",
1769 .cra_driver_name = "xts-aes-caam-qi2",
1770 .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
1771 .cra_blocksize = AES_BLOCK_SIZE,
1772 },
1773 .setkey = xts_skcipher_setkey,
1774 .encrypt = skcipher_encrypt,
1775 .decrypt = skcipher_decrypt,
1776 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1777 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1778 .ivsize = AES_BLOCK_SIZE,
1779 },
1780 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1781 },
1782 {
1783 .skcipher = {
1784 .base = {
1785 .cra_name = "chacha20",
1786 .cra_driver_name = "chacha20-caam-qi2",
1787 .cra_blocksize = 1,
1788 },
1789 .setkey = chacha20_skcipher_setkey,
1790 .encrypt = skcipher_encrypt,
1791 .decrypt = skcipher_decrypt,
1792 .min_keysize = CHACHA_KEY_SIZE,
1793 .max_keysize = CHACHA_KEY_SIZE,
1794 .ivsize = CHACHA_IV_SIZE,
1795 },
1796 .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
1797 },
1798 };
1799
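/* AEAD algorithm templates; finalized by caam_aead_alg_init() before registration */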
1800 static struct caam_aead_alg driver_aeads[] = {
1801 {
1802 .aead = {
1803 .base = {
1804 .cra_name = "rfc4106(gcm(aes))",
1805 .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1806 .cra_blocksize = 1,
1807 },
1808 .setkey = rfc4106_setkey,
1809 .setauthsize = rfc4106_setauthsize,
1810 .encrypt = ipsec_gcm_encrypt,
1811 .decrypt = ipsec_gcm_decrypt,
1812 .ivsize = 8,
1813 .maxauthsize = AES_BLOCK_SIZE,
1814 },
1815 .caam = {
1816 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1817 .nodkp = true,
1818 },
1819 },
1820 {
1821 .aead = {
1822 .base = {
1823 .cra_name = "rfc4543(gcm(aes))",
1824 .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1825 .cra_blocksize = 1,
1826 },
1827 .setkey = rfc4543_setkey,
1828 .setauthsize = rfc4543_setauthsize,
1829 .encrypt = ipsec_gcm_encrypt,
1830 .decrypt = ipsec_gcm_decrypt,
1831 .ivsize = 8,
1832 .maxauthsize = AES_BLOCK_SIZE,
1833 },
1834 .caam = {
1835 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1836 .nodkp = true,
1837 },
1838 },
1839 /* Galois Counter Mode */
1840 {
1841 .aead = {
1842 .base = {
1843 .cra_name = "gcm(aes)",
1844 .cra_driver_name = "gcm-aes-caam-qi2",
1845 .cra_blocksize = 1,
1846 },
1847 .setkey = gcm_setkey,
1848 .setauthsize = gcm_setauthsize,
1849 .encrypt = aead_encrypt,
1850 .decrypt = aead_decrypt,
1851 .ivsize = 12,
1852 .maxauthsize = AES_BLOCK_SIZE,
1853 },
1854 .caam = {
1855 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1856 .nodkp = true,
1857 }
1858 },
1859 /* single-pass ipsec_esp descriptor */
1860 {
1861 .aead = {
1862 .base = {
1863 .cra_name = "authenc(hmac(md5),cbc(aes))",
1864 .cra_driver_name = "authenc-hmac-md5-"
1865 "cbc-aes-caam-qi2",
1866 .cra_blocksize = AES_BLOCK_SIZE,
1867 },
1868 .setkey = aead_setkey,
1869 .setauthsize = aead_setauthsize,
1870 .encrypt = aead_encrypt,
1871 .decrypt = aead_decrypt,
1872 .ivsize = AES_BLOCK_SIZE,
1873 .maxauthsize = MD5_DIGEST_SIZE,
1874 },
1875 .caam = {
1876 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1877 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1878 OP_ALG_AAI_HMAC_PRECOMP,
1879 }
1880 },
1881 {
1882 .aead = {
1883 .base = {
1884 .cra_name = "echainiv(authenc(hmac(md5),"
1885 "cbc(aes)))",
1886 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1887 "cbc-aes-caam-qi2",
1888 .cra_blocksize = AES_BLOCK_SIZE,
1889 },
1890 .setkey = aead_setkey,
1891 .setauthsize = aead_setauthsize,
1892 .encrypt = aead_encrypt,
1893 .decrypt = aead_decrypt,
1894 .ivsize = AES_BLOCK_SIZE,
1895 .maxauthsize = MD5_DIGEST_SIZE,
1896 },
1897 .caam = {
1898 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1899 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1900 OP_ALG_AAI_HMAC_PRECOMP,
1901 .geniv = true,
1902 }
1903 },
1904 {
1905 .aead = {
1906 .base = {
1907 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1908 .cra_driver_name = "authenc-hmac-sha1-"
1909 "cbc-aes-caam-qi2",
1910 .cra_blocksize = AES_BLOCK_SIZE,
1911 },
1912 .setkey = aead_setkey,
1913 .setauthsize = aead_setauthsize,
1914 .encrypt = aead_encrypt,
1915 .decrypt = aead_decrypt,
1916 .ivsize = AES_BLOCK_SIZE,
1917 .maxauthsize = SHA1_DIGEST_SIZE,
1918 },
1919 .caam = {
1920 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1921 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1922 OP_ALG_AAI_HMAC_PRECOMP,
1923 }
1924 },
1925 {
1926 .aead = {
1927 .base = {
1928 .cra_name = "echainiv(authenc(hmac(sha1),"
1929 "cbc(aes)))",
1930 .cra_driver_name = "echainiv-authenc-"
1931 "hmac-sha1-cbc-aes-caam-qi2",
1932 .cra_blocksize = AES_BLOCK_SIZE,
1933 },
1934 .setkey = aead_setkey,
1935 .setauthsize = aead_setauthsize,
1936 .encrypt = aead_encrypt,
1937 .decrypt = aead_decrypt,
1938 .ivsize = AES_BLOCK_SIZE,
1939 .maxauthsize = SHA1_DIGEST_SIZE,
1940 },
1941 .caam = {
1942 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1943 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1944 OP_ALG_AAI_HMAC_PRECOMP,
1945 .geniv = true,
1946 },
1947 },
1948 {
1949 .aead = {
1950 .base = {
1951 .cra_name = "authenc(hmac(sha224),cbc(aes))",
1952 .cra_driver_name = "authenc-hmac-sha224-"
1953 "cbc-aes-caam-qi2",
1954 .cra_blocksize = AES_BLOCK_SIZE,
1955 },
1956 .setkey = aead_setkey,
1957 .setauthsize = aead_setauthsize,
1958 .encrypt = aead_encrypt,
1959 .decrypt = aead_decrypt,
1960 .ivsize = AES_BLOCK_SIZE,
1961 .maxauthsize = SHA224_DIGEST_SIZE,
1962 },
1963 .caam = {
1964 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1965 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1966 OP_ALG_AAI_HMAC_PRECOMP,
1967 }
1968 },
1969 {
1970 .aead = {
1971 .base = {
1972 .cra_name = "echainiv(authenc(hmac(sha224),"
1973 "cbc(aes)))",
1974 .cra_driver_name = "echainiv-authenc-"
1975 "hmac-sha224-cbc-aes-caam-qi2",
1976 .cra_blocksize = AES_BLOCK_SIZE,
1977 },
1978 .setkey = aead_setkey,
1979 .setauthsize = aead_setauthsize,
1980 .encrypt = aead_encrypt,
1981 .decrypt = aead_decrypt,
1982 .ivsize = AES_BLOCK_SIZE,
1983 .maxauthsize = SHA224_DIGEST_SIZE,
1984 },
1985 .caam = {
1986 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1987 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1988 OP_ALG_AAI_HMAC_PRECOMP,
1989 .geniv = true,
1990 }
1991 },
1992 {
1993 .aead = {
1994 .base = {
1995 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1996 .cra_driver_name = "authenc-hmac-sha256-"
1997 "cbc-aes-caam-qi2",
1998 .cra_blocksize = AES_BLOCK_SIZE,
1999 },
2000 .setkey = aead_setkey,
2001 .setauthsize = aead_setauthsize,
2002 .encrypt = aead_encrypt,
2003 .decrypt = aead_decrypt,
2004 .ivsize = AES_BLOCK_SIZE,
2005 .maxauthsize = SHA256_DIGEST_SIZE,
2006 },
2007 .caam = {
2008 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2009 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2010 OP_ALG_AAI_HMAC_PRECOMP,
2011 }
2012 },
2013 {
2014 .aead = {
2015 .base = {
2016 .cra_name = "echainiv(authenc(hmac(sha256),"
2017 "cbc(aes)))",
2018 .cra_driver_name = "echainiv-authenc-"
2019 "hmac-sha256-cbc-aes-"
2020 "caam-qi2",
2021 .cra_blocksize = AES_BLOCK_SIZE,
2022 },
2023 .setkey = aead_setkey,
2024 .setauthsize = aead_setauthsize,
2025 .encrypt = aead_encrypt,
2026 .decrypt = aead_decrypt,
2027 .ivsize = AES_BLOCK_SIZE,
2028 .maxauthsize = SHA256_DIGEST_SIZE,
2029 },
2030 .caam = {
2031 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2032 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2033 OP_ALG_AAI_HMAC_PRECOMP,
2034 .geniv = true,
2035 }
2036 },
2037 {
2038 .aead = {
2039 .base = {
2040 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2041 .cra_driver_name = "authenc-hmac-sha384-"
2042 "cbc-aes-caam-qi2",
2043 .cra_blocksize = AES_BLOCK_SIZE,
2044 },
2045 .setkey = aead_setkey,
2046 .setauthsize = aead_setauthsize,
2047 .encrypt = aead_encrypt,
2048 .decrypt = aead_decrypt,
2049 .ivsize = AES_BLOCK_SIZE,
2050 .maxauthsize = SHA384_DIGEST_SIZE,
2051 },
2052 .caam = {
2053 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2054 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2055 OP_ALG_AAI_HMAC_PRECOMP,
2056 }
2057 },
2058 {
2059 .aead = {
2060 .base = {
2061 .cra_name = "echainiv(authenc(hmac(sha384),"
2062 "cbc(aes)))",
2063 .cra_driver_name = "echainiv-authenc-"
2064 "hmac-sha384-cbc-aes-"
2065 "caam-qi2",
2066 .cra_blocksize = AES_BLOCK_SIZE,
2067 },
2068 .setkey = aead_setkey,
2069 .setauthsize = aead_setauthsize,
2070 .encrypt = aead_encrypt,
2071 .decrypt = aead_decrypt,
2072 .ivsize = AES_BLOCK_SIZE,
2073 .maxauthsize = SHA384_DIGEST_SIZE,
2074 },
2075 .caam = {
2076 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2077 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2078 OP_ALG_AAI_HMAC_PRECOMP,
2079 .geniv = true,
2080 }
2081 },
2082 {
2083 .aead = {
2084 .base = {
2085 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2086 .cra_driver_name = "authenc-hmac-sha512-"
2087 "cbc-aes-caam-qi2",
2088 .cra_blocksize = AES_BLOCK_SIZE,
2089 },
2090 .setkey = aead_setkey,
2091 .setauthsize = aead_setauthsize,
2092 .encrypt = aead_encrypt,
2093 .decrypt = aead_decrypt,
2094 .ivsize = AES_BLOCK_SIZE,
2095 .maxauthsize = SHA512_DIGEST_SIZE,
2096 },
2097 .caam = {
2098 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2099 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2100 OP_ALG_AAI_HMAC_PRECOMP,
2101 }
2102 },
2103 {
2104 .aead = {
2105 .base = {
2106 .cra_name = "echainiv(authenc(hmac(sha512),"
2107 "cbc(aes)))",
2108 .cra_driver_name = "echainiv-authenc-"
2109 "hmac-sha512-cbc-aes-"
2110 "caam-qi2",
2111 .cra_blocksize = AES_BLOCK_SIZE,
2112 },
2113 .setkey = aead_setkey,
2114 .setauthsize = aead_setauthsize,
2115 .encrypt = aead_encrypt,
2116 .decrypt = aead_decrypt,
2117 .ivsize = AES_BLOCK_SIZE,
2118 .maxauthsize = SHA512_DIGEST_SIZE,
2119 },
2120 .caam = {
2121 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2122 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2123 OP_ALG_AAI_HMAC_PRECOMP,
2124 .geniv = true,
2125 }
2126 },
2127 {
2128 .aead = {
2129 .base = {
2130 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2131 .cra_driver_name = "authenc-hmac-md5-"
2132 "cbc-des3_ede-caam-qi2",
2133 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2134 },
2135 .setkey = des3_aead_setkey,
2136 .setauthsize = aead_setauthsize,
2137 .encrypt = aead_encrypt,
2138 .decrypt = aead_decrypt,
2139 .ivsize = DES3_EDE_BLOCK_SIZE,
2140 .maxauthsize = MD5_DIGEST_SIZE,
2141 },
2142 .caam = {
2143 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2144 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2145 OP_ALG_AAI_HMAC_PRECOMP,
2146 }
2147 },
2148 {
2149 .aead = {
2150 .base = {
2151 .cra_name = "echainiv(authenc(hmac(md5),"
2152 "cbc(des3_ede)))",
2153 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2154 "cbc-des3_ede-caam-qi2",
2155 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2156 },
2157 .setkey = des3_aead_setkey,
2158 .setauthsize = aead_setauthsize,
2159 .encrypt = aead_encrypt,
2160 .decrypt = aead_decrypt,
2161 .ivsize = DES3_EDE_BLOCK_SIZE,
2162 .maxauthsize = MD5_DIGEST_SIZE,
2163 },
2164 .caam = {
2165 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2166 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2167 OP_ALG_AAI_HMAC_PRECOMP,
2168 .geniv = true,
2169 }
2170 },
2171 {
2172 .aead = {
2173 .base = {
2174 .cra_name = "authenc(hmac(sha1),"
2175 "cbc(des3_ede))",
2176 .cra_driver_name = "authenc-hmac-sha1-"
2177 "cbc-des3_ede-caam-qi2",
2178 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2179 },
2180 .setkey = des3_aead_setkey,
2181 .setauthsize = aead_setauthsize,
2182 .encrypt = aead_encrypt,
2183 .decrypt = aead_decrypt,
2184 .ivsize = DES3_EDE_BLOCK_SIZE,
2185 .maxauthsize = SHA1_DIGEST_SIZE,
2186 },
2187 .caam = {
2188 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2189 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2190 OP_ALG_AAI_HMAC_PRECOMP,
2191 },
2192 },
2193 {
2194 .aead = {
2195 .base = {
2196 .cra_name = "echainiv(authenc(hmac(sha1),"
2197 "cbc(des3_ede)))",
2198 .cra_driver_name = "echainiv-authenc-"
2199 "hmac-sha1-"
2200 "cbc-des3_ede-caam-qi2",
2201 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2202 },
2203 .setkey = des3_aead_setkey,
2204 .setauthsize = aead_setauthsize,
2205 .encrypt = aead_encrypt,
2206 .decrypt = aead_decrypt,
2207 .ivsize = DES3_EDE_BLOCK_SIZE,
2208 .maxauthsize = SHA1_DIGEST_SIZE,
2209 },
2210 .caam = {
2211 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2212 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2213 OP_ALG_AAI_HMAC_PRECOMP,
2214 .geniv = true,
2215 }
2216 },
2217 {
2218 .aead = {
2219 .base = {
2220 .cra_name = "authenc(hmac(sha224),"
2221 "cbc(des3_ede))",
2222 .cra_driver_name = "authenc-hmac-sha224-"
2223 "cbc-des3_ede-caam-qi2",
2224 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2225 },
2226 .setkey = des3_aead_setkey,
2227 .setauthsize = aead_setauthsize,
2228 .encrypt = aead_encrypt,
2229 .decrypt = aead_decrypt,
2230 .ivsize = DES3_EDE_BLOCK_SIZE,
2231 .maxauthsize = SHA224_DIGEST_SIZE,
2232 },
2233 .caam = {
2234 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2235 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2236 OP_ALG_AAI_HMAC_PRECOMP,
2237 },
2238 },
2239 {
2240 .aead = {
2241 .base = {
2242 .cra_name = "echainiv(authenc(hmac(sha224),"
2243 "cbc(des3_ede)))",
2244 .cra_driver_name = "echainiv-authenc-"
2245 "hmac-sha224-"
2246 "cbc-des3_ede-caam-qi2",
2247 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2248 },
2249 .setkey = des3_aead_setkey,
2250 .setauthsize = aead_setauthsize,
2251 .encrypt = aead_encrypt,
2252 .decrypt = aead_decrypt,
2253 .ivsize = DES3_EDE_BLOCK_SIZE,
2254 .maxauthsize = SHA224_DIGEST_SIZE,
2255 },
2256 .caam = {
2257 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2258 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2259 OP_ALG_AAI_HMAC_PRECOMP,
2260 .geniv = true,
2261 }
2262 },
2263 {
2264 .aead = {
2265 .base = {
2266 .cra_name = "authenc(hmac(sha256),"
2267 "cbc(des3_ede))",
2268 .cra_driver_name = "authenc-hmac-sha256-"
2269 "cbc-des3_ede-caam-qi2",
2270 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2271 },
2272 .setkey = des3_aead_setkey,
2273 .setauthsize = aead_setauthsize,
2274 .encrypt = aead_encrypt,
2275 .decrypt = aead_decrypt,
2276 .ivsize = DES3_EDE_BLOCK_SIZE,
2277 .maxauthsize = SHA256_DIGEST_SIZE,
2278 },
2279 .caam = {
2280 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2281 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2282 OP_ALG_AAI_HMAC_PRECOMP,
2283 },
2284 },
2285 {
2286 .aead = {
2287 .base = {
2288 .cra_name = "echainiv(authenc(hmac(sha256),"
2289 "cbc(des3_ede)))",
2290 .cra_driver_name = "echainiv-authenc-"
2291 "hmac-sha256-"
2292 "cbc-des3_ede-caam-qi2",
2293 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2294 },
2295 .setkey = des3_aead_setkey,
2296 .setauthsize = aead_setauthsize,
2297 .encrypt = aead_encrypt,
2298 .decrypt = aead_decrypt,
2299 .ivsize = DES3_EDE_BLOCK_SIZE,
2300 .maxauthsize = SHA256_DIGEST_SIZE,
2301 },
2302 .caam = {
2303 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2304 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2305 OP_ALG_AAI_HMAC_PRECOMP,
2306 .geniv = true,
2307 }
2308 },
2309 {
2310 .aead = {
2311 .base = {
2312 .cra_name = "authenc(hmac(sha384),"
2313 "cbc(des3_ede))",
2314 .cra_driver_name = "authenc-hmac-sha384-"
2315 "cbc-des3_ede-caam-qi2",
2316 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2317 },
2318 .setkey = des3_aead_setkey,
2319 .setauthsize = aead_setauthsize,
2320 .encrypt = aead_encrypt,
2321 .decrypt = aead_decrypt,
2322 .ivsize = DES3_EDE_BLOCK_SIZE,
2323 .maxauthsize = SHA384_DIGEST_SIZE,
2324 },
2325 .caam = {
2326 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2327 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2328 OP_ALG_AAI_HMAC_PRECOMP,
2329 },
2330 },
2331 {
2332 .aead = {
2333 .base = {
2334 .cra_name = "echainiv(authenc(hmac(sha384),"
2335 "cbc(des3_ede)))",
2336 .cra_driver_name = "echainiv-authenc-"
2337 "hmac-sha384-"
2338 "cbc-des3_ede-caam-qi2",
2339 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2340 },
2341 .setkey = des3_aead_setkey,
2342 .setauthsize = aead_setauthsize,
2343 .encrypt = aead_encrypt,
2344 .decrypt = aead_decrypt,
2345 .ivsize = DES3_EDE_BLOCK_SIZE,
2346 .maxauthsize = SHA384_DIGEST_SIZE,
2347 },
2348 .caam = {
2349 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2350 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2351 OP_ALG_AAI_HMAC_PRECOMP,
2352 .geniv = true,
2353 }
2354 },
2355 {
2356 .aead = {
2357 .base = {
2358 .cra_name = "authenc(hmac(sha512),"
2359 "cbc(des3_ede))",
2360 .cra_driver_name = "authenc-hmac-sha512-"
2361 "cbc-des3_ede-caam-qi2",
2362 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2363 },
2364 .setkey = des3_aead_setkey,
2365 .setauthsize = aead_setauthsize,
2366 .encrypt = aead_encrypt,
2367 .decrypt = aead_decrypt,
2368 .ivsize = DES3_EDE_BLOCK_SIZE,
2369 .maxauthsize = SHA512_DIGEST_SIZE,
2370 },
2371 .caam = {
2372 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2373 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2374 OP_ALG_AAI_HMAC_PRECOMP,
2375 },
2376 },
2377 {
2378 .aead = {
2379 .base = {
2380 .cra_name = "echainiv(authenc(hmac(sha512),"
2381 "cbc(des3_ede)))",
2382 .cra_driver_name = "echainiv-authenc-"
2383 "hmac-sha512-"
2384 "cbc-des3_ede-caam-qi2",
2385 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2386 },
2387 .setkey = des3_aead_setkey,
2388 .setauthsize = aead_setauthsize,
2389 .encrypt = aead_encrypt,
2390 .decrypt = aead_decrypt,
2391 .ivsize = DES3_EDE_BLOCK_SIZE,
2392 .maxauthsize = SHA512_DIGEST_SIZE,
2393 },
2394 .caam = {
2395 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2396 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2397 OP_ALG_AAI_HMAC_PRECOMP,
2398 .geniv = true,
2399 }
2400 },
2401 {
2402 .aead = {
2403 .base = {
2404 .cra_name = "authenc(hmac(md5),cbc(des))",
2405 .cra_driver_name = "authenc-hmac-md5-"
2406 "cbc-des-caam-qi2",
2407 .cra_blocksize = DES_BLOCK_SIZE,
2408 },
2409 .setkey = aead_setkey,
2410 .setauthsize = aead_setauthsize,
2411 .encrypt = aead_encrypt,
2412 .decrypt = aead_decrypt,
2413 .ivsize = DES_BLOCK_SIZE,
2414 .maxauthsize = MD5_DIGEST_SIZE,
2415 },
2416 .caam = {
2417 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2418 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2419 OP_ALG_AAI_HMAC_PRECOMP,
2420 },
2421 },
2422 {
2423 .aead = {
2424 .base = {
2425 .cra_name = "echainiv(authenc(hmac(md5),"
2426 "cbc(des)))",
2427 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2428 "cbc-des-caam-qi2",
2429 .cra_blocksize = DES_BLOCK_SIZE,
2430 },
2431 .setkey = aead_setkey,
2432 .setauthsize = aead_setauthsize,
2433 .encrypt = aead_encrypt,
2434 .decrypt = aead_decrypt,
2435 .ivsize = DES_BLOCK_SIZE,
2436 .maxauthsize = MD5_DIGEST_SIZE,
2437 },
2438 .caam = {
2439 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2440 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2441 OP_ALG_AAI_HMAC_PRECOMP,
2442 .geniv = true,
2443 }
2444 },
2445 {
2446 .aead = {
2447 .base = {
2448 .cra_name = "authenc(hmac(sha1),cbc(des))",
2449 .cra_driver_name = "authenc-hmac-sha1-"
2450 "cbc-des-caam-qi2",
2451 .cra_blocksize = DES_BLOCK_SIZE,
2452 },
2453 .setkey = aead_setkey,
2454 .setauthsize = aead_setauthsize,
2455 .encrypt = aead_encrypt,
2456 .decrypt = aead_decrypt,
2457 .ivsize = DES_BLOCK_SIZE,
2458 .maxauthsize = SHA1_DIGEST_SIZE,
2459 },
2460 .caam = {
2461 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2462 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2463 OP_ALG_AAI_HMAC_PRECOMP,
2464 },
2465 },
2466 {
2467 .aead = {
2468 .base = {
2469 .cra_name = "echainiv(authenc(hmac(sha1),"
2470 "cbc(des)))",
2471 .cra_driver_name = "echainiv-authenc-"
2472 "hmac-sha1-cbc-des-caam-qi2",
2473 .cra_blocksize = DES_BLOCK_SIZE,
2474 },
2475 .setkey = aead_setkey,
2476 .setauthsize = aead_setauthsize,
2477 .encrypt = aead_encrypt,
2478 .decrypt = aead_decrypt,
2479 .ivsize = DES_BLOCK_SIZE,
2480 .maxauthsize = SHA1_DIGEST_SIZE,
2481 },
2482 .caam = {
2483 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2484 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2485 OP_ALG_AAI_HMAC_PRECOMP,
2486 .geniv = true,
2487 }
2488 },
2489 {
2490 .aead = {
2491 .base = {
2492 .cra_name = "authenc(hmac(sha224),cbc(des))",
2493 .cra_driver_name = "authenc-hmac-sha224-"
2494 "cbc-des-caam-qi2",
2495 .cra_blocksize = DES_BLOCK_SIZE,
2496 },
2497 .setkey = aead_setkey,
2498 .setauthsize = aead_setauthsize,
2499 .encrypt = aead_encrypt,
2500 .decrypt = aead_decrypt,
2501 .ivsize = DES_BLOCK_SIZE,
2502 .maxauthsize = SHA224_DIGEST_SIZE,
2503 },
2504 .caam = {
2505 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2506 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2507 OP_ALG_AAI_HMAC_PRECOMP,
2508 },
2509 },
2510 {
2511 .aead = {
2512 .base = {
2513 .cra_name = "echainiv(authenc(hmac(sha224),"
2514 "cbc(des)))",
2515 .cra_driver_name = "echainiv-authenc-"
2516 "hmac-sha224-cbc-des-"
2517 "caam-qi2",
2518 .cra_blocksize = DES_BLOCK_SIZE,
2519 },
2520 .setkey = aead_setkey,
2521 .setauthsize = aead_setauthsize,
2522 .encrypt = aead_encrypt,
2523 .decrypt = aead_decrypt,
2524 .ivsize = DES_BLOCK_SIZE,
2525 .maxauthsize = SHA224_DIGEST_SIZE,
2526 },
2527 .caam = {
2528 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2529 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2530 OP_ALG_AAI_HMAC_PRECOMP,
2531 .geniv = true,
2532 }
2533 },
2534 {
2535 .aead = {
2536 .base = {
2537 .cra_name = "authenc(hmac(sha256),cbc(des))",
2538 .cra_driver_name = "authenc-hmac-sha256-"
2539 "cbc-des-caam-qi2",
2540 .cra_blocksize = DES_BLOCK_SIZE,
2541 },
2542 .setkey = aead_setkey,
2543 .setauthsize = aead_setauthsize,
2544 .encrypt = aead_encrypt,
2545 .decrypt = aead_decrypt,
2546 .ivsize = DES_BLOCK_SIZE,
2547 .maxauthsize = SHA256_DIGEST_SIZE,
2548 },
2549 .caam = {
2550 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2551 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2552 OP_ALG_AAI_HMAC_PRECOMP,
2553 },
2554 },
2555 {
2556 .aead = {
2557 .base = {
2558 .cra_name = "echainiv(authenc(hmac(sha256),"
2559 "cbc(des)))",
2560 .cra_driver_name = "echainiv-authenc-"
2561 "hmac-sha256-cbc-des-"
2562 "caam-qi2",
2563 .cra_blocksize = DES_BLOCK_SIZE,
2564 },
2565 .setkey = aead_setkey,
2566 .setauthsize = aead_setauthsize,
2567 .encrypt = aead_encrypt,
2568 .decrypt = aead_decrypt,
2569 .ivsize = DES_BLOCK_SIZE,
2570 .maxauthsize = SHA256_DIGEST_SIZE,
2571 },
2572 .caam = {
2573 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2574 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2575 OP_ALG_AAI_HMAC_PRECOMP,
2576 .geniv = true,
2577 },
2578 },
2579 {
2580 .aead = {
2581 .base = {
2582 .cra_name = "authenc(hmac(sha384),cbc(des))",
2583 .cra_driver_name = "authenc-hmac-sha384-"
2584 "cbc-des-caam-qi2",
2585 .cra_blocksize = DES_BLOCK_SIZE,
2586 },
2587 .setkey = aead_setkey,
2588 .setauthsize = aead_setauthsize,
2589 .encrypt = aead_encrypt,
2590 .decrypt = aead_decrypt,
2591 .ivsize = DES_BLOCK_SIZE,
2592 .maxauthsize = SHA384_DIGEST_SIZE,
2593 },
2594 .caam = {
2595 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2596 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2597 OP_ALG_AAI_HMAC_PRECOMP,
2598 },
2599 },
2600 {
2601 .aead = {
2602 .base = {
2603 .cra_name = "echainiv(authenc(hmac(sha384),"
2604 "cbc(des)))",
2605 .cra_driver_name = "echainiv-authenc-"
2606 "hmac-sha384-cbc-des-"
2607 "caam-qi2",
2608 .cra_blocksize = DES_BLOCK_SIZE,
2609 },
2610 .setkey = aead_setkey,
2611 .setauthsize = aead_setauthsize,
2612 .encrypt = aead_encrypt,
2613 .decrypt = aead_decrypt,
2614 .ivsize = DES_BLOCK_SIZE,
2615 .maxauthsize = SHA384_DIGEST_SIZE,
2616 },
2617 .caam = {
2618 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2619 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2620 OP_ALG_AAI_HMAC_PRECOMP,
2621 .geniv = true,
2622 }
2623 },
2624 {
2625 .aead = {
2626 .base = {
2627 .cra_name = "authenc(hmac(sha512),cbc(des))",
2628 .cra_driver_name = "authenc-hmac-sha512-"
2629 "cbc-des-caam-qi2",
2630 .cra_blocksize = DES_BLOCK_SIZE,
2631 },
2632 .setkey = aead_setkey,
2633 .setauthsize = aead_setauthsize,
2634 .encrypt = aead_encrypt,
2635 .decrypt = aead_decrypt,
2636 .ivsize = DES_BLOCK_SIZE,
2637 .maxauthsize = SHA512_DIGEST_SIZE,
2638 },
2639 .caam = {
2640 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2641 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2642 OP_ALG_AAI_HMAC_PRECOMP,
2643 }
2644 },
2645 {
2646 .aead = {
2647 .base = {
2648 .cra_name = "echainiv(authenc(hmac(sha512),"
2649 "cbc(des)))",
2650 .cra_driver_name = "echainiv-authenc-"
2651 "hmac-sha512-cbc-des-"
2652 "caam-qi2",
2653 .cra_blocksize = DES_BLOCK_SIZE,
2654 },
2655 .setkey = aead_setkey,
2656 .setauthsize = aead_setauthsize,
2657 .encrypt = aead_encrypt,
2658 .decrypt = aead_decrypt,
2659 .ivsize = DES_BLOCK_SIZE,
2660 .maxauthsize = SHA512_DIGEST_SIZE,
2661 },
2662 .caam = {
2663 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2664 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2665 OP_ALG_AAI_HMAC_PRECOMP,
2666 .geniv = true,
2667 }
2668 },
2669 {
2670 .aead = {
2671 .base = {
2672 .cra_name = "authenc(hmac(md5),"
2673 "rfc3686(ctr(aes)))",
2674 .cra_driver_name = "authenc-hmac-md5-"
2675 "rfc3686-ctr-aes-caam-qi2",
2676 .cra_blocksize = 1,
2677 },
2678 .setkey = aead_setkey,
2679 .setauthsize = aead_setauthsize,
2680 .encrypt = aead_encrypt,
2681 .decrypt = aead_decrypt,
2682 .ivsize = CTR_RFC3686_IV_SIZE,
2683 .maxauthsize = MD5_DIGEST_SIZE,
2684 },
2685 .caam = {
2686 .class1_alg_type = OP_ALG_ALGSEL_AES |
2687 OP_ALG_AAI_CTR_MOD128,
2688 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2689 OP_ALG_AAI_HMAC_PRECOMP,
2690 .rfc3686 = true,
2691 },
2692 },
2693 {
2694 .aead = {
2695 .base = {
2696 .cra_name = "seqiv(authenc("
2697 "hmac(md5),rfc3686(ctr(aes))))",
2698 .cra_driver_name = "seqiv-authenc-hmac-md5-"
2699 "rfc3686-ctr-aes-caam-qi2",
2700 .cra_blocksize = 1,
2701 },
2702 .setkey = aead_setkey,
2703 .setauthsize = aead_setauthsize,
2704 .encrypt = aead_encrypt,
2705 .decrypt = aead_decrypt,
2706 .ivsize = CTR_RFC3686_IV_SIZE,
2707 .maxauthsize = MD5_DIGEST_SIZE,
2708 },
2709 .caam = {
2710 .class1_alg_type = OP_ALG_ALGSEL_AES |
2711 OP_ALG_AAI_CTR_MOD128,
2712 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2713 OP_ALG_AAI_HMAC_PRECOMP,
2714 .rfc3686 = true,
2715 .geniv = true,
2716 },
2717 },
2718 {
2719 .aead = {
2720 .base = {
2721 .cra_name = "authenc(hmac(sha1),"
2722 "rfc3686(ctr(aes)))",
2723 .cra_driver_name = "authenc-hmac-sha1-"
2724 "rfc3686-ctr-aes-caam-qi2",
2725 .cra_blocksize = 1,
2726 },
2727 .setkey = aead_setkey,
2728 .setauthsize = aead_setauthsize,
2729 .encrypt = aead_encrypt,
2730 .decrypt = aead_decrypt,
2731 .ivsize = CTR_RFC3686_IV_SIZE,
2732 .maxauthsize = SHA1_DIGEST_SIZE,
2733 },
2734 .caam = {
2735 .class1_alg_type = OP_ALG_ALGSEL_AES |
2736 OP_ALG_AAI_CTR_MOD128,
2737 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2738 OP_ALG_AAI_HMAC_PRECOMP,
2739 .rfc3686 = true,
2740 },
2741 },
2742 {
2743 .aead = {
2744 .base = {
2745 .cra_name = "seqiv(authenc("
2746 "hmac(sha1),rfc3686(ctr(aes))))",
2747 .cra_driver_name = "seqiv-authenc-hmac-sha1-"
2748 "rfc3686-ctr-aes-caam-qi2",
2749 .cra_blocksize = 1,
2750 },
2751 .setkey = aead_setkey,
2752 .setauthsize = aead_setauthsize,
2753 .encrypt = aead_encrypt,
2754 .decrypt = aead_decrypt,
2755 .ivsize = CTR_RFC3686_IV_SIZE,
2756 .maxauthsize = SHA1_DIGEST_SIZE,
2757 },
2758 .caam = {
2759 .class1_alg_type = OP_ALG_ALGSEL_AES |
2760 OP_ALG_AAI_CTR_MOD128,
2761 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2762 OP_ALG_AAI_HMAC_PRECOMP,
2763 .rfc3686 = true,
2764 .geniv = true,
2765 },
2766 },
2767 {
2768 .aead = {
2769 .base = {
2770 .cra_name = "authenc(hmac(sha224),"
2771 "rfc3686(ctr(aes)))",
2772 .cra_driver_name = "authenc-hmac-sha224-"
2773 "rfc3686-ctr-aes-caam-qi2",
2774 .cra_blocksize = 1,
2775 },
2776 .setkey = aead_setkey,
2777 .setauthsize = aead_setauthsize,
2778 .encrypt = aead_encrypt,
2779 .decrypt = aead_decrypt,
2780 .ivsize = CTR_RFC3686_IV_SIZE,
2781 .maxauthsize = SHA224_DIGEST_SIZE,
2782 },
2783 .caam = {
2784 .class1_alg_type = OP_ALG_ALGSEL_AES |
2785 OP_ALG_AAI_CTR_MOD128,
2786 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2787 OP_ALG_AAI_HMAC_PRECOMP,
2788 .rfc3686 = true,
2789 },
2790 },
2791 {
2792 .aead = {
2793 .base = {
2794 .cra_name = "seqiv(authenc("
2795 "hmac(sha224),rfc3686(ctr(aes))))",
2796 .cra_driver_name = "seqiv-authenc-hmac-sha224-"
2797 "rfc3686-ctr-aes-caam-qi2",
2798 .cra_blocksize = 1,
2799 },
2800 .setkey = aead_setkey,
2801 .setauthsize = aead_setauthsize,
2802 .encrypt = aead_encrypt,
2803 .decrypt = aead_decrypt,
2804 .ivsize = CTR_RFC3686_IV_SIZE,
2805 .maxauthsize = SHA224_DIGEST_SIZE,
2806 },
2807 .caam = {
2808 .class1_alg_type = OP_ALG_ALGSEL_AES |
2809 OP_ALG_AAI_CTR_MOD128,
2810 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2811 OP_ALG_AAI_HMAC_PRECOMP,
2812 .rfc3686 = true,
2813 .geniv = true,
2814 },
2815 },
2816 {
2817 .aead = {
2818 .base = {
2819 .cra_name = "authenc(hmac(sha256),"
2820 "rfc3686(ctr(aes)))",
2821 .cra_driver_name = "authenc-hmac-sha256-"
2822 "rfc3686-ctr-aes-caam-qi2",
2823 .cra_blocksize = 1,
2824 },
2825 .setkey = aead_setkey,
2826 .setauthsize = aead_setauthsize,
2827 .encrypt = aead_encrypt,
2828 .decrypt = aead_decrypt,
2829 .ivsize = CTR_RFC3686_IV_SIZE,
2830 .maxauthsize = SHA256_DIGEST_SIZE,
2831 },
2832 .caam = {
2833 .class1_alg_type = OP_ALG_ALGSEL_AES |
2834 OP_ALG_AAI_CTR_MOD128,
2835 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2836 OP_ALG_AAI_HMAC_PRECOMP,
2837 .rfc3686 = true,
2838 },
2839 },
2840 {
2841 .aead = {
2842 .base = {
2843 .cra_name = "seqiv(authenc(hmac(sha256),"
2844 "rfc3686(ctr(aes))))",
2845 .cra_driver_name = "seqiv-authenc-hmac-sha256-"
2846 "rfc3686-ctr-aes-caam-qi2",
2847 .cra_blocksize = 1,
2848 },
2849 .setkey = aead_setkey,
2850 .setauthsize = aead_setauthsize,
2851 .encrypt = aead_encrypt,
2852 .decrypt = aead_decrypt,
2853 .ivsize = CTR_RFC3686_IV_SIZE,
2854 .maxauthsize = SHA256_DIGEST_SIZE,
2855 },
2856 .caam = {
2857 .class1_alg_type = OP_ALG_ALGSEL_AES |
2858 OP_ALG_AAI_CTR_MOD128,
2859 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2860 OP_ALG_AAI_HMAC_PRECOMP,
2861 .rfc3686 = true,
2862 .geniv = true,
2863 },
2864 },
2865 {
2866 .aead = {
2867 .base = {
2868 .cra_name = "authenc(hmac(sha384),"
2869 "rfc3686(ctr(aes)))",
2870 .cra_driver_name = "authenc-hmac-sha384-"
2871 "rfc3686-ctr-aes-caam-qi2",
2872 .cra_blocksize = 1,
2873 },
2874 .setkey = aead_setkey,
2875 .setauthsize = aead_setauthsize,
2876 .encrypt = aead_encrypt,
2877 .decrypt = aead_decrypt,
2878 .ivsize = CTR_RFC3686_IV_SIZE,
2879 .maxauthsize = SHA384_DIGEST_SIZE,
2880 },
2881 .caam = {
2882 .class1_alg_type = OP_ALG_ALGSEL_AES |
2883 OP_ALG_AAI_CTR_MOD128,
2884 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2885 OP_ALG_AAI_HMAC_PRECOMP,
2886 .rfc3686 = true,
2887 },
2888 },
2889 {
2890 .aead = {
2891 .base = {
2892 .cra_name = "seqiv(authenc(hmac(sha384),"
2893 "rfc3686(ctr(aes))))",
2894 .cra_driver_name = "seqiv-authenc-hmac-sha384-"
2895 "rfc3686-ctr-aes-caam-qi2",
2896 .cra_blocksize = 1,
2897 },
2898 .setkey = aead_setkey,
2899 .setauthsize = aead_setauthsize,
2900 .encrypt = aead_encrypt,
2901 .decrypt = aead_decrypt,
2902 .ivsize = CTR_RFC3686_IV_SIZE,
2903 .maxauthsize = SHA384_DIGEST_SIZE,
2904 },
2905 .caam = {
2906 .class1_alg_type = OP_ALG_ALGSEL_AES |
2907 OP_ALG_AAI_CTR_MOD128,
2908 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2909 OP_ALG_AAI_HMAC_PRECOMP,
2910 .rfc3686 = true,
2911 .geniv = true,
2912 },
2913 },
2914 {
2915 .aead = {
2916 .base = {
2917 .cra_name = "rfc7539(chacha20,poly1305)",
2918 .cra_driver_name = "rfc7539-chacha20-poly1305-"
2919 "caam-qi2",
2920 .cra_blocksize = 1,
2921 },
2922 .setkey = chachapoly_setkey,
2923 .setauthsize = chachapoly_setauthsize,
2924 .encrypt = aead_encrypt,
2925 .decrypt = aead_decrypt,
2926 .ivsize = CHACHAPOLY_IV_SIZE,
2927 .maxauthsize = POLY1305_DIGEST_SIZE,
2928 },
2929 .caam = {
2930 .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2931 OP_ALG_AAI_AEAD,
2932 .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2933 OP_ALG_AAI_AEAD,
2934 .nodkp = true,
2935 },
2936 },
2937 {
2938 .aead = {
2939 .base = {
2940 .cra_name = "rfc7539esp(chacha20,poly1305)",
2941 .cra_driver_name = "rfc7539esp-chacha20-"
2942 "poly1305-caam-qi2",
2943 .cra_blocksize = 1,
2944 },
2945 .setkey = chachapoly_setkey,
2946 .setauthsize = chachapoly_setauthsize,
2947 .encrypt = aead_encrypt,
2948 .decrypt = aead_decrypt,
2949 .ivsize = 8,
2950 .maxauthsize = POLY1305_DIGEST_SIZE,
2951 },
2952 .caam = {
2953 .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2954 OP_ALG_AAI_AEAD,
2955 .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2956 OP_ALG_AAI_AEAD,
2957 .nodkp = true,
2958 },
2959 },
2960 {
2961 .aead = {
2962 .base = {
2963 .cra_name = "authenc(hmac(sha512),"
2964 "rfc3686(ctr(aes)))",
2965 .cra_driver_name = "authenc-hmac-sha512-"
2966 "rfc3686-ctr-aes-caam-qi2",
2967 .cra_blocksize = 1,
2968 },
2969 .setkey = aead_setkey,
2970 .setauthsize = aead_setauthsize,
2971 .encrypt = aead_encrypt,
2972 .decrypt = aead_decrypt,
2973 .ivsize = CTR_RFC3686_IV_SIZE,
2974 .maxauthsize = SHA512_DIGEST_SIZE,
2975 },
2976 .caam = {
2977 .class1_alg_type = OP_ALG_ALGSEL_AES |
2978 OP_ALG_AAI_CTR_MOD128,
2979 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2980 OP_ALG_AAI_HMAC_PRECOMP,
2981 .rfc3686 = true,
2982 },
2983 },
2984 {
2985 .aead = {
2986 .base = {
2987 .cra_name = "seqiv(authenc(hmac(sha512),"
2988 "rfc3686(ctr(aes))))",
2989 .cra_driver_name = "seqiv-authenc-hmac-sha512-"
2990 "rfc3686-ctr-aes-caam-qi2",
2991 .cra_blocksize = 1,
2992 },
2993 .setkey = aead_setkey,
2994 .setauthsize = aead_setauthsize,
2995 .encrypt = aead_encrypt,
2996 .decrypt = aead_decrypt,
2997 .ivsize = CTR_RFC3686_IV_SIZE,
2998 .maxauthsize = SHA512_DIGEST_SIZE,
2999 },
3000 .caam = {
3001 .class1_alg_type = OP_ALG_ALGSEL_AES |
3002 OP_ALG_AAI_CTR_MOD128,
3003 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3004 OP_ALG_AAI_HMAC_PRECOMP,
3005 .rfc3686 = true,
3006 .geniv = true,
3007 },
3008 },
3009 };
3010
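/*
 * Fill in the template fields common to all skcipher entries (module owner,
 * priority, context size including DMA padding, and the ASYNC /
 * ALLOCATES_MEMORY / KERN_DRIVER_ONLY flags) before registration.
 */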
3011 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
3012 {
3013 struct skcipher_alg *alg = &t_alg->skcipher;
3014
3015 alg->base.cra_module = THIS_MODULE;
3016 alg->base.cra_priority = CAAM_CRA_PRIORITY;
3017 alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
3018 alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3019 CRYPTO_ALG_KERN_DRIVER_ONLY);
3020
3021 alg->init = caam_cra_init_skcipher;
3022 alg->exit = caam_cra_exit;
3023 }
3024
3025 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
3026 {
3027 struct aead_alg *alg = &t_alg->aead;
3028
3029 alg->base.cra_module = THIS_MODULE;
3030 alg->base.cra_priority = CAAM_CRA_PRIORITY;
3031 alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
3032 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3033 CRYPTO_ALG_KERN_DRIVER_ONLY;
3034
3035 alg->init = caam_cra_init_aead;
3036 alg->exit = caam_cra_exit_aead;
3037 }
3038
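/*
 * A minimal usage sketch (hypothetical, kernel crypto API consumer side):
 * once registered, these transforms are selected by cra_name or, explicitly,
 * by cra_driver_name:
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc-aes-caam-qi2", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_skcipher_setkey(tfm, key, AES_MAX_KEY_SIZE);
 *	...
 *	crypto_free_skcipher(tfm);
 */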
3039 /* max hash key is max split key size */
3040 #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
3041
3042 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
3043
3044 /* caam context size for hashes: running digest + 8-byte message length */
3045 #define HASH_MSG_LEN 8
3046 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
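/*
 * Worked example (a sketch, assuming sha256): the running context holds the
 * 8-byte message-length word plus the running digest, i.e.
 * ctx_len = HASH_MSG_LEN + SHA256_DIGEST_SIZE = 8 + 32 = 40 bytes, bounded
 * above by MAX_CTX_LEN = 8 + 64 = 72 bytes for sha512.
 */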
3047
3048 enum hash_optype {
3049 UPDATE = 0,
3050 UPDATE_FIRST,
3051 FINALIZE,
3052 DIGEST,
3053 HASH_NUM_OP
3054 };
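/*
 * One Flow Context (shared descriptor) is kept per operation type above:
 * UPDATE_FIRST seeds the running context (OP_ALG_AS_INIT), UPDATE consumes
 * and re-emits it, FINALIZE converts it into the final digest, and DIGEST
 * performs init + finalize in a single pass.
 */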
3055
3056 /**
3057 * struct caam_hash_ctx - ahash per-session context
3058 * @flc: Flow Contexts array
3059 * @key: authentication key
3060 * @flc_dma: I/O virtual addresses of the Flow Contexts
3061 * @dev: dpseci device
3062 * @ctx_len: size of Context Register
3063 * @adata: hashing algorithm details
3064 */
3065 struct caam_hash_ctx {
3066 struct caam_flc flc[HASH_NUM_OP];
3067 u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3068 dma_addr_t flc_dma[HASH_NUM_OP];
3069 struct device *dev;
3070 int ctx_len;
3071 struct alginfo adata;
3072 };
3073
3074 /* ahash state */
3075 struct caam_hash_state {
3076 struct caam_request caam_req;
3077 dma_addr_t buf_dma;
3078 dma_addr_t ctx_dma;
3079 int ctx_dma_len;
3080 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3081 int buflen;
3082 int next_buflen;
3083 u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
3084 int (*update)(struct ahash_request *req);
3085 int (*final)(struct ahash_request *req);
3086 int (*finup)(struct ahash_request *req);
3087 };
3088
3089 struct caam_export_state {
3090 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
3091 u8 caam_ctx[MAX_CTX_LEN];
3092 int buflen;
3093 int (*update)(struct ahash_request *req);
3094 int (*final)(struct ahash_request *req);
3095 int (*finup)(struct ahash_request *req);
3096 };
3097
3098 /* Map current buffer in state (if length > 0) and put it in link table */
3099 static inline int buf_map_to_qm_sg(struct device *dev,
3100 struct dpaa2_sg_entry *qm_sg,
3101 struct caam_hash_state *state)
3102 {
3103 int buflen = state->buflen;
3104
3105 if (!buflen)
3106 return 0;
3107
3108 state->buf_dma = dma_map_single(dev, state->buf, buflen,
3109 DMA_TO_DEVICE);
3110 if (dma_mapping_error(dev, state->buf_dma)) {
3111 dev_err(dev, "unable to map buf\n");
3112 state->buf_dma = 0;
3113 return -ENOMEM;
3114 }
3115
3116 dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
3117
3118 return 0;
3119 }
3120
3121 /* Map state->caam_ctx, and add it to link table */
3122 static inline int ctx_map_to_qm_sg(struct device *dev,
3123 struct caam_hash_state *state, int ctx_len,
3124 struct dpaa2_sg_entry *qm_sg, u32 flag)
3125 {
3126 state->ctx_dma_len = ctx_len;
3127 state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
3128 if (dma_mapping_error(dev, state->ctx_dma)) {
3129 dev_err(dev, "unable to map ctx\n");
3130 state->ctx_dma = 0;
3131 return -ENOMEM;
3132 }
3133
3134 dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
3135
3136 return 0;
3137 }
3138
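/*
 * (Re)build the four shared descriptors for this session. Each one is
 * constructed in the CPU copy of its Flow Context and then synced to the
 * device; this typically runs at setkey/init time, not on the hotpath.
 */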
3139 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
3140 {
3141 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3142 int digestsize = crypto_ahash_digestsize(ahash);
3143 struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
3144 struct caam_flc *flc;
3145 u32 *desc;
3146
3147 /* ahash_update shared descriptor */
3148 flc = &ctx->flc[UPDATE];
3149 desc = flc->sh_desc;
3150 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
3151 ctx->ctx_len, true, priv->sec_attr.era);
3152 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3153 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
3154 desc_bytes(desc), DMA_BIDIRECTIONAL);
3155 print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
3156 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3157 1);
3158
3159 /* ahash_update_first shared descriptor */
3160 flc = &ctx->flc[UPDATE_FIRST];
3161 desc = flc->sh_desc;
3162 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
3163 ctx->ctx_len, false, priv->sec_attr.era);
3164 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3165 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
3166 desc_bytes(desc), DMA_BIDIRECTIONAL);
3167 print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
3168 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3169 1);
3170
3171 /* ahash_final shared descriptor */
3172 flc = &ctx->flc[FINALIZE];
3173 desc = flc->sh_desc;
3174 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
3175 ctx->ctx_len, true, priv->sec_attr.era);
3176 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3177 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
3178 desc_bytes(desc), DMA_BIDIRECTIONAL);
3179 print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
3180 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3181 1);
3182
3183 /* ahash_digest shared descriptor */
3184 flc = &ctx->flc[DIGEST];
3185 desc = flc->sh_desc;
3186 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
3187 ctx->ctx_len, false, priv->sec_attr.era);
3188 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3189 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
3190 desc_bytes(desc), DMA_BIDIRECTIONAL);
3191 print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
3192 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3193 1);
3194
3195 return 0;
3196 }
3197
3198 struct split_key_sh_result {
3199 struct completion completion;
3200 int err;
3201 struct device *dev;
3202 };
3203
3204 static void split_key_sh_done(void *cbk_ctx, u32 err)
3205 {
3206 struct split_key_sh_result *res = cbk_ctx;
3207
3208 dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3209
3210 res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
3211 complete(&res->completion);
3212 }
3213
3214 /* Digest the key when it is longer than the block size */
3215 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
3216 u32 digestsize)
3217 {
3218 struct caam_request *req_ctx;
3219 u32 *desc;
3220 struct split_key_sh_result result;
3221 dma_addr_t key_dma;
3222 struct caam_flc *flc;
3223 dma_addr_t flc_dma;
3224 int ret = -ENOMEM;
3225 struct dpaa2_fl_entry *in_fle, *out_fle;
3226
3227 req_ctx = kzalloc_obj(*req_ctx);
3228 if (!req_ctx)
3229 return -ENOMEM;
3230
3231 in_fle = &req_ctx->fd_flt[1];
3232 out_fle = &req_ctx->fd_flt[0];
3233
3234 flc = kzalloc_obj(*flc);
3235 if (!flc)
3236 goto err_flc;
3237
3238 key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
3239 if (dma_mapping_error(ctx->dev, key_dma)) {
3240 dev_err(ctx->dev, "unable to map key memory\n");
3241 goto err_key_dma;
3242 }
3243
3244 desc = flc->sh_desc;
3245
3246 init_sh_desc(desc, 0);
3247
3248 /* descriptor to perform unkeyed hash on key_in */
3249 append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
3250 OP_ALG_AS_INITFINAL);
3251 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
3252 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
3253 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
3254 LDST_SRCDST_BYTE_CONTEXT);
3255
3256 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3257 flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
3258 desc_bytes(desc), DMA_TO_DEVICE);
3259 if (dma_mapping_error(ctx->dev, flc_dma)) {
3260 dev_err(ctx->dev, "unable to map shared descriptor\n");
3261 goto err_flc_dma;
3262 }
3263
3264 dpaa2_fl_set_final(in_fle, true);
3265 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3266 dpaa2_fl_set_addr(in_fle, key_dma);
3267 dpaa2_fl_set_len(in_fle, *keylen);
3268 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3269 dpaa2_fl_set_addr(out_fle, key_dma);
3270 dpaa2_fl_set_len(out_fle, digestsize);
3271
3272 print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
3273 DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
3274 print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
3275 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3276 1);
3277
3278 result.err = 0;
3279 init_completion(&result.completion);
3280 result.dev = ctx->dev;
3281
3282 req_ctx->flc = flc;
3283 req_ctx->flc_dma = flc_dma;
3284 req_ctx->cbk = split_key_sh_done;
3285 req_ctx->ctx = &result;
3286
3287 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3288 if (ret == -EINPROGRESS) {
3289 /* in progress */
3290 wait_for_completion(&result.completion);
3291 ret = result.err;
3292 print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
3293 DUMP_PREFIX_ADDRESS, 16, 4, key,
3294 digestsize, 1);
3295 }
3296
3297 dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
3298 DMA_TO_DEVICE);
3299 err_flc_dma:
3300 dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
3301 err_key_dma:
3302 kfree(flc);
3303 err_flc:
3304 kfree(req_ctx);
3305
3306 *keylen = digestsize;
3307
3308 return ret;
3309 }
3310
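/*
 * ahash_setkey - set the authentication key
 *
 * Per standard HMAC behaviour, a key longer than the block size is first
 * reduced by hashing it with the unkeyed algorithm (hash_digest_key() above);
 * the digest then replaces the user-supplied key for split-key derivation.
 */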
3311 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
3312 unsigned int keylen)
3313 {
3314 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3315 unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
3316 unsigned int digestsize = crypto_ahash_digestsize(ahash);
3317 int ret;
3318 u8 *hashed_key = NULL;
3319
3320 dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
3321
3322 if (keylen > blocksize) {
3323 unsigned int aligned_len =
3324 ALIGN(keylen, dma_get_cache_alignment());
3325
3326 if (aligned_len < keylen)
3327 return -EOVERFLOW;
3328
3329 hashed_key = kmemdup(key, aligned_len, GFP_KERNEL);
3330 if (!hashed_key)
3331 return -ENOMEM;
3332 ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
3333 if (ret)
3334 goto bad_free_key;
3335 key = hashed_key;
3336 }
3337
3338 ctx->adata.keylen = keylen;
3339 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3340 OP_ALG_ALGSEL_MASK);
3341 if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
3342 goto bad_free_key;
3343
3344 ctx->adata.key_virt = key;
3345 ctx->adata.key_inline = true;
3346
3347 /*
3348 * In case |user key| > |derived key|, using DKP<imm,imm> would result
3349 * in invalid opcodes (last bytes of user key) in the resulting
3350 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
3351 * addresses are needed.
3352 */
3353 if (keylen > ctx->adata.keylen_pad) {
3354 memcpy(ctx->key, key, keylen);
3355 dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
3356 ctx->adata.keylen_pad,
3357 DMA_TO_DEVICE);
3358 }
3359
3360 ret = ahash_set_sh_desc(ahash);
3361 kfree(hashed_key);
3362 return ret;
3363 bad_free_key:
3364 kfree(hashed_key);
3365 return -EINVAL;
3366 }
3367
3368 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3369 struct ahash_request *req)
3370 {
3371 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3372
3373 if (edesc->src_nents)
3374 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3375
3376 if (edesc->qm_sg_bytes)
3377 dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3378 DMA_TO_DEVICE);
3379
3380 if (state->buf_dma) {
3381 dma_unmap_single(dev, state->buf_dma, state->buflen,
3382 DMA_TO_DEVICE);
3383 state->buf_dma = 0;
3384 }
3385 }
3386
3387 static inline void ahash_unmap_ctx(struct device *dev,
3388 struct ahash_edesc *edesc,
3389 struct ahash_request *req, u32 flag)
3390 {
3391 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3392
3393 if (state->ctx_dma) {
3394 dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
3395 state->ctx_dma = 0;
3396 }
3397 ahash_unmap(dev, edesc, req);
3398 }
3399
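/*
 * Completion callbacks: ahash_done() and ahash_done_ctx_src() copy the final
 * digest to req->result, while ahash_done_bi() and ahash_done_ctx_dst()
 * instead save the unhashed tail of the request into state->buf for the next
 * update. All of them unmap the extended descriptor and convert the hardware
 * status to an errno.
 */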
3400 static void ahash_done(void *cbk_ctx, u32 status)
3401 {
3402 struct crypto_async_request *areq = cbk_ctx;
3403 struct ahash_request *req = ahash_request_cast(areq);
3404 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3405 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3406 struct ahash_edesc *edesc = state->caam_req.edesc;
3407 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3408 int digestsize = crypto_ahash_digestsize(ahash);
3409 int ecode = 0;
3410
3411 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3412
3413 if (unlikely(status))
3414 ecode = caam_qi2_strstatus(ctx->dev, status);
3415
3416 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3417 memcpy(req->result, state->caam_ctx, digestsize);
3418 qi_cache_free(edesc);
3419
3420 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3421 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3422 ctx->ctx_len, 1);
3423
3424 ahash_request_complete(req, ecode);
3425 }
3426
3427 static void ahash_done_bi(void *cbk_ctx, u32 status)
3428 {
3429 struct crypto_async_request *areq = cbk_ctx;
3430 struct ahash_request *req = ahash_request_cast(areq);
3431 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3432 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3433 struct ahash_edesc *edesc = state->caam_req.edesc;
3434 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3435 int ecode = 0;
3436
3437 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3438
3439 if (unlikely(status))
3440 ecode = caam_qi2_strstatus(ctx->dev, status);
3441
3442 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3443 qi_cache_free(edesc);
3444
3445 scatterwalk_map_and_copy(state->buf, req->src,
3446 req->nbytes - state->next_buflen,
3447 state->next_buflen, 0);
3448 state->buflen = state->next_buflen;
3449
3450 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3451 DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3452 state->buflen, 1);
3453
3454 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3455 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3456 ctx->ctx_len, 1);
3457 if (req->result)
3458 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3459 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3460 crypto_ahash_digestsize(ahash), 1);
3461
3462 ahash_request_complete(req, ecode);
3463 }
3464
3465 static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3466 {
3467 struct crypto_async_request *areq = cbk_ctx;
3468 struct ahash_request *req = ahash_request_cast(areq);
3469 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3470 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3471 struct ahash_edesc *edesc = state->caam_req.edesc;
3472 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3473 int digestsize = crypto_ahash_digestsize(ahash);
3474 int ecode = 0;
3475
3476 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3477
3478 if (unlikely(status))
3479 ecode = caam_qi2_strstatus(ctx->dev, status);
3480
3481 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3482 memcpy(req->result, state->caam_ctx, digestsize);
3483 qi_cache_free(edesc);
3484
3485 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3486 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3487 ctx->ctx_len, 1);
3488
3489 ahash_request_complete(req, ecode);
3490 }
3491
3492 static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3493 {
3494 struct crypto_async_request *areq = cbk_ctx;
3495 struct ahash_request *req = ahash_request_cast(areq);
3496 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3497 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3498 struct ahash_edesc *edesc = state->caam_req.edesc;
3499 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3500 int ecode = 0;
3501
3502 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3503
3504 if (unlikely(status))
3505 ecode = caam_qi2_strstatus(ctx->dev, status);
3506
3507 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3508 qi_cache_free(edesc);
3509
3510 scatterwalk_map_and_copy(state->buf, req->src,
3511 req->nbytes - state->next_buflen,
3512 state->next_buflen, 0);
3513 state->buflen = state->next_buflen;
3514
3515 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3516 DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3517 state->buflen, 1);
3518
3519 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3520 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3521 ctx->ctx_len, 1);
3522 if (req->result)
3523 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3524 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3525 crypto_ahash_digestsize(ahash), 1);
3526
3527 ahash_request_complete(req, ecode);
3528 }
3529
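/*
 * ahash_update_ctx - hotpath .update handler
 *
 * Only whole blocks are sent to the engine; the remainder is carried in
 * state->buf between calls. A minimal worked example (hypothetical numbers,
 * assuming a 64-byte block size): with buflen = 10 and req->nbytes = 100,
 * in_len = 110, next_buflen = 110 & 63 = 46, so to_hash = 64 and the final
 * 46 bytes are copied back into the buffer by ahash_done_bi().
 */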
3530 static int ahash_update_ctx(struct ahash_request *req)
3531 {
3532 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3533 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3534 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3535 struct caam_request *req_ctx = &state->caam_req;
3536 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3537 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3538 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3539 GFP_KERNEL : GFP_ATOMIC;
3540 u8 *buf = state->buf;
3541 int *buflen = &state->buflen;
3542 int *next_buflen = &state->next_buflen;
3543 int in_len = *buflen + req->nbytes, to_hash;
3544 int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
3545 struct ahash_edesc *edesc;
3546 int ret = 0;
3547
3548 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3549 to_hash = in_len - *next_buflen;
3550
3551 if (to_hash) {
3552 struct dpaa2_sg_entry *sg_table;
3553 int src_len = req->nbytes - *next_buflen;
3554
3555 src_nents = sg_nents_for_len(req->src, src_len);
3556 if (src_nents < 0) {
3557 dev_err(ctx->dev, "Invalid number of src SG.\n");
3558 return src_nents;
3559 }
3560
3561 if (src_nents) {
3562 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3563 DMA_TO_DEVICE);
3564 if (!mapped_nents) {
3565 dev_err(ctx->dev, "unable to DMA map source\n");
3566 return -ENOMEM;
3567 }
3568 } else {
3569 mapped_nents = 0;
3570 }
3571
3572 /* allocate space for base edesc and link tables */
3573 edesc = qi_cache_zalloc(flags);
3574 if (!edesc) {
3575 dma_unmap_sg(ctx->dev, req->src, src_nents,
3576 DMA_TO_DEVICE);
3577 return -ENOMEM;
3578 }
3579
3580 edesc->src_nents = src_nents;
3581 qm_sg_src_index = 1 + (*buflen ? 1 : 0);
3582 qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3583 sizeof(*sg_table);
3584 sg_table = &edesc->sgt[0];
3585
3586 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3587 DMA_BIDIRECTIONAL);
3588 if (ret)
3589 goto unmap_ctx;
3590
3591 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3592 if (ret)
3593 goto unmap_ctx;
3594
3595 if (mapped_nents) {
3596 sg_to_qm_sg_last(req->src, src_len,
3597 sg_table + qm_sg_src_index, 0);
3598 } else {
3599 dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
3600 true);
3601 }
3602
3603 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3604 qm_sg_bytes, DMA_TO_DEVICE);
3605 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3606 dev_err(ctx->dev, "unable to map S/G table\n");
3607 ret = -ENOMEM;
3608 goto unmap_ctx;
3609 }
3610 edesc->qm_sg_bytes = qm_sg_bytes;
3611
3612 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3613 dpaa2_fl_set_final(in_fle, true);
3614 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3615 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3616 dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
3617 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3618 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3619 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3620
3621 req_ctx->flc = &ctx->flc[UPDATE];
3622 req_ctx->flc_dma = ctx->flc_dma[UPDATE];
3623 req_ctx->cbk = ahash_done_bi;
3624 req_ctx->ctx = &req->base;
3625 req_ctx->edesc = edesc;
3626
3627 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3628 if (ret != -EINPROGRESS &&
3629 !(ret == -EBUSY &&
3630 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3631 goto unmap_ctx;
3632 } else if (*next_buflen) {
3633 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3634 req->nbytes, 0);
3635 *buflen = *next_buflen;
3636
3637 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3638 DUMP_PREFIX_ADDRESS, 16, 4, buf,
3639 *buflen, 1);
3640 }
3641
3642 return ret;
3643 unmap_ctx:
3644 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3645 qi_cache_free(edesc);
3646 return ret;
3647 }
3648
3649 static int ahash_final_ctx(struct ahash_request *req)
3650 {
3651 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3652 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3653 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3654 struct caam_request *req_ctx = &state->caam_req;
3655 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3656 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3657 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3658 GFP_KERNEL : GFP_ATOMIC;
3659 int buflen = state->buflen;
3660 int qm_sg_bytes;
3661 int digestsize = crypto_ahash_digestsize(ahash);
3662 struct ahash_edesc *edesc;
3663 struct dpaa2_sg_entry *sg_table;
3664 int ret;
3665
3666 /* allocate space for base edesc and link tables */
3667 edesc = qi_cache_zalloc(flags);
3668 if (!edesc)
3669 return -ENOMEM;
3670
3671 qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
3672 sg_table = &edesc->sgt[0];
3673
3674 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3675 DMA_BIDIRECTIONAL);
3676 if (ret)
3677 goto unmap_ctx;
3678
3679 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3680 if (ret)
3681 goto unmap_ctx;
3682
3683 dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);
3684
3685 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3686 DMA_TO_DEVICE);
3687 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3688 dev_err(ctx->dev, "unable to map S/G table\n");
3689 ret = -ENOMEM;
3690 goto unmap_ctx;
3691 }
3692 edesc->qm_sg_bytes = qm_sg_bytes;
3693
3694 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3695 dpaa2_fl_set_final(in_fle, true);
3696 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3697 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3698 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
3699 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3700 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3701 dpaa2_fl_set_len(out_fle, digestsize);
3702
3703 req_ctx->flc = &ctx->flc[FINALIZE];
3704 req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3705 req_ctx->cbk = ahash_done_ctx_src;
3706 req_ctx->ctx = &req->base;
3707 req_ctx->edesc = edesc;
3708
3709 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3710 if (ret == -EINPROGRESS ||
3711 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3712 return ret;
3713
3714 unmap_ctx:
3715 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3716 qi_cache_free(edesc);
3717 return ret;
3718 }
3719
3720 static int ahash_finup_ctx(struct ahash_request *req)
3721 {
3722 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3723 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3724 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3725 struct caam_request *req_ctx = &state->caam_req;
3726 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3727 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3728 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3729 GFP_KERNEL : GFP_ATOMIC;
3730 int buflen = state->buflen;
3731 int qm_sg_bytes, qm_sg_src_index;
3732 int src_nents, mapped_nents;
3733 int digestsize = crypto_ahash_digestsize(ahash);
3734 struct ahash_edesc *edesc;
3735 struct dpaa2_sg_entry *sg_table;
3736 int ret;
3737
3738 src_nents = sg_nents_for_len(req->src, req->nbytes);
3739 if (src_nents < 0) {
3740 dev_err(ctx->dev, "Invalid number of src SG.\n");
3741 return src_nents;
3742 }
3743
3744 if (src_nents) {
3745 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3746 DMA_TO_DEVICE);
3747 if (!mapped_nents) {
3748 dev_err(ctx->dev, "unable to DMA map source\n");
3749 return -ENOMEM;
3750 }
3751 } else {
3752 mapped_nents = 0;
3753 }
3754
3755 /* allocate space for base edesc and link tables */
3756 edesc = qi_cache_zalloc(flags);
3757 if (!edesc) {
3758 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3759 return -ENOMEM;
3760 }
3761
3762 edesc->src_nents = src_nents;
3763 qm_sg_src_index = 1 + (buflen ? 1 : 0);
3764 qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3765 sizeof(*sg_table);
3766 sg_table = &edesc->sgt[0];
3767
3768 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3769 DMA_BIDIRECTIONAL);
3770 if (ret)
3771 goto unmap_ctx;
3772
3773 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3774 if (ret)
3775 goto unmap_ctx;
3776
3777 sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
3778
3779 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3780 DMA_TO_DEVICE);
3781 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3782 dev_err(ctx->dev, "unable to map S/G table\n");
3783 ret = -ENOMEM;
3784 goto unmap_ctx;
3785 }
3786 edesc->qm_sg_bytes = qm_sg_bytes;
3787
3788 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3789 dpaa2_fl_set_final(in_fle, true);
3790 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3791 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3792 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
3793 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3794 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3795 dpaa2_fl_set_len(out_fle, digestsize);
3796
3797 req_ctx->flc = &ctx->flc[FINALIZE];
3798 req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3799 req_ctx->cbk = ahash_done_ctx_src;
3800 req_ctx->ctx = &req->base;
3801 req_ctx->edesc = edesc;
3802
3803 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3804 if (ret == -EINPROGRESS ||
3805 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3806 return ret;
3807
3808 unmap_ctx:
3809 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3810 qi_cache_free(edesc);
3811 return ret;
3812 }
3813
3814 static int ahash_digest(struct ahash_request *req)
3815 {
3816 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3817 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3818 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3819 struct caam_request *req_ctx = &state->caam_req;
3820 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3821 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3822 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3823 GFP_KERNEL : GFP_ATOMIC;
3824 int digestsize = crypto_ahash_digestsize(ahash);
3825 int src_nents, mapped_nents;
3826 struct ahash_edesc *edesc;
3827 int ret = -ENOMEM;
3828
3829 state->buf_dma = 0;
3830
3831 src_nents = sg_nents_for_len(req->src, req->nbytes);
3832 if (src_nents < 0) {
3833 dev_err(ctx->dev, "Invalid number of src SG.\n");
3834 return src_nents;
3835 }
3836
3837 if (src_nents) {
3838 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3839 DMA_TO_DEVICE);
3840 if (!mapped_nents) {
3841 dev_err(ctx->dev, "unable to map source for DMA\n");
3842 return ret;
3843 }
3844 } else {
3845 mapped_nents = 0;
3846 }
3847
3848 /* allocate space for base edesc and link tables */
3849 edesc = qi_cache_zalloc(flags);
3850 if (!edesc) {
3851 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3852 return ret;
3853 }
3854
3855 edesc->src_nents = src_nents;
3856 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3857
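	/*
	 * A single mapped segment fits directly in the frame list entry;
	 * multiple segments must go through a QMan S/G table.
	 */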
3858 if (mapped_nents > 1) {
3859 int qm_sg_bytes;
3860 struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3861
3862 qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
3863 sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
3864 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3865 qm_sg_bytes, DMA_TO_DEVICE);
3866 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3867 dev_err(ctx->dev, "unable to map S/G table\n");
3868 goto unmap;
3869 }
3870 edesc->qm_sg_bytes = qm_sg_bytes;
3871 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3872 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3873 } else {
3874 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3875 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3876 }
3877
3878 state->ctx_dma_len = digestsize;
3879 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3880 DMA_FROM_DEVICE);
3881 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3882 dev_err(ctx->dev, "unable to map ctx\n");
3883 state->ctx_dma = 0;
3884 goto unmap;
3885 }
3886
3887 dpaa2_fl_set_final(in_fle, true);
3888 dpaa2_fl_set_len(in_fle, req->nbytes);
3889 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3890 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3891 dpaa2_fl_set_len(out_fle, digestsize);
3892
3893 req_ctx->flc = &ctx->flc[DIGEST];
3894 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3895 req_ctx->cbk = ahash_done;
3896 req_ctx->ctx = &req->base;
3897 req_ctx->edesc = edesc;
3898 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3899 if (ret == -EINPROGRESS ||
3900 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3901 return ret;
3902
3903 unmap:
3904 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3905 qi_cache_free(edesc);
3906 return ret;
3907 }
3908
3909 static int ahash_final_no_ctx(struct ahash_request *req)
3910 {
3911 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3912 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3913 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3914 struct caam_request *req_ctx = &state->caam_req;
3915 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3916 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3917 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3918 GFP_KERNEL : GFP_ATOMIC;
3919 u8 *buf = state->buf;
3920 int buflen = state->buflen;
3921 int digestsize = crypto_ahash_digestsize(ahash);
3922 struct ahash_edesc *edesc;
3923 int ret = -ENOMEM;
3924
3925 /* allocate space for base edesc and link tables */
3926 edesc = qi_cache_zalloc(flags);
3927 if (!edesc)
3928 return ret;
3929
3930 if (buflen) {
3931 state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
3932 DMA_TO_DEVICE);
3933 if (dma_mapping_error(ctx->dev, state->buf_dma)) {
3934 dev_err(ctx->dev, "unable to map src\n");
3935 goto unmap;
3936 }
3937 }
3938
3939 state->ctx_dma_len = digestsize;
3940 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3941 DMA_FROM_DEVICE);
3942 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3943 dev_err(ctx->dev, "unable to map ctx\n");
3944 state->ctx_dma = 0;
3945 goto unmap;
3946 }
3947
3948 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3949 dpaa2_fl_set_final(in_fle, true);
3950 /*
3951	 * The crypto engine requires the input entry to be present when
3952	 * the "frame list" FD format is used.
3953	 * Since the engine does not support FMT=2'b11 (unused entry type), leaving
3954 * in_fle zeroized (except for "Final" flag) is the best option.
3955 */
3956 if (buflen) {
3957 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3958 dpaa2_fl_set_addr(in_fle, state->buf_dma);
3959 dpaa2_fl_set_len(in_fle, buflen);
3960 }
3961 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3962 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3963 dpaa2_fl_set_len(out_fle, digestsize);
3964
3965 req_ctx->flc = &ctx->flc[DIGEST];
3966 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3967 req_ctx->cbk = ahash_done;
3968 req_ctx->ctx = &req->base;
3969 req_ctx->edesc = edesc;
3970
3971 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3972 if (ret == -EINPROGRESS ||
3973 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3974 return ret;
3975
3976 unmap:
3977 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3978 qi_cache_free(edesc);
3979 return ret;
3980 }
3981
3982 static int ahash_update_no_ctx(struct ahash_request *req)
3983 {
3984 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3985 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3986 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3987 struct caam_request *req_ctx = &state->caam_req;
3988 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3989 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3990 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3991 GFP_KERNEL : GFP_ATOMIC;
3992 u8 *buf = state->buf;
3993 int *buflen = &state->buflen;
3994 int *next_buflen = &state->next_buflen;
3995 int in_len = *buflen + req->nbytes, to_hash;
3996 int qm_sg_bytes, src_nents, mapped_nents;
3997 struct ahash_edesc *edesc;
3998 int ret = 0;
3999
4000 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
4001 to_hash = in_len - *next_buflen;
4002
4003 if (to_hash) {
4004 struct dpaa2_sg_entry *sg_table;
4005 int src_len = req->nbytes - *next_buflen;
4006
4007 src_nents = sg_nents_for_len(req->src, src_len);
4008 if (src_nents < 0) {
4009 dev_err(ctx->dev, "Invalid number of src SG.\n");
4010 return src_nents;
4011 }
4012
4013 if (src_nents) {
4014 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4015 DMA_TO_DEVICE);
4016 if (!mapped_nents) {
4017 dev_err(ctx->dev, "unable to DMA map source\n");
4018 return -ENOMEM;
4019 }
4020 } else {
4021 mapped_nents = 0;
4022 }
4023
4024 /* allocate space for base edesc and link tables */
4025 edesc = qi_cache_zalloc(flags);
4026 if (!edesc) {
4027 dma_unmap_sg(ctx->dev, req->src, src_nents,
4028 DMA_TO_DEVICE);
4029 return -ENOMEM;
4030 }
4031
4032 edesc->src_nents = src_nents;
4033 qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
4034 sizeof(*sg_table);
4035 sg_table = &edesc->sgt[0];
4036
4037 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4038 if (ret)
4039 goto unmap_ctx;
4040
4041 sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
4042
4043 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4044 qm_sg_bytes, DMA_TO_DEVICE);
4045 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4046 dev_err(ctx->dev, "unable to map S/G table\n");
4047 ret = -ENOMEM;
4048 goto unmap_ctx;
4049 }
4050 edesc->qm_sg_bytes = qm_sg_bytes;
4051
4052 state->ctx_dma_len = ctx->ctx_len;
4053 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4054 ctx->ctx_len, DMA_FROM_DEVICE);
4055 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4056 dev_err(ctx->dev, "unable to map ctx\n");
4057 state->ctx_dma = 0;
4058 ret = -ENOMEM;
4059 goto unmap_ctx;
4060 }
4061
4062 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4063 dpaa2_fl_set_final(in_fle, true);
4064 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4065 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4066 dpaa2_fl_set_len(in_fle, to_hash);
4067 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4068 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4069 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4070
4071 req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4072 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4073 req_ctx->cbk = ahash_done_ctx_dst;
4074 req_ctx->ctx = &req->base;
4075 req_ctx->edesc = edesc;
4076
4077 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4078 if (ret != -EINPROGRESS &&
4079 !(ret == -EBUSY &&
4080 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4081 goto unmap_ctx;
4082
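		/*
		 * A running context now exists in caam_ctx, so promote the
		 * handlers to their context-aware variants.
		 */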
4083 state->update = ahash_update_ctx;
4084 state->finup = ahash_finup_ctx;
4085 state->final = ahash_final_ctx;
4086 } else if (*next_buflen) {
4087 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
4088 req->nbytes, 0);
4089 *buflen = *next_buflen;
4090
4091 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
4092 DUMP_PREFIX_ADDRESS, 16, 4, buf,
4093 *buflen, 1);
4094 }
4095
4096 return ret;
4097 unmap_ctx:
4098 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4099 qi_cache_free(edesc);
4100 return ret;
4101 }
4102
4103 static int ahash_finup_no_ctx(struct ahash_request *req)
4104 {
4105 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4106 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
4107 struct caam_hash_state *state = ahash_request_ctx_dma(req);
4108 struct caam_request *req_ctx = &state->caam_req;
4109 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4110 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4111 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4112 GFP_KERNEL : GFP_ATOMIC;
4113 int buflen = state->buflen;
4114 int qm_sg_bytes, src_nents, mapped_nents;
4115 int digestsize = crypto_ahash_digestsize(ahash);
4116 struct ahash_edesc *edesc;
4117 struct dpaa2_sg_entry *sg_table;
4118 int ret = -ENOMEM;
4119
4120 src_nents = sg_nents_for_len(req->src, req->nbytes);
4121 if (src_nents < 0) {
4122 dev_err(ctx->dev, "Invalid number of src SG.\n");
4123 return src_nents;
4124 }
4125
4126 if (src_nents) {
4127 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4128 DMA_TO_DEVICE);
4129 if (!mapped_nents) {
4130 dev_err(ctx->dev, "unable to DMA map source\n");
4131 return ret;
4132 }
4133 } else {
4134 mapped_nents = 0;
4135 }
4136
4137 /* allocate space for base edesc and link tables */
4138 edesc = qi_cache_zalloc(flags);
4139 if (!edesc) {
4140 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
4141 return ret;
4142 }
4143
4144 edesc->src_nents = src_nents;
4145 qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
4146 sg_table = &edesc->sgt[0];
4147
4148 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4149 if (ret)
4150 goto unmap;
4151
4152 sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
4153
4154 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
4155 DMA_TO_DEVICE);
4156 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4157 dev_err(ctx->dev, "unable to map S/G table\n");
4158 ret = -ENOMEM;
4159 goto unmap;
4160 }
4161 edesc->qm_sg_bytes = qm_sg_bytes;
4162
4163 state->ctx_dma_len = digestsize;
4164 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
4165 DMA_FROM_DEVICE);
4166 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4167 dev_err(ctx->dev, "unable to map ctx\n");
4168 state->ctx_dma = 0;
4169 ret = -ENOMEM;
4170 goto unmap;
4171 }
4172
4173 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4174 dpaa2_fl_set_final(in_fle, true);
4175 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4176 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4177 dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
4178 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4179 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4180 dpaa2_fl_set_len(out_fle, digestsize);
4181
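	/*
	 * With no running context, the buffered data and the new data are
	 * hashed in a single pass using the DIGEST descriptor.
	 */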
4182 req_ctx->flc = &ctx->flc[DIGEST];
4183 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
4184 req_ctx->cbk = ahash_done;
4185 req_ctx->ctx = &req->base;
4186 req_ctx->edesc = edesc;
4187 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4188 if (ret != -EINPROGRESS &&
4189 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4190 goto unmap;
4191
4192 return ret;
4193 unmap:
4194 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
4195 qi_cache_free(edesc);
4196 return ret;
4197 }
4198
4199 static int ahash_update_first(struct ahash_request *req)
4200 {
4201 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4202 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
4203 struct caam_hash_state *state = ahash_request_ctx_dma(req);
4204 struct caam_request *req_ctx = &state->caam_req;
4205 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4206 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4207 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4208 GFP_KERNEL : GFP_ATOMIC;
4209 u8 *buf = state->buf;
4210 int *buflen = &state->buflen;
4211 int *next_buflen = &state->next_buflen;
4212 int to_hash;
4213 int src_nents, mapped_nents;
4214 struct ahash_edesc *edesc;
4215 int ret = 0;
4216
4217 *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
4218 1);
4219 to_hash = req->nbytes - *next_buflen;
4220
4221 if (to_hash) {
4222 struct dpaa2_sg_entry *sg_table;
4223 int src_len = req->nbytes - *next_buflen;
4224
4225 src_nents = sg_nents_for_len(req->src, src_len);
4226 if (src_nents < 0) {
4227 dev_err(ctx->dev, "Invalid number of src SG.\n");
4228 return src_nents;
4229 }
4230
4231 if (src_nents) {
4232 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4233 DMA_TO_DEVICE);
4234 if (!mapped_nents) {
4235 dev_err(ctx->dev, "unable to map source for DMA\n");
4236 return -ENOMEM;
4237 }
4238 } else {
4239 mapped_nents = 0;
4240 }
4241
4242 /* allocate space for base edesc and link tables */
4243 edesc = qi_cache_zalloc(flags);
4244 if (!edesc) {
4245 dma_unmap_sg(ctx->dev, req->src, src_nents,
4246 DMA_TO_DEVICE);
4247 return -ENOMEM;
4248 }
4249
4250 edesc->src_nents = src_nents;
4251 sg_table = &edesc->sgt[0];
4252
4253 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4254 dpaa2_fl_set_final(in_fle, true);
4255 dpaa2_fl_set_len(in_fle, to_hash);
4256
4257 if (mapped_nents > 1) {
4258 int qm_sg_bytes;
4259
4260 sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
4261 qm_sg_bytes = pad_sg_nents(mapped_nents) *
4262 sizeof(*sg_table);
4263 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4264 qm_sg_bytes,
4265 DMA_TO_DEVICE);
4266 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4267 dev_err(ctx->dev, "unable to map S/G table\n");
4268 ret = -ENOMEM;
4269 goto unmap_ctx;
4270 }
4271 edesc->qm_sg_bytes = qm_sg_bytes;
4272 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4273 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4274 } else {
4275 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
4276 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
4277 }
4278
4279 state->ctx_dma_len = ctx->ctx_len;
4280 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4281 ctx->ctx_len, DMA_FROM_DEVICE);
4282 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4283 dev_err(ctx->dev, "unable to map ctx\n");
4284 state->ctx_dma = 0;
4285 ret = -ENOMEM;
4286 goto unmap_ctx;
4287 }
4288
4289 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4290 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4291 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4292
4293 req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4294 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4295 req_ctx->cbk = ahash_done_ctx_dst;
4296 req_ctx->ctx = &req->base;
4297 req_ctx->edesc = edesc;
4298
4299 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4300 if (ret != -EINPROGRESS &&
4301 !(ret == -EBUSY && req->base.flags &
4302 CRYPTO_TFM_REQ_MAY_BACKLOG))
4303 goto unmap_ctx;
4304
4305 state->update = ahash_update_ctx;
4306 state->finup = ahash_finup_ctx;
4307 state->final = ahash_final_ctx;
4308 } else if (*next_buflen) {
4309 state->update = ahash_update_no_ctx;
4310 state->finup = ahash_finup_no_ctx;
4311 state->final = ahash_final_no_ctx;
4312 scatterwalk_map_and_copy(buf, req->src, 0,
4313 req->nbytes, 0);
4314 *buflen = *next_buflen;
4315
4316 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
4317 DUMP_PREFIX_ADDRESS, 16, 4, buf,
4318 *buflen, 1);
4319 }
4320
4321 return ret;
4322 unmap_ctx:
4323 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4324 qi_cache_free(edesc);
4325 return ret;
4326 }
4327
4328 static int ahash_finup_first(struct ahash_request *req)
4329 {
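	/* Nothing has been hashed yet, so finup degenerates to a one-shot digest */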
4330 return ahash_digest(req);
4331 }
4332
4333 static int ahash_init(struct ahash_request *req)
4334 {
4335 struct caam_hash_state *state = ahash_request_ctx_dma(req);
4336
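	/* Start context-less; handlers are promoted once data has been hashed */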
4337 state->update = ahash_update_first;
4338 state->finup = ahash_finup_first;
4339 state->final = ahash_final_no_ctx;
4340
4341 state->ctx_dma = 0;
4342 state->ctx_dma_len = 0;
4343 state->buf_dma = 0;
4344 state->buflen = 0;
4345 state->next_buflen = 0;
4346
4347 return 0;
4348 }
4349
4350 static int ahash_update(struct ahash_request *req)
4351 {
4352 struct caam_hash_state *state = ahash_request_ctx_dma(req);
4353
4354 return state->update(req);
4355 }
4356
4357 static int ahash_finup(struct ahash_request *req)
4358 {
4359 struct caam_hash_state *state = ahash_request_ctx_dma(req);
4360
4361 return state->finup(req);
4362 }
4363
4364 static int ahash_final(struct ahash_request *req)
4365 {
4366 struct caam_hash_state *state = ahash_request_ctx_dma(req);
4367
4368 return state->final(req);
4369 }
4370
4371 static int ahash_export(struct ahash_request *req, void *out)
4372 {
4373 struct caam_hash_state *state = ahash_request_ctx_dma(req);
4374 struct caam_export_state *export = out;
4375 u8 *buf = state->buf;
4376 int len = state->buflen;
4377
4378 memcpy(export->buf, buf, len);
4379 memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
4380 export->buflen = len;
4381 export->update = state->update;
4382 export->final = state->final;
4383 export->finup = state->finup;
4384
4385 return 0;
4386 }
4387
4388 static int ahash_import(struct ahash_request *req, const void *in)
4389 {
4390 struct caam_hash_state *state = ahash_request_ctx_dma(req);
4391 const struct caam_export_state *export = in;
4392
4393 memset(state, 0, sizeof(*state));
4394 memcpy(state->buf, export->buf, export->buflen);
4395 memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
4396 state->buflen = export->buflen;
4397 state->update = export->update;
4398 state->final = export->final;
4399 state->finup = export->finup;
4400
4401 return 0;
4402 }
4403
4404 struct caam_hash_template {
4405 char name[CRYPTO_MAX_ALG_NAME];
4406 char driver_name[CRYPTO_MAX_ALG_NAME];
4407 char hmac_name[CRYPTO_MAX_ALG_NAME];
4408 char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
4409 unsigned int blocksize;
4410 struct ahash_alg template_ahash;
4411 u32 alg_type;
4412 };
4413
4414 /* ahash descriptors */
4415 static struct caam_hash_template driver_hash[] = {
4416 {
4417 .name = "sha1",
4418 .driver_name = "sha1-caam-qi2",
4419 .hmac_name = "hmac(sha1)",
4420 .hmac_driver_name = "hmac-sha1-caam-qi2",
4421 .blocksize = SHA1_BLOCK_SIZE,
4422 .template_ahash = {
4423 .init = ahash_init,
4424 .update = ahash_update,
4425 .final = ahash_final,
4426 .finup = ahash_finup,
4427 .digest = ahash_digest,
4428 .export = ahash_export,
4429 .import = ahash_import,
4430 .setkey = ahash_setkey,
4431 .halg = {
4432 .digestsize = SHA1_DIGEST_SIZE,
4433 .statesize = sizeof(struct caam_export_state),
4434 },
4435 },
4436 .alg_type = OP_ALG_ALGSEL_SHA1,
4437 }, {
4438 .name = "sha224",
4439 .driver_name = "sha224-caam-qi2",
4440 .hmac_name = "hmac(sha224)",
4441 .hmac_driver_name = "hmac-sha224-caam-qi2",
4442 .blocksize = SHA224_BLOCK_SIZE,
4443 .template_ahash = {
4444 .init = ahash_init,
4445 .update = ahash_update,
4446 .final = ahash_final,
4447 .finup = ahash_finup,
4448 .digest = ahash_digest,
4449 .export = ahash_export,
4450 .import = ahash_import,
4451 .setkey = ahash_setkey,
4452 .halg = {
4453 .digestsize = SHA224_DIGEST_SIZE,
4454 .statesize = sizeof(struct caam_export_state),
4455 },
4456 },
4457 .alg_type = OP_ALG_ALGSEL_SHA224,
4458 }, {
4459 .name = "sha256",
4460 .driver_name = "sha256-caam-qi2",
4461 .hmac_name = "hmac(sha256)",
4462 .hmac_driver_name = "hmac-sha256-caam-qi2",
4463 .blocksize = SHA256_BLOCK_SIZE,
4464 .template_ahash = {
4465 .init = ahash_init,
4466 .update = ahash_update,
4467 .final = ahash_final,
4468 .finup = ahash_finup,
4469 .digest = ahash_digest,
4470 .export = ahash_export,
4471 .import = ahash_import,
4472 .setkey = ahash_setkey,
4473 .halg = {
4474 .digestsize = SHA256_DIGEST_SIZE,
4475 .statesize = sizeof(struct caam_export_state),
4476 },
4477 },
4478 .alg_type = OP_ALG_ALGSEL_SHA256,
4479 }, {
4480 .name = "sha384",
4481 .driver_name = "sha384-caam-qi2",
4482 .hmac_name = "hmac(sha384)",
4483 .hmac_driver_name = "hmac-sha384-caam-qi2",
4484 .blocksize = SHA384_BLOCK_SIZE,
4485 .template_ahash = {
4486 .init = ahash_init,
4487 .update = ahash_update,
4488 .final = ahash_final,
4489 .finup = ahash_finup,
4490 .digest = ahash_digest,
4491 .export = ahash_export,
4492 .import = ahash_import,
4493 .setkey = ahash_setkey,
4494 .halg = {
4495 .digestsize = SHA384_DIGEST_SIZE,
4496 .statesize = sizeof(struct caam_export_state),
4497 },
4498 },
4499 .alg_type = OP_ALG_ALGSEL_SHA384,
4500 }, {
4501 .name = "sha512",
4502 .driver_name = "sha512-caam-qi2",
4503 .hmac_name = "hmac(sha512)",
4504 .hmac_driver_name = "hmac-sha512-caam-qi2",
4505 .blocksize = SHA512_BLOCK_SIZE,
4506 .template_ahash = {
4507 .init = ahash_init,
4508 .update = ahash_update,
4509 .final = ahash_final,
4510 .finup = ahash_finup,
4511 .digest = ahash_digest,
4512 .export = ahash_export,
4513 .import = ahash_import,
4514 .setkey = ahash_setkey,
4515 .halg = {
4516 .digestsize = SHA512_DIGEST_SIZE,
4517 .statesize = sizeof(struct caam_export_state),
4518 },
4519 },
4520 .alg_type = OP_ALG_ALGSEL_SHA512,
4521 }, {
4522 .name = "md5",
4523 .driver_name = "md5-caam-qi2",
4524 .hmac_name = "hmac(md5)",
4525 .hmac_driver_name = "hmac-md5-caam-qi2",
4526 .blocksize = MD5_BLOCK_WORDS * 4,
4527 .template_ahash = {
4528 .init = ahash_init,
4529 .update = ahash_update,
4530 .final = ahash_final,
4531 .finup = ahash_finup,
4532 .digest = ahash_digest,
4533 .export = ahash_export,
4534 .import = ahash_import,
4535 .setkey = ahash_setkey,
4536 .halg = {
4537 .digestsize = MD5_DIGEST_SIZE,
4538 .statesize = sizeof(struct caam_export_state),
4539 },
4540 },
4541 .alg_type = OP_ALG_ALGSEL_MD5,
4542 }
4543 };
4544
4545 struct caam_hash_alg {
4546 struct list_head entry;
4547 struct device *dev;
4548 int alg_type;
4549 bool is_hmac;
4550 struct ahash_alg ahash_alg;
4551 };
4552
4553 static int caam_hash_cra_init(struct crypto_tfm *tfm)
4554 {
4555 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
4556 struct crypto_alg *base = tfm->__crt_alg;
4557 struct hash_alg_common *halg =
4558 container_of(base, struct hash_alg_common, base);
4559 struct ahash_alg *alg =
4560 container_of(halg, struct ahash_alg, halg);
4561 struct caam_hash_alg *caam_hash =
4562 container_of(alg, struct caam_hash_alg, ahash_alg);
4563 struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
4564 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
4565 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
4566 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
4567 HASH_MSG_LEN + 32,
4568 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
4569 HASH_MSG_LEN + 64,
4570 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
4571 dma_addr_t dma_addr;
4572 int i;
4573
4574 ctx->dev = caam_hash->dev;
4575
4576 if (caam_hash->is_hmac) {
4577 ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
4578 ARRAY_SIZE(ctx->key),
4579 DMA_TO_DEVICE,
4580 DMA_ATTR_SKIP_CPU_SYNC);
4581 if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
4582 dev_err(ctx->dev, "unable to map key\n");
4583 return -ENOMEM;
4584 }
4585 }
4586
4587 dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
4588 DMA_BIDIRECTIONAL,
4589 DMA_ATTR_SKIP_CPU_SYNC);
4590 if (dma_mapping_error(ctx->dev, dma_addr)) {
4591 dev_err(ctx->dev, "unable to map shared descriptors\n");
4592 if (ctx->adata.key_dma)
4593 dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4594 ARRAY_SIZE(ctx->key),
4595 DMA_TO_DEVICE,
4596 DMA_ATTR_SKIP_CPU_SYNC);
4597 return -ENOMEM;
4598 }
4599
4600 for (i = 0; i < HASH_NUM_OP; i++)
4601 ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
4602
4603 /* copy descriptor header template value */
4604 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
4605
4606 ctx->ctx_len = runninglen[(ctx->adata.algtype &
4607 OP_ALG_ALGSEL_SUBMASK) >>
4608 OP_ALG_ALGSEL_SHIFT];
4609
4610 crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));
4611
4612 /*
4613 * For keyed hash algorithms shared descriptors
4614 * will be created later in setkey() callback
4615 */
4616 return caam_hash->is_hmac ? 0 : ahash_set_sh_desc(ahash);
4617 }
4618
4619 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
4620 {
4621 struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
4622
4623 dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
4624 DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
4625 if (ctx->adata.key_dma)
4626 dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4627 ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
4628 DMA_ATTR_SKIP_CPU_SYNC);
4629 }
4630
4631 static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
4632 struct caam_hash_template *template, bool keyed)
4633 {
4634 struct caam_hash_alg *t_alg;
4635 struct ahash_alg *halg;
4636 struct crypto_alg *alg;
4637
4638 t_alg = kzalloc_obj(*t_alg);
4639 if (!t_alg)
4640 return ERR_PTR(-ENOMEM);
4641
4642 t_alg->ahash_alg = template->template_ahash;
4643 halg = &t_alg->ahash_alg;
4644 alg = &halg->halg.base;
4645
4646 if (keyed) {
4647 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4648 template->hmac_name);
4649 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4650 template->hmac_driver_name);
4651 t_alg->is_hmac = true;
4652 } else {
4653 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4654 template->name);
4655 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4656 template->driver_name);
4657 t_alg->ahash_alg.setkey = NULL;
4658 t_alg->is_hmac = false;
4659 }
4660 alg->cra_module = THIS_MODULE;
4661 alg->cra_init = caam_hash_cra_init;
4662 alg->cra_exit = caam_hash_cra_exit;
4663 alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding();
4664 alg->cra_priority = CAAM_CRA_PRIORITY;
4665 alg->cra_blocksize = template->blocksize;
4666 alg->cra_alignmask = 0;
4667 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
4668
4669 t_alg->alg_type = template->alg_type;
4670 t_alg->dev = dev;
4671
4672 return t_alg;
4673 }
4674
4675 static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
4676 {
4677 struct dpaa2_caam_priv_per_cpu *ppriv;
4678
4679 ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
4680 napi_schedule_irqoff(&ppriv->napi);
4681 }
4682
4683 static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
4684 {
4685 struct device *dev = priv->dev;
4686 struct dpaa2_io_notification_ctx *nctx;
4687 struct dpaa2_caam_priv_per_cpu *ppriv;
4688 int err, i = 0, cpu;
4689
4690 for_each_online_cpu(cpu) {
4691 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4692 ppriv->priv = priv;
4693 nctx = &ppriv->nctx;
4694 nctx->is_cdan = 0;
4695 nctx->id = ppriv->rsp_fqid;
4696 nctx->desired_cpu = cpu;
4697 nctx->cb = dpaa2_caam_fqdan_cb;
4698
4699 /* Register notification callbacks */
4700 ppriv->dpio = dpaa2_io_service_select(cpu);
4701 err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
4702 if (unlikely(err)) {
4703 dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
4704 nctx->cb = NULL;
4705 /*
4706	 * If there is no affine DPIO for this core, there is probably
4707	 * none available for the remaining cores either. Signal that we
4708	 * want to retry later, in case the DPIO devices weren't
4709 * probed yet.
4710 */
4711 err = -EPROBE_DEFER;
4712 goto err;
4713 }
4714
4715 ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
4716 dev);
4717 if (unlikely(!ppriv->store)) {
4718 dev_err(dev, "dpaa2_io_store_create() failed\n");
4719 err = -ENOMEM;
4720 goto err;
4721 }
4722
4723 if (++i == priv->num_pairs)
4724 break;
4725 }
4726
4727 return 0;
4728
4729 err:
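	/*
	 * Unwind only what was set up: both loops stop at the first
	 * per-CPU slot that was never initialized.
	 */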
4730 for_each_online_cpu(cpu) {
4731 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4732 if (!ppriv->nctx.cb)
4733 break;
4734 dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
4735 }
4736
4737 for_each_online_cpu(cpu) {
4738 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4739 if (!ppriv->store)
4740 break;
4741 dpaa2_io_store_destroy(ppriv->store);
4742 }
4743
4744 return err;
4745 }
4746
4747 static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
4748 {
4749 struct dpaa2_caam_priv_per_cpu *ppriv;
4750 int i = 0, cpu;
4751
4752 for_each_online_cpu(cpu) {
4753 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4754 dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
4755 priv->dev);
4756 dpaa2_io_store_destroy(ppriv->store);
4757
4758 if (++i == priv->num_pairs)
4759 return;
4760 }
4761 }
4762
4763 static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
4764 {
4765 struct dpseci_rx_queue_cfg rx_queue_cfg;
4766 struct device *dev = priv->dev;
4767 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4768 struct dpaa2_caam_priv_per_cpu *ppriv;
4769 int err = 0, i = 0, cpu;
4770
4771 /* Configure Rx queues */
4772 for_each_online_cpu(cpu) {
4773 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4774
4775 rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
4776 DPSECI_QUEUE_OPT_USER_CTX;
4777 rx_queue_cfg.order_preservation_en = 0;
4778 rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
4779 rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
4780 /*
4781 * Rx priority (WQ) doesn't really matter, since we use
4782 * pull mode, i.e. volatile dequeues from specific FQs
4783 */
4784 rx_queue_cfg.dest_cfg.priority = 0;
4785 rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
4786
4787 err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4788 &rx_queue_cfg);
4789 if (err) {
4790 dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
4791 err);
4792 return err;
4793 }
4794
4795 if (++i == priv->num_pairs)
4796 break;
4797 }
4798
4799 return err;
4800 }
4801
4802 static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
4803 {
4804 struct device *dev = priv->dev;
4805
4806 if (!priv->cscn_mem)
4807 return;
4808
4809 dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4810 kfree(priv->cscn_mem);
4811 }
4812
4813 static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
4814 {
4815 struct device *dev = priv->dev;
4816 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4817 struct dpaa2_caam_priv_per_cpu *ppriv;
4818 int i, err;
4819
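	/*
	 * dpseci_reset() is available only on DPSECI API versions newer
	 * than 5.3, hence the version gate below.
	 */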
4820 if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
4821 err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
4822 if (err)
4823 dev_err(dev, "dpseci_reset() failed\n");
4824 }
4825
4826 for_each_cpu(i, priv->clean_mask) {
4827 ppriv = per_cpu_ptr(priv->ppriv, i);
4828 free_netdev(ppriv->net_dev);
4829 }
4830 free_cpumask_var(priv->clean_mask);
4831
4832 dpaa2_dpseci_congestion_free(priv);
4833 dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4834 }
4835
4836 static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
4837 const struct dpaa2_fd *fd)
4838 {
4839 struct caam_request *req;
4840 u32 fd_err;
4841
4842 if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
4843 dev_err(priv->dev, "Only Frame List FD format is supported!\n");
4844 return;
4845 }
4846
4847 fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
4848 if (unlikely(fd_err))
4849 dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);
4850
4851 /*
4852 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
4853 * in FD[ERR] or FD[FRC].
4854 */
4855 req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
4856 dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
4857 DMA_BIDIRECTIONAL);
4858 req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
4859 }
4860
4861 static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
4862 {
4863 int err;
4864
4865 /* Retry while portal is busy */
4866 do {
4867 err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
4868 ppriv->store);
4869 } while (err == -EBUSY);
4870
4871 if (unlikely(err))
4872 		dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n", err);
4873
4874 return err;
4875 }
4876
4877 static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
4878 {
4879 struct dpaa2_dq *dq;
4880 int cleaned = 0, is_last;
4881
4882 do {
4883 dq = dpaa2_io_store_next(ppriv->store, &is_last);
4884 if (unlikely(!dq)) {
4885 if (unlikely(!is_last)) {
4886 dev_dbg(ppriv->priv->dev,
4887 "FQ %d returned no valid frames\n",
4888 ppriv->rsp_fqid);
4889 /*
4890 * MUST retry until we get some sort of
4891 * valid response token (be it "empty dequeue"
4892 * or a valid frame).
4893 */
4894 continue;
4895 }
4896 break;
4897 }
4898
4899 /* Process FD */
4900 dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
4901 cleaned++;
4902 } while (!is_last);
4903
4904 return cleaned;
4905 }
4906
4907 static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
4908 {
4909 struct dpaa2_caam_priv_per_cpu *ppriv;
4910 struct dpaa2_caam_priv *priv;
4911 int err, cleaned = 0, store_cleaned;
4912
4913 ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
4914 priv = ppriv->priv;
4915
4916 if (unlikely(dpaa2_caam_pull_fq(ppriv)))
4917 return 0;
4918
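	/*
	 * Consume full stores, then refill; stop once a pull returns
	 * nothing or another full store could overshoot the NAPI budget.
	 */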
4919 do {
4920 store_cleaned = dpaa2_caam_store_consume(ppriv);
4921 cleaned += store_cleaned;
4922
4923 if (store_cleaned == 0 ||
4924 cleaned > budget - DPAA2_CAAM_STORE_SIZE)
4925 break;
4926
4927 /* Try to dequeue some more */
4928 err = dpaa2_caam_pull_fq(ppriv);
4929 if (unlikely(err))
4930 break;
4931 } while (1);
4932
4933 if (cleaned < budget) {
4934 napi_complete_done(napi, cleaned);
4935 err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
4936 if (unlikely(err))
4937 dev_err(priv->dev, "Notification rearm failed: %d\n",
4938 err);
4939 }
4940
4941 return cleaned;
4942 }
4943
4944 static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
4945 u16 token)
4946 {
4947 struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
4948 struct device *dev = priv->dev;
4949 unsigned int alignmask;
4950 int err;
4951
4952 /*
4953 * Congestion group feature supported starting with DPSECI API v5.1
4954 * and only when object has been created with this capability.
4955 */
4956 if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
4957 !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
4958 return 0;
4959
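	/*
	 * The CSCN area must satisfy both the hardware alignment
	 * (DPAA2_CSCN_ALIGN) and the cache line alignment, so fold both
	 * into a single mask before rounding up the allocation size.
	 */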
4960 alignmask = DPAA2_CSCN_ALIGN - 1;
4961 alignmask |= dma_get_cache_alignment() - 1;
4962 priv->cscn_mem = kzalloc(ALIGN(DPAA2_CSCN_SIZE, alignmask + 1),
4963 GFP_KERNEL);
4964 if (!priv->cscn_mem)
4965 return -ENOMEM;
4966
4967 priv->cscn_dma = dma_map_single(dev, priv->cscn_mem,
4968 DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4969 if (dma_mapping_error(dev, priv->cscn_dma)) {
4970 dev_err(dev, "Error mapping CSCN memory area\n");
4971 err = -ENOMEM;
4972 goto err_dma_map;
4973 }
4974
4975 cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
4976 cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
4977 cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
4978 cong_notif_cfg.message_ctx = (uintptr_t)priv;
4979 cong_notif_cfg.message_iova = priv->cscn_dma;
4980 cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
4981 DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
4982 DPSECI_CGN_MODE_COHERENT_WRITE;
4983
4984 err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
4985 &cong_notif_cfg);
4986 if (err) {
4987 dev_err(dev, "dpseci_set_congestion_notification failed\n");
4988 goto err_set_cong;
4989 }
4990
4991 return 0;
4992
4993 err_set_cong:
4994 dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4995 err_dma_map:
4996 kfree(priv->cscn_mem);
4997
4998 return err;
4999 }
5000
5001 static void free_dpaa2_pcpu_netdev(struct dpaa2_caam_priv *priv, const cpumask_t *cpus)
5002 {
5003 struct dpaa2_caam_priv_per_cpu *ppriv;
5004 int i;
5005
5006 for_each_cpu(i, cpus) {
5007 ppriv = per_cpu_ptr(priv->ppriv, i);
5008 free_netdev(ppriv->net_dev);
5009 }
5010 }
5011
5012 static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
5013 {
5014 struct device *dev = &ls_dev->dev;
5015 struct dpaa2_caam_priv *priv;
5016 struct dpaa2_caam_priv_per_cpu *ppriv;
5017 int err, cpu;
5018 u8 i;
5019
5020 err = -ENOMEM;
5021 priv = dev_get_drvdata(dev);
5022
5023 if (!zalloc_cpumask_var(&priv->clean_mask, GFP_KERNEL))
5024 goto err_cpumask;
5025
5026 priv->dev = dev;
5027 priv->dpsec_id = ls_dev->obj_desc.id;
5028
5029 	/* Get a handle for the DPSECI this interface is associated with */
5030 err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
5031 if (err) {
5032 dev_err(dev, "dpseci_open() failed: %d\n", err);
5033 goto err_open;
5034 }
5035
5036 err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
5037 &priv->minor_ver);
5038 if (err) {
5039 dev_err(dev, "dpseci_get_api_version() failed\n");
5040 goto err_get_vers;
5041 }
5042
5043 dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
5044
5045 if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
5046 err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
5047 if (err) {
5048 dev_err(dev, "dpseci_reset() failed\n");
5049 goto err_get_vers;
5050 }
5051 }
5052
5053 err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
5054 &priv->dpseci_attr);
5055 if (err) {
5056 dev_err(dev, "dpseci_get_attributes() failed\n");
5057 goto err_get_vers;
5058 }
5059
5060 err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
5061 &priv->sec_attr);
5062 if (err) {
5063 dev_err(dev, "dpseci_get_sec_attr() failed\n");
5064 goto err_get_vers;
5065 }
5066
5067 err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
5068 if (err) {
5069 dev_err(dev, "setup_congestion() failed\n");
5070 goto err_get_vers;
5071 }
5072
5073 priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
5074 priv->dpseci_attr.num_tx_queues);
5075 if (priv->num_pairs > num_online_cpus()) {
5076 dev_warn(dev, "%d queues won't be used\n",
5077 priv->num_pairs - num_online_cpus());
5078 priv->num_pairs = num_online_cpus();
5079 }
5080
5081 for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
5082 err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
5083 &priv->rx_queue_attr[i]);
5084 if (err) {
5085 dev_err(dev, "dpseci_get_rx_queue() failed\n");
5086 goto err_get_rx_queue;
5087 }
5088 }
5089
5090 for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
5091 err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
5092 &priv->tx_queue_attr[i]);
5093 if (err) {
5094 dev_err(dev, "dpseci_get_tx_queue() failed\n");
5095 goto err_get_rx_queue;
5096 }
5097 }
5098
5099 i = 0;
5100 for_each_online_cpu(cpu) {
5101 u8 j;
5102
5103 j = i % priv->num_pairs;
5104
5105 ppriv = per_cpu_ptr(priv->ppriv, cpu);
5106 ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
5107
5108 /*
5109 * Allow all cores to enqueue, while only some of them
5110 * will take part in dequeuing.
5111 */
5112 if (++i > priv->num_pairs)
5113 continue;
5114
5115 ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
5116 ppriv->prio = j;
5117
5118 dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
5119 priv->rx_queue_attr[j].fqid,
5120 priv->tx_queue_attr[j].fqid);
5121
5122 ppriv->net_dev = alloc_netdev_dummy(0);
5123 if (!ppriv->net_dev) {
5124 err = -ENOMEM;
5125 goto err_alloc_netdev;
5126 }
5127 cpumask_set_cpu(cpu, priv->clean_mask);
5128 ppriv->net_dev->dev = *dev;
5129
5130 netif_napi_add_tx_weight(ppriv->net_dev, &ppriv->napi,
5131 dpaa2_dpseci_poll,
5132 DPAA2_CAAM_NAPI_WEIGHT);
5133 }
5134
5135 return 0;
5136
5137 err_alloc_netdev:
5138 free_dpaa2_pcpu_netdev(priv, priv->clean_mask);
5139 err_get_rx_queue:
5140 dpaa2_dpseci_congestion_free(priv);
5141 err_get_vers:
5142 dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
5143 err_open:
5144 free_cpumask_var(priv->clean_mask);
5145 err_cpumask:
5146 return err;
5147 }
5148
5149 static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
5150 {
5151 struct device *dev = priv->dev;
5152 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
5153 struct dpaa2_caam_priv_per_cpu *ppriv;
5154 int i;
5155
5156 for (i = 0; i < priv->num_pairs; i++) {
5157 ppriv = per_cpu_ptr(priv->ppriv, i);
5158 napi_enable(&ppriv->napi);
5159 }
5160
5161 return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
5162 }
5163
5164 static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
5165 {
5166 struct device *dev = priv->dev;
5167 struct dpaa2_caam_priv_per_cpu *ppriv;
5168 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
5169 int i, err = 0, enabled;
5170
5171 err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
5172 if (err) {
5173 dev_err(dev, "dpseci_disable() failed\n");
5174 return err;
5175 }
5176
5177 err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
5178 if (err) {
5179 dev_err(dev, "dpseci_is_enabled() failed\n");
5180 return err;
5181 }
5182
5183 dev_dbg(dev, "disable: %s\n", str_false_true(enabled));
5184
5185 for (i = 0; i < priv->num_pairs; i++) {
5186 ppriv = per_cpu_ptr(priv->ppriv, i);
5187 napi_disable(&ppriv->napi);
5188 netif_napi_del(&ppriv->napi);
5189 }
5190
5191 return 0;
5192 }
5193
5194 static struct list_head hash_list;
5195
5196 static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
5197 {
5198 struct device *dev;
5199 struct dpaa2_caam_priv *priv;
5200 int i, err = 0;
5201 bool registered = false;
5202
5203 /*
5204 * There is no way to get CAAM endianness - there is no direct register
5205 * space access and MC f/w does not provide this attribute.
5206 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
5207 * property.
5208 */
5209 caam_little_end = true;
5210
5211 caam_imx = false;
5212
5213 dev = &dpseci_dev->dev;
5214
5215 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
5216 if (!priv)
5217 return -ENOMEM;
5218
5219 dev_set_drvdata(dev, priv);
5220
5221 priv->domain = iommu_get_domain_for_dev(dev);
5222
5223 qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
5224 0, 0, NULL);
5225 if (!qi_cache) {
5226 dev_err(dev, "Can't allocate SEC cache\n");
5227 return -ENOMEM;
5228 }
5229
5230 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
5231 if (err) {
5232 dev_err(dev, "dma_set_mask_and_coherent() failed\n");
5233 goto err_dma_mask;
5234 }
5235
5236 /* Obtain a MC portal */
5237 err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
5238 if (err) {
5239 if (err == -ENXIO)
5240 err = -EPROBE_DEFER;
5241 else
5242 dev_err(dev, "MC portal allocation failed\n");
5243
5244 goto err_dma_mask;
5245 }
5246
5247 priv->ppriv = alloc_percpu(*priv->ppriv);
5248 if (!priv->ppriv) {
5249 dev_err(dev, "alloc_percpu() failed\n");
5250 err = -ENOMEM;
5251 goto err_alloc_ppriv;
5252 }
5253
5254 /* DPSECI initialization */
5255 err = dpaa2_dpseci_setup(dpseci_dev);
5256 if (err) {
5257 dev_err(dev, "dpaa2_dpseci_setup() failed\n");
5258 goto err_dpseci_setup;
5259 }
5260
5261 /* DPIO */
5262 err = dpaa2_dpseci_dpio_setup(priv);
5263 if (err) {
5264 dev_err_probe(dev, err, "dpaa2_dpseci_dpio_setup() failed\n");
5265 goto err_dpio_setup;
5266 }
5267
5268 /* DPSECI binding to DPIO */
5269 err = dpaa2_dpseci_bind(priv);
5270 if (err) {
5271 dev_err(dev, "dpaa2_dpseci_bind() failed\n");
5272 goto err_bind;
5273 }
5274
5275 /* DPSECI enable */
5276 err = dpaa2_dpseci_enable(priv);
5277 if (err) {
5278 dev_err(dev, "dpaa2_dpseci_enable() failed\n");
5279 goto err_bind;
5280 }
5281
5282 dpaa2_dpseci_debugfs_init(priv);
5283
5284 /* register crypto algorithms the device supports */
5285 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5286 struct caam_skcipher_alg *t_alg = driver_algs + i;
5287 u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
5288
5289 /* Skip DES algorithms if not supported by device */
5290 if (!priv->sec_attr.des_acc_num &&
5291 (alg_sel == OP_ALG_ALGSEL_3DES ||
5292 alg_sel == OP_ALG_ALGSEL_DES))
5293 continue;
5294
5295 /* Skip AES algorithms if not supported by device */
5296 if (!priv->sec_attr.aes_acc_num &&
5297 alg_sel == OP_ALG_ALGSEL_AES)
5298 continue;
5299
5300 /* Skip CHACHA20 algorithms if not supported by device */
5301 if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5302 !priv->sec_attr.ccha_acc_num)
5303 continue;
5304
5305 t_alg->caam.dev = dev;
5306 caam_skcipher_alg_init(t_alg);
5307
5308 err = crypto_register_skcipher(&t_alg->skcipher);
5309 if (err) {
5310 dev_warn(dev, "%s alg registration failed: %d\n",
5311 t_alg->skcipher.base.cra_driver_name, err);
5312 continue;
5313 }
5314
5315 t_alg->registered = true;
5316 registered = true;
5317 }
5318
5319 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5320 struct caam_aead_alg *t_alg = driver_aeads + i;
5321 u32 c1_alg_sel = t_alg->caam.class1_alg_type &
5322 OP_ALG_ALGSEL_MASK;
5323 u32 c2_alg_sel = t_alg->caam.class2_alg_type &
5324 OP_ALG_ALGSEL_MASK;
5325
5326 /* Skip DES algorithms if not supported by device */
5327 if (!priv->sec_attr.des_acc_num &&
5328 (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
5329 c1_alg_sel == OP_ALG_ALGSEL_DES))
5330 continue;
5331
5332 /* Skip AES algorithms if not supported by device */
5333 if (!priv->sec_attr.aes_acc_num &&
5334 c1_alg_sel == OP_ALG_ALGSEL_AES)
5335 continue;
5336
5337 /* Skip CHACHA20 algorithms if not supported by device */
5338 if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5339 !priv->sec_attr.ccha_acc_num)
5340 continue;
5341
5342 /* Skip POLY1305 algorithms if not supported by device */
5343 if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
5344 !priv->sec_attr.ptha_acc_num)
5345 continue;
5346
5347 /*
5348		 * Skip algorithms requiring message digests if the MD block is
5349		 * not present; MDHA class 2 selectors share the 0x40 range.
5350 */
5351 if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
5352 !priv->sec_attr.md_acc_num)
5353 continue;
5354
5355 t_alg->caam.dev = dev;
5356 caam_aead_alg_init(t_alg);
5357
5358 err = crypto_register_aead(&t_alg->aead);
5359 if (err) {
5360 dev_warn(dev, "%s alg registration failed: %d\n",
5361 t_alg->aead.base.cra_driver_name, err);
5362 continue;
5363 }
5364
5365 t_alg->registered = true;
5366 registered = true;
5367 }
5368 if (registered)
5369 dev_info(dev, "algorithms registered in /proc/crypto\n");
5370
5371 /* register hash algorithms the device supports */
5372 INIT_LIST_HEAD(&hash_list);
5373
5374 /*
5375 * Skip registration of any hashing algorithms if MD block
5376 * is not present.
5377 */
5378 if (!priv->sec_attr.md_acc_num)
5379 return 0;
5380
5381 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
5382 struct caam_hash_alg *t_alg;
5383 struct caam_hash_template *alg = driver_hash + i;
5384
5385 /* register hmac version */
5386 t_alg = caam_hash_alloc(dev, alg, true);
5387 if (IS_ERR(t_alg)) {
5388 err = PTR_ERR(t_alg);
5389 dev_warn(dev, "%s hash alg allocation failed: %d\n",
5390 alg->hmac_driver_name, err);
5391 continue;
5392 }
5393
5394 err = crypto_register_ahash(&t_alg->ahash_alg);
5395 if (err) {
5396 dev_warn(dev, "%s alg registration failed: %d\n",
5397 t_alg->ahash_alg.halg.base.cra_driver_name,
5398 err);
5399 kfree(t_alg);
5400 } else {
5401 list_add_tail(&t_alg->entry, &hash_list);
5402 }
5403
5404 /* register unkeyed version */
5405 t_alg = caam_hash_alloc(dev, alg, false);
5406 if (IS_ERR(t_alg)) {
5407 err = PTR_ERR(t_alg);
5408 dev_warn(dev, "%s alg allocation failed: %d\n",
5409 alg->driver_name, err);
5410 continue;
5411 }
5412
5413 err = crypto_register_ahash(&t_alg->ahash_alg);
5414 if (err) {
5415 dev_warn(dev, "%s alg registration failed: %d\n",
5416 t_alg->ahash_alg.halg.base.cra_driver_name,
5417 err);
5418 kfree(t_alg);
5419 } else {
5420 list_add_tail(&t_alg->entry, &hash_list);
5421 }
5422 }
5423 if (!list_empty(&hash_list))
5424 dev_info(dev, "hash algorithms registered in /proc/crypto\n");
5425
5426 return err;
5427
5428 err_bind:
5429 dpaa2_dpseci_dpio_free(priv);
5430 err_dpio_setup:
5431 dpaa2_dpseci_free(priv);
5432 err_dpseci_setup:
5433 free_percpu(priv->ppriv);
5434 err_alloc_ppriv:
5435 fsl_mc_portal_free(priv->mc_io);
5436 err_dma_mask:
5437 kmem_cache_destroy(qi_cache);
5438
5439 return err;
5440 }
5441
5442 static void __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
5443 {
5444 struct device *dev;
5445 struct dpaa2_caam_priv *priv;
5446 int i;
5447
5448 dev = &ls_dev->dev;
5449 priv = dev_get_drvdata(dev);
5450
5451 dpaa2_dpseci_debugfs_exit(priv);
5452
5453 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5454 struct caam_aead_alg *t_alg = driver_aeads + i;
5455
5456 if (t_alg->registered)
5457 crypto_unregister_aead(&t_alg->aead);
5458 }
5459
5460 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5461 struct caam_skcipher_alg *t_alg = driver_algs + i;
5462
5463 if (t_alg->registered)
5464 crypto_unregister_skcipher(&t_alg->skcipher);
5465 }
5466
5467 if (hash_list.next) {
5468 struct caam_hash_alg *t_hash_alg, *p;
5469
5470 list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
5471 crypto_unregister_ahash(&t_hash_alg->ahash_alg);
5472 list_del(&t_hash_alg->entry);
5473 kfree(t_hash_alg);
5474 }
5475 }
5476
5477 dpaa2_dpseci_disable(priv);
5478 dpaa2_dpseci_dpio_free(priv);
5479 dpaa2_dpseci_free(priv);
5480 free_percpu(priv->ppriv);
5481 fsl_mc_portal_free(priv->mc_io);
5482 kmem_cache_destroy(qi_cache);
5483 }
5484
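/**
 * dpaa2_caam_enqueue - enqueue a crypto request towards the DPSECI object
 * @dev: dpseci device
 * @req: caam request; the caller must have populated the frame list
 *       table (fd_flt), the flow context (flc/flc_dma) and the
 *       completion callback (cbk/ctx) beforehand
 *
 * Returns -EINPROGRESS if the frame was enqueued (completion is then
 * signalled through @req->cbk), -EBUSY if the congestion group is
 * congested, or a negative error code on failure.
 */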
5485 int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
5486 {
5487 struct dpaa2_fd fd;
5488 struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5489 struct dpaa2_caam_priv_per_cpu *ppriv;
5490 int err = 0, i;
5491
5492 if (IS_ERR(req))
5493 return PTR_ERR(req);
5494
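	/*
	 * Honour congestion notifications: drop the request while the
	 * congestion group is above its entry threshold.
	 */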
5495 if (priv->cscn_mem) {
5496 dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
5497 DPAA2_CSCN_SIZE,
5498 DMA_FROM_DEVICE);
5499 if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) {
5500 dev_dbg_ratelimited(dev, "Dropping request\n");
5501 return -EBUSY;
5502 }
5503 }
5504
5505 dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
5506
5507 req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
5508 DMA_BIDIRECTIONAL);
5509 if (dma_mapping_error(dev, req->fd_flt_dma)) {
5510 dev_err(dev, "DMA mapping error for QI enqueue request\n");
5511 goto err_out;
5512 }
5513
5514 memset(&fd, 0, sizeof(fd));
5515 dpaa2_fd_set_format(&fd, dpaa2_fd_list);
5516 dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
5517 dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
5518 dpaa2_fd_set_flc(&fd, req->flc_dma);
5519
5520 ppriv = raw_cpu_ptr(priv->ppriv);
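	/* Retry a bounded number of times while the QBMan portal is busy */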
5521 for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
5522 err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
5523 &fd);
5524 if (err != -EBUSY)
5525 break;
5526
5527 cpu_relax();
5528 }
5529
5530 if (unlikely(err)) {
5531 dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
5532 goto err_out;
5533 }
5534
5535 return -EINPROGRESS;
5536
5537 err_out:
5538 dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
5539 DMA_BIDIRECTIONAL);
5540 return -EIO;
5541 }
5542 EXPORT_SYMBOL(dpaa2_caam_enqueue);
5543
5544 static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
5545 {
5546 .vendor = FSL_MC_VENDOR_FREESCALE,
5547 .obj_type = "dpseci",
5548 },
5549 { .vendor = 0x0 }
5550 };
5551 MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table);
5552
5553 static struct fsl_mc_driver dpaa2_caam_driver = {
5554 .driver = {
5555 .name = KBUILD_MODNAME,
5556 .owner = THIS_MODULE,
5557 },
5558 .probe = dpaa2_caam_probe,
5559 .remove = dpaa2_caam_remove,
5560 .match_id_table = dpaa2_caam_match_id_table
5561 };
5562
5563 MODULE_LICENSE("Dual BSD/GPL");
5564 MODULE_AUTHOR("Freescale Semiconductor, Inc");
5565 MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
5566
5567 module_fsl_mc_driver(dpaa2_caam_driver);
5568