// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include "cesa.h"

struct mv_cesa_des_ctx {
        struct mv_cesa_ctx base;
        u8 key[DES_KEY_SIZE];
};

struct mv_cesa_des3_ctx {
        struct mv_cesa_ctx base;
        u8 key[DES3_EDE_KEY_SIZE];
};

struct mv_cesa_aes_ctx {
        struct mv_cesa_ctx base;
        struct crypto_aes_ctx aes;
};

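/*
 * Iterator used to walk the source and destination scatterlists while
 * splitting a request into engine-sized operations.
 */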
struct mv_cesa_skcipher_dma_iter {
        struct mv_cesa_dma_iter base;
        struct mv_cesa_sg_dma_iter src;
        struct mv_cesa_sg_dma_iter dst;
};

static inline void
mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter,
                               struct skcipher_request *req)
{
        mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
        mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
        mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter)
{
        iter->src.op_offset = 0;
        iter->dst.op_offset = 0;

        return mv_cesa_req_dma_iter_next_op(&iter->base);
}

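/*
 * Unmap the DMA-mapped scatterlists (a single bidirectional mapping is
 * used for in-place requests) and release the TDMA descriptor chain.
 */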
static inline void
mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

        if (req->dst != req->src) {
                dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
                             DMA_FROM_DEVICE);
                dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
                             DMA_TO_DEVICE);
        } else {
                dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
                             DMA_BIDIRECTIONAL);
        }
        mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_skcipher_dma_cleanup(req);
}

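/*
 * Standard (non-DMA) mode: copy the operation descriptor and the next
 * chunk of input data into the engine SRAM, then start the accelerator.
 */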
static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_skcipher_std_req *sreq = &creq->std;
        struct mv_cesa_engine *engine = creq->base.engine;
        size_t len = min_t(size_t, req->cryptlen - sreq->offset,
                           CESA_SA_SRAM_PAYLOAD_SIZE);

        mv_cesa_adjust_op(engine, &sreq->op);
        if (engine->pool)
                memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op));
        else
                memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));

        len = mv_cesa_sg_copy_to_sram(engine, req->src, creq->src_nents,
                                      CESA_SA_DATA_SRAM_OFFSET, len,
                                      sreq->offset);

        sreq->size = len;
        mv_cesa_set_crypt_op_len(&sreq->op, len);

        /* FIXME: only update enc_len field */
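        /*
         * After the first chunk the context (key/IV) in SRAM is already
         * valid, so only the descriptor part of the op is rewritten.
         */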
        if (!sreq->skip_ctx) {
                if (engine->pool)
                        memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op));
                else
                        memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
                sreq->skip_ctx = true;
        } else if (engine->pool) {
                memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op.desc));
        } else {
                memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
        }

        mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
        writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
        WARN_ON(readl(engine->regs + CESA_SA_CMD) &
                CESA_SA_CMD_EN_CESA_SA_ACCL0);
        writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

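/*
 * Copy the processed chunk back from SRAM into the destination
 * scatterlist; return -EINPROGRESS until the whole request is done.
 */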
static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
                                        u32 status)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_skcipher_std_req *sreq = &creq->std;
        struct mv_cesa_engine *engine = creq->base.engine;
        size_t len;

        len = mv_cesa_sg_copy_from_sram(engine, req->dst, creq->dst_nents,
                                        CESA_SA_DATA_SRAM_OFFSET, sreq->size,
                                        sreq->offset);

        sreq->offset += len;
        if (sreq->offset < req->cryptlen)
                return -EINPROGRESS;

        return 0;
}

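/* Dispatch interrupt/completion handling to the standard or DMA backend. */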
static int mv_cesa_skcipher_process(struct crypto_async_request *req,
                                    u32 status)
{
        struct skcipher_request *skreq = skcipher_request_cast(req);
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
        struct mv_cesa_req *basereq = &creq->base;

        if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
                return mv_cesa_skcipher_std_process(skreq, status);

        return mv_cesa_dma_process(basereq, status);
}

static void mv_cesa_skcipher_step(struct crypto_async_request *req)
{
        struct skcipher_request *skreq = skcipher_request_cast(req);
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_dma_step(&creq->base);
        else
                mv_cesa_skcipher_std_step(skreq);
}

static inline void
mv_cesa_skcipher_dma_prepare(struct skcipher_request *req)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_req *basereq = &creq->base;

        mv_cesa_dma_prepare(basereq, basereq->engine);
}

static inline void
mv_cesa_skcipher_std_prepare(struct skcipher_request *req)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_skcipher_std_req *sreq = &creq->std;

        sreq->size = 0;
        sreq->offset = 0;
}

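/* Bind the request to the selected engine and prepare the chosen backend. */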
static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
                                            struct mv_cesa_engine *engine)
{
        struct skcipher_request *skreq = skcipher_request_cast(req);
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

        creq->base.engine = engine;

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_skcipher_dma_prepare(skreq);
        else
                mv_cesa_skcipher_std_prepare(skreq);
}

static inline void
mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req)
{
        struct skcipher_request *skreq = skcipher_request_cast(req);

        mv_cesa_skcipher_cleanup(skreq);
}

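/*
 * Update the engine load accounting and copy the output IV back into the
 * request, either from the last op context (DMA mode) or from SRAM.
 */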
static void
mv_cesa_skcipher_complete(struct crypto_async_request *req)
{
        struct skcipher_request *skreq = skcipher_request_cast(req);
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
        struct mv_cesa_engine *engine = creq->base.engine;
        unsigned int ivsize;

        atomic_sub(skreq->cryptlen, &engine->load);
        ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
                struct mv_cesa_req *basereq;

                basereq = &creq->base;
                memcpy(skreq->iv, basereq->chain.last->op->ctx.skcipher.iv,
                       ivsize);
        } else if (engine->pool) {
                memcpy(skreq->iv,
                       engine->sram_pool + CESA_SA_CRYPT_IV_SRAM_OFFSET,
                       ivsize);
        } else {
                memcpy_fromio(skreq->iv,
                              engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
                              ivsize);
        }
}

static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
        .step = mv_cesa_skcipher_step,
        .process = mv_cesa_skcipher_process,
        .cleanup = mv_cesa_skcipher_req_cleanup,
        .complete = mv_cesa_skcipher_complete,
};

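/* Wipe key material from the transform context on teardown. */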
static void mv_cesa_skcipher_cra_exit(struct crypto_tfm *tfm)
{
        void *ctx = crypto_tfm_ctx(tfm);

        memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize);
}

static int mv_cesa_skcipher_cra_init(struct crypto_tfm *tfm)
{
        struct mv_cesa_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->ops = &mv_cesa_skcipher_req_ops;

        crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
                                    sizeof(struct mv_cesa_skcipher_req));

        return 0;
}

static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
                              unsigned int len)
{
        struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
        struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
        int remaining;
        int offset;
        int ret;
        int i;

        ret = aes_expandkey(&ctx->aes, key, len);
        if (ret)
                return ret;

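        /*
         * aes_expandkey() fills key_dec with inverse round keys; the CESA
         * engine instead wants the raw tail of the forward key schedule
         * and runs the schedule backward itself, so copy those words back
         * in from key_enc. remaining is 0 for AES-128, so this fixup only
         * affects 192/256-bit keys.
         */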
        remaining = (ctx->aes.key_length - 16) / 4;
        offset = ctx->aes.key_length + 24 - remaining;
        for (i = 0; i < remaining; i++)
                ctx->aes.key_dec[4 + i] = ctx->aes.key_enc[offset + i];

        return 0;
}

static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
                              unsigned int len)
{
        struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
        int err;

        err = verify_skcipher_des_key(cipher, key);
        if (err)
                return err;

        memcpy(ctx->key, key, DES_KEY_SIZE);

        return 0;
}

static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
                                   const u8 *key, unsigned int len)
{
        struct mv_cesa_des3_ctx *ctx = crypto_skcipher_ctx(cipher);
        int err;

        err = verify_skcipher_des3_key(cipher, key);
        if (err)
                return err;

        memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);

        return 0;
}

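/*
 * DMA mode: map the scatterlists and build a TDMA descriptor chain that
 * feeds the engine one SRAM-sized chunk at a time without CPU copies.
 */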
static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
                                         const struct mv_cesa_op_ctx *op_templ)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;
        struct mv_cesa_req *basereq = &creq->base;
        struct mv_cesa_skcipher_dma_iter iter;
        bool skip_ctx = false;
        int ret;

        basereq->chain.first = NULL;
        basereq->chain.last = NULL;

        if (req->src != req->dst) {
                ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
                                 DMA_TO_DEVICE);
                if (!ret)
                        return -ENOMEM;

                ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
                                 DMA_FROM_DEVICE);
                if (!ret) {
                        ret = -ENOMEM;
                        goto err_unmap_src;
                }
        } else {
                ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
                                 DMA_BIDIRECTIONAL);
                if (!ret)
                        return -ENOMEM;
        }

        mv_cesa_tdma_desc_iter_init(&basereq->chain);
        mv_cesa_skcipher_req_iter_init(&iter, req);

        do {
                struct mv_cesa_op_ctx *op;

                op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx,
                                        flags);
                if (IS_ERR(op)) {
                        ret = PTR_ERR(op);
                        goto err_free_tdma;
                }
                skip_ctx = true;

                mv_cesa_set_crypt_op_len(op, iter.base.op_len);

                /* Add input transfers */
                ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
                                                   &iter.src, flags);
                if (ret)
                        goto err_free_tdma;

                /* Add dummy desc to launch the crypto operation */
                ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
                if (ret)
                        goto err_free_tdma;

                /* Add output transfers */
                ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
                                                   &iter.dst, flags);
                if (ret)
                        goto err_free_tdma;

        } while (mv_cesa_skcipher_req_iter_next_op(&iter));

        /* Add output data for IV */
        ret = mv_cesa_dma_add_result_op(&basereq->chain,
                                        CESA_SA_CFG_SRAM_OFFSET,
                                        CESA_SA_DATA_SRAM_OFFSET,
                                        CESA_TDMA_SRC_IN_SRAM, flags);

        if (ret)
                goto err_free_tdma;

        basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

        return 0;

err_free_tdma:
        mv_cesa_dma_cleanup(basereq);
        if (req->dst != req->src)
                dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
                             DMA_FROM_DEVICE);

err_unmap_src:
        dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
                     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

        return ret;
}

static inline int
mv_cesa_skcipher_std_req_init(struct skcipher_request *req,
                              const struct mv_cesa_op_ctx *op_templ)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_skcipher_std_req *sreq = &creq->std;
        struct mv_cesa_req *basereq = &creq->base;

        sreq->op = *op_templ;
        sreq->skip_ctx = false;
        basereq->chain.first = NULL;
        basereq->chain.last = NULL;

        return 0;
}

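/*
 * Validate the request (cryptlen must be block-aligned) and initialize
 * either the DMA or the standard backend depending on engine support.
 */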
static int mv_cesa_skcipher_req_init(struct skcipher_request *req,
                                     struct mv_cesa_op_ctx *tmpl)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        unsigned int blksize = crypto_skcipher_blocksize(tfm);
        int ret;

        if (!IS_ALIGNED(req->cryptlen, blksize))
                return -EINVAL;

        creq->src_nents = sg_nents_for_len(req->src, req->cryptlen);
        if (creq->src_nents < 0) {
                dev_err(cesa_dev->dev, "Invalid number of src SG");
                return creq->src_nents;
        }
        creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
        if (creq->dst_nents < 0) {
                dev_err(cesa_dev->dev, "Invalid number of dst SG");
                return creq->dst_nents;
        }

        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
                              CESA_SA_DESC_CFG_OP_MSK);

        if (cesa_dev->caps->has_tdma)
                ret = mv_cesa_skcipher_dma_req_init(req, tmpl);
        else
                ret = mv_cesa_skcipher_std_req_init(req, tmpl);

        return ret;
}

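/*
 * Common entry point for all cipher modes: initialize the request, pick
 * an engine and queue the request on it.
 */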
static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
                                      struct mv_cesa_op_ctx *tmpl)
{
        int ret;
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_engine *engine;

        ret = mv_cesa_skcipher_req_init(req, tmpl);
        if (ret)
                return ret;

        engine = mv_cesa_select_engine(req->cryptlen);
        mv_cesa_skcipher_prepare(&req->base, engine);

        ret = mv_cesa_queue_req(&req->base, &creq->base);

        if (mv_cesa_req_needs_cleanup(&req->base, ret))
                mv_cesa_skcipher_cleanup(req);

        return ret;
}

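/* Apply the DES configuration and key to the op template, then queue. */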
static int mv_cesa_des_op(struct skcipher_request *req,
                          struct mv_cesa_op_ctx *tmpl)
{
        struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
                              CESA_SA_DESC_CFG_CRYPTM_MSK);

        memcpy(tmpl->ctx.skcipher.key, ctx->key, DES_KEY_SIZE);

        return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des_alg = {
        .setkey = mv_cesa_des_setkey,
        .encrypt = mv_cesa_ecb_des_encrypt,
        .decrypt = mv_cesa_ecb_des_decrypt,
        .min_keysize = DES_KEY_SIZE,
        .max_keysize = DES_KEY_SIZE,
        .base = {
                .cra_name = "ecb(des)",
                .cra_driver_name = "mv-ecb-des",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_blocksize = DES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};

static int mv_cesa_cbc_des_op(struct skcipher_request *req,
                              struct mv_cesa_op_ctx *tmpl)
{
        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
                              CESA_SA_DESC_CFG_CRYPTCM_MSK);

        memcpy(tmpl->ctx.skcipher.iv, req->iv, DES_BLOCK_SIZE);

        return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_cbc_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des_alg = {
        .setkey = mv_cesa_des_setkey,
        .encrypt = mv_cesa_cbc_des_encrypt,
        .decrypt = mv_cesa_cbc_des_decrypt,
        .min_keysize = DES_KEY_SIZE,
        .max_keysize = DES_KEY_SIZE,
        .ivsize = DES_BLOCK_SIZE,
        .base = {
                .cra_name = "cbc(des)",
                .cra_driver_name = "mv-cbc-des",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_blocksize = DES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};

static int mv_cesa_des3_op(struct skcipher_request *req,
                           struct mv_cesa_op_ctx *tmpl)
{
        struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
                              CESA_SA_DESC_CFG_CRYPTM_MSK);

        memcpy(tmpl->ctx.skcipher.key, ctx->key, DES3_EDE_KEY_SIZE);

        return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_3DES_EDE |
                           CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_des3_op(req, &tmpl);
}

static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_3DES_EDE |
                           CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
        .setkey = mv_cesa_des3_ede_setkey,
        .encrypt = mv_cesa_ecb_des3_ede_encrypt,
        .decrypt = mv_cesa_ecb_des3_ede_decrypt,
        .min_keysize = DES3_EDE_KEY_SIZE,
        .max_keysize = DES3_EDE_KEY_SIZE,
        .base = {
                .cra_name = "ecb(des3_ede)",
                .cra_driver_name = "mv-ecb-des3-ede",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};

static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
                               struct mv_cesa_op_ctx *tmpl)
{
        memcpy(tmpl->ctx.skcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);

        return mv_cesa_des3_op(req, tmpl);
}

static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_CBC |
                           CESA_SA_DESC_CFG_3DES_EDE |
                           CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_cbc_des3_op(req, &tmpl);
}

static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_CBC |
                           CESA_SA_DESC_CFG_3DES_EDE |
                           CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_cbc_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des3_ede_alg = {
        .setkey = mv_cesa_des3_ede_setkey,
        .encrypt = mv_cesa_cbc_des3_ede_encrypt,
        .decrypt = mv_cesa_cbc_des3_ede_decrypt,
        .min_keysize = DES3_EDE_KEY_SIZE,
        .max_keysize = DES3_EDE_KEY_SIZE,
        .ivsize = DES3_EDE_BLOCK_SIZE,
        .base = {
                .cra_name = "cbc(des3_ede)",
                .cra_driver_name = "mv-cbc-des3-ede",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};

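/*
 * Select the encryption or decryption key schedule depending on the
 * requested direction and encode the AES key length in the op config.
 */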
static int mv_cesa_aes_op(struct skcipher_request *req,
                          struct mv_cesa_op_ctx *tmpl)
{
        struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        int i;
        u32 *key;
        u32 cfg;

        cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

        if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
                key = ctx->aes.key_dec;
        else
                key = ctx->aes.key_enc;

        for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
                tmpl->ctx.skcipher.key[i] = cpu_to_le32(key[i]);

        if (ctx->aes.key_length == 24)
                cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
        else if (ctx->aes.key_length == 32)
                cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

        mv_cesa_update_op_cfg(tmpl, cfg,
                              CESA_SA_DESC_CFG_CRYPTM_MSK |
                              CESA_SA_DESC_CFG_AES_LEN_MSK);

        return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_aes_alg = {
        .setkey = mv_cesa_aes_setkey,
        .encrypt = mv_cesa_ecb_aes_encrypt,
        .decrypt = mv_cesa_ecb_aes_decrypt,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .base = {
                .cra_name = "ecb(aes)",
                .cra_driver_name = "mv-ecb-aes",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};

static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
                              struct mv_cesa_op_ctx *tmpl)
{
        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
                              CESA_SA_DESC_CFG_CRYPTCM_MSK);
        memcpy(tmpl->ctx.skcipher.iv, req->iv, AES_BLOCK_SIZE);

        return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_cbc_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_aes_alg = {
        .setkey = mv_cesa_aes_setkey,
        .encrypt = mv_cesa_cbc_aes_encrypt,
        .decrypt = mv_cesa_cbc_aes_decrypt,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .ivsize = AES_BLOCK_SIZE,
        .base = {
                .cra_name = "cbc(aes)",
                .cra_driver_name = "mv-cbc-aes",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};