
// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ce-cipher.c - hardware cryptographic offloader for
 * Allwinner H3/A64/H5/H2+/H6/R40 SoC
 *
 * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
 */
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include "sun8i-ce.h"
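
/*
 * Request lifecycle (inferred from the fragments below):
 * sun8i_ce_cipher_need_fallback() decides whether the Crypto Engine can
 * take a request at all; if not, sun8i_ce_cipher_fallback() replays it
 * on a software skcipher.  Otherwise the prepare step builds a CE task
 * descriptor (key, IV and scatterlist DMA mappings), the task runs on
 * one of the CE flows, and the unprepare step undoes the mappings.
 *
 * A minimal sketch of the per-request context this file relies on,
 * with field names assumed from their usage here rather than copied
 * from sun8i-ce.h:
 *
 *	struct sun8i_cipher_req_ctx {
 *		u32 op_dir;		// CE_ENCRYPTION or CE_DECRYPTION
 *		int flow;		// CE channel chosen for this request
 *		dma_addr_t addr_iv;	// mapped bounce IV
 *		dma_addr_t addr_key;	// mapped key copy
 *		int nr_sgs, nr_sgd;	// mapped src/dst entry counts
 *		struct skcipher_request fallback_req;	// must be last
 *	};
 */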
	if (sg_nents_for_len(areq->src, areq->cryptlen) > MAX_SG ||
	    sg_nents_for_len(areq->dst, areq->cryptlen) > MAX_SG) {
		algt->stat_fb_maxsg++;
		return true;
	}

	if (areq->cryptlen < crypto_skcipher_ivsize(tfm)) {
		algt->stat_fb_leniv++;
		return true;
	}

	if (areq->cryptlen == 0) {
		algt->stat_fb_len0++;
		return true;
	}

	if (areq->cryptlen % 16) {
		algt->stat_fb_mod16++;
		return true;
	}
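
	/*
	 * The CE descriptor expresses lengths in 32-bit words, so every
	 * scatterlist chunk must start on a 4-byte boundary and span a
	 * multiple of 4 bytes; anything else goes to the fallback.
	 */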
	len = areq->cryptlen;
	sg = areq->src;
	while (sg) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
			algt->stat_fb_srcali++;
			return true;
		}
		todo = min(len, sg->length);
		if (todo % 4) {
			algt->stat_fb_srclen++;
			return true;
		}
		len -= todo;
		sg = sg_next(sg);
	}
	len = areq->cryptlen;
	sg = areq->dst;
	while (sg) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
			algt->stat_fb_dstali++;
			return true;
		}
		todo = min(len, sg->length);
		if (todo % 4) {
			algt->stat_fb_dstlen++;
			return true;
		}
		len -= todo;
		sg = sg_next(sg);
	}
	return false;
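
/*
 * Software fallback: the request is replayed unchanged on the fallback
 * skcipher, reusing the caller's flags and completion callback so the
 * crypto API contract is preserved.
 */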
	algt->stat_fb++;

	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (rctx->op_dir & CE_DECRYPTION)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
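
/*
 * Prepare step: program algorithm and direction into a CE task
 * descriptor, DMA-map key, IV, source and destination, and fill the
 * per-sg address/length slots.  On error, the mappings made so far are
 * unwound through the theend_* labels at the bottom.
 */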
	struct sun8i_ce_dev *ce = op->ce;
	/* ... */
	int ns = sg_nents_for_len(areq->src, areq->cryptlen);
	int nd = sg_nents_for_len(areq->dst, areq->cryptlen);

	dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
		op->keylen);
	algt->stat_req++;

	flow = rctx->flow;
	chan = &ce->chanlist[flow];
	cet = chan->tl;
	/* ... */
	cet->t_id = cpu_to_le32(flow);
	common = ce->variant->alg_cipher[algt->ce_algo_id];
	common |= rctx->op_dir | CE_COMM_INT;
	cet->t_common_ctl = cpu_to_le32(common);
	if (ce->variant->cipher_t_dlen_in_bytes)
		cet->t_dlen = cpu_to_le32(areq->cryptlen);
	else
		cet->t_dlen = cpu_to_le32(areq->cryptlen / 4);
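
	/*
	 * Example: a 64-byte AES-CBC request gives t_dlen = 64 on
	 * variants that count bytes, and t_dlen = 64 / 4 = 16 on
	 * variants that count 32-bit words.
	 */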
	sym = ce->variant->op_mode[algt->ce_blockmode];
	len = op->keylen;
	/* ... the elided lines fold the key size into sym ... */
	cet->t_sym_ctl = cpu_to_le32(sym);
	cet->t_asym_ctl = 0;
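
	/*
	 * The key was duplicated with GFP_DMA in setkey, so it can be
	 * mapped directly.  desc_addr_val_le32() converts the DMA
	 * address into the 32-bit little-endian form the task
	 * descriptor expects (the exact conversion is
	 * variant-dependent).
	 */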
	rctx->addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(ce->dev, rctx->addr_key)) {
		dev_err(ce->dev, "Cannot DMA MAP KEY\n");
		err = -EFAULT;
		goto theend;
	}
	cet->t_key = desc_addr_val_le32(ce, rctx->addr_key);
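
	/*
	 * CBC-style chaining: for decryption the last ciphertext block
	 * becomes the IV of the next request, so it is saved into
	 * backup_iv before an in-place operation can overwrite it.  The
	 * caller's IV is copied to a DMA-safe bounce buffer instead of
	 * being mapped directly.
	 */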
	if (areq->iv && ivsize > 0) {
		if (rctx->op_dir & CE_DECRYPTION) {
			offset = areq->cryptlen - ivsize;
			scatterwalk_map_and_copy(chan->backup_iv, areq->src,
						 offset, ivsize, 0);
		}
		memcpy(chan->bounce_iv, areq->iv, ivsize);
		rctx->addr_iv = dma_map_single(ce->dev, chan->bounce_iv, ivsize,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(ce->dev, rctx->addr_iv)) {
			dev_err(ce->dev, "Cannot DMA MAP IV\n");
			err = -ENOMEM;
			goto theend_iv;
		}
		cet->t_iv = desc_addr_val_le32(ce, rctx->addr_iv);
	}
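
	/*
	 * In-place requests (src == dst) are mapped once,
	 * bidirectionally; otherwise source and destination get
	 * separate TO_DEVICE and FROM_DEVICE mappings.
	 */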
	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_BIDIRECTIONAL);
		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = dma_map_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
		if (nr_sgd <= 0 || nr_sgd > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
			err = -EINVAL;
			goto theend_sgs;
		}
	}
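
	/*
	 * Fill the per-sg descriptor slots.  Addresses come from the
	 * DMA mapping; lengths are stored in 32-bit words (todo / 4).
	 * Any residue left in len afterwards means the scatterlists
	 * were shorter than cryptlen, so the request is rejected.
	 */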
	len = areq->cryptlen;
	for_each_sg(areq->src, sg, nr_sgs, i) {
		cet->t_src[i].addr = desc_addr_val_le32(ce, sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_src[i].len = cpu_to_le32(todo / 4);
		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, cet->t_src[i].len, sg->offset, todo);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}
	len = areq->cryptlen;
	for_each_sg(areq->dst, sg, nr_sgd, i) {
		cet->t_dst[i].addr = desc_addr_val_le32(ce, sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_dst[i].len = cpu_to_le32(todo / 4);
		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, cet->t_dst[i].len, sg->offset, todo);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}
	chan->timeout = areq->cryptlen;
	rctx->nr_sgs = ns;
	rctx->nr_sgd = nd;
	return 0;
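
	/*
	 * chan->timeout scales with cryptlen and is used by the task
	 * runner as its completion-wait bound.  The error labels below
	 * unwind in reverse order of setup: scatterlists, then IV,
	 * then key.
	 */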
theend_sgs:
	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, ns, DMA_BIDIRECTIONAL);
	} else {
		if (nr_sgs > 0)
			dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
		dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
	}
theend_iv:
	if (areq->iv && ivsize > 0) {
		if (!dma_mapping_error(ce->dev, rctx->addr_iv))
			dma_unmap_single(ce->dev, rctx->addr_iv, ivsize,
					 DMA_TO_DEVICE);

		offset = areq->cryptlen - ivsize;
		if (rctx->op_dir & CE_DECRYPTION) {
			memcpy(areq->iv, chan->backup_iv, ivsize);
			memzero_explicit(chan->backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
		memzero_explicit(chan->bounce_iv, ivsize);
	}

theend_key:
	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);

theend:
	return err;
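
/*
 * Post-processing after the task has completed: the same unmapping and
 * IV copy-back as the error path above, executed on success.
 */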
	struct sun8i_ce_dev *ce = op->ce;
	/* ... */
	int nr_sgs = rctx->nr_sgs;
	int nr_sgd = rctx->nr_sgd;

	flow = rctx->flow;
	chan = &ce->chanlist[flow];
	cet = chan->tl;

	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
		dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
	}
	if (areq->iv && ivsize > 0) {
		if (cet->t_iv)
			dma_unmap_single(ce->dev, rctx->addr_iv, ivsize,
					 DMA_TO_DEVICE);
		offset = areq->cryptlen - ivsize;
		if (rctx->op_dir & CE_DECRYPTION) {
			memcpy(areq->iv, chan->backup_iv, ivsize);
			memzero_explicit(chan->backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
		memzero_explicit(chan->bounce_iv, ivsize);
	}

	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
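
/*
 * Run step: hand the prepared task list to the chosen CE flow.
 * sun8i_ce_run_task() (in sun8i-ce-core.c) starts the flow and waits
 * for its completion before returning.
 */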
	struct sun8i_ce_dev *ce = op->ce;
	/* ... */
	flow = rctx->flow;
	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
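
/*
 * skcipher entry points: each request either takes the software
 * fallback or is bound to a flow picked by
 * sun8i_ce_get_engine_number() and queued on that flow's crypto_engine
 * (presumably via crypto_transfer_skcipher_request_to_engine() in the
 * elided lines).
 */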
	rctx->op_dir = CE_DECRYPTION;
	/* ... */
	e = sun8i_ce_get_engine_number(op->ce);
	rctx->flow = e;
	engine = op->ce->chanlist[e].engine;
	rctx->op_dir = CE_ENCRYPTION;
	/* ... */
	e = sun8i_ce_get_engine_number(op->ce);
	rctx->flow = e;
	engine = op->ce->chanlist[e].engine;
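
/*
 * tfm init: allocate the software fallback up front so the request
 * context can be sized to embed its request, and take a PM runtime
 * reference on the device.
 */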
	op->ce = algt->ce;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	/* tfm variable name assumed; the original declaration is elided */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sun8i_cipher_req_ctx) +
				    crypto_skcipher_reqsize(op->fallback_tfm));

	memcpy(algt->fbname,
	       crypto_skcipher_driver_name(op->fallback_tfm),
	       CRYPTO_MAX_ALG_NAME);

	err = pm_runtime_resume_and_get(op->ce->dev);
	if (err < 0)
		goto error_pm;

	return 0;
error_pm:
	crypto_free_skcipher(op->fallback_tfm);
	return err;
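
/*
 * tfm exit: release everything init and setkey acquired: the key copy,
 * the fallback tfm and the PM runtime reference.
 */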
	kfree_sensitive(op->key);
	crypto_free_skcipher(op->fallback_tfm);
	pm_runtime_put_sync_suspend(op->ce->dev);
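
/*
 * AES setkey: validate the key length, keep a DMA-capable copy for the
 * prepare step, and mirror the key into the fallback tfm so both paths
 * can serve any given request.
 */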
	struct sun8i_ce_dev *ce = op->ce;

	switch (keylen) {
	case 128 / 8:
	case 192 / 8:
	case 256 / 8:
		break;
	default:
		dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}

	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
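
/*
 * DES3 setkey: after the (elided) key-length check, the tail is
 * identical to the AES path above.
 */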
	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);