Lines matching "sun8i-h3-crypto" (sun8i-ce-hash.c)

1 // SPDX-License-Identifier: GPL-2.0
3 * sun8i-ce-hash.c - hardware cryptographic offloader for
4 * Allwinner H3/A64/H5/H2+/H6/R40 SoC
6 * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
13 #include <crypto/internal/hash.h>
14 #include <crypto/md5.h>
15 #include <crypto/sha1.h>
16 #include <crypto/sha2.h>
18 #include <linux/dma-mapping.h>
24 #include "sun8i-ce.h"
34 algt->stat_fb++;
46 op->ce = algt->ce;
49 op->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0,
51 if (IS_ERR(op->fallback_tfm)) {
52 dev_err(algt->ce->dev, "Fallback driver could not be loaded\n");
53 return PTR_ERR(op->fallback_tfm);
57 crypto_ahash_statesize(op->fallback_tfm));
61 crypto_ahash_reqsize(op->fallback_tfm));
64 memcpy(algt->fbname,
65 crypto_ahash_driver_name(op->fallback_tfm),
68 err = pm_runtime_resume_and_get(op->ce->dev);
73 crypto_free_ahash(op->fallback_tfm);
81 crypto_free_ahash(tfmctx->fallback_tfm);
82 pm_runtime_put_sync_suspend(tfmctx->ce->dev);
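/*
 * Editor's sketch (not driver code): the tfm init/exit pairing visible above,
 * reduced to its essentials.  All "example_*" names are hypothetical; the
 * real driver additionally records the fallback driver name for its
 * statistics and takes/drops a runtime-PM reference on the CE device.
 */
struct example_ctx {
	struct crypto_ahash *fallback_tfm;
};

struct example_reqctx {
	int flow;				/* CE channel chosen at dispatch */
	struct ahash_request fallback_req;	/* must stay last: variable size */
};

static int example_hash_init_tfm(struct crypto_ahash *tfm)
{
	struct example_ctx *ctx = crypto_ahash_ctx(tfm);

	/* Ask the crypto API for a software implementation of the same hash. */
	ctx->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0,
					       CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback_tfm))
		return PTR_ERR(ctx->fallback_tfm);

	/* Export/import state must match whatever the fallback produces. */
	crypto_ahash_set_statesize(tfm,
				   crypto_ahash_statesize(ctx->fallback_tfm));
	/* Our request context must be able to carry the fallback's request. */
	crypto_ahash_set_reqsize(tfm, sizeof(struct example_reqctx) +
				      crypto_ahash_reqsize(ctx->fallback_tfm));
	return 0;
}

static void example_hash_exit_tfm(struct crypto_ahash *tfm)
{
	struct example_ctx *ctx = crypto_ahash_ctx(tfm);

	crypto_free_ahash(ctx->fallback_tfm);
}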
93 ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
94 ahash_request_set_callback(&rctx->fallback_req,
95 areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
96 areq->base.complete, areq->base.data);
98 return crypto_ahash_init(&rctx->fallback_req);
107 ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
108 ahash_request_set_callback(&rctx->fallback_req,
109 areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
110 areq->base.complete, areq->base.data);
112 return crypto_ahash_export(&rctx->fallback_req, out);
121 ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
122 ahash_request_set_callback(&rctx->fallback_req,
123 areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
124 areq->base.complete, areq->base.data);
126 return crypto_ahash_import(&rctx->fallback_req, in);
137 ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
138 ahash_request_set_callback(&rctx->fallback_req,
139 areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
140 areq->base.complete, areq->base.data);
141 ahash_request_set_crypt(&rctx->fallback_req, NULL, areq->result, 0);
143 return crypto_ahash_final(&rctx->fallback_req);
152 ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
153 ahash_request_set_callback(&rctx->fallback_req,
154 areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
155 areq->base.complete, areq->base.data);
156 ahash_request_set_crypt(&rctx->fallback_req, areq->src, NULL, areq->nbytes);
158 return crypto_ahash_update(&rctx->fallback_req);
169 ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
170 ahash_request_set_callback(&rctx->fallback_req,
171 areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
172 areq->base.complete, areq->base.data);
173 ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
174 areq->nbytes);
176 return crypto_ahash_finup(&rctx->fallback_req);
187 ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
188 ahash_request_set_callback(&rctx->fallback_req,
189 areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
190 areq->base.complete, areq->base.data);
191 ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
192 areq->nbytes);
194 return crypto_ahash_digest(&rctx->fallback_req);
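/*
 * Editor's sketch: the delegation pattern shared by all of the *_fb helpers
 * above, written once.  The real driver open-codes these steps per operation;
 * the "example_*" types come from the earlier sketch and are hypothetical.
 */
static void example_prepare_fallback_req(struct ahash_request *areq,
					 struct scatterlist *src,
					 u8 *result, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct example_ctx *ctx = crypto_ahash_ctx(tfm);
	struct example_reqctx *rctx = ahash_request_ctx(areq);

	/* Run on the software fallback transform... */
	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	/* ...keeping only MAY_SLEEP and reusing the caller's completion... */
	ahash_request_set_callback(&rctx->fallback_req,
				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq->base.complete, areq->base.data);
	/* ...on the caller's buffers (src may be NULL, e.g. for final). */
	ahash_request_set_crypt(&rctx->fallback_req, src, result, nbytes);
}

static int example_digest_fb(struct ahash_request *areq)
{
	struct example_reqctx *rctx = ahash_request_ctx(areq);

	example_prepare_fallback_req(areq, areq->src, areq->result, areq->nbytes);
	return crypto_ahash_digest(&rctx->fallback_req);
}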
200 struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
206 if (areq->nbytes == 0) {
208 algt->stat_fb_len0++;
213 if (sg_nents_for_len(areq->src, areq->nbytes) > MAX_SG - 1) {
215 algt->stat_fb_maxsg++;
219 sg = areq->src;
221 if (sg->length % 4) {
223 algt->stat_fb_srclen++;
227 if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
229 algt->stat_fb_srcali++;
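/*
 * Editor's sketch: the hardware-suitability test above, condensed into a
 * single predicate with the stat_fb_* debug counters left out.  MAX_SG comes
 * from sun8i-ce.h; one t_src slot is kept free for the padding buffer.
 */
static bool example_need_fallback(struct ahash_request *areq)
{
	struct scatterlist *sg;

	/* Zero-length requests are always punted to the fallback. */
	if (areq->nbytes == 0)
		return true;
	/* Not enough descriptor slots for the source plus the padding. */
	if (sg_nents_for_len(areq->src, areq->nbytes) > MAX_SG - 1)
		return true;
	for (sg = areq->src; sg; sg = sg_next(sg)) {
		/* Every segment must be a whole number of 32-bit words... */
		if (sg->length % 4)
			return true;
		/* ...starting on a 32-bit boundary. */
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return true;
	}
	return false;
}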
241 struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
252 ce = algt->ce;
255 rctx->flow = e;
256 engine = ce->chanlist[e].engine;
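/*
 * Editor's sketch: dispatching the request to a CE flow through the
 * crypto_engine framework (needs <crypto/engine.h>).  The flow index is
 * remembered in the request context so the run callback programs the same
 * channel; the transfer call shown here is the standard crypto_engine entry
 * point for ahash requests, which the elided tail of the digest entry point
 * presumably uses.
 */
static int example_hash_digest_dispatch(struct ahash_request *areq,
					struct sun8i_ce_dev *ce, int e)
{
	struct example_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_engine *engine = ce->chanlist[e].engine;

	rctx->flow = e;
	return crypto_transfer_hash_request_to_engine(engine, areq);
}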
271 fill = 64 - (byte_count % 64);
274 fill = 128 - (byte_count % 128);
282 j += (fill - min_fill) / sizeof(u32);
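/*
 * Editor's sketch: the padding arithmetic above as standalone C, checkable in
 * userspace.  MD5/SHA-1/SHA-224/SHA-256 use 64-byte blocks with an 8-byte
 * length field; SHA-384/SHA-512 use 128-byte blocks with a 16-byte field.
 * In the driver, the equivalent pad quantity is accumulated in 32-bit words
 * (the "j" above).
 */
#include <stdint.h>

static uint64_t example_padded_len(uint64_t byte_count, unsigned int blocksize,
				   unsigned int lenfield)
{
	/* Distance to the next block boundary (a full block if already there). */
	uint64_t fill = blocksize - (byte_count % blocksize);

	/* The 0x80 terminator plus the length field must still fit. */
	if (fill < 1 + (uint64_t)lenfield)
		fill += blocksize;
	return byte_count + fill;
}

/*
 * example_padded_len(0, 64, 8)     == 64   (empty message: one whole block)
 * example_padded_len(56, 64, 8)    == 128  (length field no longer fits)
 * example_padded_len(100, 128, 16) == 128
 */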
323 struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
340 int ns = sg_nents_for_len(areq->src, areq->nbytes);
343 ce = algt->ce;
355 err = -ENOMEM;
362 err = -ENOMEM;
366 flow = rctx->flow;
367 chan = &ce->chanlist[flow];
370 algt->stat_req++;
372 dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes);
374 cet = chan->tl;
377 cet->t_id = cpu_to_le32(flow);
378 common = ce->variant->alg_hash[algt->ce_algo_id];
380 cet->t_common_ctl = cpu_to_le32(common);
382 cet->t_sym_ctl = 0;
383 cet->t_asym_ctl = 0;
385 nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
387 dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
388 err = -EINVAL;
392 len = areq->nbytes;
393 for_each_sg(areq->src, sg, nr_sgs, i) {
394 cet->t_src[i].addr = desc_addr_val_le32(ce, sg_dma_address(sg));
396 cet->t_src[i].len = cpu_to_le32(todo / 4);
397 len -= todo;
400 dev_err(ce->dev, "remaining len %d\n", len);
401 err = -EINVAL;
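/*
 * Editor's sketch: filling one task-descriptor slot per DMA segment, with
 * lengths expressed in 32-bit words (which is why need_fallback() already
 * rejected segments that are not word-sized).  "struct example_desc" is
 * hypothetical, and the bound on "todo" mirrors what the elided line in the
 * loop above presumably computes; the real code also routes addresses through
 * desc_addr_val_le32() to cope with per-variant address encoding.
 */
struct example_desc {
	__le32 addr;
	__le32 len;	/* in 32-bit words */
};

static int example_fill_src_descs(struct example_desc *slot, int nslots,
				  struct scatterlist *src, int nr_sgs,
				  unsigned int nbytes)
{
	struct scatterlist *sg;
	unsigned int len = nbytes;
	int i;

	if (nr_sgs > nslots)
		return -EINVAL;

	for_each_sg(src, sg, nr_sgs, i) {
		unsigned int todo = min(len, sg_dma_len(sg));

		slot[i].addr = cpu_to_le32(sg_dma_address(sg));
		slot[i].len = cpu_to_le32(todo / 4);
		len -= todo;
	}

	/* Any residue means the scatterlist did not cover nbytes. */
	return len ? -EINVAL : 0;
}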
404 addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
405 cet->t_dst[0].addr = desc_addr_val_le32(ce, addr_res);
406 cet->t_dst[0].len = cpu_to_le32(digestsize / 4);
407 if (dma_mapping_error(ce->dev, addr_res)) {
408 dev_err(ce->dev, "DMA map dest\n");
409 err = -EINVAL;
413 byte_count = areq->nbytes;
416 switch (algt->ce_algo_id) {
431 err = -EINVAL;
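/*
 * Editor's sketch: how the trailing bit count is conventionally appended,
 * which is the job of the (elided) switch above.  MD5 stores the 64-bit count
 * little-endian; the SHA family stores it big-endian, with SHA-384/512 using
 * a 128-bit field.  Standalone C, hypothetical helper name.
 */
#include <stdint.h>

static void example_store_bitcount64(uint8_t *dst, uint64_t byte_count,
				     int big_endian)
{
	uint64_t bits = byte_count << 3;	/* message length in bits */
	int i;

	for (i = 0; i < 8; i++) {
		int shift = big_endian ? 56 - 8 * i : 8 * i;

		dst[i] = (uint8_t)(bits >> shift);
	}
}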
435 addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
436 cet->t_src[i].addr = desc_addr_val_le32(ce, addr_pad);
437 cet->t_src[i].len = cpu_to_le32(j);
438 if (dma_mapping_error(ce->dev, addr_pad)) {
439 dev_err(ce->dev, "DMA error on padding SG\n");
440 err = -EINVAL;
444 if (ce->variant->hash_t_dlen_in_bits)
445 cet->t_dlen = cpu_to_le32((areq->nbytes + j * 4) * 8);
447 cet->t_dlen = cpu_to_le32(areq->nbytes / 4 + j);
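/*
 * Editor's sketch: the two encodings of t_dlen selected just above.  Some CE
 * variants take the total length (payload plus padding) in bits, others in
 * 32-bit words; the variant flag hash_t_dlen_in_bits chooses between them.
 */
static __le32 example_hash_t_dlen(bool dlen_in_bits, unsigned int nbytes,
				  unsigned int pad_words)
{
	if (dlen_in_bits)
		return cpu_to_le32((nbytes + pad_words * 4) * 8);	/* bits  */

	return cpu_to_le32(nbytes / 4 + pad_words);			/* words */
}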
449 chan->timeout = areq->nbytes;
453 dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
456 dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
458 memcpy(areq->result, result, crypto_ahash_digestsize(tfm));
461 dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
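/*
 * Editor's sketch: the reverse-order teardown that ends the run function
 * above, written as the usual goto-unwind idiom.  Label names are
 * illustrative, not the driver's; note that the success path falls through
 * the same unmap sequence, matching the unconditional unmaps in the listing.
 */
static int example_run_mappings(struct device *dev, struct scatterlist *src,
				int ns, void *result, size_t digestsize,
				void *pad, size_t padlen)
{
	dma_addr_t addr_res, addr_pad;
	int nr_sgs, err = 0;

	nr_sgs = dma_map_sg(dev, src, ns, DMA_TO_DEVICE);
	if (!nr_sgs)
		return -EINVAL;

	addr_res = dma_map_single(dev, result, digestsize, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr_res)) {
		err = -EINVAL;
		goto unmap_src;
	}

	addr_pad = dma_map_single(dev, pad, padlen, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr_pad)) {
		err = -EINVAL;
		goto unmap_res;
	}

	/* ... build the remaining descriptor fields and run the task here ... */

	dma_unmap_single(dev, addr_pad, padlen, DMA_TO_DEVICE);
unmap_res:
	dma_unmap_single(dev, addr_res, digestsize, DMA_FROM_DEVICE);
unmap_src:
	dma_unmap_sg(dev, src, ns, DMA_TO_DEVICE);
	return err;
}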