xref: /linux/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c (revision 69e4b75a5b90ef74300c283c0aafe8d41daf13a8)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * sun8i-ce-hash.c - hardware cryptographic offloader for
4  * Allwinner H3/A64/H5/H2+/H6/R40 SoCs
5  *
6  * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
7  *
8  * This file adds support for MD5 and SHA1/SHA224/SHA256/SHA384/SHA512.
9  *
10  * You can find the datasheet in Documentation/arch/arm/sunxi.rst
11  */
12 
13 #include <crypto/internal/hash.h>
14 #include <crypto/md5.h>
15 #include <crypto/sha1.h>
16 #include <crypto/sha2.h>
17 #include <linux/bottom_half.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/kernel.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/scatterlist.h>
22 #include <linux/slab.h>
23 #include <linux/string.h>
24 #include "sun8i-ce.h"
25 
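/*
 * Bump the fallback-usage counter of the algorithm backing @tfm.
 * Only counted when CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG is enabled.
 */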
26 static void sun8i_ce_hash_stat_fb_inc(struct crypto_ahash *tfm)
27 {
28 	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
29 		struct sun8i_ce_alg_template *algt;
30 		struct ahash_alg *alg = crypto_ahash_alg(tfm);
31 
32 		algt = container_of(alg, struct sun8i_ce_alg_template,
33 				    alg.hash.base);
34 		algt->stat_fb++;
35 	}
36 }
37 
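/*
 * Allocate the software fallback transform, size the state and request
 * context to hold the fallback's (plus DMA padding), and take a runtime PM
 * reference on the CE device for the lifetime of the tfm.
 */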
38 int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm)
39 {
40 	struct sun8i_ce_hash_tfm_ctx *op = crypto_ahash_ctx(tfm);
41 	struct ahash_alg *alg = crypto_ahash_alg(tfm);
42 	struct sun8i_ce_alg_template *algt;
43 	int err;
44 
45 	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
46 	op->ce = algt->ce;
47 
48 	/* FALLBACK */
49 	op->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0,
50 					      CRYPTO_ALG_NEED_FALLBACK);
51 	if (IS_ERR(op->fallback_tfm)) {
52 		dev_err(algt->ce->dev, "Fallback driver could not be loaded\n");
53 		return PTR_ERR(op->fallback_tfm);
54 	}
55 
56 	crypto_ahash_set_statesize(tfm,
57 				   crypto_ahash_statesize(op->fallback_tfm));
58 
59 	crypto_ahash_set_reqsize(tfm,
60 				 sizeof(struct sun8i_ce_hash_reqctx) +
61 				 crypto_ahash_reqsize(op->fallback_tfm) +
62 				 CRYPTO_DMA_PADDING);
63 
64 	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
65 		memcpy(algt->fbname,
66 		       crypto_ahash_driver_name(op->fallback_tfm),
67 		       CRYPTO_MAX_ALG_NAME);
68 
69 	err = pm_runtime_resume_and_get(op->ce->dev);
70 	if (err < 0)
71 		goto error_pm;
72 	return 0;
73 error_pm:
74 	crypto_free_ahash(op->fallback_tfm);
75 	return err;
76 }
77 
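/* Release the fallback transform and drop the runtime PM reference. */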
78 void sun8i_ce_hash_exit_tfm(struct crypto_ahash *tfm)
79 {
80 	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
81 
82 	crypto_free_ahash(tfmctx->fallback_tfm);
83 	pm_runtime_put_sync_suspend(tfmctx->ce->dev);
84 }
85 
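/*
 * init/update/final/finup/export/import are simply forwarded to the
 * software fallback; only the one-shot digest() path below is offloaded
 * to the hardware.
 */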
86 int sun8i_ce_hash_init(struct ahash_request *areq)
87 {
88 	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
89 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
90 	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
91 
92 	memset(rctx, 0, sizeof(struct sun8i_ce_hash_reqctx));
93 
94 	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
95 	ahash_request_set_callback(&rctx->fallback_req,
96 				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
97 				   areq->base.complete, areq->base.data);
98 
99 	return crypto_ahash_init(&rctx->fallback_req);
100 }
101 
102 int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
103 {
104 	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
105 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
106 	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
107 
108 	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
109 	ahash_request_set_callback(&rctx->fallback_req,
110 				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
111 				   areq->base.complete, areq->base.data);
112 
113 	return crypto_ahash_export(&rctx->fallback_req, out);
114 }
115 
116 int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
117 {
118 	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
119 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
120 	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
121 
122 	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
123 	ahash_request_set_callback(&rctx->fallback_req,
124 				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
125 				   areq->base.complete, areq->base.data);
126 
127 	return crypto_ahash_import(&rctx->fallback_req, in);
128 }
129 
130 int sun8i_ce_hash_final(struct ahash_request *areq)
131 {
132 	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
133 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
134 	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
135 
136 	sun8i_ce_hash_stat_fb_inc(tfm);
137 
138 	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
139 	ahash_request_set_callback(&rctx->fallback_req,
140 				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
141 				   areq->base.complete, areq->base.data);
142 	ahash_request_set_crypt(&rctx->fallback_req, NULL, areq->result, 0);
143 
144 	return crypto_ahash_final(&rctx->fallback_req);
145 }
146 
147 int sun8i_ce_hash_update(struct ahash_request *areq)
148 {
149 	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
150 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
151 	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
152 
153 	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
154 	ahash_request_set_callback(&rctx->fallback_req,
155 				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
156 				   areq->base.complete, areq->base.data);
157 	ahash_request_set_crypt(&rctx->fallback_req, areq->src, NULL, areq->nbytes);
158 
159 	return crypto_ahash_update(&rctx->fallback_req);
160 }
161 
162 int sun8i_ce_hash_finup(struct ahash_request *areq)
163 {
164 	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
165 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
166 	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
167 
168 	sun8i_ce_hash_stat_fb_inc(tfm);
169 
170 	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
171 	ahash_request_set_callback(&rctx->fallback_req,
172 				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
173 				   areq->base.complete, areq->base.data);
174 	ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
175 				areq->nbytes);
176 
177 	return crypto_ahash_finup(&rctx->fallback_req);
178 }
179 
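/* Run the whole digest request on the software fallback. */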
180 static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
181 {
182 	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
183 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
184 	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
185 
186 	sun8i_ce_hash_stat_fb_inc(tfm);
187 
188 	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
189 	ahash_request_set_callback(&rctx->fallback_req,
190 				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
191 				   areq->base.complete, areq->base.data);
192 	ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
193 				areq->nbytes);
194 
195 	return crypto_ahash_digest(&rctx->fallback_req);
196 }
197 
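/*
 * Decide whether a request must go to the software fallback: zero-length
 * input, more source SG entries than the task descriptor can hold (one
 * slot is reserved for the padding block), SG lengths that are not a
 * multiple of 4 bytes, or SG offsets that are not 32-bit aligned.
 */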
198 static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
199 {
200 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
201 	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
202 	struct sun8i_ce_alg_template *algt;
203 	struct scatterlist *sg;
204 
205 	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
206 
207 	if (areq->nbytes == 0) {
208 		if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
209 			algt->stat_fb_len0++;
210 
211 		return true;
212 	}
213 	/* we need to reserve one SG entry for the padding block */
214 	if (sg_nents_for_len(areq->src, areq->nbytes) > MAX_SG - 1) {
215 		if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
216 			algt->stat_fb_maxsg++;
217 
218 		return true;
219 	}
220 	sg = areq->src;
221 	while (sg) {
222 		if (sg->length % 4) {
223 			if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
224 				algt->stat_fb_srclen++;
225 
226 			return true;
227 		}
228 		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
229 			if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
230 				algt->stat_fb_srcali++;
231 
232 			return true;
233 		}
234 		sg = sg_next(sg);
235 	}
236 	return false;
237 }
238 
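/*
 * Entry point for the offloaded one-shot digest: fall back to software
 * when the hardware cannot handle the request, otherwise pick a flow and
 * queue the request on the corresponding crypto engine.
 */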
239 int sun8i_ce_hash_digest(struct ahash_request *areq)
240 {
241 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
242 	struct sun8i_ce_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
243 	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
244 	struct sun8i_ce_dev *ce = ctx->ce;
245 	struct crypto_engine *engine;
246 	int e;
247 
248 	if (sun8i_ce_hash_need_fallback(areq))
249 		return sun8i_ce_hash_digest_fb(areq);
250 
251 	e = sun8i_ce_get_engine_number(ce);
252 	rctx->flow = e;
253 	engine = ce->chanlist[e].engine;
254 
255 	return crypto_transfer_hash_request_to_engine(engine, areq);
256 }
257 
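/*
 * Write the MD5/SHA padding into @buf starting at 32-bit word index @padi:
 * the 0x80 marker, zero fill up to the block boundary, then the message
 * length in bits (64-bit little-endian for MD5, 64-bit big-endian for
 * SHA1/SHA224/SHA256, 128-bit big-endian for SHA384/SHA512).
 * Returns the new number of 32-bit words in @buf, or 0 if @bufsize would
 * be exceeded.
 */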
258 static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs)
259 {
260 	u64 fill, min_fill, j, k;
261 	__be64 *bebits;
262 	__le64 *lebits;
263 
264 	j = padi;
265 	buf[j++] = cpu_to_le32(0x80);
266 
267 	if (bs == 64) {
268 		fill = 64 - (byte_count % 64);
269 		min_fill = 2 * sizeof(u32) + sizeof(u32);
270 	} else {
271 		fill = 128 - (byte_count % 128);
272 		min_fill = 4 * sizeof(u32) + sizeof(u32);
273 	}
274 
275 	if (fill < min_fill)
276 		fill += bs;
277 
278 	k = j;
279 	j += (fill - min_fill) / sizeof(u32);
280 	if (j * 4 > bufsize) {
281 		pr_err("%s OVERFLOW %llu\n", __func__, j);
282 		return 0;
283 	}
284 	for (; k < j; k++)
285 		buf[k] = 0;
286 
287 	if (le) {
288 		/* MD5 */
289 		lebits = (__le64 *)&buf[j];
290 		*lebits = cpu_to_le64(byte_count << 3);
291 		j += 2;
292 	} else {
293 		if (bs == 64) {
294 			/* sha1 sha224 sha256 */
295 			bebits = (__be64 *)&buf[j];
296 			*bebits = cpu_to_be64(byte_count << 3);
297 			j += 2;
298 		} else {
299 			/* sha384 sha512 */
300 			bebits = (__be64 *)&buf[j];
301 			*bebits = cpu_to_be64(byte_count >> 61);
302 			j += 2;
303 			bebits = (__be64 *)&buf[j];
304 			*bebits = cpu_to_be64(byte_count << 3);
305 			j += 2;
306 		}
307 	}
308 	if (j * 4 > bufsize) {
309 		pr_err("%s OVERFLOW %llu\n", __func__, j);
310 		return 0;
311 	}
312 
313 	return j;
314 }
315 
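/*
 * Fill the CE task descriptor for @areq: map the source scatterlist and
 * the result buffer for DMA, append the padding block as the last source
 * entry, and set the total transfer length (in bits or in 32-bit words,
 * depending on the SoC variant).
 */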
316 static int sun8i_ce_hash_prepare(struct ahash_request *areq, struct ce_task *cet)
317 {
318 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
319 	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
320 	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
321 	struct sun8i_ce_alg_template *algt;
322 	struct sun8i_ce_dev *ce;
323 	struct scatterlist *sg;
324 	int nr_sgs, err;
325 	unsigned int len;
326 	u32 common;
327 	u64 byte_count;
328 	__le32 *bf;
329 	int j, i, todo;
330 	u64 bs;
331 	int digestsize;
332 
333 	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
334 	ce = algt->ce;
335 
336 	bs = crypto_ahash_blocksize(tfm);
337 	digestsize = crypto_ahash_digestsize(tfm);
338 	if (digestsize == SHA224_DIGEST_SIZE)
339 		digestsize = SHA256_DIGEST_SIZE;
340 	if (digestsize == SHA384_DIGEST_SIZE)
341 		digestsize = SHA512_DIGEST_SIZE;
342 
343 	bf = (__le32 *)rctx->pad;
344 
345 	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
346 		algt->stat_req++;
347 
348 	dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes);
349 
350 	memset(cet, 0, sizeof(struct ce_task));
351 
352 	cet->t_id = cpu_to_le32(rctx->flow);
353 	common = ce->variant->alg_hash[algt->ce_algo_id];
354 	common |= CE_COMM_INT;
355 	cet->t_common_ctl = cpu_to_le32(common);
356 
357 	cet->t_sym_ctl = 0;
358 	cet->t_asym_ctl = 0;
359 
360 	rctx->nr_sgs = sg_nents_for_len(areq->src, areq->nbytes);
361 	nr_sgs = dma_map_sg(ce->dev, areq->src, rctx->nr_sgs, DMA_TO_DEVICE);
362 	if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
363 		dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
364 		err = -EINVAL;
365 		goto err_out;
366 	}
367 
368 	len = areq->nbytes;
369 	for_each_sg(areq->src, sg, nr_sgs, i) {
370 		cet->t_src[i].addr = desc_addr_val_le32(ce, sg_dma_address(sg));
371 		todo = min(len, sg_dma_len(sg));
372 		cet->t_src[i].len = cpu_to_le32(todo / 4);
373 		len -= todo;
374 	}
375 	if (len > 0) {
376 		dev_err(ce->dev, "remaining len %d\n", len);
377 		err = -EINVAL;
378 		goto err_unmap_src;
379 	}
380 
381 	rctx->result_len = digestsize;
382 	rctx->addr_res = dma_map_single(ce->dev, rctx->result, rctx->result_len,
383 					DMA_FROM_DEVICE);
384 	cet->t_dst[0].addr = desc_addr_val_le32(ce, rctx->addr_res);
385 	cet->t_dst[0].len = cpu_to_le32(rctx->result_len / 4);
386 	if (dma_mapping_error(ce->dev, rctx->addr_res)) {
387 		dev_err(ce->dev, "DMA map dest\n");
388 		err = -EINVAL;
389 		goto err_unmap_src;
390 	}
391 
392 	byte_count = areq->nbytes;
393 	j = 0;
394 
395 	switch (algt->ce_algo_id) {
396 	case CE_ID_HASH_MD5:
397 		j = hash_pad(bf, 2 * bs, j, byte_count, true, bs);
398 		break;
399 	case CE_ID_HASH_SHA1:
400 	case CE_ID_HASH_SHA224:
401 	case CE_ID_HASH_SHA256:
402 		j = hash_pad(bf, 2 * bs, j, byte_count, false, bs);
403 		break;
404 	case CE_ID_HASH_SHA384:
405 	case CE_ID_HASH_SHA512:
406 		j = hash_pad(bf, 2 * bs, j, byte_count, false, bs);
407 		break;
408 	}
409 	if (!j) {
410 		err = -EINVAL;
411 		goto err_unmap_result;
412 	}
413 
414 	rctx->pad_len = j * 4;
415 	rctx->addr_pad = dma_map_single(ce->dev, rctx->pad, rctx->pad_len,
416 					DMA_TO_DEVICE);
417 	cet->t_src[i].addr = desc_addr_val_le32(ce, rctx->addr_pad);
418 	cet->t_src[i].len = cpu_to_le32(j);
419 	if (dma_mapping_error(ce->dev, rctx->addr_pad)) {
420 		dev_err(ce->dev, "DMA error on padding SG\n");
421 		err = -EINVAL;
422 		goto err_unmap_result;
423 	}
424 
425 	if (ce->variant->hash_t_dlen_in_bits)
426 		cet->t_dlen = cpu_to_le32((areq->nbytes + j * 4) * 8);
427 	else
428 		cet->t_dlen = cpu_to_le32(areq->nbytes / 4 + j);
429 
430 	return 0;
431 
432 err_unmap_result:
433 	dma_unmap_single(ce->dev, rctx->addr_res, rctx->result_len,
434 			 DMA_FROM_DEVICE);
435 
436 err_unmap_src:
437 	dma_unmap_sg(ce->dev, areq->src, rctx->nr_sgs, DMA_TO_DEVICE);
438 
439 err_out:
440 	return err;
441 }
442 
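/* Undo the DMA mappings set up by sun8i_ce_hash_prepare(). */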
443 static void sun8i_ce_hash_unprepare(struct ahash_request *areq,
444 				    struct ce_task *cet)
445 {
446 	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
447 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
448 	struct sun8i_ce_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
449 	struct sun8i_ce_dev *ce = ctx->ce;
450 
451 	dma_unmap_single(ce->dev, rctx->addr_pad, rctx->pad_len, DMA_TO_DEVICE);
452 	dma_unmap_single(ce->dev, rctx->addr_res, rctx->result_len,
453 			 DMA_FROM_DEVICE);
454 	dma_unmap_sg(ce->dev, areq->src, rctx->nr_sgs, DMA_TO_DEVICE);
455 }
456 
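/*
 * Called by the crypto engine: prepare the task descriptor, run it on the
 * selected flow, unmap the buffers, copy the digest back into the request
 * on success and finalize the request.
 */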
457 int sun8i_ce_hash_run(struct crypto_engine *engine, void *async_req)
458 {
459 	struct ahash_request *areq = ahash_request_cast(async_req);
460 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
461 	struct sun8i_ce_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
462 	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
463 	struct sun8i_ce_dev *ce = ctx->ce;
464 	struct sun8i_ce_flow *chan;
465 	int err;
466 
467 	chan = &ce->chanlist[rctx->flow];
468 
469 	err = sun8i_ce_hash_prepare(areq, chan->tl);
470 	if (err)
471 		return err;
472 
473 	err = sun8i_ce_run_task(ce, rctx->flow, crypto_ahash_alg_name(tfm));
474 
475 	sun8i_ce_hash_unprepare(areq, chan->tl);
476 
477 	if (!err)
478 		memcpy(areq->result, rctx->result,
479 		       crypto_ahash_digestsize(tfm));
480 
481 	local_bh_disable();
482 	crypto_finalize_hash_request(engine, async_req, err);
483 	local_bh_enable();
484 
485 	return 0;
486 }
487