// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ss-hash.c - hardware cryptographic offloader for
 * Allwinner A80/A83T SoC
 *
 * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
 *
 * This file adds support for MD5 and SHA1/SHA224/SHA256.
 *
 * You can find the datasheet in Documentation/arch/arm/sunxi.rst
 */

#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "sun8i-ss.h"

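/*
 * Condense an HMAC key longer than the block size by hashing it first,
 * as RFC 2104 requires. A software SHA1 shash is enough here since the
 * key is only hashed once, at setkey time.
 */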
static int sun8i_ss_hashkey(struct sun8i_ss_hash_tfm_ctx *tfmctx, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_shash *xtfm;
	int ret;

	xtfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(xtfm))
		return PTR_ERR(xtfm);

	ret = crypto_shash_tfm_digest(xtfm, key, keylen, tfmctx->key);
	if (ret)
		dev_err(tfmctx->ss->dev, "shash digest error ret=%d\n", ret);

	crypto_free_shash(xtfm);
	return ret;
}

int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			 unsigned int keylen)
{
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(ahash);
	int digestsize, i;
	int bs = crypto_ahash_blocksize(ahash);
	int ret;

	digestsize = crypto_ahash_digestsize(ahash);

	if (keylen > bs) {
		ret = sun8i_ss_hashkey(tfmctx, key, keylen);
		if (ret)
			return ret;
		tfmctx->keylen = digestsize;
	} else {
		tfmctx->keylen = keylen;
		memcpy(tfmctx->key, key, keylen);
	}

	/* Free any pads left over from a previous setkey call */
	kfree_sensitive(tfmctx->ipad);
	tfmctx->ipad = kzalloc(bs, GFP_KERNEL);
	if (!tfmctx->ipad)
		return -ENOMEM;
	kfree_sensitive(tfmctx->opad);
	tfmctx->opad = kzalloc(bs, GFP_KERNEL);
	if (!tfmctx->opad) {
		ret = -ENOMEM;
		goto err_opad;
	}

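	/*
	 * Build the HMAC inner and outer pads per RFC 2104: zero-pad the
	 * key to the block size, then XOR it with the 0x36 (ipad) and
	 * 0x5c (opad) constants.
	 */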
	memset(tfmctx->key + tfmctx->keylen, 0, bs - tfmctx->keylen);
	memcpy(tfmctx->ipad, tfmctx->key, tfmctx->keylen);
	memcpy(tfmctx->opad, tfmctx->key, tfmctx->keylen);
	for (i = 0; i < bs; i++) {
		tfmctx->ipad[i] ^= HMAC_IPAD_VALUE;
		tfmctx->opad[i] ^= HMAC_OPAD_VALUE;
	}

	ret = crypto_ahash_setkey(tfmctx->fallback_tfm, key, keylen);
	if (!ret)
		return 0;

	memzero_explicit(tfmctx->key, keylen);
	/* Clear the pointers so the tfm exit path does not free them again */
	kfree_sensitive(tfmctx->opad);
	tfmctx->opad = NULL;
err_opad:
	kfree_sensitive(tfmctx->ipad);
	tfmctx->ipad = NULL;
	return ret;
}

int sun8i_ss_hash_init_tfm(struct crypto_ahash *tfm)
{
	struct sun8i_ss_hash_tfm_ctx *op = crypto_ahash_ctx(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(tfm);
	struct sun8i_ss_alg_template *algt;
	int err;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
	op->ss = algt->ss;

	/* FALLBACK */
	op->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0,
					      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(algt->ss->dev, "Fallback driver could not be loaded\n");
		return PTR_ERR(op->fallback_tfm);
	}

	crypto_ahash_set_statesize(tfm,
				   crypto_ahash_statesize(op->fallback_tfm));

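	/*
	 * The fallback request lives at the end of our request context,
	 * so the request size must cover both structures.
	 */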
	crypto_ahash_set_reqsize(tfm,
				 sizeof(struct sun8i_ss_hash_reqctx) +
				 crypto_ahash_reqsize(op->fallback_tfm));

	memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm),
	       CRYPTO_MAX_ALG_NAME);

	err = pm_runtime_get_sync(op->ss->dev);
	if (err < 0)
		goto error_pm;
	return 0;
error_pm:
	pm_runtime_put_noidle(op->ss->dev);
	crypto_free_ahash(op->fallback_tfm);
	return err;
}

void sun8i_ss_hash_exit_tfm(struct crypto_ahash *tfm)
{
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	kfree_sensitive(tfmctx->ipad);
	kfree_sensitive(tfmctx->opad);

	crypto_free_ahash(tfmctx->fallback_tfm);
	pm_runtime_put_sync_suspend(tfmctx->ss->dev);
}

int sun8i_ss_hash_init(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	memset(rctx, 0, sizeof(struct sun8i_ss_hash_reqctx));

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	ahash_request_set_callback(&rctx->fallback_req,
				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq->base.complete, areq->base.data);

	return crypto_ahash_init(&rctx->fallback_req);
}

int sun8i_ss_hash_export(struct ahash_request *areq, void *out)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	ahash_request_set_callback(&rctx->fallback_req,
				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq->base.complete, areq->base.data);

	return crypto_ahash_export(&rctx->fallback_req, out);
}

int sun8i_ss_hash_import(struct ahash_request *areq, const void *in)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	ahash_request_set_callback(&rctx->fallback_req,
				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq->base.complete, areq->base.data);

	return crypto_ahash_import(&rctx->fallback_req, in);
}

int sun8i_ss_hash_final(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	ahash_request_set_callback(&rctx->fallback_req,
				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq->base.complete, areq->base.data);
	ahash_request_set_crypt(&rctx->fallback_req, NULL, areq->result, 0);

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
		struct ahash_alg *alg = crypto_ahash_alg(tfm);
		struct sun8i_ss_alg_template *algt __maybe_unused;

		algt = container_of(alg, struct sun8i_ss_alg_template,
				    alg.hash.base);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
		algt->stat_fb++;
#endif
	}

	return crypto_ahash_final(&rctx->fallback_req);
}

int sun8i_ss_hash_update(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	ahash_request_set_callback(&rctx->fallback_req,
				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq->base.complete, areq->base.data);
	ahash_request_set_crypt(&rctx->fallback_req, areq->src, NULL, areq->nbytes);

	return crypto_ahash_update(&rctx->fallback_req);
}

int sun8i_ss_hash_finup(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	ahash_request_set_callback(&rctx->fallback_req,
				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq->base.complete, areq->base.data);
	ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
				areq->nbytes);

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
		struct ahash_alg *alg = crypto_ahash_alg(tfm);
		struct sun8i_ss_alg_template *algt __maybe_unused;

		algt = container_of(alg, struct sun8i_ss_alg_template,
				    alg.hash.base);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
		algt->stat_fb++;
#endif
	}

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int sun8i_ss_hash_digest_fb(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	ahash_request_set_callback(&rctx->fallback_req,
				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq->base.complete, areq->base.data);
	ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
				areq->nbytes);

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
		struct ahash_alg *alg = crypto_ahash_alg(tfm);
		struct sun8i_ss_alg_template *algt __maybe_unused;

		algt = container_of(alg, struct sun8i_ss_alg_template,
				    alg.hash.base);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
		algt->stat_fb++;
#endif
	}

	return crypto_ahash_digest(&rctx->fallback_req);
}

static int sun8i_ss_run_hash_task(struct sun8i_ss_dev *ss,
				  struct sun8i_ss_hash_reqctx *rctx,
				  const char *name)
{
	int flow = rctx->flow;
	u32 v = SS_START;
	int i;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	ss->flows[flow].stat_req++;
#endif

	/* choose between stream0/stream1 */
	if (flow)
		v |= SS_FLOW1;
	else
		v |= SS_FLOW0;

	v |= rctx->method;

	for (i = 0; i < MAX_SG; i++) {
		if (!rctx->t_dst[i].addr)
			break;

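		/*
		 * For every chunk after the first, BIT(17) plus the
		 * previous chunk's result address written to the KEY/IV
		 * registers presumably reload the engine's internal
		 * state from that partial digest (an assumption from
		 * this driver's usage; the bit is not documented here).
		 */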
		mutex_lock(&ss->mlock);
		if (i > 0) {
			v |= BIT(17);
			writel(rctx->t_dst[i - 1].addr, ss->base + SS_KEY_ADR_REG);
			writel(rctx->t_dst[i - 1].addr, ss->base + SS_IV_ADR_REG);
		}

		dev_dbg(ss->dev,
			"Processing SG %d on flow %d %s ctl=%x %d to %d method=%x src=%x dst=%x\n",
			i, flow, name, v,
			rctx->t_src[i].len, rctx->t_dst[i].len,
			rctx->method, rctx->t_src[i].addr, rctx->t_dst[i].addr);

		writel(rctx->t_src[i].addr, ss->base + SS_SRC_ADR_REG);
		writel(rctx->t_dst[i].addr, ss->base + SS_DST_ADR_REG);
		writel(rctx->t_src[i].len, ss->base + SS_LEN_ADR_REG);
		writel(BIT(0) | BIT(1), ss->base + SS_INT_CTL_REG);

		reinit_completion(&ss->flows[flow].complete);
		ss->flows[flow].status = 0;
		wmb();

		writel(v, ss->base + SS_CTL_REG);
		mutex_unlock(&ss->mlock);
		wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,
							  msecs_to_jiffies(2000));
		if (ss->flows[flow].status == 0) {
			dev_err(ss->dev, "DMA timeout for %s\n", name);
			return -EFAULT;
		}
	}

	return 0;
}

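/*
 * Decide whether the hardware can handle this request: the pad buffer
 * is of bounded size, one of the MAX_SG slots must stay free for the
 * padding SG, and every data SG must be 4-byte aligned with a length
 * that is a multiple of 4 (and of 64 for all but the last one);
 * anything else is punted to the software fallback.
 */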
static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = crypto_ahash_alg(tfm);
	struct sun8i_ss_alg_template *algt;
	struct scatterlist *sg;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);

	if (areq->nbytes == 0) {
		algt->stat_fb_len++;
		return true;
	}

	if (areq->nbytes >= MAX_PAD_SIZE - 64) {
		algt->stat_fb_len++;
		return true;
	}

	/* we need to reserve one SG for the padding one */
	if (sg_nents(areq->src) > MAX_SG - 1) {
		algt->stat_fb_sgnum++;
		return true;
	}

	sg = areq->src;
	while (sg) {
		/*
		 * The SS can only hash full blocks; since it supports
		 * only MD5, SHA1, SHA224 and SHA256, the block size is
		 * always 64. Only the last block may be bounced to the
		 * pad buffer.
		 */
		if (sg->length % 64 && sg_next(sg)) {
			algt->stat_fb_sglen++;
			return true;
		}
		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
			algt->stat_fb_align++;
			return true;
		}
		if (sg->length % 4) {
			algt->stat_fb_sglen++;
			return true;
		}
		sg = sg_next(sg);
	}
	return false;
}

int sun8i_ss_hash_digest(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct ahash_alg *alg = crypto_ahash_alg(tfm);
	struct sun8i_ss_alg_template *algt;
	struct sun8i_ss_dev *ss;
	struct crypto_engine *engine;
	int e;

	if (sun8i_ss_hash_need_fallback(areq))
		return sun8i_ss_hash_digest_fb(areq);

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
	ss = algt->ss;

	e = sun8i_ss_get_engine_number(ss);
	rctx->flow = e;
	engine = ss->flows[e].engine;

	return crypto_transfer_hash_request_to_engine(engine, areq);
}

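/*
 * Append the MD5/SHA end-of-message padding to the pad buffer: a 0x80
 * byte, zero words up to the length field, then the message length in
 * bits. For example, with padi = 0, bs = 64 and byte_count = 64 (one
 * full block already processed, big-endian case), this writes the 0x80
 * word, 13 zero words and the 64-bit value 512, and returns 16 words,
 * i.e. one extra 64-byte block. Returns the new word index, or 0 if the
 * padding would overflow the pad buffer.
 */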
static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs)
{
	u64 fill, min_fill, j, k;
	__be64 *bebits;
	__le64 *lebits;

	j = padi;
	buf[j++] = cpu_to_le32(0x80);

	if (bs == 64) {
		fill = 64 - (byte_count % 64);
		min_fill = 2 * sizeof(u32) + sizeof(u32);
	} else {
		fill = 128 - (byte_count % 128);
		min_fill = 4 * sizeof(u32) + sizeof(u32);
	}

	if (fill < min_fill)
		fill += bs;

	k = j;
	j += (fill - min_fill) / sizeof(u32);
	if (j * 4 > bufsize) {
		pr_err("%s OVERFLOW %llu\n", __func__, j);
		return 0;
	}
	for (; k < j; k++)
		buf[k] = 0;

	if (le) {
		/* MD5 */
		lebits = (__le64 *)&buf[j];
		*lebits = cpu_to_le64(byte_count << 3);
		j += 2;
	} else {
		if (bs == 64) {
			/* SHA1 SHA224 SHA256 */
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count << 3);
			j += 2;
		} else {
			/* SHA384 SHA512 */
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count >> 61);
			j += 2;
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count << 3);
			j += 2;
		}
	}
	if (j * 4 > bufsize) {
		pr_err("%s OVERFLOW %llu\n", __func__, j);
		return 0;
	}

	return j;
}

/*
 * sun8i_ss_hash_run - run an ahash request
 * Send the data of the request to the SS along with an extra SG carrying
 * the padding.
 */
int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct ahash_alg *alg = crypto_ahash_alg(tfm);
	struct sun8i_ss_alg_template *algt;
	struct sun8i_ss_dev *ss;
	struct scatterlist *sg;
	int bs = crypto_ahash_blocksize(tfm);
	int nr_sgs, err, digestsize;
	unsigned int len;
	u64 byte_count;
	void *pad, *result;
	int j, i, k, todo;
	dma_addr_t addr_res, addr_pad, addr_xpad;
	__le32 *bf;
	/* HMAC step:
	 * 0: normal hashing
	 * 1: IPAD
	 * 2: OPAD
	 */
	int hmac = 0;

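	/*
	 * HMAC(k, m) = H((k ^ opad) || H((k ^ ipad) || m)), so a keyed
	 * request runs the engine twice: first over ipad || message,
	 * then over opad || first-pass digest (see the retry label).
	 */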
	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
	ss = algt->ss;

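	/*
	 * SHA224 shares the SHA256 engine: the hardware always emits a
	 * 32-byte state, which is truncated to the real digest size only
	 * in the final memcpy to areq->result.
	 */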
	digestsize = crypto_ahash_digestsize(tfm);
	if (digestsize == SHA224_DIGEST_SIZE)
		digestsize = SHA256_DIGEST_SIZE;

	result = ss->flows[rctx->flow].result;
	pad = ss->flows[rctx->flow].pad;
	bf = (__le32 *)pad;

	for (i = 0; i < MAX_SG; i++) {
		rctx->t_dst[i].addr = 0;
		rctx->t_dst[i].len = 0;
	}

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt->stat_req++;
#endif

	rctx->method = ss->variant->alg_hash[algt->ss_algo_id];

	nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
	if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
		dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
		err = -EINVAL;
		goto theend;
	}

	addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE);
	if (dma_mapping_error(ss->dev, addr_res)) {
		dev_err(ss->dev, "DMA map dest\n");
		err = -EINVAL;
		goto err_dma_result;
	}

	j = 0;
	len = areq->nbytes;
	sg = areq->src;
	i = 0;
	while (len > 0 && sg) {
		if (sg_dma_len(sg) == 0) {
			sg = sg_next(sg);
			continue;
		}
		todo = min(len, sg_dma_len(sg));
		/* only the last SG may have a length that is not a multiple of 64 */
		if (todo % 64 == 0) {
			rctx->t_src[i].addr = sg_dma_address(sg);
			rctx->t_src[i].len = todo / 4;
			rctx->t_dst[i].addr = addr_res;
			rctx->t_dst[i].len = digestsize / 4;
			len -= todo;
		} else {
			scatterwalk_map_and_copy(bf, sg, 0, todo, 0);
			j += todo / 4;
			len -= todo;
		}
		sg = sg_next(sg);
		i++;
	}
	if (len > 0) {
		dev_err(ss->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend;
	}

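	/*
	 * If the tail of the request was bounced into the pad buffer,
	 * step back one slot: the bounced bytes and the padding will be
	 * submitted together as a single SG.
	 */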
	if (j > 0)
		i--;

retry:
	byte_count = areq->nbytes;
	if (tfmctx->keylen && hmac == 0) {
		hmac = 1;
		/* shift all SG one slot up, to free slot 0 for IPAD */
		for (k = 6; k >= 0; k--) {
			rctx->t_src[k + 1].addr = rctx->t_src[k].addr;
			rctx->t_src[k + 1].len = rctx->t_src[k].len;
			rctx->t_dst[k + 1].addr = rctx->t_dst[k].addr;
			rctx->t_dst[k + 1].len = rctx->t_dst[k].len;
		}
		addr_xpad = dma_map_single(ss->dev, tfmctx->ipad, bs, DMA_TO_DEVICE);
		err = dma_mapping_error(ss->dev, addr_xpad);
		if (err) {
			dev_err(ss->dev, "Fail to create DMA mapping of ipad\n");
			goto err_dma_xpad;
		}
		rctx->t_src[0].addr = addr_xpad;
		rctx->t_src[0].len = bs / 4;
		rctx->t_dst[0].addr = addr_res;
		rctx->t_dst[0].len = digestsize / 4;
		i++;
		byte_count = areq->nbytes + bs;
	}
	if (tfmctx->keylen && hmac == 2) {
		for (i = 0; i < MAX_SG; i++) {
			rctx->t_src[i].addr = 0;
			rctx->t_src[i].len = 0;
			rctx->t_dst[i].addr = 0;
			rctx->t_dst[i].len = 0;
		}

		addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE);
		if (dma_mapping_error(ss->dev, addr_res)) {
			dev_err(ss->dev, "Fail to create DMA mapping of result\n");
			err = -EINVAL;
			goto err_dma_result;
		}
		addr_xpad = dma_map_single(ss->dev, tfmctx->opad, bs, DMA_TO_DEVICE);
		err = dma_mapping_error(ss->dev, addr_xpad);
		if (err) {
			dev_err(ss->dev, "Fail to create DMA mapping of opad\n");
			goto err_dma_xpad;
		}
		rctx->t_src[0].addr = addr_xpad;
		rctx->t_src[0].len = bs / 4;

		memcpy(bf, result, digestsize);
		j = digestsize / 4;
		i = 1;
		byte_count = digestsize + bs;

		rctx->t_dst[0].addr = addr_res;
		rctx->t_dst[0].len = digestsize / 4;
	}

	switch (algt->ss_algo_id) {
	case SS_ID_HASH_MD5:
		j = hash_pad(bf, 4096, j, byte_count, true, bs);
		break;
	case SS_ID_HASH_SHA1:
	case SS_ID_HASH_SHA224:
	case SS_ID_HASH_SHA256:
		j = hash_pad(bf, 4096, j, byte_count, false, bs);
		break;
	}
	if (!j) {
		err = -EINVAL;
		goto theend;
	}

	addr_pad = dma_map_single(ss->dev, pad, j * 4, DMA_TO_DEVICE);
	if (dma_mapping_error(ss->dev, addr_pad)) {
		dev_err(ss->dev, "DMA error on padding SG\n");
		err = -EINVAL;
		goto err_dma_pad;
	}
	rctx->t_src[i].addr = addr_pad;
	rctx->t_src[i].len = j;
	rctx->t_dst[i].addr = addr_res;
	rctx->t_dst[i].len = digestsize / 4;

	err = sun8i_ss_run_hash_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));

	/*
	 * Mini cheat-sheet for the DMA map/unmap flow below; note that
	 * the error labels double as the normal unmap path on success.
	 *
	 * hmac == 0 (flow start, keyed or not):
	 *	MAP src
	 *	MAP res
	 *
	 * retry:
	 * if keyed and hmac == 0 then hmac = 1
	 *	MAP xpad (ipad)
	 * if hmac == 2
	 *	MAP res
	 *	MAP xpad (opad)
	 * MAP pad
	 * ACTION!
	 * UNMAP pad
	 * if hmac
	 *	UNMAP xpad
	 * UNMAP res
	 * if hmac < 2
	 *	UNMAP src
	 *
	 * if hmac == 1 then hmac = 2, goto retry
	 */

	dma_unmap_single(ss->dev, addr_pad, j * 4, DMA_TO_DEVICE);

err_dma_pad:
	if (hmac > 0)
		dma_unmap_single(ss->dev, addr_xpad, bs, DMA_TO_DEVICE);
err_dma_xpad:
	dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);
err_dma_result:
	if (hmac < 2)
		dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
			     DMA_TO_DEVICE);
	if (hmac == 1 && !err) {
		hmac = 2;
		goto retry;
	}

	if (!err)
		memcpy(areq->result, result, crypto_ahash_digestsize(tfm));
theend:
	local_bh_disable();
	crypto_finalize_hash_request(engine, breq, err);
	local_bh_enable();
	return 0;
}