// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ss-hash.c - hardware cryptographic offloader for
 * Allwinner A80/A83T SoC
 *
 * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
 *
 * This file adds support for MD5 and SHA1/SHA224/SHA256.
 *
 * You can find the datasheet in Documentation/arch/arm/sunxi.rst
 */

#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "sun8i-ss.h"

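/*
 * Pre-hash over-long HMAC keys: per RFC 2104, a key longer than the block
 * size is first hashed (with sha1 here, matching the hmac(sha1) algorithm
 * this driver registers) and the digest becomes the effective key.
 */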
static int sun8i_ss_hashkey(struct sun8i_ss_hash_tfm_ctx *tfmctx, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_shash *xtfm;
	int ret;

	xtfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(xtfm))
		return PTR_ERR(xtfm);

	ret = crypto_shash_tfm_digest(xtfm, key, keylen, tfmctx->key);
	if (ret)
		dev_err(tfmctx->ss->dev, "shash digest error ret=%d\n", ret);

	crypto_free_shash(xtfm);
	return ret;
}

int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			 unsigned int keylen)
{
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(ahash);
	int digestsize, i;
	int bs = crypto_ahash_blocksize(ahash);
	int ret;

	digestsize = crypto_ahash_digestsize(ahash);

	if (keylen > bs) {
		ret = sun8i_ss_hashkey(tfmctx, key, keylen);
		if (ret)
			return ret;
		tfmctx->keylen = digestsize;
	} else {
		tfmctx->keylen = keylen;
		memcpy(tfmctx->key, key, keylen);
	}

	tfmctx->ipad = kzalloc(bs, GFP_KERNEL);
	if (!tfmctx->ipad)
		return -ENOMEM;
	tfmctx->opad = kzalloc(bs, GFP_KERNEL);
	if (!tfmctx->opad) {
		ret = -ENOMEM;
		goto err_opad;
	}

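	/*
	 * Build the RFC 2104 inner/outer pads: the zero-padded key XORed
	 * with 0x36 (HMAC_IPAD_VALUE) and 0x5c (HMAC_OPAD_VALUE).
	 */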
	memset(tfmctx->key + tfmctx->keylen, 0, bs - tfmctx->keylen);
	memcpy(tfmctx->ipad, tfmctx->key, tfmctx->keylen);
	memcpy(tfmctx->opad, tfmctx->key, tfmctx->keylen);
	for (i = 0; i < bs; i++) {
		tfmctx->ipad[i] ^= HMAC_IPAD_VALUE;
		tfmctx->opad[i] ^= HMAC_OPAD_VALUE;
	}

	ret = crypto_ahash_setkey(tfmctx->fallback_tfm, key, keylen);
	if (!ret)
		return 0;

	memzero_explicit(tfmctx->key, keylen);
	kfree_sensitive(tfmctx->opad);
err_opad:
	kfree_sensitive(tfmctx->ipad);
	return ret;
}

int sun8i_ss_hash_init_tfm(struct crypto_ahash *tfm)
{
	struct sun8i_ss_hash_tfm_ctx *op = crypto_ahash_ctx(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(tfm);
	struct sun8i_ss_alg_template *algt;
	int err;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
	op->ss = algt->ss;

	/* FALLBACK */
	op->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0,
					      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(algt->ss->dev, "Fallback driver could not be loaded\n");
		return PTR_ERR(op->fallback_tfm);
	}

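	/*
	 * Mirror the fallback's state and request sizes so export()/import()
	 * round-trip and the per-request fallback context fits in reqctx.
	 */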
	crypto_ahash_set_statesize(tfm,
				   crypto_ahash_statesize(op->fallback_tfm));

	crypto_ahash_set_reqsize(tfm,
				 sizeof(struct sun8i_ss_hash_reqctx) +
				 crypto_ahash_reqsize(op->fallback_tfm));

	memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm),
	       CRYPTO_MAX_ALG_NAME);

	err = pm_runtime_get_sync(op->ss->dev);
	if (err < 0)
		goto error_pm;
	return 0;
error_pm:
	pm_runtime_put_noidle(op->ss->dev);
	crypto_free_ahash(op->fallback_tfm);
	return err;
}

void sun8i_ss_hash_exit_tfm(struct crypto_ahash *tfm)
{
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	kfree_sensitive(tfmctx->ipad);
	kfree_sensitive(tfmctx->opad);

	crypto_free_ahash(tfmctx->fallback_tfm);
	pm_runtime_put_sync_suspend(tfmctx->ss->dev);
}

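/*
 * The SS hardware only computes complete one-shot digests, so init(),
 * update(), final(), finup(), export() and import() all delegate to the
 * software fallback; only digest() may be offloaded.
 */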
int sun8i_ss_hash_init(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	memset(rctx, 0, sizeof(struct sun8i_ss_hash_reqctx));

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

int sun8i_ss_hash_export(struct ahash_request *areq, void *out)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

int sun8i_ss_hash_import(struct ahash_request *areq, const void *in)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

int sun8i_ss_hash_final(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = areq->result;

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
		struct ahash_alg *alg = crypto_ahash_alg(tfm);
		struct sun8i_ss_alg_template *algt __maybe_unused;

		algt = container_of(alg, struct sun8i_ss_alg_template,
				    alg.hash.base);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
		algt->stat_fb++;
#endif
	}

	return crypto_ahash_final(&rctx->fallback_req);
}

int sun8i_ss_hash_update(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

int sun8i_ss_hash_finup(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
		struct ahash_alg *alg = crypto_ahash_alg(tfm);
		struct sun8i_ss_alg_template *algt __maybe_unused;

		algt = container_of(alg, struct sun8i_ss_alg_template,
				    alg.hash.base);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
		algt->stat_fb++;
#endif
	}

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int sun8i_ss_hash_digest_fb(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
		struct ahash_alg *alg = crypto_ahash_alg(tfm);
		struct sun8i_ss_alg_template *algt __maybe_unused;

		algt = container_of(alg, struct sun8i_ss_alg_template,
				    alg.hash.base);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
		algt->stat_fb++;
#endif
	}

	return crypto_ahash_digest(&rctx->fallback_req);
}

static int sun8i_ss_run_hash_task(struct sun8i_ss_dev *ss,
				  struct sun8i_ss_hash_reqctx *rctx,
				  const char *name)
{
	int flow = rctx->flow;
	u32 v = SS_START;
	int i;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	ss->flows[flow].stat_req++;
#endif

	/* choose between stream0/stream1 */
	if (flow)
		v |= SS_FLOW1;
	else
		v |= SS_FLOW0;

	v |= rctx->method;

	for (i = 0; i < MAX_SG; i++) {
		if (!rctx->t_dst[i].addr)
			break;

		mutex_lock(&ss->mlock);
		if (i > 0) {
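			/*
			 * For chunks after the first, BIT(17) plus reloading
			 * the previous partial digest into the key/IV
			 * registers presumably makes the SS resume hashing
			 * from that state instead of the init constants.
			 */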
			v |= BIT(17);
			writel(rctx->t_dst[i - 1].addr, ss->base + SS_KEY_ADR_REG);
			writel(rctx->t_dst[i - 1].addr, ss->base + SS_IV_ADR_REG);
		}

		dev_dbg(ss->dev,
			"Processing SG %d on flow %d %s ctl=%x %d to %d method=%x src=%x dst=%x\n",
			i, flow, name, v,
			rctx->t_src[i].len, rctx->t_dst[i].len,
			rctx->method, rctx->t_src[i].addr, rctx->t_dst[i].addr);

		writel(rctx->t_src[i].addr, ss->base + SS_SRC_ADR_REG);
		writel(rctx->t_dst[i].addr, ss->base + SS_DST_ADR_REG);
		writel(rctx->t_src[i].len, ss->base + SS_LEN_ADR_REG);
		writel(BIT(0) | BIT(1), ss->base + SS_INT_CTL_REG);

		reinit_completion(&ss->flows[flow].complete);
		ss->flows[flow].status = 0;
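		/* Ensure the cleared status is visible before starting the engine. */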
		wmb();

		writel(v, ss->base + SS_CTL_REG);
		mutex_unlock(&ss->mlock);
		wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,
							  msecs_to_jiffies(2000));
		if (ss->flows[flow].status == 0) {
			dev_err(ss->dev, "DMA timeout for %s\n", name);
			return -EFAULT;
		}
	}

	return 0;
}

static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = crypto_ahash_alg(tfm);
	struct sun8i_ss_alg_template *algt;
	struct scatterlist *sg;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);

	if (areq->nbytes == 0) {
		algt->stat_fb_len++;
		return true;
	}

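	/*
	 * The pad buffer must hold any bounced tail plus the padding
	 * block, so requests close to MAX_PAD_SIZE go to the fallback.
	 */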
	if (areq->nbytes >= MAX_PAD_SIZE - 64) {
		algt->stat_fb_len++;
		return true;
	}

	/* we need to reserve one SG slot for the padding */
	if (sg_nents(areq->src) > MAX_SG - 1) {
		algt->stat_fb_sgnum++;
		return true;
	}

	sg = areq->src;
	while (sg) {
		/*
		 * The SS can only hash full blocks; since it supports only
		 * MD5, SHA1, SHA224 and SHA256, the block size is always 64.
		 * Only the last block can be bounced to the pad buffer.
		 */
		if (sg->length % 64 && sg_next(sg)) {
			algt->stat_fb_sglen++;
			return true;
		}
		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
			algt->stat_fb_align++;
			return true;
		}
		if (sg->length % 4) {
			algt->stat_fb_sglen++;
			return true;
		}
		sg = sg_next(sg);
	}
	return false;
}

int sun8i_ss_hash_digest(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct ahash_alg *alg = crypto_ahash_alg(tfm);
	struct sun8i_ss_alg_template *algt;
	struct sun8i_ss_dev *ss;
	struct crypto_engine *engine;
	int e;

	if (sun8i_ss_hash_need_fallback(areq))
		return sun8i_ss_hash_digest_fb(areq);

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
	ss = algt->ss;

	e = sun8i_ss_get_engine_number(ss);
	rctx->flow = e;
	engine = ss->flows[e].engine;

	return crypto_transfer_hash_request_to_engine(engine, areq);
}

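/*
 * hash_pad() appends Merkle-Damgard padding at word index padi in buf:
 * the 0x80 marker, zero fill, then the message bit length (little-endian
 * for MD5, big-endian for SHA). Returns the new word count, 0 on overflow.
 */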
static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs)
{
	u64 fill, min_fill, j, k;
	__be64 *bebits;
	__le64 *lebits;

	j = padi;
	buf[j++] = cpu_to_le32(0x80);

	if (bs == 64) {
		fill = 64 - (byte_count % 64);
		min_fill = 2 * sizeof(u32) + sizeof(u32);
	} else {
		fill = 128 - (byte_count % 128);
		min_fill = 4 * sizeof(u32) + sizeof(u32);
	}

	if (fill < min_fill)
		fill += bs;

	k = j;
	j += (fill - min_fill) / sizeof(u32);
	if (j * 4 > bufsize) {
		pr_err("%s OVERFLOW %llu\n", __func__, j);
		return 0;
	}
	for (; k < j; k++)
		buf[k] = 0;

	if (le) {
		/* MD5 */
		lebits = (__le64 *)&buf[j];
		*lebits = cpu_to_le64(byte_count << 3);
		j += 2;
	} else {
		if (bs == 64) {
			/* sha1 sha224 sha256 */
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count << 3);
			j += 2;
		} else {
			/* sha384 sha512 */
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count >> 61);
			j += 2;
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count << 3);
			j += 2;
		}
	}
	if (j * 4 > bufsize) {
		pr_err("%s OVERFLOW %llu\n", __func__, j);
		return 0;
	}

	return j;
}

/* sun8i_ss_hash_run - run an ahash request
 * Send the data of the request to the SS along with an extra SG carrying
 * the padding.
 */
int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct ahash_alg *alg = crypto_ahash_alg(tfm);
	struct sun8i_ss_alg_template *algt;
	struct sun8i_ss_dev *ss;
	struct scatterlist *sg;
	int bs = crypto_ahash_blocksize(tfm);
	int nr_sgs, err, digestsize;
	unsigned int len;
	u64 byte_count;
	void *pad, *result;
	int j, i, k, todo;
	dma_addr_t addr_res, addr_pad, addr_xpad;
	__le32 *bf;
	/* HMAC step:
	 * 0: normal hashing
	 * 1: IPAD
	 * 2: OPAD
	 */
	int hmac = 0;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
	ss = algt->ss;

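	/*
	 * SHA224 runs on the SHA256 engine, which always produces a full
	 * SHA256-sized digest; the result is truncated when copied out.
	 */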
	digestsize = crypto_ahash_digestsize(tfm);
	if (digestsize == SHA224_DIGEST_SIZE)
		digestsize = SHA256_DIGEST_SIZE;

	result = ss->flows[rctx->flow].result;
	pad = ss->flows[rctx->flow].pad;
	bf = (__le32 *)pad;

	for (i = 0; i < MAX_SG; i++) {
		rctx->t_dst[i].addr = 0;
		rctx->t_dst[i].len = 0;
	}

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt->stat_req++;
#endif

	rctx->method = ss->variant->alg_hash[algt->ss_algo_id];

	nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
	if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
		dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
		err = -EINVAL;
		goto theend;
	}

	addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE);
	if (dma_mapping_error(ss->dev, addr_res)) {
		dev_err(ss->dev, "DMA map dest\n");
		err = -EINVAL;
		goto err_dma_result;
	}

	j = 0;
	len = areq->nbytes;
	sg = areq->src;
	i = 0;
	while (len > 0 && sg) {
		if (sg_dma_len(sg) == 0) {
			sg = sg_next(sg);
			continue;
		}
		todo = min(len, sg_dma_len(sg));
		/* only the last SG may have a length that is not a multiple of 64 */
		if (todo % 64 == 0) {
			rctx->t_src[i].addr = sg_dma_address(sg);
			rctx->t_src[i].len = todo / 4;
			rctx->t_dst[i].addr = addr_res;
			rctx->t_dst[i].len = digestsize / 4;
			len -= todo;
		} else {
			scatterwalk_map_and_copy(bf, sg, 0, todo, 0);
			j += todo / 4;
			len -= todo;
		}
		sg = sg_next(sg);
		i++;
	}
	if (len > 0) {
		dev_err(ss->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend;
	}

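	/*
	 * If a partial tail was bounced into the pad buffer, reuse its SG
	 * slot: the bounced bytes and the padding go out as a single SG.
	 */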
	if (j > 0)
		i--;

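	/*
	 * HMAC is done in two hardware passes: hash(ipad || message) first,
	 * then hash(opad || inner digest); hmac tracks the current pass.
	 */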
retry:
	byte_count = areq->nbytes;
	if (tfmctx->keylen && hmac == 0) {
		hmac = 1;
		/* shift all SGs one slot up, to free slot 0 for the IPAD */
		for (k = 6; k >= 0; k--) {
			rctx->t_src[k + 1].addr = rctx->t_src[k].addr;
			rctx->t_src[k + 1].len = rctx->t_src[k].len;
			rctx->t_dst[k + 1].addr = rctx->t_dst[k].addr;
			rctx->t_dst[k + 1].len = rctx->t_dst[k].len;
		}
		addr_xpad = dma_map_single(ss->dev, tfmctx->ipad, bs, DMA_TO_DEVICE);
		err = dma_mapping_error(ss->dev, addr_xpad);
		if (err) {
			dev_err(ss->dev, "Fail to create DMA mapping of ipad\n");
			goto err_dma_xpad;
		}
		rctx->t_src[0].addr = addr_xpad;
		rctx->t_src[0].len = bs / 4;
		rctx->t_dst[0].addr = addr_res;
		rctx->t_dst[0].len = digestsize / 4;
		i++;
		byte_count = areq->nbytes + bs;
	}
	if (tfmctx->keylen && hmac == 2) {
		for (i = 0; i < MAX_SG; i++) {
			rctx->t_src[i].addr = 0;
			rctx->t_src[i].len = 0;
			rctx->t_dst[i].addr = 0;
			rctx->t_dst[i].len = 0;
		}

		addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE);
		if (dma_mapping_error(ss->dev, addr_res)) {
			dev_err(ss->dev, "Fail to create DMA mapping of result\n");
			err = -EINVAL;
			goto err_dma_result;
		}
		addr_xpad = dma_map_single(ss->dev, tfmctx->opad, bs, DMA_TO_DEVICE);
		err = dma_mapping_error(ss->dev, addr_xpad);
		if (err) {
			dev_err(ss->dev, "Fail to create DMA mapping of opad\n");
			goto err_dma_xpad;
		}
		rctx->t_src[0].addr = addr_xpad;
		rctx->t_src[0].len = bs / 4;

		memcpy(bf, result, digestsize);
		j = digestsize / 4;
		i = 1;
		byte_count = digestsize + bs;

		rctx->t_dst[0].addr = addr_res;
		rctx->t_dst[0].len = digestsize / 4;
	}

	switch (algt->ss_algo_id) {
	case SS_ID_HASH_MD5:
		j = hash_pad(bf, 4096, j, byte_count, true, bs);
		break;
	case SS_ID_HASH_SHA1:
	case SS_ID_HASH_SHA224:
	case SS_ID_HASH_SHA256:
		j = hash_pad(bf, 4096, j, byte_count, false, bs);
		break;
	}
	if (!j) {
		err = -EINVAL;
		goto theend;
	}

	addr_pad = dma_map_single(ss->dev, pad, j * 4, DMA_TO_DEVICE);
	if (dma_mapping_error(ss->dev, addr_pad)) {
		dev_err(ss->dev, "DMA error on padding SG\n");
		err = -EINVAL;
		goto err_dma_pad;
	}
	rctx->t_src[i].addr = addr_pad;
	rctx->t_src[i].len = j;
	rctx->t_dst[i].addr = addr_res;
	rctx->t_dst[i].len = digestsize / 4;

	err = sun8i_ss_run_hash_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));

	/*
	 * Mini helper for checking DMA map/unmap.
	 * The flow starts with hmac == 0 (hmac == 1 for HMAC requests):
	 * hmac == 0:
	 *	MAP src
	 *	MAP res
	 *
	 * retry:
	 * if hmac, then hmac = 1
	 *	MAP xpad (ipad)
	 * if hmac == 2
	 *	MAP res
	 *	MAP xpad (opad)
	 * MAP pad
	 * ACTION!
	 * UNMAP pad
	 * if hmac
	 *	UNMAP xpad
	 * UNMAP res
	 * if hmac < 2
	 *	UNMAP src
	 *
	 * if hmac == 1, then hmac = 2 and goto retry
	 */

	dma_unmap_single(ss->dev, addr_pad, j * 4, DMA_TO_DEVICE);

err_dma_pad:
	if (hmac > 0)
		dma_unmap_single(ss->dev, addr_xpad, bs, DMA_TO_DEVICE);
err_dma_xpad:
	dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);
err_dma_result:
	if (hmac < 2)
		dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
			     DMA_TO_DEVICE);
	if (hmac == 1 && !err) {
		hmac = 2;
		goto retry;
	}

	if (!err)
		memcpy(areq->result, result, crypto_ahash_digestsize(tfm));
theend:
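	/*
	 * crypto_finalize_hash_request() runs the request completion
	 * callback; BHs are disabled here since such callbacks commonly
	 * assume softirq context.
	 */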
	local_bh_disable();
	crypto_finalize_hash_request(engine, breq, err);
	local_bh_enable();
	return 0;
}
701