// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2021 Aspeed Technology Inc.
 */

#include "aspeed-hace.h"
#include <crypto/engine.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
#define AHASH_DBG(h, fmt, ...)	\
	dev_info((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
#else
#define AHASH_DBG(h, fmt, ...)	\
	dev_dbg((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
#endif

/* Initialization Vectors for SHA-family */
static const __be32 sha1_iv[8] = {
	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
	cpu_to_be32(SHA1_H4), 0, 0, 0
};

static const __be32 sha224_iv[8] = {
	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 sha256_iv[8] = {
	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};

static const __be64 sha384_iv[8] = {
	cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
	cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
	cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
	cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7)
};

static const __be64 sha512_iv[8] = {
	cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
	cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
	cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
	cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7)
};

static int aspeed_sham_init(struct ahash_request *req);
static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev);

static int aspeed_sham_export(struct ahash_request *req, void *out)
{
	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
	union {
		u8 *u8;
		u64 *u64;
	} p = { .u8 = out };

	memcpy(out, rctx->digest, rctx->ivsize);
	p.u8 += rctx->ivsize;
	put_unaligned(rctx->digcnt[0], p.u64++);
	if (rctx->ivsize == 64)
		put_unaligned(rctx->digcnt[1], p.u64);
	return 0;
}

static int aspeed_sham_import(struct ahash_request *req, const void *in)
{
	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
	union {
		const u8 *u8;
		const u64 *u64;
	} p = { .u8 = in };
	int err;

	err = aspeed_sham_init(req);
	if (err)
		return err;

	memcpy(rctx->digest, in, rctx->ivsize);
	p.u8 += rctx->ivsize;
	rctx->digcnt[0] = get_unaligned(p.u64++);
	if (rctx->ivsize == 64)
		rctx->digcnt[1] = get_unaligned(p.u64);
	return 0;
}

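/*
 * Exported partial-state layout, as produced by aspeed_sham_export() and
 * consumed by aspeed_sham_import():
 *
 *	digest    - ivsize bytes of hardware digest state
 *	digcnt[0] - u64 byte count (low half), stored unaligned
 *	digcnt[1] - u64 byte count (high half), SHA-384/512 only (ivsize == 64)
 */
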
/* The purpose of this padding is to ensure that the padded message is a
 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
 * The byte 0x80 (a "1" bit followed by seven "0" bits) is appended to the
 * message, followed by "padlen - 1" zero bytes.  Then a 64-bit block
 * (SHA1/SHA224/SHA256) or 128-bit block (SHA384/SHA512) holding the
 * message length in bits is appended.
 *
 * For SHA1/SHA224/SHA256, padlen is calculated as follows,
 * with index = message length % 64:
 *  - if index < 56 then padlen = 56 - index
 *  - else padlen = 64 + 56 - index
 *
 * For SHA384/SHA512, padlen is calculated as follows,
 * with index = message length % 128:
 *  - if index < 112 then padlen = 112 - index
 *  - else padlen = 128 + 112 - index
 */
static int aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
				     struct aspeed_sham_reqctx *rctx, u8 *buf)
{
	unsigned int index, padlen, bitslen;
	__be64 bits[2];

	AHASH_DBG(hace_dev, "rctx flags:0x%x\n", (u32)rctx->flags);

	switch (rctx->flags & SHA_FLAGS_MASK) {
	case SHA_FLAGS_SHA1:
	case SHA_FLAGS_SHA224:
	case SHA_FLAGS_SHA256:
		bits[0] = cpu_to_be64(rctx->digcnt[0] << 3);
		index = rctx->digcnt[0] & 0x3f;
		padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
		bitslen = 8;
		break;
	default:
		bits[1] = cpu_to_be64(rctx->digcnt[0] << 3);
		bits[0] = cpu_to_be64(rctx->digcnt[1] << 3 |
				      rctx->digcnt[0] >> 61);
		index = rctx->digcnt[0] & 0x7f;
		padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
		bitslen = 16;
		break;
	}
	buf[0] = 0x80;
	memset(buf + 1, 0, padlen - 1);
	memcpy(buf + padlen, bits, bitslen);
	return padlen + bitslen;
}

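/*
 * Worked example (SHA-256): after 3 message bytes, index = 3,
 * padlen = 56 - 3 = 53 and bitslen = 8, so the function writes 0x80,
 * 52 zero bytes and the 64-bit big-endian value 24 (3 << 3), returning
 * 61 and bringing the padded stream to one full 64-byte block.
 */
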
static void aspeed_ahash_update_counter(struct aspeed_sham_reqctx *rctx,
					unsigned int len)
{
	rctx->offset += len;
	rctx->digcnt[0] += len;
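	/* Propagate the carry when the low 64-bit byte count wraps around. */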
	if (rctx->digcnt[0] < len)
		rctx->digcnt[1]++;
}

/*
 * Prepare DMA buffer before hardware engine processing.
 */
static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
{
	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
	struct ahash_request *req = hash_engine->req;
	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
	unsigned int length, remain;
	bool final = false;

	length = rctx->total - rctx->offset;
	remain = length - round_down(length, rctx->block_size);

	AHASH_DBG(hace_dev, "length:0x%x, remain:0x%x\n", length, remain);

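	/*
	 * Three cases below:
	 * 1) More data than the DMA buffer can hold: copy one full buffer.
	 * 2) FINUP and the data plus final padding fit in the buffer:
	 *    copy everything and append the padding.
	 * 3) Otherwise: copy whole blocks only and keep the remainder
	 *    for a later pass.
	 */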
	if (length > ASPEED_HASH_SRC_DMA_BUF_LEN) {
		length = ASPEED_HASH_SRC_DMA_BUF_LEN;
	} else if (rctx->flags & SHA_FLAGS_FINUP) {
		if (round_up(length, rctx->block_size) + rctx->block_size >
		    ASPEED_CRYPTO_SRC_DMA_BUF_LEN)
			length = round_down(length - 1, rctx->block_size);
		else
			final = true;
	} else {
		length -= remain;
	}
	memcpy_from_sglist(hash_engine->ahash_src_addr, rctx->src_sg, rctx->offset, length);
	aspeed_ahash_update_counter(rctx, length);
	if (final)
		length += aspeed_ahash_fill_padding(
			hace_dev, rctx, hash_engine->ahash_src_addr + length);

	rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
					       SHA512_DIGEST_SIZE,
					       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
		dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
		return -ENOMEM;
	}

	hash_engine->src_length = length;
	hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
	hash_engine->digest_dma = rctx->digest_dma_addr;

	return 0;
}

/*
 * Prepare the DMA source as an SG descriptor list before hardware
 * engine processing.
 */
static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
{
	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
	struct ahash_request *req = hash_engine->req;
	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
	bool final = rctx->flags & SHA_FLAGS_FINUP;
	int remain, sg_len, i, max_sg_nents;
	unsigned int length, offset, total;
	struct aspeed_sg_list *src_list;
	struct scatterlist *s;
	int rc = 0;

	offset = rctx->offset;
	length = rctx->total - offset;
	remain = final ? 0 : length - round_down(length, rctx->block_size);
	length -= remain;

	AHASH_DBG(hace_dev, "%s:0x%x, %s:0x%x, %s:0x%x\n",
		  "rctx total", rctx->total,
		  "length", length, "remain", remain);

	sg_len = dma_map_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
			    DMA_TO_DEVICE);
	if (!sg_len) {
		dev_warn(hace_dev->dev, "dma_map_sg() src error\n");
		rc = -ENOMEM;
		goto end;
	}

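	/* Reserve one SG descriptor for the padding buffer on the final pass. */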
	max_sg_nents = ASPEED_HASH_SRC_DMA_BUF_LEN / sizeof(*src_list) - final;
	sg_len = min(sg_len, max_sg_nents);
	src_list = (struct aspeed_sg_list *)hash_engine->ahash_src_addr;
	rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
					       SHA512_DIGEST_SIZE,
					       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
		dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
		rc = -ENOMEM;
		goto free_src_sg;
	}

	total = 0;
	for_each_sg(rctx->src_sg, s, sg_len, i) {
		u32 phy_addr = sg_dma_address(s);
		u32 len = sg_dma_len(s);

		if (len <= offset) {
			offset -= len;
			continue;
		}

		len -= offset;
		phy_addr += offset;
		offset = 0;

		if (length > len) {
			length -= len;
		} else {
			/* Last sg list */
			len = length;
			length = 0;
		}

		total += len;
		src_list[i].phy_addr = cpu_to_le32(phy_addr);
		src_list[i].len = cpu_to_le32(len);
	}

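	/*
	 * The capped descriptor list could not cover all of the requested
	 * data: process whole blocks only and postpone the final padding.
	 */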
	if (length != 0) {
		total = round_down(total, rctx->block_size);
		final = false;
	}

	aspeed_ahash_update_counter(rctx, total);
	if (final) {
		int len = aspeed_ahash_fill_padding(hace_dev, rctx,
						    rctx->buffer);

		total += len;
		rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
						       rctx->buffer,
						       sizeof(rctx->buffer),
						       DMA_TO_DEVICE);
		if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
			dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
			rc = -ENOMEM;
			goto free_rctx_digest;
		}

		src_list[i].phy_addr = cpu_to_le32(rctx->buffer_dma_addr);
		src_list[i].len = cpu_to_le32(len);
		i++;
	}
	src_list[i - 1].len |= cpu_to_le32(HASH_SG_LAST_LIST);

	hash_engine->src_length = total;
	hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
	hash_engine->digest_dma = rctx->digest_dma_addr;

	return 0;

free_rctx_digest:
	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
free_src_sg:
	dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
		     DMA_TO_DEVICE);
end:
	return rc;
}

static int aspeed_ahash_complete(struct aspeed_hace_dev *hace_dev)
{
	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
	struct ahash_request *req = hash_engine->req;
	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);

	AHASH_DBG(hace_dev, "\n");

	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);

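	/*
	 * Keep feeding the engine while at least one full block remains,
	 * or while any data remains on a finup request.
	 */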
	if (rctx->total - rctx->offset >= rctx->block_size ||
	    (rctx->total != rctx->offset && rctx->flags & SHA_FLAGS_FINUP))
		return aspeed_ahash_req_update(hace_dev);

	hash_engine->flags &= ~CRYPTO_FLAGS_BUSY;

	if (rctx->flags & SHA_FLAGS_FINUP)
		memcpy(req->result, rctx->digest, rctx->digsize);

	crypto_finalize_hash_request(hace_dev->crypt_engine_hash, req,
				     rctx->total - rctx->offset);

	return 0;
}

/*
 * Trigger hardware engines to do the math.
 */
static int aspeed_hace_ahash_trigger(struct aspeed_hace_dev *hace_dev,
				     aspeed_hace_fn_t resume)
{
	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
	struct ahash_request *req = hash_engine->req;
	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);

	AHASH_DBG(hace_dev, "src_dma:%pad, digest_dma:%pad, length:%zu\n",
		  &hash_engine->src_dma, &hash_engine->digest_dma,
		  hash_engine->src_length);

	rctx->cmd |= HASH_CMD_INT_ENABLE;
	hash_engine->resume = resume;

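	/*
	 * The digest buffer is programmed into both the digest and key
	 * buffer registers; in accumulative mode the key buffer appears
	 * to supply the initial/intermediate digest.
	 */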
	ast_hace_write(hace_dev, hash_engine->src_dma, ASPEED_HACE_HASH_SRC);
	ast_hace_write(hace_dev, hash_engine->digest_dma,
		       ASPEED_HACE_HASH_DIGEST_BUFF);
	ast_hace_write(hace_dev, hash_engine->digest_dma,
		       ASPEED_HACE_HASH_KEY_BUFF);
	ast_hace_write(hace_dev, hash_engine->src_length,
		       ASPEED_HACE_HASH_DATA_LEN);

	/* Memory barrier to ensure all data is set up before the engine starts */
	mb();

	ast_hace_write(hace_dev, rctx->cmd, ASPEED_HACE_HASH_CMD);

	return -EINPROGRESS;
}

static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev)
{
	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
	struct ahash_request *req = hash_engine->req;
	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);

	AHASH_DBG(hace_dev, "\n");

	dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
		     DMA_TO_DEVICE);

	if (rctx->flags & SHA_FLAGS_FINUP && rctx->total == rctx->offset)
		dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
				 sizeof(rctx->buffer), DMA_TO_DEVICE);

	rctx->cmd &= ~HASH_CMD_HASH_SRC_SG_CTRL;

	return aspeed_ahash_complete(hace_dev);
}

static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev)
{
	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
	struct ahash_request *req = hash_engine->req;
	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
	aspeed_hace_fn_t resume;
	int ret;

	AHASH_DBG(hace_dev, "\n");

	if (hace_dev->version == AST2600_VERSION) {
		rctx->cmd |= HASH_CMD_HASH_SRC_SG_CTRL;
		resume = aspeed_ahash_update_resume_sg;
	} else {
		resume = aspeed_ahash_complete;
	}

	ret = hash_engine->dma_prepare(hace_dev);
	if (ret)
		return ret;

	return aspeed_hace_ahash_trigger(hace_dev, resume);
}

static int aspeed_hace_hash_handle_queue(struct aspeed_hace_dev *hace_dev,
					 struct ahash_request *req)
{
	return crypto_transfer_hash_request_to_engine(
			hace_dev->crypt_engine_hash, req);
}

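/*
 * Software fallback: export the partial hardware state, import it into
 * the generic implementation and let that process the remaining data;
 * for a non-final update the resulting state is exported back again.
 */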
static noinline int aspeed_ahash_fallback(struct ahash_request *req)
{
	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
	HASH_FBREQ_ON_STACK(fbreq, req);
	u8 *state = rctx->buffer;
	struct scatterlist sg[2];
	struct scatterlist *ssg;
	int ret;

	ssg = scatterwalk_ffwd(sg, req->src, rctx->offset);
	ahash_request_set_crypt(fbreq, ssg, req->result,
				rctx->total - rctx->offset);

	ret = aspeed_sham_export(req, state) ?:
	      crypto_ahash_import_core(fbreq, state);

	if (rctx->flags & SHA_FLAGS_FINUP)
		ret = ret ?: crypto_ahash_finup(fbreq);
	else
		ret = ret ?: crypto_ahash_update(fbreq) ?:
			     crypto_ahash_export_core(fbreq, state) ?:
			     aspeed_sham_import(req, state);
	HASH_REQUEST_ZERO(fbreq);
	return ret;
}

static int aspeed_ahash_do_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
	struct aspeed_engine_hash *hash_engine;
	int ret;

	hash_engine = &hace_dev->hash_engine;
	hash_engine->flags |= CRYPTO_FLAGS_BUSY;

	ret = aspeed_ahash_req_update(hace_dev);
	if (ret != -EINPROGRESS)
		return aspeed_ahash_fallback(req);

	return 0;
}

static void aspeed_ahash_prepare_request(struct crypto_engine *engine,
					 void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
	struct aspeed_engine_hash *hash_engine;

	hash_engine = &hace_dev->hash_engine;
	hash_engine->req = req;

	if (hace_dev->version == AST2600_VERSION)
		hash_engine->dma_prepare = aspeed_ahash_dma_prepare_sg;
	else
		hash_engine->dma_prepare = aspeed_ahash_dma_prepare;
}

static int aspeed_ahash_do_one(struct crypto_engine *engine, void *areq)
{
	aspeed_ahash_prepare_request(engine, areq);
	return aspeed_ahash_do_request(engine, areq);
}

static int aspeed_sham_update(struct ahash_request *req)
{
	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;

	AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);

	rctx->total = req->nbytes;
	rctx->src_sg = req->src;
	rctx->offset = 0;
	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);

	return aspeed_hace_hash_handle_queue(hace_dev, req);
}

static int aspeed_sham_finup(struct ahash_request *req)
{
	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;

	AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);

	rctx->flags |= SHA_FLAGS_FINUP;

	return aspeed_sham_update(req);
}

static int aspeed_sham_init(struct ahash_request *req)
{
	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;

	AHASH_DBG(hace_dev, "%s: digest size:%d\n",
		  crypto_tfm_alg_name(&tfm->base),
		  crypto_ahash_digestsize(tfm));

	rctx->cmd = HASH_CMD_ACC_MODE;
	rctx->flags = 0;

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		rctx->cmd |= HASH_CMD_SHA1 | HASH_CMD_SHA_SWAP;
		rctx->flags |= SHA_FLAGS_SHA1;
		rctx->digsize = SHA1_DIGEST_SIZE;
		rctx->block_size = SHA1_BLOCK_SIZE;
		rctx->ivsize = 32;
		memcpy(rctx->digest, sha1_iv, rctx->ivsize);
		break;
	case SHA224_DIGEST_SIZE:
		rctx->cmd |= HASH_CMD_SHA224 | HASH_CMD_SHA_SWAP;
		rctx->flags |= SHA_FLAGS_SHA224;
		rctx->digsize = SHA224_DIGEST_SIZE;
		rctx->block_size = SHA224_BLOCK_SIZE;
		rctx->ivsize = 32;
		memcpy(rctx->digest, sha224_iv, rctx->ivsize);
		break;
	case SHA256_DIGEST_SIZE:
		rctx->cmd |= HASH_CMD_SHA256 | HASH_CMD_SHA_SWAP;
		rctx->flags |= SHA_FLAGS_SHA256;
		rctx->digsize = SHA256_DIGEST_SIZE;
		rctx->block_size = SHA256_BLOCK_SIZE;
		rctx->ivsize = 32;
		memcpy(rctx->digest, sha256_iv, rctx->ivsize);
		break;
	case SHA384_DIGEST_SIZE:
		rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA384 |
			     HASH_CMD_SHA_SWAP;
		rctx->flags |= SHA_FLAGS_SHA384;
		rctx->digsize = SHA384_DIGEST_SIZE;
		rctx->block_size = SHA384_BLOCK_SIZE;
		rctx->ivsize = 64;
		memcpy(rctx->digest, sha384_iv, rctx->ivsize);
		break;
	case SHA512_DIGEST_SIZE:
		rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512 |
			     HASH_CMD_SHA_SWAP;
		rctx->flags |= SHA_FLAGS_SHA512;
		rctx->digsize = SHA512_DIGEST_SIZE;
		rctx->block_size = SHA512_BLOCK_SIZE;
		rctx->ivsize = 64;
		memcpy(rctx->digest, sha512_iv, rctx->ivsize);
		break;
	default:
		dev_warn(tctx->hace_dev->dev, "digest size %d not supported\n",
			 crypto_ahash_digestsize(tfm));
		return -EINVAL;
	}

	rctx->total = 0;
	rctx->digcnt[0] = 0;
	rctx->digcnt[1] = 0;

	return 0;
}

static int aspeed_sham_digest(struct ahash_request *req)
{
	return aspeed_sham_init(req) ?: aspeed_sham_finup(req);
}

static int aspeed_sham_cra_init(struct crypto_ahash *tfm)
{
	struct ahash_alg *alg = crypto_ahash_alg(tfm);
	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct aspeed_hace_alg *ast_alg;

	ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash.base);
	tctx->hace_dev = ast_alg->hace_dev;

	return 0;
}

static struct aspeed_hace_alg aspeed_ahash_algs[] = {
	{
		.alg.ahash.base = {
			.init	= aspeed_sham_init,
			.update	= aspeed_sham_update,
			.finup	= aspeed_sham_finup,
			.digest	= aspeed_sham_digest,
			.export	= aspeed_sham_export,
			.import	= aspeed_sham_import,
			.init_tfm = aspeed_sham_cra_init,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct aspeed_sham_reqctx),
				.base = {
					.cra_name		= "sha1",
					.cra_driver_name	= "aspeed-sha1",
					.cra_priority		= 300,
					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
								  CRYPTO_ALG_ASYNC |
								  CRYPTO_AHASH_ALG_BLOCK_ONLY |
								  CRYPTO_ALG_KERN_DRIVER_ONLY,
					.cra_blocksize		= SHA1_BLOCK_SIZE,
					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
					.cra_reqsize		= sizeof(struct aspeed_sham_reqctx),
					.cra_alignmask		= 0,
					.cra_module		= THIS_MODULE,
				}
			}
		},
		.alg.ahash.op = {
			.do_one_request = aspeed_ahash_do_one,
		},
	},
	{
		.alg.ahash.base = {
			.init	= aspeed_sham_init,
			.update	= aspeed_sham_update,
			.finup	= aspeed_sham_finup,
			.digest	= aspeed_sham_digest,
			.export	= aspeed_sham_export,
			.import	= aspeed_sham_import,
			.init_tfm = aspeed_sham_cra_init,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct aspeed_sham_reqctx),
				.base = {
					.cra_name		= "sha256",
					.cra_driver_name	= "aspeed-sha256",
					.cra_priority		= 300,
					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
								  CRYPTO_ALG_ASYNC |
								  CRYPTO_AHASH_ALG_BLOCK_ONLY |
								  CRYPTO_ALG_KERN_DRIVER_ONLY,
					.cra_blocksize		= SHA256_BLOCK_SIZE,
					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
					.cra_reqsize		= sizeof(struct aspeed_sham_reqctx),
					.cra_alignmask		= 0,
					.cra_module		= THIS_MODULE,
				}
			}
		},
		.alg.ahash.op = {
			.do_one_request = aspeed_ahash_do_one,
		},
	},
	{
		.alg.ahash.base = {
			.init	= aspeed_sham_init,
			.update	= aspeed_sham_update,
			.finup	= aspeed_sham_finup,
			.digest	= aspeed_sham_digest,
			.export	= aspeed_sham_export,
			.import	= aspeed_sham_import,
			.init_tfm = aspeed_sham_cra_init,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct aspeed_sham_reqctx),
				.base = {
					.cra_name		= "sha224",
					.cra_driver_name	= "aspeed-sha224",
					.cra_priority		= 300,
					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
								  CRYPTO_ALG_ASYNC |
								  CRYPTO_AHASH_ALG_BLOCK_ONLY |
								  CRYPTO_ALG_KERN_DRIVER_ONLY,
					.cra_blocksize		= SHA224_BLOCK_SIZE,
					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
					.cra_reqsize		= sizeof(struct aspeed_sham_reqctx),
					.cra_alignmask		= 0,
					.cra_module		= THIS_MODULE,
				}
			}
		},
		.alg.ahash.op = {
			.do_one_request = aspeed_ahash_do_one,
		},
	},
};

static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
	{
		.alg.ahash.base = {
			.init	= aspeed_sham_init,
			.update	= aspeed_sham_update,
			.finup	= aspeed_sham_finup,
			.digest	= aspeed_sham_digest,
			.export	= aspeed_sham_export,
			.import	= aspeed_sham_import,
			.init_tfm = aspeed_sham_cra_init,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct aspeed_sham_reqctx),
				.base = {
					.cra_name		= "sha384",
					.cra_driver_name	= "aspeed-sha384",
					.cra_priority		= 300,
					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
								  CRYPTO_ALG_ASYNC |
								  CRYPTO_AHASH_ALG_BLOCK_ONLY |
								  CRYPTO_ALG_KERN_DRIVER_ONLY,
					.cra_blocksize		= SHA384_BLOCK_SIZE,
					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
					.cra_reqsize		= sizeof(struct aspeed_sham_reqctx),
					.cra_alignmask		= 0,
					.cra_module		= THIS_MODULE,
				}
			}
		},
		.alg.ahash.op = {
			.do_one_request = aspeed_ahash_do_one,
		},
	},
	{
		.alg.ahash.base = {
			.init	= aspeed_sham_init,
			.update	= aspeed_sham_update,
			.finup	= aspeed_sham_finup,
			.digest	= aspeed_sham_digest,
			.export	= aspeed_sham_export,
			.import	= aspeed_sham_import,
			.init_tfm = aspeed_sham_cra_init,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct aspeed_sham_reqctx),
				.base = {
					.cra_name		= "sha512",
					.cra_driver_name	= "aspeed-sha512",
					.cra_priority		= 300,
					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
								  CRYPTO_ALG_ASYNC |
								  CRYPTO_AHASH_ALG_BLOCK_ONLY |
								  CRYPTO_ALG_KERN_DRIVER_ONLY,
					.cra_blocksize		= SHA512_BLOCK_SIZE,
					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
					.cra_reqsize		= sizeof(struct aspeed_sham_reqctx),
					.cra_alignmask		= 0,
					.cra_module		= THIS_MODULE,
				}
			}
		},
		.alg.ahash.op = {
			.do_one_request = aspeed_ahash_do_one,
		},
	},
};

void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++)
		crypto_engine_unregister_ahash(&aspeed_ahash_algs[i].alg.ahash);

	if (hace_dev->version != AST2600_VERSION)
		return;

	for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++)
		crypto_engine_unregister_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
}

void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
{
	int rc, i;

	AHASH_DBG(hace_dev, "\n");

	for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++) {
		aspeed_ahash_algs[i].hace_dev = hace_dev;
		rc = crypto_engine_register_ahash(&aspeed_ahash_algs[i].alg.ahash);
		if (rc) {
			AHASH_DBG(hace_dev, "Failed to register %s\n",
				  aspeed_ahash_algs[i].alg.ahash.base.halg.base.cra_name);
		}
	}

	if (hace_dev->version != AST2600_VERSION)
		return;

	for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++) {
		aspeed_ahash_algs_g6[i].hace_dev = hace_dev;
		rc = crypto_engine_register_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
		if (rc) {
			AHASH_DBG(hace_dev, "Failed to register %s\n",
				  aspeed_ahash_algs_g6[i].alg.ahash.base.halg.base.cra_name);
		}
	}
}