// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2024
 *
 * Christian Marangi <ansuelsmth@gmail.com>
 */

#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/md5.h>
#include <crypto/hmac.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>

#include "eip93-cipher.h"
#include "eip93-hash.h"
#include "eip93-main.h"
#include "eip93-common.h"
#include "eip93-regs.h"

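/*
 * Free the 64-byte data blocks queued for the engine: unmap each block
 * from DMA and free it, then reinit the list head. On a finalized
 * request, also unmap the trailing partial-block buffer kept in the
 * request context.
 */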
static void eip93_hash_free_data_blocks(struct ahash_request *req)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct eip93_device *eip93 = ctx->eip93;
	struct mkt_hash_block *block, *tmp;

	list_for_each_entry_safe(block, tmp, &rctx->blocks, list) {
		dma_unmap_single(eip93->dev, block->data_dma,
				 SHA256_BLOCK_SIZE, DMA_TO_DEVICE);
		kfree(block);
	}
	if (!list_empty(&rctx->blocks))
		INIT_LIST_HEAD(&rctx->blocks);

	if (rctx->finalize)
		dma_unmap_single(eip93->dev, rctx->data_dma,
				 rctx->data_used,
				 DMA_TO_DEVICE);
}

static void eip93_hash_free_sa_record(struct ahash_request *req)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct eip93_device *eip93 = ctx->eip93;

	if (IS_HMAC(ctx->flags))
		dma_unmap_single(eip93->dev, rctx->sa_record_hmac_base,
				 sizeof(rctx->sa_record_hmac), DMA_TO_DEVICE);

	dma_unmap_single(eip93->dev, rctx->sa_record_base,
			 sizeof(rctx->sa_record), DMA_TO_DEVICE);
}

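/*
 * Completion handler, called once the engine has processed the last
 * queued descriptor for a request: unmap sa_state, byte-swap and copy
 * out the digest when the request is finalized (or a partial hash was
 * requested), release all DMA resources and complete the request.
 */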
void eip93_hash_handle_result(struct crypto_async_request *async, int err)
{
	struct ahash_request *req = ahash_request_cast(async);
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct sa_state *sa_state = &rctx->sa_state;
	struct eip93_device *eip93 = ctx->eip93;
	int i;

	dma_unmap_single(eip93->dev, rctx->sa_state_base,
			 sizeof(*sa_state), DMA_FROM_DEVICE);

	/*
	 * With partial_hash assume a SHA256_DIGEST_SIZE buffer is passed.
	 * This is to handle SHA224, which has a 32 byte intermediate digest.
	 */
	if (rctx->partial_hash)
		digestsize = SHA256_DIGEST_SIZE;

	if (rctx->finalize || rctx->partial_hash) {
		/* Bytes need to be swapped for req->result */
		if (!IS_HASH_MD5(ctx->flags)) {
			for (i = 0; i < digestsize / sizeof(u32); i++) {
				u32 *digest = (u32 *)sa_state->state_i_digest;

				digest[i] = be32_to_cpu((__be32 __force)digest[i]);
			}
		}

		memcpy(req->result, sa_state->state_i_digest, digestsize);
	}

	eip93_hash_free_sa_record(req);
	eip93_hash_free_data_blocks(req);

	ahash_request_complete(req, err);
}

static void eip93_hash_init_sa_state_digest(u32 hash, u8 *digest)
{
	u32 sha256_init[] = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
			      SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 };
	u32 sha224_init[] = { SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
			      SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7 };
	u32 sha1_init[] = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 };
	u32 md5_init[] = { MD5_H0, MD5_H1, MD5_H2, MD5_H3 };

	/* Init HASH constant */
	switch (hash) {
	case EIP93_HASH_SHA256:
		memcpy(digest, sha256_init, sizeof(sha256_init));
		return;
	case EIP93_HASH_SHA224:
		memcpy(digest, sha224_init, sizeof(sha224_init));
		return;
	case EIP93_HASH_SHA1:
		memcpy(digest, sha1_init, sizeof(sha1_init));
		return;
	case EIP93_HASH_MD5:
		memcpy(digest, md5_init, sizeof(md5_init));
		return;
	default: /* Impossible */
		return;
	}
}

static void eip93_hash_export_sa_state(struct ahash_request *req,
				       struct eip93_hash_export_state *state)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct sa_state *sa_state = &rctx->sa_state;

	/*
	 * EIP93 has special handling for state_byte_cnt in sa_state.
	 * Even if a zero-length packet is passed (and a BADMSG is returned),
	 * state_byte_cnt is incremented for the digest handled (by the hash
	 * primitive). This is problematic with export/import, as EIP93
	 * expects a zero state_byte_cnt for the very first iteration.
	 */
	if (!rctx->len)
		memset(state->state_len, 0, sizeof(u32) * 2);
	else
		memcpy(state->state_len, sa_state->state_byte_cnt,
		       sizeof(u32) * 2);
	memcpy(state->state_hash, sa_state->state_i_digest,
	       SHA256_DIGEST_SIZE);
	state->len = rctx->len;
	state->data_used = rctx->data_used;
}

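/*
 * Common init for init() and import(): program the sa_record for a basic
 * outbound hash operation that loads and saves the intermediate state,
 * prepare the HMAC-enabled duplicate sa_record for the last block, and
 * reset the per-request counters and block list.
 */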
static void __eip93_hash_init(struct ahash_request *req)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sa_record *sa_record = &rctx->sa_record;
	int digestsize;

	digestsize = crypto_ahash_digestsize(ahash);

	eip93_set_sa_record(sa_record, 0, ctx->flags);
	sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_FROM_STATE;
	sa_record->sa_cmd0_word |= EIP93_SA_CMD_SAVE_HASH;
	sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_OPCODE;
	sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_OPCODE,
					      EIP93_SA_CMD_OPCODE_BASIC_OUT_HASH);
	sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH;
	sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH,
					      digestsize / sizeof(u32));

	/*
	 * HMAC special handling
	 * Enabling CMD_HMAC forces the inner hash to always be finalized.
	 * This causes problems when handling messages > 64 bytes, as we
	 * need to produce intermediate inner hashes when sending the
	 * intermediate 64-byte blocks.
	 *
	 * To handle this, enable CMD_HMAC only on the last block.
	 * We make a duplicate of sa_record and, on the last descriptor,
	 * we pass a dedicated sa_record with CMD_HMAC enabled to make
	 * EIP93 apply the outer hash.
	 */
	if (IS_HMAC(ctx->flags)) {
		struct sa_record *sa_record_hmac = &rctx->sa_record_hmac;

		memcpy(sa_record_hmac, sa_record, sizeof(*sa_record));
		/* Copy the pre-hashed opad for HMAC */
		memcpy(sa_record_hmac->sa_o_digest, ctx->opad, SHA256_DIGEST_SIZE);

		/* Disable HMAC for the normal hash sa_record */
		sa_record->sa_cmd1_word &= ~EIP93_SA_CMD_HMAC;
	}

	rctx->len = 0;
	rctx->data_used = 0;
	rctx->partial_hash = false;
	rctx->finalize = false;
	INIT_LIST_HEAD(&rctx->blocks);
}

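/*
 * Map one block of data and queue a command descriptor for it. With last
 * true, the descriptor is flagged so that the resulting interrupt
 * completes the request: the async request is registered in the IDR and,
 * when finalizing a non-partial hash, the HASH_FINAL bit is set (for
 * HMAC, the dedicated sa_record with CMD_HMAC enabled is used instead).
 */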
static int eip93_send_hash_req(struct crypto_async_request *async, u8 *data,
			       dma_addr_t *data_dma, u32 len, bool last)
{
	struct ahash_request *req = ahash_request_cast(async);
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct eip93_device *eip93 = ctx->eip93;
	struct eip93_descriptor cdesc = { };
	dma_addr_t src_addr;
	int ret;

	/* Map block data to DMA */
	src_addr = dma_map_single(eip93->dev, data, len, DMA_TO_DEVICE);
	ret = dma_mapping_error(eip93->dev, src_addr);
	if (ret)
		return ret;

	cdesc.pe_ctrl_stat_word = FIELD_PREP(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN,
					     EIP93_PE_CTRL_HOST_READY);
	cdesc.sa_addr = rctx->sa_record_base;
	cdesc.arc4_addr = 0;

	cdesc.state_addr = rctx->sa_state_base;
	cdesc.src_addr = src_addr;
	cdesc.pe_length_word = FIELD_PREP(EIP93_PE_LENGTH_HOST_PE_READY,
					  EIP93_PE_LENGTH_HOST_READY);
	cdesc.pe_length_word |= FIELD_PREP(EIP93_PE_LENGTH_LENGTH,
					   len);

	cdesc.user_id |= FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, EIP93_DESC_HASH);

	if (last) {
		int crypto_async_idr;

		if (rctx->finalize && !rctx->partial_hash) {
			/* For the last block, pass the sa_record with CMD_HMAC enabled */
			if (IS_HMAC(ctx->flags)) {
				struct sa_record *sa_record_hmac = &rctx->sa_record_hmac;

				rctx->sa_record_hmac_base = dma_map_single(eip93->dev,
									   sa_record_hmac,
									   sizeof(*sa_record_hmac),
									   DMA_TO_DEVICE);
				ret = dma_mapping_error(eip93->dev, rctx->sa_record_hmac_base);
				if (ret)
					return ret;

				cdesc.sa_addr = rctx->sa_record_hmac_base;
			}

			cdesc.pe_ctrl_stat_word |= EIP93_PE_CTRL_PE_HASH_FINAL;
		}

		scoped_guard(spinlock_bh, &eip93->ring->idr_lock)
			crypto_async_idr = idr_alloc(&eip93->ring->crypto_async_idr, async, 0,
						     EIP93_RING_NUM - 1, GFP_ATOMIC);

		cdesc.user_id |= FIELD_PREP(EIP93_PE_USER_ID_CRYPTO_IDR, (u16)crypto_async_idr) |
				 FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, EIP93_DESC_LAST);
	}

again:
	ret = eip93_put_descriptor(eip93, &cdesc);
	if (ret) {
		usleep_range(EIP93_RING_BUSY_DELAY,
			     EIP93_RING_BUSY_DELAY * 2);
		goto again;
	}

	/* Writing the new descriptor count starts the DMA action */
	writel(1, eip93->base + EIP93_REG_PE_CD_COUNT);

	*data_dma = src_addr;
	return 0;
}

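/*
 * init() entry point: load the standard init vector for the selected
 * hash in sa_state and reset the request context. For HMAC, the ipad
 * block precomputed by setkey() is queued as the very first data block.
 */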
static int eip93_hash_init(struct ahash_request *req)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sa_state *sa_state = &rctx->sa_state;

	memset(sa_state->state_byte_cnt, 0, sizeof(u32) * 2);
	eip93_hash_init_sa_state_digest(ctx->flags & EIP93_HASH_MASK,
					sa_state->state_i_digest);

	__eip93_hash_init(req);

	/* For HMAC, set up the initial block with the ipad */
	if (IS_HMAC(ctx->flags)) {
		memcpy(rctx->data, ctx->ipad, SHA256_BLOCK_SIZE);

		rctx->data_used = SHA256_BLOCK_SIZE;
		rctx->len += SHA256_BLOCK_SIZE;
	}

	return 0;
}

/*
 * With complete_req true, we wait for the engine to consume all the blocks
 * in the list; otherwise we just queue the blocks to the engine, as final()
 * will wait. This is useful for finup().
 */
static int __eip93_hash_update(struct ahash_request *req, bool complete_req)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_async_request *async = &req->base;
	unsigned int read, to_consume = req->nbytes;
	unsigned int max_read, consumed = 0;
	struct mkt_hash_block *block;
	bool wait_req = false;
	int offset;
	int ret;

	/* Get the offset and available space to fill req data */
	offset = rctx->data_used;
	max_read = SHA256_BLOCK_SIZE - offset;

	/*
	 * Consume req in blocks of SHA256_BLOCK_SIZE.
	 * max_read is initially set to the space available in the req data
	 * and then reset to SHA256_BLOCK_SIZE.
	 */
	while (to_consume > max_read) {
		block = kzalloc(sizeof(*block), GFP_ATOMIC);
		if (!block) {
			ret = -ENOMEM;
			goto free_blocks;
		}

		read = sg_pcopy_to_buffer(req->src, sg_nents(req->src),
					  block->data + offset,
					  max_read, consumed);

		/*
		 * For the first iteration only, copy the buffered req data
		 * to the block and reset offset and max_read for the next
		 * iteration.
		 */
		if (offset > 0) {
			memcpy(block->data, rctx->data, offset);
			offset = 0;
			max_read = SHA256_BLOCK_SIZE;
		}

		list_add(&block->list, &rctx->blocks);
		to_consume -= read;
		consumed += read;
	}

	/* Write the remaining data to req data */
	read = sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				  rctx->data + offset, to_consume,
				  consumed);
	rctx->data_used = offset + read;

	/* Update counter with processed bytes */
	rctx->len += read + consumed;

	/* Consume all the blocks added to the list */
	list_for_each_entry_reverse(block, &rctx->blocks, list) {
		wait_req = complete_req &&
			    list_is_first(&block->list, &rctx->blocks);

		ret = eip93_send_hash_req(async, block->data,
					  &block->data_dma,
					  SHA256_BLOCK_SIZE, wait_req);
		if (ret)
			goto free_blocks;
	}

	return wait_req ? -EINPROGRESS : 0;

free_blocks:
	eip93_hash_free_data_blocks(req);

	return ret;
}

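/*
 * update() entry point: map sa_state and sa_record for the engine, then
 * push the new request data. Returns -EINPROGRESS when blocks have been
 * queued to the engine; the completion handler then takes care of
 * unmapping everything.
 */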
static int eip93_hash_update(struct ahash_request *req)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sa_record *sa_record = &rctx->sa_record;
	struct sa_state *sa_state = &rctx->sa_state;
	struct eip93_device *eip93 = ctx->eip93;
	int ret;

	if (!req->nbytes)
		return 0;

	rctx->sa_state_base = dma_map_single(eip93->dev, sa_state,
					     sizeof(*sa_state),
					     DMA_TO_DEVICE);
	ret = dma_mapping_error(eip93->dev, rctx->sa_state_base);
	if (ret)
		return ret;

	rctx->sa_record_base = dma_map_single(eip93->dev, sa_record,
					      sizeof(*sa_record),
					      DMA_TO_DEVICE);
	ret = dma_mapping_error(eip93->dev, rctx->sa_record_base);
	if (ret)
		goto free_sa_state;

	ret = __eip93_hash_update(req, true);
	if (ret && ret != -EINPROGRESS)
		goto free_sa_record;

	return ret;

free_sa_record:
	dma_unmap_single(eip93->dev, rctx->sa_record_base,
			 sizeof(*sa_record), DMA_TO_DEVICE);

free_sa_state:
	dma_unmap_single(eip93->dev, rctx->sa_state_base,
			 sizeof(*sa_state), DMA_TO_DEVICE);

	return ret;
}

/*
 * With map_dma true, we map the sa_record and sa_state. finup() passes
 * false here, as they are already mapped before calling update().
 */
static int __eip93_hash_final(struct ahash_request *req, bool map_dma)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct crypto_async_request *async = &req->base;
	struct sa_record *sa_record = &rctx->sa_record;
	struct sa_state *sa_state = &rctx->sa_state;
	struct eip93_device *eip93 = ctx->eip93;
	int ret;

	/* EIP93 can't handle a zero-length hash */
	if (!rctx->len && !IS_HMAC(ctx->flags)) {
		switch ((ctx->flags & EIP93_HASH_MASK)) {
		case EIP93_HASH_SHA256:
			memcpy(req->result, sha256_zero_message_hash,
			       SHA256_DIGEST_SIZE);
			break;
		case EIP93_HASH_SHA224:
			memcpy(req->result, sha224_zero_message_hash,
			       SHA224_DIGEST_SIZE);
			break;
		case EIP93_HASH_SHA1:
			memcpy(req->result, sha1_zero_message_hash,
			       SHA1_DIGEST_SIZE);
			break;
		case EIP93_HASH_MD5:
			memcpy(req->result, md5_zero_message_hash,
			       MD5_DIGEST_SIZE);
			break;
		default: /* Impossible */
			return -EINVAL;
		}

		return 0;
	}

	/* Signal that the interrupt from the engine is for the last block */
	rctx->finalize = true;

	if (map_dma) {
		rctx->sa_state_base = dma_map_single(eip93->dev, sa_state,
						     sizeof(*sa_state),
						     DMA_TO_DEVICE);
		ret = dma_mapping_error(eip93->dev, rctx->sa_state_base);
		if (ret)
			return ret;

		rctx->sa_record_base = dma_map_single(eip93->dev, sa_record,
						      sizeof(*sa_record),
						      DMA_TO_DEVICE);
		ret = dma_mapping_error(eip93->dev, rctx->sa_record_base);
		if (ret)
			goto free_sa_state;
	}

	/* Send the last block */
	ret = eip93_send_hash_req(async, rctx->data, &rctx->data_dma,
				  rctx->data_used, true);
	if (ret)
		goto free_blocks;

	return -EINPROGRESS;

free_blocks:
	eip93_hash_free_data_blocks(req);

	dma_unmap_single(eip93->dev, rctx->sa_record_base,
			 sizeof(*sa_record), DMA_TO_DEVICE);

free_sa_state:
	dma_unmap_single(eip93->dev, rctx->sa_state_base,
			 sizeof(*sa_state), DMA_TO_DEVICE);

	return ret;
}

static int eip93_hash_final(struct ahash_request *req)
{
	return __eip93_hash_final(req, true);
}

static int eip93_hash_finup(struct ahash_request *req)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sa_record *sa_record = &rctx->sa_record;
	struct sa_state *sa_state = &rctx->sa_state;
	struct eip93_device *eip93 = ctx->eip93;
	int ret;

	if (rctx->len + req->nbytes || IS_HMAC(ctx->flags)) {
		rctx->sa_state_base = dma_map_single(eip93->dev, sa_state,
						     sizeof(*sa_state),
						     DMA_TO_DEVICE);
		ret = dma_mapping_error(eip93->dev, rctx->sa_state_base);
		if (ret)
			return ret;

		rctx->sa_record_base = dma_map_single(eip93->dev, sa_record,
						      sizeof(*sa_record),
						      DMA_TO_DEVICE);
		ret = dma_mapping_error(eip93->dev, rctx->sa_record_base);
		if (ret)
			goto free_sa_state;

		ret = __eip93_hash_update(req, false);
		if (ret)
			goto free_sa_record;
	}

	return __eip93_hash_final(req, false);

free_sa_record:
	dma_unmap_single(eip93->dev, rctx->sa_record_base,
			 sizeof(*sa_record), DMA_TO_DEVICE);
free_sa_state:
	dma_unmap_single(eip93->dev, rctx->sa_state_base,
			 sizeof(*sa_state), DMA_TO_DEVICE);

	return ret;
}

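/*
 * Derive the HMAC ipad/opad material via eip93_hmac_setkey(): the ipad
 * block is queued as the first data block in init(), while the
 * pre-hashed opad digest ends up in the sa_record used for the last
 * block (see __eip93_hash_init()).
 */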
static int eip93_hash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
				  u32 keylen)
{
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
	struct eip93_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	return eip93_hmac_setkey(ctx->flags, key, keylen, digestsize,
				 ctx->ipad, ctx->opad, true);
}

static int eip93_hash_cra_init(struct crypto_tfm *tfm)
{
	struct eip93_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg,
				struct eip93_alg_template, alg.ahash.halg.base);

	crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
				     sizeof(struct eip93_hash_reqctx));

	ctx->eip93 = tmpl->eip93;
	ctx->flags = tmpl->flags;

	return 0;
}

static int eip93_hash_digest(struct ahash_request *req)
{
	int ret;

	ret = eip93_hash_init(req);
	if (ret)
		return ret;

	return eip93_hash_finup(req);
}

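/*
 * import()/export() keep the EIP93 intermediate digest, the byte counter
 * and the buffered partial block in a flat eip93_hash_export_state, so a
 * request can be suspended and resumed, possibly on another tfm.
 */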
static int eip93_hash_import(struct ahash_request *req, const void *in)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	const struct eip93_hash_export_state *state = in;
	struct sa_state *sa_state = &rctx->sa_state;

	memcpy(sa_state->state_byte_cnt, state->state_len, sizeof(u32) * 2);
	memcpy(sa_state->state_i_digest, state->state_hash, SHA256_DIGEST_SIZE);

	__eip93_hash_init(req);

	rctx->len = state->len;
	rctx->data_used = state->data_used;

	/* Skip copying data if we have nothing to copy */
	if (rctx->len)
		memcpy(rctx->data, state->data, rctx->data_used);

	return 0;
}

static int eip93_hash_export(struct ahash_request *req, void *out)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct eip93_hash_export_state *state = out;

	/* Save the buffered partial block in the state data */
	if (rctx->len)
		memcpy(state->data, rctx->data, rctx->data_used);

	eip93_hash_export_sa_state(req, state);

	return 0;
}

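/*
 * The templates below register with the generic crypto API, so users
 * drive them through the standard ahash interface. A minimal sketch of
 * a one-shot digest (not part of this driver; error checking elided,
 * and the source buffer must be linear kernel memory suitable for DMA):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, out, len);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */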
struct eip93_alg_template eip93_alg_md5 = {
	.type = EIP93_ALG_TYPE_HASH,
	.flags = EIP93_HASH_MD5,
	.alg.ahash = {
		.init = eip93_hash_init,
		.update = eip93_hash_update,
		.final = eip93_hash_final,
		.finup = eip93_hash_finup,
		.digest = eip93_hash_digest,
		.export = eip93_hash_export,
		.import = eip93_hash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct eip93_hash_export_state),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-eip93",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ALLOCATES_MEMORY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct eip93_hash_ctx),
				.cra_init = eip93_hash_cra_init,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

struct eip93_alg_template eip93_alg_sha1 = {
	.type = EIP93_ALG_TYPE_HASH,
	.flags = EIP93_HASH_SHA1,
	.alg.ahash = {
		.init = eip93_hash_init,
		.update = eip93_hash_update,
		.final = eip93_hash_final,
		.finup = eip93_hash_finup,
		.digest = eip93_hash_digest,
		.export = eip93_hash_export,
		.import = eip93_hash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct eip93_hash_export_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-eip93",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ALLOCATES_MEMORY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct eip93_hash_ctx),
				.cra_init = eip93_hash_cra_init,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

struct eip93_alg_template eip93_alg_sha224 = {
	.type = EIP93_ALG_TYPE_HASH,
	.flags = EIP93_HASH_SHA224,
	.alg.ahash = {
		.init = eip93_hash_init,
		.update = eip93_hash_update,
		.final = eip93_hash_final,
		.finup = eip93_hash_finup,
		.digest = eip93_hash_digest,
		.export = eip93_hash_export,
		.import = eip93_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct eip93_hash_export_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-eip93",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ALLOCATES_MEMORY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct eip93_hash_ctx),
				.cra_init = eip93_hash_cra_init,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

struct eip93_alg_template eip93_alg_sha256 = {
	.type = EIP93_ALG_TYPE_HASH,
	.flags = EIP93_HASH_SHA256,
	.alg.ahash = {
		.init = eip93_hash_init,
		.update = eip93_hash_update,
		.final = eip93_hash_final,
		.finup = eip93_hash_finup,
		.digest = eip93_hash_digest,
		.export = eip93_hash_export,
		.import = eip93_hash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct eip93_hash_export_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-eip93",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ALLOCATES_MEMORY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct eip93_hash_ctx),
				.cra_init = eip93_hash_cra_init,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

struct eip93_alg_template eip93_alg_hmac_md5 = {
	.type = EIP93_ALG_TYPE_HASH,
	.flags = EIP93_HASH_HMAC | EIP93_HASH_MD5,
	.alg.ahash = {
		.init = eip93_hash_init,
		.update = eip93_hash_update,
		.final = eip93_hash_final,
		.finup = eip93_hash_finup,
		.digest = eip93_hash_digest,
		.setkey = eip93_hash_hmac_setkey,
		.export = eip93_hash_export,
		.import = eip93_hash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct eip93_hash_export_state),
			.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac(md5-eip93)",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ALLOCATES_MEMORY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct eip93_hash_ctx),
				.cra_init = eip93_hash_cra_init,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

struct eip93_alg_template eip93_alg_hmac_sha1 = {
	.type = EIP93_ALG_TYPE_HASH,
	.flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1,
	.alg.ahash = {
		.init = eip93_hash_init,
		.update = eip93_hash_update,
		.final = eip93_hash_final,
		.finup = eip93_hash_finup,
		.digest = eip93_hash_digest,
		.setkey = eip93_hash_hmac_setkey,
		.export = eip93_hash_export,
		.import = eip93_hash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct eip93_hash_export_state),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac(sha1-eip93)",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ALLOCATES_MEMORY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct eip93_hash_ctx),
				.cra_init = eip93_hash_cra_init,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

struct eip93_alg_template eip93_alg_hmac_sha224 = {
	.type = EIP93_ALG_TYPE_HASH,
	.flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224,
	.alg.ahash = {
		.init = eip93_hash_init,
		.update = eip93_hash_update,
		.final = eip93_hash_final,
		.finup = eip93_hash_finup,
		.digest = eip93_hash_digest,
		.setkey = eip93_hash_hmac_setkey,
		.export = eip93_hash_export,
		.import = eip93_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct eip93_hash_export_state),
			.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac(sha224-eip93)",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ALLOCATES_MEMORY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct eip93_hash_ctx),
				.cra_init = eip93_hash_cra_init,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

struct eip93_alg_template eip93_alg_hmac_sha256 = {
	.type = EIP93_ALG_TYPE_HASH,
	.flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256,
	.alg.ahash = {
		.init = eip93_hash_init,
		.update = eip93_hash_update,
		.final = eip93_hash_final,
		.finup = eip93_hash_finup,
		.digest = eip93_hash_digest,
		.setkey = eip93_hash_hmac_setkey,
		.export = eip93_hash_export,
		.import = eip93_hash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct eip93_hash_export_state),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac(sha256-eip93)",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ALLOCATES_MEMORY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct eip93_hash_ctx),
				.cra_init = eip93_hash_cra_init,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
867