xref: /linux/drivers/crypto/stm32/stm32-hash.c (revision 6f7e6393d1ce636bb7ec77a7fe7b77458fddf701)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This file is part of STM32 Crypto driver for Linux.
4  *
5  * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
6  * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
7  */
8 
9 #include <crypto/engine.h>
10 #include <crypto/internal/hash.h>
11 #include <crypto/md5.h>
12 #include <crypto/scatterwalk.h>
13 #include <crypto/sha1.h>
14 #include <crypto/sha2.h>
15 #include <crypto/sha3.h>
16 #include <linux/clk.h>
17 #include <linux/delay.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/dmaengine.h>
20 #include <linux/interrupt.h>
21 #include <linux/iopoll.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/of.h>
25 #include <linux/platform_device.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/reset.h>
28 #include <linux/string.h>
29 
30 #define HASH_CR				0x00
31 #define HASH_DIN			0x04
32 #define HASH_STR			0x08
33 #define HASH_UX500_HREG(x)		(0x0c + ((x) * 0x04))
34 #define HASH_IMR			0x20
35 #define HASH_SR				0x24
36 #define HASH_CSR(x)			(0x0F8 + ((x) * 0x04))
37 #define HASH_HREG(x)			(0x310 + ((x) * 0x04))
38 #define HASH_HWCFGR			0x3F0
39 #define HASH_VER			0x3F4
40 #define HASH_ID				0x3F8
41 
42 /* Control Register */
43 #define HASH_CR_INIT			BIT(2)
44 #define HASH_CR_DMAE			BIT(3)
45 #define HASH_CR_DATATYPE_POS		4
46 #define HASH_CR_MODE			BIT(6)
47 #define HASH_CR_ALGO_POS		7
48 #define HASH_CR_MDMAT			BIT(13)
49 #define HASH_CR_DMAA			BIT(14)
50 #define HASH_CR_LKEY			BIT(16)
51 
52 /* Interrupt */
53 #define HASH_DINIE			BIT(0)
54 #define HASH_DCIE			BIT(1)
55 
56 /* Interrupt Mask */
57 #define HASH_MASK_CALC_COMPLETION	BIT(0)
58 #define HASH_MASK_DATA_INPUT		BIT(1)
59 
60 /* Status Flags */
61 #define HASH_SR_DATA_INPUT_READY	BIT(0)
62 #define HASH_SR_OUTPUT_READY		BIT(1)
63 #define HASH_SR_DMA_ACTIVE		BIT(2)
64 #define HASH_SR_BUSY			BIT(3)
65 
66 /* STR Register */
67 #define HASH_STR_NBLW_MASK		GENMASK(4, 0)
68 #define HASH_STR_DCAL			BIT(8)
69 
70 /* HWCFGR Register */
71 #define HASH_HWCFG_DMA_MASK		GENMASK(3, 0)
72 
73 /* Number of context swap registers */
74 #define HASH_CSR_NB_SHA256_HMAC		54
75 #define HASH_CSR_NB_SHA256		38
76 #define HASH_CSR_NB_SHA512_HMAC		103
77 #define HASH_CSR_NB_SHA512		91
78 #define HASH_CSR_NB_SHA3_HMAC		88
79 #define HASH_CSR_NB_SHA3		72
80 #define HASH_CSR_NB_MAX			HASH_CSR_NB_SHA512_HMAC
81 
82 #define HASH_FLAGS_INIT			BIT(0)
83 #define HASH_FLAGS_OUTPUT_READY		BIT(1)
84 #define HASH_FLAGS_CPU			BIT(2)
85 #define HASH_FLAGS_DMA_ACTIVE		BIT(3)
86 #define HASH_FLAGS_HMAC_INIT		BIT(4)
87 #define HASH_FLAGS_HMAC_FINAL		BIT(5)
88 #define HASH_FLAGS_HMAC_KEY		BIT(6)
89 #define HASH_FLAGS_SHA3_MODE		BIT(7)
90 #define HASH_FLAGS_FINAL		BIT(15)
91 #define HASH_FLAGS_FINUP		BIT(16)
92 #define HASH_FLAGS_ALGO_MASK		GENMASK(20, 17)
93 #define HASH_FLAGS_ALGO_SHIFT		17
94 #define HASH_FLAGS_ERRORS		BIT(21)
95 #define HASH_FLAGS_EMPTY		BIT(22)
96 #define HASH_FLAGS_HMAC			BIT(23)
97 #define HASH_FLAGS_SGS_COPIED		BIT(24)
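
/*
 * Note: bits 20:17 of the software flags hold the hardware algorithm
 * selector taken from enum stm32_hash_algo. Illustrative use only (see
 * stm32_hash_init() and hash_swap_reg()):
 *
 *	state->flags |= HASH_SHA256 << HASH_FLAGS_ALGO_SHIFT;
 *	algo = (state->flags & HASH_FLAGS_ALGO_MASK) >> HASH_FLAGS_ALGO_SHIFT;
 */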
98 
99 #define HASH_OP_UPDATE			1
100 #define HASH_OP_FINAL			2
101 
102 #define HASH_BURST_LEVEL		4
103 
104 enum stm32_hash_data_format {
105 	HASH_DATA_32_BITS		= 0x0,
106 	HASH_DATA_16_BITS		= 0x1,
107 	HASH_DATA_8_BITS		= 0x2,
108 	HASH_DATA_1_BIT			= 0x3
109 };
110 
111 #define HASH_BUFLEN			(SHA3_224_BLOCK_SIZE + 4)
112 #define HASH_MAX_KEY_SIZE		(SHA512_BLOCK_SIZE * 8)
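
/*
 * HASH_BUFLEN is sized for the largest supported block (SHA3-224, 144 bytes)
 * plus one extra 32-bit word, matching state->blocklen which is set to
 * crypto_ahash_blocksize() + sizeof(u32) in stm32_hash_init().
 */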
113 
114 enum stm32_hash_algo {
115 	HASH_SHA1			= 0,
116 	HASH_MD5			= 1,
117 	HASH_SHA224			= 2,
118 	HASH_SHA256			= 3,
119 	HASH_SHA3_224			= 4,
120 	HASH_SHA3_256			= 5,
121 	HASH_SHA3_384			= 6,
122 	HASH_SHA3_512			= 7,
123 	HASH_SHA384			= 12,
124 	HASH_SHA512			= 15,
125 };
126 
127 enum ux500_hash_algo {
128 	HASH_SHA256_UX500		= 0,
129 	HASH_SHA1_UX500			= 1,
130 };
131 
132 #define HASH_AUTOSUSPEND_DELAY		50
133 
134 struct stm32_hash_ctx {
135 	struct stm32_hash_dev	*hdev;
136 	struct crypto_shash	*xtfm;
137 	unsigned long		flags;
138 
139 	u8			key[HASH_MAX_KEY_SIZE];
140 	int			keylen;
141 };
142 
143 struct stm32_hash_state {
144 	u32			flags;
145 
146 	u16			bufcnt;
147 	u16			blocklen;
148 
149 	u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));
150 
151 	/* hash state */
152 	u32			hw_context[3 + HASH_CSR_NB_MAX];
153 };
154 
155 struct stm32_hash_request_ctx {
156 	struct stm32_hash_dev	*hdev;
157 	unsigned long		op;
158 
159 	u8 digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
160 	size_t			digcnt;
161 
162 	struct scatterlist	*sg;
163 	struct scatterlist	sgl[2]; /* scatterlist used to achieve data alignment */
164 	unsigned int		offset;
165 	unsigned int		total;
166 	struct scatterlist	sg_key;
167 
168 	dma_addr_t		dma_addr;
169 	size_t			dma_ct;
170 	int			nents;
171 
172 	u8			data_type;
173 
174 	struct stm32_hash_state state;
175 };
176 
177 struct stm32_hash_algs_info {
178 	struct ahash_engine_alg	*algs_list;
179 	size_t			size;
180 };
181 
182 struct stm32_hash_pdata {
183 	const int				alg_shift;
184 	const struct stm32_hash_algs_info	*algs_info;
185 	size_t					algs_info_size;
186 	bool					has_sr;
187 	bool					has_mdmat;
188 	bool					context_secured;
189 	bool					broken_emptymsg;
190 	bool					ux500;
191 };
192 
193 struct stm32_hash_dev {
194 	struct list_head	list;
195 	struct device		*dev;
196 	struct clk		*clk;
197 	struct reset_control	*rst;
198 	void __iomem		*io_base;
199 	phys_addr_t		phys_base;
200 	u8			xmit_buf[HASH_BUFLEN] __aligned(sizeof(u32));
201 	u32			dma_mode;
202 	bool			polled;
203 
204 	struct ahash_request	*req;
205 	struct crypto_engine	*engine;
206 
207 	unsigned long		flags;
208 
209 	struct dma_chan		*dma_lch;
210 	struct completion	dma_completion;
211 
212 	const struct stm32_hash_pdata	*pdata;
213 };
214 
215 struct stm32_hash_drv {
216 	struct list_head	dev_list;
217 	spinlock_t		lock; /* List protection access */
218 };
219 
220 static struct stm32_hash_drv stm32_hash = {
221 	.dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
222 	.lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
223 };
224 
225 static void stm32_hash_dma_callback(void *param);
226 static int stm32_hash_prepare_request(struct ahash_request *req);
227 static void stm32_hash_unprepare_request(struct ahash_request *req);
228 
229 static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
230 {
231 	return readl_relaxed(hdev->io_base + offset);
232 }
233 
234 static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
235 				    u32 offset, u32 value)
236 {
237 	writel_relaxed(value, hdev->io_base + offset);
238 }
239 
240 /**
241  * stm32_hash_wait_busy - wait until the hash processor is available. It returns an
242  * error if the hash core is processing a block of data for more than 10 ms.
243  * @hdev: the stm32_hash_dev device.
244  */
245 static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
246 {
247 	u32 status;
248 
249 	/* The Ux500 lacks the special status register, so we poll the DCAL bit instead */
250 	if (!hdev->pdata->has_sr)
251 		return readl_relaxed_poll_timeout(hdev->io_base + HASH_STR, status,
252 						  !(status & HASH_STR_DCAL), 10, 10000);
253 
254 	return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
255 				   !(status & HASH_SR_BUSY), 10, 10000);
256 }
257 
258 /**
259  * stm32_hash_set_nblw - set the number of valid bits in the last word.
260  * @hdev: the stm32_hash_dev device.
261  * @length: the data length in bytes; only length % 4 bytes of the last word are valid.
262  */
263 static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
264 {
265 	u32 reg;
266 
267 	reg = stm32_hash_read(hdev, HASH_STR);
268 	reg &= ~(HASH_STR_NBLW_MASK);
269 	reg |= (8U * ((length) % 4U));
270 	stm32_hash_write(hdev, HASH_STR, reg);
271 }
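
/*
 * Worked example (illustrative only): for a final chunk of 7 bytes,
 * 7 % 4 = 3 bytes remain in the last 32-bit word, so NBLW is programmed
 * to 8 * 3 = 24 valid bits. A length that is a multiple of 4 leaves NBLW
 * at 0, i.e. all 32 bits of the last word are valid.
 */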
272 
273 static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
274 {
275 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
276 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
277 	u32 reg;
278 	int keylen = ctx->keylen;
279 	void *key = ctx->key;
280 
281 	if (keylen) {
282 		stm32_hash_set_nblw(hdev, keylen);
283 
284 		while (keylen > 0) {
285 			stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
286 			keylen -= 4;
287 			key += 4;
288 		}
289 
290 		reg = stm32_hash_read(hdev, HASH_STR);
291 		reg |= HASH_STR_DCAL;
292 		stm32_hash_write(hdev, HASH_STR, reg);
293 
294 		return -EINPROGRESS;
295 	}
296 
297 	return 0;
298 }
299 
300 /**
301  * stm32_hash_write_ctrl - Initialize the hash processor, but only if
302  * HASH_FLAGS_INIT is not set yet.
303  * @hdev: the stm32_hash_dev device
304  */
305 static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
306 {
307 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
308 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
309 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
310 	struct stm32_hash_state *state = &rctx->state;
311 	u32 alg = (state->flags & HASH_FLAGS_ALGO_MASK) >> HASH_FLAGS_ALGO_SHIFT;
312 
313 	u32 reg = HASH_CR_INIT;
314 
315 	if (!(hdev->flags & HASH_FLAGS_INIT)) {
316 		if (hdev->pdata->ux500) {
317 			reg |= ((alg & BIT(0)) << HASH_CR_ALGO_POS);
318 		} else {
319 			if (hdev->pdata->alg_shift == HASH_CR_ALGO_POS)
320 				reg |= ((alg & BIT(1)) << 17) |
321 				       ((alg & BIT(0)) << HASH_CR_ALGO_POS);
322 			else
323 				reg |= alg << hdev->pdata->alg_shift;
324 		}
325 
326 		reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);
327 
328 		if (state->flags & HASH_FLAGS_HMAC) {
329 			hdev->flags |= HASH_FLAGS_HMAC;
330 			reg |= HASH_CR_MODE;
331 			if (ctx->keylen > crypto_ahash_blocksize(tfm))
332 				reg |= HASH_CR_LKEY;
333 		}
334 
335 		if (!hdev->polled)
336 			stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);
337 
338 		stm32_hash_write(hdev, HASH_CR, reg);
339 
340 		hdev->flags |= HASH_FLAGS_INIT;
341 
342 		/*
343 		 * After the first block + 1 words are filled up,
344 		 * only one more block is needed to start a partial computation.
345 		 */
346 		rctx->state.blocklen -= sizeof(u32);
347 
348 		dev_dbg(hdev->dev, "Write Control %x\n", reg);
349 	}
350 }
351 
352 static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
353 {
354 	struct stm32_hash_state *state = &rctx->state;
355 	size_t count;
356 
357 	while ((state->bufcnt < state->blocklen) && rctx->total) {
358 		count = min(rctx->sg->length - rctx->offset, rctx->total);
359 		count = min_t(size_t, count, state->blocklen - state->bufcnt);
360 
361 		if (count <= 0) {
362 			if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
363 				rctx->sg = sg_next(rctx->sg);
364 				continue;
365 			} else {
366 				break;
367 			}
368 		}
369 
370 		scatterwalk_map_and_copy(state->buffer + state->bufcnt,
371 					 rctx->sg, rctx->offset, count, 0);
372 
373 		state->bufcnt += count;
374 		rctx->offset += count;
375 		rctx->total -= count;
376 
377 		if (rctx->offset == rctx->sg->length) {
378 			rctx->sg = sg_next(rctx->sg);
379 			if (rctx->sg)
380 				rctx->offset = 0;
381 			else
382 				rctx->total = 0;
383 		}
384 	}
385 }
386 
387 static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
388 			       const u8 *buf, size_t length, int final)
389 {
390 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
391 	struct stm32_hash_state *state = &rctx->state;
392 	unsigned int count, len32;
393 	const u32 *buffer = (const u32 *)buf;
394 	u32 reg;
395 
396 	if (final) {
397 		hdev->flags |= HASH_FLAGS_FINAL;
398 
399 		/* Do not process empty messages if hw is buggy. */
400 		if (!(hdev->flags & HASH_FLAGS_INIT) && !length &&
401 		    hdev->pdata->broken_emptymsg) {
402 			state->flags |= HASH_FLAGS_EMPTY;
403 			return 0;
404 		}
405 	}
406 
407 	len32 = DIV_ROUND_UP(length, sizeof(u32));
408 
409 	dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
410 		__func__, length, final, len32);
411 
412 	hdev->flags |= HASH_FLAGS_CPU;
413 
414 	stm32_hash_write_ctrl(hdev);
415 
416 	if (stm32_hash_wait_busy(hdev))
417 		return -ETIMEDOUT;
418 
419 	if ((hdev->flags & HASH_FLAGS_HMAC) &&
420 	    (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
421 		hdev->flags |= HASH_FLAGS_HMAC_KEY;
422 		stm32_hash_write_key(hdev);
423 		if (stm32_hash_wait_busy(hdev))
424 			return -ETIMEDOUT;
425 	}
426 
427 	for (count = 0; count < len32; count++)
428 		stm32_hash_write(hdev, HASH_DIN, buffer[count]);
429 
430 	if (final) {
431 		if (stm32_hash_wait_busy(hdev))
432 			return -ETIMEDOUT;
433 
434 		stm32_hash_set_nblw(hdev, length);
435 		reg = stm32_hash_read(hdev, HASH_STR);
436 		reg |= HASH_STR_DCAL;
437 		stm32_hash_write(hdev, HASH_STR, reg);
438 		if (hdev->flags & HASH_FLAGS_HMAC) {
439 			if (stm32_hash_wait_busy(hdev))
440 				return -ETIMEDOUT;
441 			stm32_hash_write_key(hdev);
442 		}
443 		return -EINPROGRESS;
444 	}
445 
446 	return 0;
447 }
448 
449 static int hash_swap_reg(struct stm32_hash_request_ctx *rctx)
450 {
451 	struct stm32_hash_state *state = &rctx->state;
452 
453 	switch ((state->flags & HASH_FLAGS_ALGO_MASK) >>
454 		HASH_FLAGS_ALGO_SHIFT) {
455 	case HASH_MD5:
456 	case HASH_SHA1:
457 	case HASH_SHA224:
458 	case HASH_SHA256:
459 		if (state->flags & HASH_FLAGS_HMAC)
460 			return HASH_CSR_NB_SHA256_HMAC;
461 		else
462 			return HASH_CSR_NB_SHA256;
463 		break;
464 
465 	case HASH_SHA384:
466 	case HASH_SHA512:
467 		if (state->flags & HASH_FLAGS_HMAC)
468 			return HASH_CSR_NB_SHA512_HMAC;
469 		else
470 			return HASH_CSR_NB_SHA512;
471 		break;
472 
473 	case HASH_SHA3_224:
474 	case HASH_SHA3_256:
475 	case HASH_SHA3_384:
476 	case HASH_SHA3_512:
477 		if (state->flags & HASH_FLAGS_HMAC)
478 			return HASH_CSR_NB_SHA3_HMAC;
479 		else
480 			return HASH_CSR_NB_SHA3;
481 		break;
482 
483 	default:
484 		return -EINVAL;
485 	}
486 }
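
/*
 * The value returned above is the number of HASH_CSR(x) registers to
 * save/restore around a context switch. Together with HASH_IMR, HASH_STR
 * and HASH_CR it matches the layout of state->hw_context[], dimensioned
 * as 3 + HASH_CSR_NB_MAX words (see stm32_hash_one_request() and
 * stm32_hash_unprepare_request()).
 */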
487 
488 static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
489 {
490 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
491 	struct stm32_hash_state *state = &rctx->state;
492 	int bufcnt, err = 0, final;
493 
494 	dev_dbg(hdev->dev, "%s flags %x\n", __func__, state->flags);
495 
496 	final = state->flags & HASH_FLAGS_FINAL;
497 
498 	while ((rctx->total >= state->blocklen) ||
499 	       (state->bufcnt + rctx->total >= state->blocklen)) {
500 		stm32_hash_append_sg(rctx);
501 		bufcnt = state->bufcnt;
502 		state->bufcnt = 0;
503 		err = stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 0);
504 		if (err)
505 			return err;
506 	}
507 
508 	stm32_hash_append_sg(rctx);
509 
510 	if (final) {
511 		bufcnt = state->bufcnt;
512 		state->bufcnt = 0;
513 		return stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 1);
514 	}
515 
516 	return err;
517 }
518 
519 static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
520 			       struct scatterlist *sg, int length, int mdmat)
521 {
522 	struct dma_async_tx_descriptor *in_desc;
523 	dma_cookie_t cookie;
524 	u32 reg;
525 	int err;
526 
527 	dev_dbg(hdev->dev, "%s mdmat: %x length: %d\n", __func__, mdmat, length);
528 
529 	/* Do not use DMA if there is no data to send. */
530 	if (length <= 0)
531 		return 0;
532 
533 	in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
534 					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
535 					  DMA_CTRL_ACK);
536 	if (!in_desc) {
537 		dev_err(hdev->dev, "dmaengine_prep_slave error\n");
538 		return -ENOMEM;
539 	}
540 
541 	reinit_completion(&hdev->dma_completion);
542 	in_desc->callback = stm32_hash_dma_callback;
543 	in_desc->callback_param = hdev;
544 
545 	hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
546 
547 	reg = stm32_hash_read(hdev, HASH_CR);
548 
549 	if (hdev->pdata->has_mdmat) {
550 		if (mdmat)
551 			reg |= HASH_CR_MDMAT;
552 		else
553 			reg &= ~HASH_CR_MDMAT;
554 	}
555 	reg |= HASH_CR_DMAE;
556 
557 	stm32_hash_write(hdev, HASH_CR, reg);
558 
559 
560 	cookie = dmaengine_submit(in_desc);
561 	err = dma_submit_error(cookie);
562 	if (err)
563 		return -ENOMEM;
564 
565 	dma_async_issue_pending(hdev->dma_lch);
566 
567 	if (!wait_for_completion_timeout(&hdev->dma_completion,
568 					 msecs_to_jiffies(100)))
569 		err = -ETIMEDOUT;
570 
571 	if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
572 				     NULL, NULL) != DMA_COMPLETE)
573 		err = -ETIMEDOUT;
574 
575 	if (err) {
576 		dev_err(hdev->dev, "DMA Error %i\n", err);
577 		dmaengine_terminate_all(hdev->dma_lch);
578 		return err;
579 	}
580 
581 	return -EINPROGRESS;
582 }
583 
584 static void stm32_hash_dma_callback(void *param)
585 {
586 	struct stm32_hash_dev *hdev = param;
587 
588 	complete(&hdev->dma_completion);
589 }
590 
591 static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
592 {
593 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
594 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
595 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
596 	int err;
597 
598 	if (ctx->keylen < rctx->state.blocklen || hdev->dma_mode > 0) {
599 		err = stm32_hash_write_key(hdev);
600 		if (stm32_hash_wait_busy(hdev))
601 			return -ETIMEDOUT;
602 	} else {
603 		if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
604 			sg_init_one(&rctx->sg_key, ctx->key,
605 				    ALIGN(ctx->keylen, sizeof(u32)));
606 
607 		rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
608 					  DMA_TO_DEVICE);
609 		if (rctx->dma_ct == 0) {
610 			dev_err(hdev->dev, "dma_map_sg error\n");
611 			return -ENOMEM;
612 		}
613 
614 		err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);
615 
616 		dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
617 	}
618 
619 	return err;
620 }
621 
622 static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
623 {
624 	struct dma_slave_config dma_conf;
625 	struct dma_chan *chan;
626 	int err;
627 
628 	memset(&dma_conf, 0, sizeof(dma_conf));
629 
630 	dma_conf.direction = DMA_MEM_TO_DEV;
631 	dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
632 	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
633 	dma_conf.src_maxburst = HASH_BURST_LEVEL;
634 	dma_conf.dst_maxburst = HASH_BURST_LEVEL;
635 	dma_conf.device_fc = false;
636 
637 	chan = dma_request_chan(hdev->dev, "in");
638 	if (IS_ERR(chan))
639 		return PTR_ERR(chan);
640 
641 	hdev->dma_lch = chan;
642 
643 	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
644 	if (err) {
645 		dma_release_channel(hdev->dma_lch);
646 		hdev->dma_lch = NULL;
647 		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
648 		return err;
649 	}
650 
651 	init_completion(&hdev->dma_completion);
652 
653 	return 0;
654 }
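
/*
 * Note: dma_request_chan(hdev->dev, "in") resolves the channel that the
 * platform describes under the name "in" (e.g. via "dma-names" in the
 * device tree). When no channel (or no MDMAT support) is available,
 * stm32_hash_init() selects the CPU path instead.
 */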
655 
656 static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
657 {
658 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
659 	u32 *buffer = (void *)rctx->state.buffer;
660 	struct scatterlist sg[1], *tsg;
661 	int err = 0, reg, ncp = 0;
662 	unsigned int i, len = 0, bufcnt = 0;
663 	bool final = hdev->flags & HASH_FLAGS_FINAL;
664 	bool is_last = false;
665 	u32 last_word;
666 
667 	dev_dbg(hdev->dev, "%s total: %d bufcnt: %d final: %d\n",
668 		__func__, rctx->total, rctx->state.bufcnt, final);
669 
670 	if (rctx->nents < 0)
671 		return -EINVAL;
672 
673 	stm32_hash_write_ctrl(hdev);
674 
675 	if (hdev->flags & HASH_FLAGS_HMAC && (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
676 		hdev->flags |= HASH_FLAGS_HMAC_KEY;
677 		err = stm32_hash_hmac_dma_send(hdev);
678 		if (err != -EINPROGRESS)
679 			return err;
680 	}
681 
682 	for_each_sg(rctx->sg, tsg, rctx->nents, i) {
683 		sg[0] = *tsg;
684 		len = sg->length;
685 
686 		if (sg_is_last(sg) || (bufcnt + sg[0].length) >= rctx->total) {
687 			if (!final) {
688 				/* Always manually put the last word of a non-final transfer. */
689 				len -= sizeof(u32);
690 				sg_pcopy_to_buffer(rctx->sg, rctx->nents, &last_word, 4, len);
691 				sg->length -= sizeof(u32);
692 			} else {
693 				/*
694 				 * In Multiple DMA mode, DMA must be aborted before the final
695 				 * transfer.
696 				 */
697 				sg->length = rctx->total - bufcnt;
698 				if (hdev->dma_mode > 0) {
699 					len = (ALIGN(sg->length, 16) - 16);
700 
701 					ncp = sg_pcopy_to_buffer(rctx->sg, rctx->nents,
702 								 rctx->state.buffer,
703 								 sg->length - len,
704 								 rctx->total - sg->length + len);
705 
706 					if (!len)
707 						break;
708 
709 					sg->length = len;
710 				} else {
711 					is_last = true;
712 					if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
713 						len = sg->length;
714 						sg->length = ALIGN(sg->length,
715 								   sizeof(u32));
716 					}
717 				}
718 			}
719 		}
720 
721 		rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
722 					  DMA_TO_DEVICE);
723 		if (rctx->dma_ct == 0) {
724 			dev_err(hdev->dev, "dma_map_sg error\n");
725 			return -ENOMEM;
726 		}
727 
728 		err = stm32_hash_xmit_dma(hdev, sg, len, !is_last);
729 
730 		/* The last word of a non-final transfer is sent manually. */
731 		if (!final) {
732 			stm32_hash_write(hdev, HASH_DIN, last_word);
733 			len += sizeof(u32);
734 		}
735 
736 		rctx->total -= len;
737 
738 		bufcnt += sg[0].length;
739 		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
740 
741 		if (err == -ENOMEM || err == -ETIMEDOUT)
742 			return err;
743 		if (is_last)
744 			break;
745 	}
746 
747 	/*
748 	 * When the second last block transfer of 4 words is performed by the DMA,
749 	 * the software must set the DMA Abort bit (DMAA) to 1 before completing the
750 	 * last transfer of 4 words or less.
751 	 */
752 	if (final) {
753 		if (hdev->dma_mode > 0) {
754 			if (stm32_hash_wait_busy(hdev))
755 				return -ETIMEDOUT;
756 			reg = stm32_hash_read(hdev, HASH_CR);
757 			reg &= ~HASH_CR_DMAE;
758 			reg |= HASH_CR_DMAA;
759 			stm32_hash_write(hdev, HASH_CR, reg);
760 
761 			if (ncp) {
762 				memset(buffer + ncp, 0, 4 - DIV_ROUND_UP(ncp, sizeof(u32)));
763 				writesl(hdev->io_base + HASH_DIN, buffer,
764 					DIV_ROUND_UP(ncp, sizeof(u32)));
765 			}
766 
767 			stm32_hash_set_nblw(hdev, ncp);
768 			reg = stm32_hash_read(hdev, HASH_STR);
769 			reg |= HASH_STR_DCAL;
770 			stm32_hash_write(hdev, HASH_STR, reg);
771 			err = -EINPROGRESS;
772 		}
773 
774 		/*
775 		 * The hash processor needs the key to be loaded a second time in order
776 		 * to process the HMAC.
777 		 */
778 		if (hdev->flags & HASH_FLAGS_HMAC) {
779 			if (stm32_hash_wait_busy(hdev))
780 				return -ETIMEDOUT;
781 			err = stm32_hash_hmac_dma_send(hdev);
782 		}
783 
784 		return err;
785 	}
786 
787 	if (err != -EINPROGRESS)
788 		return err;
789 
790 	return 0;
791 }
792 
793 static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
794 {
795 	struct stm32_hash_dev *hdev = NULL, *tmp;
796 
797 	spin_lock_bh(&stm32_hash.lock);
798 	if (!ctx->hdev) {
799 		list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
800 			hdev = tmp;
801 			break;
802 		}
803 		ctx->hdev = hdev;
804 	} else {
805 		hdev = ctx->hdev;
806 	}
807 
808 	spin_unlock_bh(&stm32_hash.lock);
809 
810 	return hdev;
811 }
812 
813 static int stm32_hash_init(struct ahash_request *req)
814 {
815 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
816 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
817 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
818 	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
819 	struct stm32_hash_state *state = &rctx->state;
820 	bool sha3_mode = ctx->flags & HASH_FLAGS_SHA3_MODE;
821 
822 	rctx->hdev = hdev;
823 	state->flags = 0;
824 
825 	if (!(hdev->dma_lch &&  hdev->pdata->has_mdmat))
826 		state->flags |= HASH_FLAGS_CPU;
827 
828 	if (sha3_mode)
829 		state->flags |= HASH_FLAGS_SHA3_MODE;
830 
831 	rctx->digcnt = crypto_ahash_digestsize(tfm);
832 	switch (rctx->digcnt) {
833 	case MD5_DIGEST_SIZE:
834 		state->flags |= HASH_MD5 << HASH_FLAGS_ALGO_SHIFT;
835 		break;
836 	case SHA1_DIGEST_SIZE:
837 		if (hdev->pdata->ux500)
838 			state->flags |= HASH_SHA1_UX500 << HASH_FLAGS_ALGO_SHIFT;
839 		else
840 			state->flags |= HASH_SHA1 << HASH_FLAGS_ALGO_SHIFT;
841 		break;
842 	case SHA224_DIGEST_SIZE:
843 		if (sha3_mode)
844 			state->flags |= HASH_SHA3_224 << HASH_FLAGS_ALGO_SHIFT;
845 		else
846 			state->flags |= HASH_SHA224 << HASH_FLAGS_ALGO_SHIFT;
847 		break;
848 	case SHA256_DIGEST_SIZE:
849 		if (sha3_mode) {
850 			state->flags |= HASH_SHA3_256 << HASH_FLAGS_ALGO_SHIFT;
851 		} else {
852 			if (hdev->pdata->ux500)
853 				state->flags |= HASH_SHA256_UX500 << HASH_FLAGS_ALGO_SHIFT;
854 			else
855 				state->flags |= HASH_SHA256 << HASH_FLAGS_ALGO_SHIFT;
856 		}
857 		break;
858 	case SHA384_DIGEST_SIZE:
859 		if (sha3_mode)
860 			state->flags |= HASH_SHA3_384 << HASH_FLAGS_ALGO_SHIFT;
861 		else
862 			state->flags |= HASH_SHA384 << HASH_FLAGS_ALGO_SHIFT;
863 		break;
864 	case SHA512_DIGEST_SIZE:
865 		if (sha3_mode)
866 			state->flags |= HASH_SHA3_512 << HASH_FLAGS_ALGO_SHIFT;
867 		else
868 			state->flags |= HASH_SHA512 << HASH_FLAGS_ALGO_SHIFT;
869 		break;
870 	default:
871 		return -EINVAL;
872 	}
873 
874 	rctx->state.bufcnt = 0;
875 	rctx->state.blocklen = crypto_ahash_blocksize(tfm) + sizeof(u32);
876 	if (rctx->state.blocklen > HASH_BUFLEN) {
877 		dev_err(hdev->dev, "Error, block too large");
878 		return -EINVAL;
879 	}
880 	rctx->nents = 0;
881 	rctx->total = 0;
882 	rctx->offset = 0;
883 	rctx->data_type = HASH_DATA_8_BITS;
884 
885 	if (ctx->flags & HASH_FLAGS_HMAC)
886 		state->flags |= HASH_FLAGS_HMAC;
887 
888 	dev_dbg(hdev->dev, "%s Flags %x\n", __func__, state->flags);
889 
890 	return 0;
891 }
892 
893 static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
894 {
895 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
896 	struct stm32_hash_state *state = &rctx->state;
897 
898 	dev_dbg(hdev->dev, "update_req: total: %u, digcnt: %zd, final: 0",
899 		rctx->total, rctx->digcnt);
900 
901 	if (!(state->flags & HASH_FLAGS_CPU))
902 		return stm32_hash_dma_send(hdev);
903 
904 	return stm32_hash_update_cpu(hdev);
905 }
906 
907 static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
908 {
909 	struct ahash_request *req = hdev->req;
910 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
911 	struct stm32_hash_state *state = &rctx->state;
912 	int buflen = state->bufcnt;
913 
914 	if (!(state->flags & HASH_FLAGS_CPU)) {
915 		hdev->flags |= HASH_FLAGS_FINAL;
916 		return stm32_hash_dma_send(hdev);
917 	}
918 
919 	if (state->flags & HASH_FLAGS_FINUP)
920 		return stm32_hash_update_req(hdev);
921 
922 	state->bufcnt = 0;
923 
924 	return stm32_hash_xmit_cpu(hdev, state->buffer, buflen, 1);
925 }
926 
927 static void stm32_hash_emptymsg_fallback(struct ahash_request *req)
928 {
929 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
930 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(ahash);
931 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
932 	struct stm32_hash_dev *hdev = rctx->hdev;
933 	int ret;
934 
935 	dev_dbg(hdev->dev, "use fallback message size 0 key size %d\n",
936 		ctx->keylen);
937 
938 	if (!ctx->xtfm) {
939 		dev_err(hdev->dev, "no fallback engine\n");
940 		return;
941 	}
942 
943 	if (ctx->keylen) {
944 		ret = crypto_shash_setkey(ctx->xtfm, ctx->key, ctx->keylen);
945 		if (ret) {
946 			dev_err(hdev->dev, "failed to set key ret=%d\n", ret);
947 			return;
948 		}
949 	}
950 
951 	ret = crypto_shash_tfm_digest(ctx->xtfm, NULL, 0, rctx->digest);
952 	if (ret)
953 		dev_err(hdev->dev, "shash digest error\n");
954 }
955 
956 static void stm32_hash_copy_hash(struct ahash_request *req)
957 {
958 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
959 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
960 	struct stm32_hash_state *state = &rctx->state;
961 	struct stm32_hash_dev *hdev = rctx->hdev;
962 	__be32 *hash = (void *)rctx->digest;
963 	unsigned int i, hashsize;
964 
965 	if (hdev->pdata->broken_emptymsg && (state->flags & HASH_FLAGS_EMPTY))
966 		return stm32_hash_emptymsg_fallback(req);
967 
968 	hashsize = crypto_ahash_digestsize(tfm);
969 
970 	for (i = 0; i < hashsize / sizeof(u32); i++) {
971 		if (hdev->pdata->ux500)
972 			hash[i] = cpu_to_be32(stm32_hash_read(hdev,
973 					      HASH_UX500_HREG(i)));
974 		else
975 			hash[i] = cpu_to_be32(stm32_hash_read(hdev,
976 					      HASH_HREG(i)));
977 	}
978 }
979 
980 static int stm32_hash_finish(struct ahash_request *req)
981 {
982 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
983 	u32 reg;
984 
985 	reg = stm32_hash_read(rctx->hdev, HASH_SR);
986 	reg &= ~HASH_SR_OUTPUT_READY;
987 	stm32_hash_write(rctx->hdev, HASH_SR, reg);
988 
989 	if (!req->result)
990 		return -EINVAL;
991 
992 	memcpy(req->result, rctx->digest, rctx->digcnt);
993 
994 	return 0;
995 }
996 
997 static void stm32_hash_finish_req(struct ahash_request *req, int err)
998 {
999 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1000 	struct stm32_hash_state *state = &rctx->state;
1001 	struct stm32_hash_dev *hdev = rctx->hdev;
1002 
1003 	if (hdev->flags & HASH_FLAGS_DMA_ACTIVE)
1004 		state->flags |= HASH_FLAGS_DMA_ACTIVE;
1005 	else
1006 		state->flags &= ~HASH_FLAGS_DMA_ACTIVE;
1007 
1008 	if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
1009 		stm32_hash_copy_hash(req);
1010 		err = stm32_hash_finish(req);
1011 	}
1012 
1013 	/* A finalized request must be unprepared here. */
1014 	stm32_hash_unprepare_request(req);
1015 
1016 	crypto_finalize_hash_request(hdev->engine, req, err);
1017 }
1018 
1019 static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
1020 				   struct ahash_request *req)
1021 {
1022 	return crypto_transfer_hash_request_to_engine(hdev->engine, req);
1023 }
1024 
1025 static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
1026 {
1027 	struct ahash_request *req = container_of(areq, struct ahash_request,
1028 						 base);
1029 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
1030 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1031 	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1032 	struct stm32_hash_state *state = &rctx->state;
1033 	int swap_reg;
1034 	int err = 0;
1035 
1036 	if (!hdev)
1037 		return -ENODEV;
1038 
1039 	dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
1040 		rctx->op, req->nbytes);
1041 
1042 	pm_runtime_get_sync(hdev->dev);
1043 
1044 	err = stm32_hash_prepare_request(req);
1045 	if (err)
1046 		return err;
1047 
1048 	hdev->req = req;
1049 	hdev->flags = 0;
1050 	swap_reg = hash_swap_reg(rctx);
1051 
1052 	if (state->flags & HASH_FLAGS_INIT) {
1053 		u32 *preg = rctx->state.hw_context;
1054 		u32 reg;
1055 		int i;
1056 
1057 		if (!hdev->pdata->ux500)
1058 			stm32_hash_write(hdev, HASH_IMR, *preg++);
1059 		stm32_hash_write(hdev, HASH_STR, *preg++);
1060 		stm32_hash_write(hdev, HASH_CR, *preg);
1061 		reg = *preg++ | HASH_CR_INIT;
1062 		stm32_hash_write(hdev, HASH_CR, reg);
1063 
1064 		for (i = 0; i < swap_reg; i++)
1065 			stm32_hash_write(hdev, HASH_CSR(i), *preg++);
1066 
1067 		hdev->flags |= HASH_FLAGS_INIT;
1068 
1069 		if (state->flags & HASH_FLAGS_HMAC)
1070 			hdev->flags |= HASH_FLAGS_HMAC |
1071 				       HASH_FLAGS_HMAC_KEY;
1072 
1073 		if (state->flags & HASH_FLAGS_CPU)
1074 			hdev->flags |= HASH_FLAGS_CPU;
1075 
1076 		if (state->flags & HASH_FLAGS_DMA_ACTIVE)
1077 			hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
1078 	}
1079 
1080 	if (rctx->op == HASH_OP_UPDATE)
1081 		err = stm32_hash_update_req(hdev);
1082 	else if (rctx->op == HASH_OP_FINAL)
1083 		err = stm32_hash_final_req(hdev);
1084 
1085 	/* If we have an IRQ, wait for that, else poll for completion */
1086 	if (err == -EINPROGRESS && hdev->polled) {
1087 		if (stm32_hash_wait_busy(hdev))
1088 			err = -ETIMEDOUT;
1089 		else {
1090 			hdev->flags |= HASH_FLAGS_OUTPUT_READY;
1091 			err = 0;
1092 		}
1093 	}
1094 
1095 	if (err != -EINPROGRESS)
1096 		/* The done task will not finish it, so do it here. */
1097 		stm32_hash_finish_req(req, err);
1098 
1099 	return 0;
1100 }
1101 
1102 static int stm32_hash_copy_sgs(struct stm32_hash_request_ctx *rctx,
1103 			       struct scatterlist *sg, int bs,
1104 			       unsigned int new_len)
1105 {
1106 	struct stm32_hash_state *state = &rctx->state;
1107 	int pages;
1108 	void *buf;
1109 
1110 	pages = get_order(new_len);
1111 
1112 	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
1113 	if (!buf) {
1114 		pr_err("Couldn't allocate pages for unaligned cases.\n");
1115 		return -ENOMEM;
1116 	}
1117 
1118 	memcpy(buf, rctx->hdev->xmit_buf, state->bufcnt);
1119 
1120 	scatterwalk_map_and_copy(buf + state->bufcnt, sg, rctx->offset,
1121 				 min(new_len, rctx->total) - state->bufcnt, 0);
1122 	sg_init_table(rctx->sgl, 1);
1123 	sg_set_buf(rctx->sgl, buf, new_len);
1124 	rctx->sg = rctx->sgl;
1125 	state->flags |= HASH_FLAGS_SGS_COPIED;
1126 	rctx->nents = 1;
1127 	rctx->offset += new_len - state->bufcnt;
1128 	state->bufcnt = 0;
1129 	rctx->total = new_len;
1130 
1131 	return 0;
1132 }
1133 
1134 static int stm32_hash_align_sgs(struct scatterlist *sg,
1135 				int nbytes, int bs, bool init, bool final,
1136 				struct stm32_hash_request_ctx *rctx)
1137 {
1138 	struct stm32_hash_state *state = &rctx->state;
1139 	struct stm32_hash_dev *hdev = rctx->hdev;
1140 	struct scatterlist *sg_tmp = sg;
1141 	int offset = rctx->offset;
1142 	int new_len;
1143 	int n = 0;
1144 	int bufcnt = state->bufcnt;
1145 	bool secure_ctx = hdev->pdata->context_secured;
1146 	bool aligned = true;
1147 
1148 	if (!sg || !sg->length || !nbytes) {
1149 		if (bufcnt) {
1150 			bufcnt = DIV_ROUND_UP(bufcnt, bs) * bs;
1151 			sg_init_table(rctx->sgl, 1);
1152 			sg_set_buf(rctx->sgl, rctx->hdev->xmit_buf, bufcnt);
1153 			rctx->sg = rctx->sgl;
1154 			rctx->nents = 1;
1155 		}
1156 
1157 		return 0;
1158 	}
1159 
1160 	new_len = nbytes;
1161 
1162 	if (offset)
1163 		aligned = false;
1164 
1165 	if (final) {
1166 		new_len = DIV_ROUND_UP(new_len, bs) * bs;
1167 	} else {
1168 		new_len = (new_len - 1) / bs * bs; /* keep n - 1 full blocks for this transfer */
1169 
1170 		/*
1171 		 * In some versions of the HASH IP, a context save can only be done when
1172 		 * the FIFO is ready to accept a new block. This implies sending n blocks
1173 		 * plus one 32-bit word in the first DMA transfer.
1174 		 */
1175 		if (init && secure_ctx) {
1176 			new_len += sizeof(u32);
1177 			if (unlikely(new_len > nbytes))
1178 				new_len -= bs;
1179 		}
1180 	}
1181 
1182 	if (!new_len)
1183 		return 0;
1184 
1185 	if (nbytes != new_len)
1186 		aligned = false;
1187 
1188 	while (nbytes > 0 && sg_tmp) {
1189 		n++;
1190 
1191 		if (bufcnt) {
1192 			if (!IS_ALIGNED(bufcnt, bs)) {
1193 				aligned = false;
1194 				break;
1195 			}
1196 			nbytes -= bufcnt;
1197 			bufcnt = 0;
1198 			if (!nbytes)
1199 				aligned = false;
1200 
1201 			continue;
1202 		}
1203 
1204 		if (offset < sg_tmp->length) {
1205 			if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
1206 				aligned = false;
1207 				break;
1208 			}
1209 
1210 			if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
1211 				aligned = false;
1212 				break;
1213 			}
1214 		}
1215 
1216 		if (offset) {
1217 			offset -= sg_tmp->length;
1218 			if (offset < 0) {
1219 				nbytes += offset;
1220 				offset = 0;
1221 			}
1222 		} else {
1223 			nbytes -= sg_tmp->length;
1224 		}
1225 
1226 		sg_tmp = sg_next(sg_tmp);
1227 
1228 		if (nbytes < 0) {
1229 			aligned = false;
1230 			break;
1231 		}
1232 	}
1233 
1234 	if (!aligned)
1235 		return stm32_hash_copy_sgs(rctx, sg, bs, new_len);
1236 
1237 	rctx->total = new_len;
1238 	rctx->offset += new_len;
1239 	rctx->nents = n;
1240 	if (state->bufcnt) {
1241 		sg_init_table(rctx->sgl, 2);
1242 		sg_set_buf(rctx->sgl, rctx->hdev->xmit_buf, state->bufcnt);
1243 		sg_chain(rctx->sgl, 2, sg);
1244 		rctx->sg = rctx->sgl;
1245 	} else {
1246 		rctx->sg = sg;
1247 	}
1248 
1249 	return 0;
1250 }
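
/*
 * Descriptive note on the alignment strategy above: the request scatterlist
 * is used directly only when every chunk is 32-bit aligned and block-size
 * aligned; otherwise stm32_hash_copy_sgs() linearizes the data into freshly
 * allocated pages behind a single-entry scatterlist. Previously buffered
 * bytes are prepended either by chaining rctx->sgl or as part of that copy.
 */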
1251 
1252 static int stm32_hash_prepare_request(struct ahash_request *req)
1253 {
1254 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1255 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1256 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1257 	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1258 	struct stm32_hash_state *state = &rctx->state;
1259 	unsigned int nbytes;
1260 	int ret, hash_later, bs;
1261 	bool update = rctx->op & HASH_OP_UPDATE;
1262 	bool init = !(state->flags & HASH_FLAGS_INIT);
1263 	bool finup = state->flags & HASH_FLAGS_FINUP;
1264 	bool final = state->flags & HASH_FLAGS_FINAL;
1265 
1266 	if (!hdev->dma_lch || state->flags & HASH_FLAGS_CPU)
1267 		return 0;
1268 
1269 	bs = crypto_ahash_blocksize(tfm);
1270 
1271 	nbytes = state->bufcnt;
1272 
1273 	/*
1274 	 * In case of an update request, nbytes must correspond to the buffered
1275 	 * content plus the request data, minus the part of the request that has
1276 	 * already been copied into the buffer (rctx->offset).
1277 	 */
1278 	if (update || finup)
1279 		nbytes += req->nbytes - rctx->offset;
1280 
1281 	dev_dbg(hdev->dev,
1282 		"%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%d\n",
1283 		__func__, nbytes, bs, rctx->total, rctx->offset, state->bufcnt);
1284 
1285 	if (!nbytes)
1286 		return 0;
1287 
1288 	rctx->total = nbytes;
1289 
1290 	if (update && req->nbytes && (!IS_ALIGNED(state->bufcnt, bs))) {
1291 		int len = bs - state->bufcnt % bs;
1292 
1293 		if (len > req->nbytes)
1294 			len = req->nbytes;
1295 		scatterwalk_map_and_copy(state->buffer + state->bufcnt, req->src,
1296 					 0, len, 0);
1297 		state->bufcnt += len;
1298 		rctx->offset = len;
1299 	}
1300 
1301 	/* Copy the buffer into a temporary one used for sg alignment. */
1302 	memcpy(hdev->xmit_buf, state->buffer, state->bufcnt);
1303 
1304 	ret = stm32_hash_align_sgs(req->src, nbytes, bs, init, final, rctx);
1305 	if (ret)
1306 		return ret;
1307 
1308 	hash_later = nbytes - rctx->total;
1309 	if (hash_later < 0)
1310 		hash_later = 0;
1311 
1312 	if (hash_later && hash_later <= state->blocklen) {
1313 		scatterwalk_map_and_copy(state->buffer,
1314 					 req->src,
1315 					 req->nbytes - hash_later,
1316 					 hash_later, 0);
1317 
1318 		state->bufcnt = hash_later;
1319 	} else {
1320 		state->bufcnt = 0;
1321 	}
1322 
1323 	if (hash_later > state->blocklen) {
1324 		/* FIXME: add support for this case */
1325 		pr_err("Buffer contains more than one block.\n");
1326 		return -ENOMEM;
1327 	}
1328 
1329 	rctx->total = min(nbytes, rctx->total);
1330 
1331 	return 0;
1332 }
1333 
1334 static void stm32_hash_unprepare_request(struct ahash_request *req)
1335 {
1336 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1337 	struct stm32_hash_state *state = &rctx->state;
1338 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
1339 	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1340 	u32 *preg = state->hw_context;
1341 	int swap_reg, i;
1342 
1343 	if (hdev->dma_lch)
1344 		dmaengine_terminate_sync(hdev->dma_lch);
1345 
1346 	if (state->flags & HASH_FLAGS_SGS_COPIED)
1347 		free_pages((unsigned long)sg_virt(rctx->sg), get_order(rctx->sg->length));
1348 
1349 	rctx->sg = NULL;
1350 	rctx->offset = 0;
1351 
1352 	state->flags &= ~(HASH_FLAGS_SGS_COPIED);
1353 
1354 	if (!(hdev->flags & HASH_FLAGS_INIT))
1355 		goto pm_runtime;
1356 
1357 	state->flags |= HASH_FLAGS_INIT;
1358 
1359 	if (stm32_hash_wait_busy(hdev)) {
1360 		dev_warn(hdev->dev, "Wait busy failed.");
1361 		return;
1362 	}
1363 
1364 	swap_reg = hash_swap_reg(rctx);
1365 
1366 	if (!hdev->pdata->ux500)
1367 		*preg++ = stm32_hash_read(hdev, HASH_IMR);
1368 	*preg++ = stm32_hash_read(hdev, HASH_STR);
1369 	*preg++ = stm32_hash_read(hdev, HASH_CR);
1370 	for (i = 0; i < swap_reg; i++)
1371 		*preg++ = stm32_hash_read(hdev, HASH_CSR(i));
1372 
1373 pm_runtime:
1374 	pm_runtime_put_autosuspend(hdev->dev);
1375 }
1376 
1377 static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
1378 {
1379 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1380 	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1381 	struct stm32_hash_dev *hdev = ctx->hdev;
1382 
1383 	rctx->op = op;
1384 
1385 	return stm32_hash_handle_queue(hdev, req);
1386 }
1387 
1388 static int stm32_hash_update(struct ahash_request *req)
1389 {
1390 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1391 	struct stm32_hash_state *state = &rctx->state;
1392 
1393 	if (!req->nbytes)
1394 		return 0;
1395 
1396 
1397 	if (state->flags & HASH_FLAGS_CPU) {
1398 		rctx->total = req->nbytes;
1399 		rctx->sg = req->src;
1400 		rctx->offset = 0;
1401 
1402 		if ((state->bufcnt + rctx->total < state->blocklen)) {
1403 			stm32_hash_append_sg(rctx);
1404 			return 0;
1405 		}
1406 	} else { /* DMA mode */
1407 		if (state->bufcnt + req->nbytes <= state->blocklen) {
1408 			scatterwalk_map_and_copy(state->buffer + state->bufcnt, req->src,
1409 						 0, req->nbytes, 0);
1410 			state->bufcnt += req->nbytes;
1411 			return 0;
1412 		}
1413 	}
1414 
1415 	return stm32_hash_enqueue(req, HASH_OP_UPDATE);
1416 }
1417 
1418 static int stm32_hash_final(struct ahash_request *req)
1419 {
1420 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1421 	struct stm32_hash_state *state = &rctx->state;
1422 
1423 	state->flags |= HASH_FLAGS_FINAL;
1424 
1425 	return stm32_hash_enqueue(req, HASH_OP_FINAL);
1426 }
1427 
1428 static int stm32_hash_finup(struct ahash_request *req)
1429 {
1430 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1431 	struct stm32_hash_state *state = &rctx->state;
1432 
1433 	if (!req->nbytes)
1434 		goto out;
1435 
1436 	state->flags |= HASH_FLAGS_FINUP;
1437 
1438 	if ((state->flags & HASH_FLAGS_CPU)) {
1439 		rctx->total = req->nbytes;
1440 		rctx->sg = req->src;
1441 		rctx->offset = 0;
1442 	}
1443 
1444 out:
1445 	return stm32_hash_final(req);
1446 }
1447 
1448 static int stm32_hash_digest(struct ahash_request *req)
1449 {
1450 	return stm32_hash_init(req) ?: stm32_hash_finup(req);
1451 }
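
/*
 * Illustrative caller-side sketch (not part of this driver): a kernel user
 * reaches stm32_hash_digest() through the generic ahash API, roughly as
 * follows, assuming "sha256", a linear buffer "data" of "len" bytes and no
 * error handling:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *	u8 digest[SHA256_DIGEST_SIZE];
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */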
1452 
1453 static int stm32_hash_export(struct ahash_request *req, void *out)
1454 {
1455 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1456 
1457 	memcpy(out, &rctx->state, sizeof(rctx->state));
1458 
1459 	return 0;
1460 }
1461 
1462 static int stm32_hash_import(struct ahash_request *req, const void *in)
1463 {
1464 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1465 
1466 	stm32_hash_init(req);
1467 	memcpy(&rctx->state, in, sizeof(rctx->state));
1468 
1469 	return 0;
1470 }
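
/*
 * Note: export/import simply copy struct stm32_hash_state, which already
 * embeds the saved hardware context (hw_context[]), which is why .statesize
 * is sizeof(struct stm32_hash_state) in every algorithm definition below.
 */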
1471 
1472 static int stm32_hash_setkey(struct crypto_ahash *tfm,
1473 			     const u8 *key, unsigned int keylen)
1474 {
1475 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1476 
1477 	if (keylen <= HASH_MAX_KEY_SIZE) {
1478 		memcpy(ctx->key, key, keylen);
1479 		ctx->keylen = keylen;
1480 	} else {
1481 		return -ENOMEM;
1482 	}
1483 
1484 	return 0;
1485 }
1486 
1487 static int stm32_hash_init_fallback(struct crypto_tfm *tfm)
1488 {
1489 	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1490 	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1491 	const char *name = crypto_tfm_alg_name(tfm);
1492 	struct crypto_shash *xtfm;
1493 
1494 	/* The fallback is only needed on Ux500 */
1495 	if (!hdev->pdata->ux500)
1496 		return 0;
1497 
1498 	xtfm = crypto_alloc_shash(name, 0, CRYPTO_ALG_NEED_FALLBACK);
1499 	if (IS_ERR(xtfm)) {
1500 		dev_err(hdev->dev, "failed to allocate %s fallback\n",
1501 			name);
1502 		return PTR_ERR(xtfm);
1503 	}
1504 	dev_info(hdev->dev, "allocated %s fallback\n", name);
1505 	ctx->xtfm = xtfm;
1506 
1507 	return 0;
1508 }
1509 
1510 static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm, u32 algs_flags)
1511 {
1512 	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1513 
1514 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1515 				 sizeof(struct stm32_hash_request_ctx));
1516 
1517 	ctx->keylen = 0;
1518 
1519 	if (algs_flags)
1520 		ctx->flags |= algs_flags;
1521 
1522 	return stm32_hash_init_fallback(tfm);
1523 }
1524 
1525 static int stm32_hash_cra_init(struct crypto_tfm *tfm)
1526 {
1527 	return stm32_hash_cra_init_algs(tfm, 0);
1528 }
1529 
1530 static int stm32_hash_cra_hmac_init(struct crypto_tfm *tfm)
1531 {
1532 	return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_HMAC);
1533 }
1534 
1535 static int stm32_hash_cra_sha3_init(struct crypto_tfm *tfm)
1536 {
1537 	return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_SHA3_MODE);
1538 }
1539 
1540 static int stm32_hash_cra_sha3_hmac_init(struct crypto_tfm *tfm)
1541 {
1542 	return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_SHA3_MODE |
1543 					HASH_FLAGS_HMAC);
1544 }
1545 
1546 static void stm32_hash_cra_exit(struct crypto_tfm *tfm)
1547 {
1548 	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1549 
1550 	if (ctx->xtfm)
1551 		crypto_free_shash(ctx->xtfm);
1552 }
1553 
1554 static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
1555 {
1556 	struct stm32_hash_dev *hdev = dev_id;
1557 
1558 	if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
1559 		hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
1560 		goto finish;
1561 	}
1562 
1563 	return IRQ_HANDLED;
1564 
1565 finish:
1566 	/* Finish current request */
1567 	stm32_hash_finish_req(hdev->req, 0);
1568 
1569 	return IRQ_HANDLED;
1570 }
1571 
1572 static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
1573 {
1574 	struct stm32_hash_dev *hdev = dev_id;
1575 	u32 reg;
1576 
1577 	reg = stm32_hash_read(hdev, HASH_SR);
1578 	if (reg & HASH_SR_OUTPUT_READY) {
1579 		hdev->flags |= HASH_FLAGS_OUTPUT_READY;
1580 		/* Disable IT*/
1581 		/* Disable the interrupt */
1582 		return IRQ_WAKE_THREAD;
1583 	}
1584 
1585 	return IRQ_NONE;
1586 }
1587 
1588 static struct ahash_engine_alg algs_md5[] = {
1589 	{
1590 		.base.init = stm32_hash_init,
1591 		.base.update = stm32_hash_update,
1592 		.base.final = stm32_hash_final,
1593 		.base.finup = stm32_hash_finup,
1594 		.base.digest = stm32_hash_digest,
1595 		.base.export = stm32_hash_export,
1596 		.base.import = stm32_hash_import,
1597 		.base.halg = {
1598 			.digestsize = MD5_DIGEST_SIZE,
1599 			.statesize = sizeof(struct stm32_hash_state),
1600 			.base = {
1601 				.cra_name = "md5",
1602 				.cra_driver_name = "stm32-md5",
1603 				.cra_priority = 200,
1604 				.cra_flags = CRYPTO_ALG_ASYNC |
1605 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1606 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
1607 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1608 				.cra_init = stm32_hash_cra_init,
1609 				.cra_exit = stm32_hash_cra_exit,
1610 				.cra_module = THIS_MODULE,
1611 			}
1612 		},
1613 		.op = {
1614 			.do_one_request = stm32_hash_one_request,
1615 		},
1616 	},
1617 	{
1618 		.base.init = stm32_hash_init,
1619 		.base.update = stm32_hash_update,
1620 		.base.final = stm32_hash_final,
1621 		.base.finup = stm32_hash_finup,
1622 		.base.digest = stm32_hash_digest,
1623 		.base.export = stm32_hash_export,
1624 		.base.import = stm32_hash_import,
1625 		.base.setkey = stm32_hash_setkey,
1626 		.base.halg = {
1627 			.digestsize = MD5_DIGEST_SIZE,
1628 			.statesize = sizeof(struct stm32_hash_state),
1629 			.base = {
1630 				.cra_name = "hmac(md5)",
1631 				.cra_driver_name = "stm32-hmac-md5",
1632 				.cra_priority = 200,
1633 				.cra_flags = CRYPTO_ALG_ASYNC |
1634 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1635 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
1636 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1637 				.cra_init = stm32_hash_cra_hmac_init,
1638 				.cra_exit = stm32_hash_cra_exit,
1639 				.cra_module = THIS_MODULE,
1640 			}
1641 		},
1642 		.op = {
1643 			.do_one_request = stm32_hash_one_request,
1644 		},
1645 	}
1646 };
1647 
1648 static struct ahash_engine_alg algs_sha1[] = {
1649 	{
1650 		.base.init = stm32_hash_init,
1651 		.base.update = stm32_hash_update,
1652 		.base.final = stm32_hash_final,
1653 		.base.finup = stm32_hash_finup,
1654 		.base.digest = stm32_hash_digest,
1655 		.base.export = stm32_hash_export,
1656 		.base.import = stm32_hash_import,
1657 		.base.halg = {
1658 			.digestsize = SHA1_DIGEST_SIZE,
1659 			.statesize = sizeof(struct stm32_hash_state),
1660 			.base = {
1661 				.cra_name = "sha1",
1662 				.cra_driver_name = "stm32-sha1",
1663 				.cra_priority = 200,
1664 				.cra_flags = CRYPTO_ALG_ASYNC |
1665 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1666 				.cra_blocksize = SHA1_BLOCK_SIZE,
1667 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1668 				.cra_init = stm32_hash_cra_init,
1669 				.cra_exit = stm32_hash_cra_exit,
1670 				.cra_module = THIS_MODULE,
1671 			}
1672 		},
1673 		.op = {
1674 			.do_one_request = stm32_hash_one_request,
1675 		},
1676 	},
1677 	{
1678 		.base.init = stm32_hash_init,
1679 		.base.update = stm32_hash_update,
1680 		.base.final = stm32_hash_final,
1681 		.base.finup = stm32_hash_finup,
1682 		.base.digest = stm32_hash_digest,
1683 		.base.export = stm32_hash_export,
1684 		.base.import = stm32_hash_import,
1685 		.base.setkey = stm32_hash_setkey,
1686 		.base.halg = {
1687 			.digestsize = SHA1_DIGEST_SIZE,
1688 			.statesize = sizeof(struct stm32_hash_state),
1689 			.base = {
1690 				.cra_name = "hmac(sha1)",
1691 				.cra_driver_name = "stm32-hmac-sha1",
1692 				.cra_priority = 200,
1693 				.cra_flags = CRYPTO_ALG_ASYNC |
1694 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1695 				.cra_blocksize = SHA1_BLOCK_SIZE,
1696 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1697 				.cra_init = stm32_hash_cra_hmac_init,
1698 				.cra_exit = stm32_hash_cra_exit,
1699 				.cra_module = THIS_MODULE,
1700 			}
1701 		},
1702 		.op = {
1703 			.do_one_request = stm32_hash_one_request,
1704 		},
1705 	},
1706 };
1707 
1708 static struct ahash_engine_alg algs_sha224[] = {
1709 	{
1710 		.base.init = stm32_hash_init,
1711 		.base.update = stm32_hash_update,
1712 		.base.final = stm32_hash_final,
1713 		.base.finup = stm32_hash_finup,
1714 		.base.digest = stm32_hash_digest,
1715 		.base.export = stm32_hash_export,
1716 		.base.import = stm32_hash_import,
1717 		.base.halg = {
1718 			.digestsize = SHA224_DIGEST_SIZE,
1719 			.statesize = sizeof(struct stm32_hash_state),
1720 			.base = {
1721 				.cra_name = "sha224",
1722 				.cra_driver_name = "stm32-sha224",
1723 				.cra_priority = 200,
1724 				.cra_flags = CRYPTO_ALG_ASYNC |
1725 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1726 				.cra_blocksize = SHA224_BLOCK_SIZE,
1727 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1728 				.cra_init = stm32_hash_cra_init,
1729 				.cra_exit = stm32_hash_cra_exit,
1730 				.cra_module = THIS_MODULE,
1731 			}
1732 		},
1733 		.op = {
1734 			.do_one_request = stm32_hash_one_request,
1735 		},
1736 	},
1737 	{
1738 		.base.init = stm32_hash_init,
1739 		.base.update = stm32_hash_update,
1740 		.base.final = stm32_hash_final,
1741 		.base.finup = stm32_hash_finup,
1742 		.base.digest = stm32_hash_digest,
1743 		.base.setkey = stm32_hash_setkey,
1744 		.base.export = stm32_hash_export,
1745 		.base.import = stm32_hash_import,
1746 		.base.halg = {
1747 			.digestsize = SHA224_DIGEST_SIZE,
1748 			.statesize = sizeof(struct stm32_hash_state),
1749 			.base = {
1750 				.cra_name = "hmac(sha224)",
1751 				.cra_driver_name = "stm32-hmac-sha224",
1752 				.cra_priority = 200,
1753 				.cra_flags = CRYPTO_ALG_ASYNC |
1754 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1755 				.cra_blocksize = SHA224_BLOCK_SIZE,
1756 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1757 				.cra_init = stm32_hash_cra_hmac_init,
1758 				.cra_exit = stm32_hash_cra_exit,
1759 				.cra_module = THIS_MODULE,
1760 			}
1761 		},
1762 		.op = {
1763 			.do_one_request = stm32_hash_one_request,
1764 		},
1765 	},
1766 };
1767 
1768 static struct ahash_engine_alg algs_sha256[] = {
1769 	{
1770 		.base.init = stm32_hash_init,
1771 		.base.update = stm32_hash_update,
1772 		.base.final = stm32_hash_final,
1773 		.base.finup = stm32_hash_finup,
1774 		.base.digest = stm32_hash_digest,
1775 		.base.export = stm32_hash_export,
1776 		.base.import = stm32_hash_import,
1777 		.base.halg = {
1778 			.digestsize = SHA256_DIGEST_SIZE,
1779 			.statesize = sizeof(struct stm32_hash_state),
1780 			.base = {
1781 				.cra_name = "sha256",
1782 				.cra_driver_name = "stm32-sha256",
1783 				.cra_priority = 200,
1784 				.cra_flags = CRYPTO_ALG_ASYNC |
1785 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1786 				.cra_blocksize = SHA256_BLOCK_SIZE,
1787 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1788 				.cra_init = stm32_hash_cra_init,
1789 				.cra_exit = stm32_hash_cra_exit,
1790 				.cra_module = THIS_MODULE,
1791 			}
1792 		},
1793 		.op = {
1794 			.do_one_request = stm32_hash_one_request,
1795 		},
1796 	},
1797 	{
1798 		.base.init = stm32_hash_init,
1799 		.base.update = stm32_hash_update,
1800 		.base.final = stm32_hash_final,
1801 		.base.finup = stm32_hash_finup,
1802 		.base.digest = stm32_hash_digest,
1803 		.base.export = stm32_hash_export,
1804 		.base.import = stm32_hash_import,
1805 		.base.setkey = stm32_hash_setkey,
1806 		.base.halg = {
1807 			.digestsize = SHA256_DIGEST_SIZE,
1808 			.statesize = sizeof(struct stm32_hash_state),
1809 			.base = {
1810 				.cra_name = "hmac(sha256)",
1811 				.cra_driver_name = "stm32-hmac-sha256",
1812 				.cra_priority = 200,
1813 				.cra_flags = CRYPTO_ALG_ASYNC |
1814 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1815 				.cra_blocksize = SHA256_BLOCK_SIZE,
1816 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1817 				.cra_init = stm32_hash_cra_hmac_init,
1818 				.cra_exit = stm32_hash_cra_exit,
1819 				.cra_module = THIS_MODULE,
1820 			}
1821 		},
1822 		.op = {
1823 			.do_one_request = stm32_hash_one_request,
1824 		},
1825 	},
1826 };
1827 
1828 static struct ahash_engine_alg algs_sha384_sha512[] = {
1829 	{
1830 		.base.init = stm32_hash_init,
1831 		.base.update = stm32_hash_update,
1832 		.base.final = stm32_hash_final,
1833 		.base.finup = stm32_hash_finup,
1834 		.base.digest = stm32_hash_digest,
1835 		.base.export = stm32_hash_export,
1836 		.base.import = stm32_hash_import,
1837 		.base.halg = {
1838 			.digestsize = SHA384_DIGEST_SIZE,
1839 			.statesize = sizeof(struct stm32_hash_state),
1840 			.base = {
1841 				.cra_name = "sha384",
1842 				.cra_driver_name = "stm32-sha384",
1843 				.cra_priority = 200,
1844 				.cra_flags = CRYPTO_ALG_ASYNC |
1845 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1846 				.cra_blocksize = SHA384_BLOCK_SIZE,
1847 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1848 				.cra_init = stm32_hash_cra_init,
1849 				.cra_exit = stm32_hash_cra_exit,
1850 				.cra_module = THIS_MODULE,
1851 			}
1852 		},
1853 		.op = {
1854 			.do_one_request = stm32_hash_one_request,
1855 		},
1856 	},
1857 	{
1858 		.base.init = stm32_hash_init,
1859 		.base.update = stm32_hash_update,
1860 		.base.final = stm32_hash_final,
1861 		.base.finup = stm32_hash_finup,
1862 		.base.digest = stm32_hash_digest,
1863 		.base.setkey = stm32_hash_setkey,
1864 		.base.export = stm32_hash_export,
1865 		.base.import = stm32_hash_import,
1866 		.base.halg = {
1867 			.digestsize = SHA384_DIGEST_SIZE,
1868 			.statesize = sizeof(struct stm32_hash_state),
1869 			.base = {
1870 				.cra_name = "hmac(sha384)",
1871 				.cra_driver_name = "stm32-hmac-sha384",
1872 				.cra_priority = 200,
1873 				.cra_flags = CRYPTO_ALG_ASYNC |
1874 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1875 				.cra_blocksize = SHA384_BLOCK_SIZE,
1876 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1877 				.cra_init = stm32_hash_cra_hmac_init,
1878 				.cra_exit = stm32_hash_cra_exit,
1879 				.cra_module = THIS_MODULE,
1880 			}
1881 		},
1882 		.op = {
1883 			.do_one_request = stm32_hash_one_request,
1884 		},
1885 	},
1886 	{
1887 		.base.init = stm32_hash_init,
1888 		.base.update = stm32_hash_update,
1889 		.base.final = stm32_hash_final,
1890 		.base.finup = stm32_hash_finup,
1891 		.base.digest = stm32_hash_digest,
1892 		.base.export = stm32_hash_export,
1893 		.base.import = stm32_hash_import,
1894 		.base.halg = {
1895 			.digestsize = SHA512_DIGEST_SIZE,
1896 			.statesize = sizeof(struct stm32_hash_state),
1897 			.base = {
1898 				.cra_name = "sha512",
1899 				.cra_driver_name = "stm32-sha512",
1900 				.cra_priority = 200,
1901 				.cra_flags = CRYPTO_ALG_ASYNC |
1902 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1903 				.cra_blocksize = SHA512_BLOCK_SIZE,
1904 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1905 				.cra_init = stm32_hash_cra_init,
1906 				.cra_exit = stm32_hash_cra_exit,
1907 				.cra_module = THIS_MODULE,
1908 			}
1909 		},
1910 		.op = {
1911 			.do_one_request = stm32_hash_one_request,
1912 		},
1913 	},
1914 	{
1915 		.base.init = stm32_hash_init,
1916 		.base.update = stm32_hash_update,
1917 		.base.final = stm32_hash_final,
1918 		.base.finup = stm32_hash_finup,
1919 		.base.digest = stm32_hash_digest,
1920 		.base.export = stm32_hash_export,
1921 		.base.import = stm32_hash_import,
1922 		.base.setkey = stm32_hash_setkey,
1923 		.base.halg = {
1924 			.digestsize = SHA512_DIGEST_SIZE,
1925 			.statesize = sizeof(struct stm32_hash_state),
1926 			.base = {
1927 				.cra_name = "hmac(sha512)",
1928 				.cra_driver_name = "stm32-hmac-sha512",
1929 				.cra_priority = 200,
1930 				.cra_flags = CRYPTO_ALG_ASYNC |
1931 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1932 				.cra_blocksize = SHA512_BLOCK_SIZE,
1933 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1934 				.cra_init = stm32_hash_cra_hmac_init,
1935 				.cra_exit = stm32_hash_cra_exit,
1936 				.cra_module = THIS_MODULE,
1937 			}
1938 		},
1939 		.op = {
1940 			.do_one_request = stm32_hash_one_request,
1941 		},
1942 	},
1943 };
1944 
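/* SHA3 and HMAC-SHA3 algorithms, registered only where the platform data lists them (currently the STM32MP13 table below) */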
1945 static struct ahash_engine_alg algs_sha3[] = {
1946 	{
1947 		.base.init = stm32_hash_init,
1948 		.base.update = stm32_hash_update,
1949 		.base.final = stm32_hash_final,
1950 		.base.finup = stm32_hash_finup,
1951 		.base.digest = stm32_hash_digest,
1952 		.base.export = stm32_hash_export,
1953 		.base.import = stm32_hash_import,
1954 		.base.halg = {
1955 			.digestsize = SHA3_224_DIGEST_SIZE,
1956 			.statesize = sizeof(struct stm32_hash_state),
1957 			.base = {
1958 				.cra_name = "sha3-224",
1959 				.cra_driver_name = "stm32-sha3-224",
1960 				.cra_priority = 200,
1961 				.cra_flags = CRYPTO_ALG_ASYNC |
1962 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1963 				.cra_blocksize = SHA3_224_BLOCK_SIZE,
1964 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1965 				.cra_init = stm32_hash_cra_sha3_init,
1966 				.cra_exit = stm32_hash_cra_exit,
1967 				.cra_module = THIS_MODULE,
1968 			}
1969 		},
1970 		.op = {
1971 			.do_one_request = stm32_hash_one_request,
1972 		},
1973 	},
1974 	{
1975 		.base.init = stm32_hash_init,
1976 		.base.update = stm32_hash_update,
1977 		.base.final = stm32_hash_final,
1978 		.base.finup = stm32_hash_finup,
1979 		.base.digest = stm32_hash_digest,
1980 		.base.export = stm32_hash_export,
1981 		.base.import = stm32_hash_import,
1982 		.base.setkey = stm32_hash_setkey,
1983 		.base.halg = {
1984 			.digestsize = SHA3_224_DIGEST_SIZE,
1985 			.statesize = sizeof(struct stm32_hash_state),
1986 			.base = {
1987 				.cra_name = "hmac(sha3-224)",
1988 				.cra_driver_name = "stm32-hmac-sha3-224",
1989 				.cra_priority = 200,
1990 				.cra_flags = CRYPTO_ALG_ASYNC |
1991 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1992 				.cra_blocksize = SHA3_224_BLOCK_SIZE,
1993 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1994 				.cra_init = stm32_hash_cra_sha3_hmac_init,
1995 				.cra_exit = stm32_hash_cra_exit,
1996 				.cra_module = THIS_MODULE,
1997 			}
1998 		},
1999 		.op = {
2000 			.do_one_request = stm32_hash_one_request,
2001 		},
2002 	},
2003 	{
2004 		.base.init = stm32_hash_init,
2005 		.base.update = stm32_hash_update,
2006 		.base.final = stm32_hash_final,
2007 		.base.finup = stm32_hash_finup,
2008 		.base.digest = stm32_hash_digest,
2009 		.base.export = stm32_hash_export,
2010 		.base.import = stm32_hash_import,
2011 		.base.halg = {
2012 			.digestsize = SHA3_256_DIGEST_SIZE,
2013 			.statesize = sizeof(struct stm32_hash_state),
2014 			.base = {
2015 				.cra_name = "sha3-256",
2016 				.cra_driver_name = "stm32-sha3-256",
2017 				.cra_priority = 200,
2018 				.cra_flags = CRYPTO_ALG_ASYNC |
2019 					CRYPTO_ALG_KERN_DRIVER_ONLY,
2020 				.cra_blocksize = SHA3_256_BLOCK_SIZE,
2021 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
2022 				.cra_init = stm32_hash_cra_sha3_init,
2023 				.cra_exit = stm32_hash_cra_exit,
2024 				.cra_module = THIS_MODULE,
2025 			}
2026 		},
2027 		.op = {
2028 			.do_one_request = stm32_hash_one_request,
2029 		},
2030 	},
2031 	{
2032 		.base.init = stm32_hash_init,
2033 		.base.update = stm32_hash_update,
2034 		.base.final = stm32_hash_final,
2035 		.base.finup = stm32_hash_finup,
2036 		.base.digest = stm32_hash_digest,
2037 		.base.export = stm32_hash_export,
2038 		.base.import = stm32_hash_import,
2039 		.base.setkey = stm32_hash_setkey,
2040 		.base.halg = {
2041 			.digestsize = SHA3_256_DIGEST_SIZE,
2042 			.statesize = sizeof(struct stm32_hash_state),
2043 			.base = {
2044 				.cra_name = "hmac(sha3-256)",
2045 				.cra_driver_name = "stm32-hmac-sha3-256",
2046 				.cra_priority = 200,
2047 				.cra_flags = CRYPTO_ALG_ASYNC |
2048 					CRYPTO_ALG_KERN_DRIVER_ONLY,
2049 				.cra_blocksize = SHA3_256_BLOCK_SIZE,
2050 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
2051 				.cra_init = stm32_hash_cra_sha3_hmac_init,
2052 				.cra_exit = stm32_hash_cra_exit,
2053 				.cra_module = THIS_MODULE,
2054 			}
2055 		},
2056 		.op = {
2057 			.do_one_request = stm32_hash_one_request,
2058 		},
2059 	},
2060 	{
2061 		.base.init = stm32_hash_init,
2062 		.base.update = stm32_hash_update,
2063 		.base.final = stm32_hash_final,
2064 		.base.finup = stm32_hash_finup,
2065 		.base.digest = stm32_hash_digest,
2066 		.base.export = stm32_hash_export,
2067 		.base.import = stm32_hash_import,
2068 		.base.halg = {
2069 			.digestsize = SHA3_384_DIGEST_SIZE,
2070 			.statesize = sizeof(struct stm32_hash_state),
2071 			.base = {
2072 				.cra_name = "sha3-384",
2073 				.cra_driver_name = "stm32-sha3-384",
2074 				.cra_priority = 200,
2075 				.cra_flags = CRYPTO_ALG_ASYNC |
2076 					CRYPTO_ALG_KERN_DRIVER_ONLY,
2077 				.cra_blocksize = SHA3_384_BLOCK_SIZE,
2078 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
2079 				.cra_init = stm32_hash_cra_sha3_init,
2080 				.cra_exit = stm32_hash_cra_exit,
2081 				.cra_module = THIS_MODULE,
2082 			}
2083 		},
2084 		.op = {
2085 			.do_one_request = stm32_hash_one_request,
2086 		},
2087 	},
2088 	{
2089 		.base.init = stm32_hash_init,
2090 		.base.update = stm32_hash_update,
2091 		.base.final = stm32_hash_final,
2092 		.base.finup = stm32_hash_finup,
2093 		.base.digest = stm32_hash_digest,
2094 		.base.export = stm32_hash_export,
2095 		.base.import = stm32_hash_import,
2096 		.base.setkey = stm32_hash_setkey,
2097 		.base.halg = {
2098 			.digestsize = SHA3_384_DIGEST_SIZE,
2099 			.statesize = sizeof(struct stm32_hash_state),
2100 			.base = {
2101 				.cra_name = "hmac(sha3-384)",
2102 				.cra_driver_name = "stm32-hmac-sha3-384",
2103 				.cra_priority = 200,
2104 				.cra_flags = CRYPTO_ALG_ASYNC |
2105 					CRYPTO_ALG_KERN_DRIVER_ONLY,
2106 				.cra_blocksize = SHA3_384_BLOCK_SIZE,
2107 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
2108 				.cra_init = stm32_hash_cra_sha3_hmac_init,
2109 				.cra_exit = stm32_hash_cra_exit,
2110 				.cra_module = THIS_MODULE,
2111 			}
2112 		},
2113 		.op = {
2114 			.do_one_request = stm32_hash_one_request,
2115 		},
2116 	},
2117 	{
2118 		.base.init = stm32_hash_init,
2119 		.base.update = stm32_hash_update,
2120 		.base.final = stm32_hash_final,
2121 		.base.finup = stm32_hash_finup,
2122 		.base.digest = stm32_hash_digest,
2123 		.base.export = stm32_hash_export,
2124 		.base.import = stm32_hash_import,
2125 		.base.halg = {
2126 			.digestsize = SHA3_512_DIGEST_SIZE,
2127 			.statesize = sizeof(struct stm32_hash_state),
2128 			.base = {
2129 				.cra_name = "sha3-512",
2130 				.cra_driver_name = "stm32-sha3-512",
2131 				.cra_priority = 200,
2132 				.cra_flags = CRYPTO_ALG_ASYNC |
2133 					CRYPTO_ALG_KERN_DRIVER_ONLY,
2134 				.cra_blocksize = SHA3_512_BLOCK_SIZE,
2135 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
2136 				.cra_init = stm32_hash_cra_sha3_init,
2137 				.cra_exit = stm32_hash_cra_exit,
2138 				.cra_module = THIS_MODULE,
2139 			}
2140 		},
2141 		.op = {
2142 			.do_one_request = stm32_hash_one_request,
2143 		},
2144 	},
2145 	{
2146 		.base.init = stm32_hash_init,
2147 		.base.update = stm32_hash_update,
2148 		.base.final = stm32_hash_final,
2149 		.base.finup = stm32_hash_finup,
2150 		.base.digest = stm32_hash_digest,
2151 		.base.export = stm32_hash_export,
2152 		.base.import = stm32_hash_import,
2153 		.base.setkey = stm32_hash_setkey,
2154 		.base.halg = {
2155 			.digestsize = SHA3_512_DIGEST_SIZE,
2156 			.statesize = sizeof(struct stm32_hash_state),
2157 			.base = {
2158 				.cra_name = "hmac(sha3-512)",
2159 				.cra_driver_name = "stm32-hmac-sha3-512",
2160 				.cra_priority = 200,
2161 				.cra_flags = CRYPTO_ALG_ASYNC |
2162 					CRYPTO_ALG_KERN_DRIVER_ONLY,
2163 				.cra_blocksize = SHA3_512_BLOCK_SIZE,
2164 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
2165 				.cra_init = stm32_hash_cra_sha3_hmac_init,
2166 				.cra_exit = stm32_hash_cra_exit,
2167 				.cra_module = THIS_MODULE,
2168 			}
2169 		},
2170 		.op = {
2171 			.do_one_request = stm32_hash_one_request,
2172 		},
2173 	}
2174 };
2175 
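/* Register with the crypto engine every algorithm listed in the platform data, unwinding the already registered ones on failure */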
2176 static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
2177 {
2178 	unsigned int i, j;
2179 	int err;
2180 
2181 	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
2182 		for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
2183 			err = crypto_engine_register_ahash(
2184 				&hdev->pdata->algs_info[i].algs_list[j]);
2185 			if (err)
2186 				goto err_algs;
2187 		}
2188 	}
2189 
2190 	return 0;
err_algs:
	dev_err(hdev->dev, "Failed to register algorithm %u:%u\n", i, j);
	/* Unregister what has been registered so far, most recent first */
	while (j--)
		crypto_engine_unregister_ahash(
			&hdev->pdata->algs_info[i].algs_list[j]);
	while (i--) {
		for (j = hdev->pdata->algs_info[i].size; j--;)
			crypto_engine_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
	}
2198 
2199 	return err;
2200 }
2201 
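/* Unregister all algorithms previously registered from the platform data lists */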
2202 static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
2203 {
2204 	unsigned int i, j;
2205 
2206 	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
2207 		for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
2208 			crypto_engine_unregister_ahash(
2209 				&hdev->pdata->algs_info[i].algs_list[j]);
2210 	}
2211 
2212 	return 0;
2213 }
2214 
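/* Per-variant algorithm lists and platform data */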
2215 static struct stm32_hash_algs_info stm32_hash_algs_info_ux500[] = {
2216 	{
2217 		.algs_list	= algs_sha1,
2218 		.size		= ARRAY_SIZE(algs_sha1),
2219 	},
2220 	{
2221 		.algs_list	= algs_sha256,
2222 		.size		= ARRAY_SIZE(algs_sha256),
2223 	},
2224 };
2225 
2226 static const struct stm32_hash_pdata stm32_hash_pdata_ux500 = {
2227 	.alg_shift	= 7,
2228 	.algs_info	= stm32_hash_algs_info_ux500,
2229 	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_ux500),
2230 	.broken_emptymsg = true,
2231 	.ux500		= true,
2232 };
2233 
2234 static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
2235 	{
2236 		.algs_list	= algs_md5,
2237 		.size		= ARRAY_SIZE(algs_md5),
2238 	},
2239 	{
2240 		.algs_list	= algs_sha1,
2241 		.size		= ARRAY_SIZE(algs_sha1),
2242 	},
2243 };
2244 
2245 static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
2246 	.alg_shift	= 7,
2247 	.algs_info	= stm32_hash_algs_info_stm32f4,
2248 	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
2249 	.has_sr		= true,
2250 	.has_mdmat	= true,
2251 };
2252 
2253 static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
2254 	{
2255 		.algs_list	= algs_md5,
2256 		.size		= ARRAY_SIZE(algs_md5),
2257 	},
2258 	{
2259 		.algs_list	= algs_sha1,
2260 		.size		= ARRAY_SIZE(algs_sha1),
2261 	},
2262 	{
2263 		.algs_list	= algs_sha224,
2264 		.size		= ARRAY_SIZE(algs_sha224),
2265 	},
2266 	{
2267 		.algs_list	= algs_sha256,
2268 		.size		= ARRAY_SIZE(algs_sha256),
2269 	},
2270 };
2271 
2272 static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
2273 	.alg_shift	= 7,
2274 	.algs_info	= stm32_hash_algs_info_stm32f7,
2275 	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
2276 	.has_sr		= true,
2277 	.has_mdmat	= true,
2278 };
2279 
2280 static struct stm32_hash_algs_info stm32_hash_algs_info_stm32mp13[] = {
2281 	{
2282 		.algs_list	= algs_sha1,
2283 		.size		= ARRAY_SIZE(algs_sha1),
2284 	},
2285 	{
2286 		.algs_list	= algs_sha224,
2287 		.size		= ARRAY_SIZE(algs_sha224),
2288 	},
2289 	{
2290 		.algs_list	= algs_sha256,
2291 		.size		= ARRAY_SIZE(algs_sha256),
2292 	},
2293 	{
2294 		.algs_list	= algs_sha384_sha512,
2295 		.size		= ARRAY_SIZE(algs_sha384_sha512),
2296 	},
2297 	{
2298 		.algs_list	= algs_sha3,
2299 		.size		= ARRAY_SIZE(algs_sha3),
2300 	},
2301 };
2302 
2303 static const struct stm32_hash_pdata stm32_hash_pdata_stm32mp13 = {
2304 	.alg_shift	= 17,
2305 	.algs_info	= stm32_hash_algs_info_stm32mp13,
2306 	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32mp13),
2307 	.has_sr		= true,
2308 	.has_mdmat	= true,
2309 	.context_secured = true,
2310 };
2311 
2312 static const struct of_device_id stm32_hash_of_match[] = {
2313 	{ .compatible = "stericsson,ux500-hash", .data = &stm32_hash_pdata_ux500 },
2314 	{ .compatible = "st,stm32f456-hash", .data = &stm32_hash_pdata_stm32f4 },
2315 	{ .compatible = "st,stm32f756-hash", .data = &stm32_hash_pdata_stm32f7 },
2316 	{ .compatible = "st,stm32mp13-hash", .data = &stm32_hash_pdata_stm32mp13 },
2317 	{},
2318 };
2319 
2320 MODULE_DEVICE_TABLE(of, stm32_hash_of_match);
2321 
2322 static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
2323 				   struct device *dev)
2324 {
2325 	hdev->pdata = of_device_get_match_data(dev);
2326 	if (!hdev->pdata) {
2327 		dev_err(dev, "no compatible OF match\n");
2328 		return -EINVAL;
2329 	}
2330 
2331 	return 0;
2332 }
2333 
2334 static int stm32_hash_probe(struct platform_device *pdev)
2335 {
2336 	struct stm32_hash_dev *hdev;
2337 	struct device *dev = &pdev->dev;
2338 	struct resource *res;
2339 	int ret, irq;
2340 
2341 	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
2342 	if (!hdev)
2343 		return -ENOMEM;
2344 
2345 	hdev->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
2346 	if (IS_ERR(hdev->io_base))
2347 		return PTR_ERR(hdev->io_base);
2348 
2349 	hdev->phys_base = res->start;
2350 
2351 	ret = stm32_hash_get_of_match(hdev, dev);
2352 	if (ret)
2353 		return ret;
2354 
2355 	irq = platform_get_irq_optional(pdev, 0);
2356 	if (irq < 0 && irq != -ENXIO)
2357 		return irq;
2358 
2359 	if (irq > 0) {
2360 		ret = devm_request_threaded_irq(dev, irq,
2361 						stm32_hash_irq_handler,
2362 						stm32_hash_irq_thread,
2363 						IRQF_ONESHOT,
2364 						dev_name(dev), hdev);
2365 		if (ret) {
2366 			dev_err(dev, "Cannot grab IRQ\n");
2367 			return ret;
2368 		}
2369 	} else {
2370 		dev_info(dev, "No IRQ, using polling mode\n");
2371 		hdev->polled = true;
2372 	}
2373 
2374 	hdev->clk = devm_clk_get(&pdev->dev, NULL);
2375 	if (IS_ERR(hdev->clk))
2376 		return dev_err_probe(dev, PTR_ERR(hdev->clk),
2377 				     "failed to get clock for hash\n");
2378 
2379 	ret = clk_prepare_enable(hdev->clk);
2380 	if (ret) {
2381 		dev_err(dev, "failed to enable hash clock (%d)\n", ret);
2382 		return ret;
2383 	}
2384 
2385 	pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY);
2386 	pm_runtime_use_autosuspend(dev);
2387 
2388 	pm_runtime_get_noresume(dev);
2389 	pm_runtime_set_active(dev);
2390 	pm_runtime_enable(dev);
2391 
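	/* Optional reset line: pulse it so the IP starts from a known state */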
2392 	hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
2393 	if (IS_ERR(hdev->rst)) {
2394 		if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) {
2395 			ret = -EPROBE_DEFER;
2396 			goto err_reset;
2397 		}
2398 	} else {
2399 		reset_control_assert(hdev->rst);
2400 		udelay(2);
2401 		reset_control_deassert(hdev->rst);
2402 	}
2403 
2404 	hdev->dev = dev;
2405 
2406 	platform_set_drvdata(pdev, hdev);
2407 
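	/* DMA is optional: fall back to CPU (IRQ or polling) transfers if no channel is available */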
2408 	ret = stm32_hash_dma_init(hdev);
2409 	switch (ret) {
2410 	case 0:
2411 		break;
2412 	case -ENOENT:
2413 	case -ENODEV:
2414 		dev_info(dev, "DMA mode not available\n");
2415 		break;
2416 	default:
2417 		dev_err(dev, "DMA init error %d\n", ret);
2418 		goto err_dma;
2419 	}
2420 
2421 	spin_lock(&stm32_hash.lock);
2422 	list_add_tail(&hdev->list, &stm32_hash.dev_list);
2423 	spin_unlock(&stm32_hash.lock);
2424 
2425 	/* Initialize crypto engine */
2426 	hdev->engine = crypto_engine_alloc_init(dev, 1);
2427 	if (!hdev->engine) {
2428 		ret = -ENOMEM;
2429 		goto err_engine;
2430 	}
2431 
2432 	ret = crypto_engine_start(hdev->engine);
2433 	if (ret)
2434 		goto err_engine_start;
2435 
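	/* DMA capability comes from HWCFGR, except on Ux500 where DMA is not yet supported */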
2436 	if (hdev->pdata->ux500)
2437 		/* FIXME: implement DMA mode for Ux500 */
2438 		hdev->dma_mode = 0;
2439 	else
2440 		hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR) & HASH_HWCFG_DMA_MASK;
2441 
2442 	/* Register algos */
2443 	ret = stm32_hash_register_algs(hdev);
2444 	if (ret)
2445 		goto err_algs;
2446 
2447 	dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
2448 		 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);
2449 
2450 	pm_runtime_put_sync(dev);
2451 
2452 	return 0;
2453 
2454 err_algs:
2455 err_engine_start:
2456 	crypto_engine_exit(hdev->engine);
2457 err_engine:
2458 	spin_lock(&stm32_hash.lock);
2459 	list_del(&hdev->list);
2460 	spin_unlock(&stm32_hash.lock);
2461 err_dma:
2462 	if (hdev->dma_lch)
2463 		dma_release_channel(hdev->dma_lch);
2464 err_reset:
2465 	pm_runtime_disable(dev);
2466 	pm_runtime_put_noidle(dev);
2467 
2468 	clk_disable_unprepare(hdev->clk);
2469 
2470 	return ret;
2471 }
2472 
2473 static void stm32_hash_remove(struct platform_device *pdev)
2474 {
2475 	struct stm32_hash_dev *hdev = platform_get_drvdata(pdev);
2476 	int ret;
2477 
2478 	ret = pm_runtime_get_sync(hdev->dev);
2479 
2480 	stm32_hash_unregister_algs(hdev);
2481 
2482 	crypto_engine_exit(hdev->engine);
2483 
2484 	spin_lock(&stm32_hash.lock);
2485 	list_del(&hdev->list);
2486 	spin_unlock(&stm32_hash.lock);
2487 
2488 	if (hdev->dma_lch)
2489 		dma_release_channel(hdev->dma_lch);
2490 
2491 	pm_runtime_disable(hdev->dev);
2492 	pm_runtime_put_noidle(hdev->dev);
2493 
2494 	if (ret >= 0)
2495 		clk_disable_unprepare(hdev->clk);
2496 }
2497 
2498 #ifdef CONFIG_PM
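/* Runtime PM: gate the HASH clock while the controller is idle */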
2499 static int stm32_hash_runtime_suspend(struct device *dev)
2500 {
2501 	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
2502 
2503 	clk_disable_unprepare(hdev->clk);
2504 
2505 	return 0;
2506 }
2507 
2508 static int stm32_hash_runtime_resume(struct device *dev)
2509 {
2510 	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
2511 	int ret;
2512 
2513 	ret = clk_prepare_enable(hdev->clk);
2514 	if (ret) {
2515 		dev_err(hdev->dev, "Failed to prepare_enable clock\n");
2516 		return ret;
2517 	}
2518 
2519 	return 0;
2520 }
2521 #endif
2522 
2523 static const struct dev_pm_ops stm32_hash_pm_ops = {
2524 	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
2525 				pm_runtime_force_resume)
2526 	SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
2527 			   stm32_hash_runtime_resume, NULL)
2528 };
2529 
2530 static struct platform_driver stm32_hash_driver = {
2531 	.probe		= stm32_hash_probe,
2532 	.remove		= stm32_hash_remove,
2533 	.driver		= {
2534 		.name	= "stm32-hash",
2535 		.pm = &stm32_hash_pm_ops,
2536 		.of_match_table	= stm32_hash_of_match,
2537 	}
2538 };
2539 
2540 module_platform_driver(stm32_hash_driver);
2541 
2542 MODULE_DESCRIPTION("STM32 SHA1/SHA2/SHA3 & MD5 (HMAC) hw accelerator driver");
2543 MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
2544 MODULE_LICENSE("GPL v2");
2545