// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of STM32 Crypto driver for Linux.
 *
 * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
 * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
 */

#include <crypto/engine.h>
#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/sha3.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/string.h>

#define HASH_CR				0x00
#define HASH_DIN			0x04
#define HASH_STR			0x08
#define HASH_UX500_HREG(x)		(0x0c + ((x) * 0x04))
#define HASH_IMR			0x20
#define HASH_SR				0x24
#define HASH_CSR(x)			(0x0F8 + ((x) * 0x04))
#define HASH_HREG(x)			(0x310 + ((x) * 0x04))
#define HASH_HWCFGR			0x3F0
#define HASH_VER			0x3F4
#define HASH_ID				0x3F8

/* Control Register */
#define HASH_CR_INIT			BIT(2)
#define HASH_CR_DMAE			BIT(3)
#define HASH_CR_DATATYPE_POS		4
#define HASH_CR_MODE			BIT(6)
#define HASH_CR_ALGO_POS		7
#define HASH_CR_MDMAT			BIT(13)
#define HASH_CR_DMAA			BIT(14)
#define HASH_CR_LKEY			BIT(16)

/* Interrupt */
#define HASH_DINIE			BIT(0)
#define HASH_DCIE			BIT(1)

/* Interrupt Mask */
#define HASH_MASK_CALC_COMPLETION	BIT(0)
#define HASH_MASK_DATA_INPUT		BIT(1)

/* Status Flags */
#define HASH_SR_DATA_INPUT_READY	BIT(0)
#define HASH_SR_OUTPUT_READY		BIT(1)
#define HASH_SR_DMA_ACTIVE		BIT(2)
#define HASH_SR_BUSY			BIT(3)

/* STR Register */
#define HASH_STR_NBLW_MASK		GENMASK(4, 0)
#define HASH_STR_DCAL			BIT(8)

/* HWCFGR Register */
#define HASH_HWCFG_DMA_MASK		GENMASK(3, 0)

/* Context swap register */
#define HASH_CSR_NB_SHA256_HMAC		54
#define HASH_CSR_NB_SHA256		38
#define HASH_CSR_NB_SHA512_HMAC		103
#define HASH_CSR_NB_SHA512		91
#define HASH_CSR_NB_SHA3_HMAC		88
#define HASH_CSR_NB_SHA3		72
#define HASH_CSR_NB_MAX			HASH_CSR_NB_SHA512_HMAC

#define HASH_FLAGS_INIT			BIT(0)
#define HASH_FLAGS_OUTPUT_READY		BIT(1)
#define HASH_FLAGS_CPU			BIT(2)
#define HASH_FLAGS_DMA_ACTIVE		BIT(3)
#define HASH_FLAGS_HMAC_INIT		BIT(4)
#define HASH_FLAGS_HMAC_FINAL		BIT(5)
#define HASH_FLAGS_HMAC_KEY		BIT(6)
#define HASH_FLAGS_SHA3_MODE		BIT(7)
#define HASH_FLAGS_FINAL		BIT(15)
#define HASH_FLAGS_FINUP		BIT(16)
#define HASH_FLAGS_ALGO_MASK		GENMASK(20, 17)
#define HASH_FLAGS_ALGO_SHIFT		17
#define HASH_FLAGS_ERRORS		BIT(21)
#define HASH_FLAGS_EMPTY		BIT(22)
#define HASH_FLAGS_HMAC			BIT(23)
#define HASH_FLAGS_SGS_COPIED		BIT(24)

#define HASH_OP_UPDATE			1
#define HASH_OP_FINAL			2

#define HASH_BURST_LEVEL		4

enum stm32_hash_data_format {
	HASH_DATA_32_BITS		= 0x0,
	HASH_DATA_16_BITS		= 0x1,
	HASH_DATA_8_BITS		= 0x2,
	HASH_DATA_1_BIT			= 0x3
};

#define HASH_BUFLEN			(SHA3_224_BLOCK_SIZE + 4)
#define HASH_MAX_KEY_SIZE		(SHA512_BLOCK_SIZE * 8)

enum stm32_hash_algo {
	HASH_SHA1			= 0,
	HASH_MD5			= 1,
	HASH_SHA224			= 2,
	HASH_SHA256			= 3,
	HASH_SHA3_224			= 4,
	HASH_SHA3_256			= 5,
	HASH_SHA3_384			= 6,
	HASH_SHA3_512			= 7,
	HASH_SHA384			= 12,
	HASH_SHA512			= 15,
};

enum ux500_hash_algo {
	HASH_SHA256_UX500		= 0,
	HASH_SHA1_UX500			= 1,
};

#define HASH_AUTOSUSPEND_DELAY		50

struct stm32_hash_ctx {
	struct stm32_hash_dev	*hdev;
	struct crypto_shash	*xtfm;
	unsigned long		flags;

	u8			key[HASH_MAX_KEY_SIZE];
	int			keylen;
};

struct stm32_hash_state {
	u32			flags;

	u16			bufcnt;
	u16			blocklen;

	u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));

	/* hash state */
	u32			hw_context[3 + HASH_CSR_NB_MAX];
};

struct stm32_hash_request_ctx {
	struct stm32_hash_dev	*hdev;
	unsigned long		op;

	u8 digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
	size_t			digcnt;

	struct scatterlist	*sg;
	struct scatterlist	sgl[2]; /* scatterlist used to realize alignment */
	unsigned int		offset;
	unsigned int		total;
	struct scatterlist	sg_key;

	dma_addr_t		dma_addr;
	size_t			dma_ct;
	int			nents;

	u8			data_type;

	struct stm32_hash_state state;
};

struct stm32_hash_algs_info {
	struct ahash_engine_alg	*algs_list;
	size_t			size;
};

struct stm32_hash_pdata {
	const int				alg_shift;
	const struct stm32_hash_algs_info	*algs_info;
	size_t					algs_info_size;
	bool					has_sr;
	bool					has_mdmat;
	bool					context_secured;
	bool					broken_emptymsg;
	bool					ux500;
};

struct stm32_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*clk;
	struct reset_control	*rst;
	void __iomem		*io_base;
	phys_addr_t		phys_base;
	u8			xmit_buf[HASH_BUFLEN] __aligned(sizeof(u32));
	u32			dma_mode;
	bool			polled;

	struct ahash_request	*req;
	struct crypto_engine	*engine;

	unsigned long		flags;

	struct dma_chan		*dma_lch;
	struct completion	dma_completion;

	const struct stm32_hash_pdata	*pdata;
};

struct stm32_hash_drv {
	struct list_head	dev_list;
	spinlock_t		lock; /* protects dev_list */
};

static struct stm32_hash_drv stm32_hash = {
	.dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
};

static void stm32_hash_dma_callback(void *param);
static int stm32_hash_prepare_request(struct ahash_request *req);
static void stm32_hash_unprepare_request(struct ahash_request *req);

static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
				    u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}

/**
 * stm32_hash_wait_busy - wait until the hash processor is available. It returns
 * an error if the hash core is processing a block of data for more than 10 ms.
 * @hdev: the stm32_hash_dev device.
 */
static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
{
	u32 status;

	/* The Ux500 lacks the special status register, so we poll the DCAL bit instead */
	if (!hdev->pdata->has_sr)
		return readl_relaxed_poll_timeout(hdev->io_base + HASH_STR, status,
						  !(status & HASH_STR_DCAL), 10, 10000);

	return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
				   !(status & HASH_SR_BUSY), 10, 10000);
}

/**
 * stm32_hash_set_nblw - set the number of valid bytes in the last word.
 * @hdev: the stm32_hash_dev device.
 * @length: the length of the final word.
 */
static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
{
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_STR);
	reg &= ~(HASH_STR_NBLW_MASK);
	reg |= (8U * ((length) % 4U));
	stm32_hash_write(hdev, HASH_STR, reg);
}

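/**
 * stm32_hash_write_key - feed the HMAC key to the hash processor.
 * @hdev: the stm32_hash_dev device.
 *
 * Writes the key words to the data input register and sets DCAL to start
 * a calculation round over the key. Returns -EINPROGRESS while the core
 * consumes the key, or 0 if there is no key to write.
 */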
static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 reg;
	int keylen = ctx->keylen;
	void *key = ctx->key;

	if (keylen) {
		stm32_hash_set_nblw(hdev, keylen);

		while (keylen > 0) {
			stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
			keylen -= 4;
			key += 4;
		}

		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);

		return -EINPROGRESS;
	}

	return 0;
}

/**
 * stm32_hash_write_ctrl - Initialize the hash processor, only if
 * HASH_FLAGS_INIT is set.
 * @hdev: the stm32_hash_dev device
 */
static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct stm32_hash_state *state = &rctx->state;
	u32 alg = (state->flags & HASH_FLAGS_ALGO_MASK) >> HASH_FLAGS_ALGO_SHIFT;

	u32 reg = HASH_CR_INIT;

	if (!(hdev->flags & HASH_FLAGS_INIT)) {
		if (hdev->pdata->ux500) {
			reg |= ((alg & BIT(0)) << HASH_CR_ALGO_POS);
		} else {
			if (hdev->pdata->alg_shift == HASH_CR_ALGO_POS)
				reg |= ((alg & BIT(1)) << 17) |
				       ((alg & BIT(0)) << HASH_CR_ALGO_POS);
			else
				reg |= alg << hdev->pdata->alg_shift;
		}

		reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);

		if (state->flags & HASH_FLAGS_HMAC) {
			hdev->flags |= HASH_FLAGS_HMAC;
			reg |= HASH_CR_MODE;
			if (ctx->keylen > crypto_ahash_blocksize(tfm))
				reg |= HASH_CR_LKEY;
		}

		if (!hdev->polled)
			stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);

		stm32_hash_write(hdev, HASH_CR, reg);

		hdev->flags |= HASH_FLAGS_INIT;

		/*
		 * After the first block + 1 words have been filled in, we only
		 * need to fill one block to start a partial computation.
		 */
		rctx->state.blocklen -= sizeof(u32);

		dev_dbg(hdev->dev, "Write Control %x\n", reg);
	}
}

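/**
 * stm32_hash_append_sg - buffer request data coming from the scatterlist.
 * @rctx: the request context.
 *
 * Copies bytes from the current scatterlist position into the state buffer
 * until either the buffer holds a full block or the request is exhausted.
 */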
static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
{
	struct stm32_hash_state *state = &rctx->state;
	size_t count;

	while ((state->bufcnt < state->blocklen) && rctx->total) {
		count = min(rctx->sg->length - rctx->offset, rctx->total);
		count = min_t(size_t, count, state->blocklen - state->bufcnt);

		if (count <= 0) {
			if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
				rctx->sg = sg_next(rctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(state->buffer + state->bufcnt,
					 rctx->sg, rctx->offset, count, 0);

		state->bufcnt += count;
		rctx->offset += count;
		rctx->total -= count;

		if (rctx->offset == rctx->sg->length) {
			rctx->sg = sg_next(rctx->sg);
			if (rctx->sg)
				rctx->offset = 0;
			else
				rctx->total = 0;
		}
	}
}

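/**
 * stm32_hash_xmit_cpu - feed data to the hash processor in CPU mode.
 * @hdev: the stm32_hash_dev device.
 * @buf: the buffer to write to the data input register.
 * @length: number of bytes in @buf.
 * @final: non-zero if this is the last chunk of the message.
 *
 * Returns 0, -ETIMEDOUT if the core stays busy, or -EINPROGRESS when a
 * final digest calculation has been started.
 */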
static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
			       const u8 *buf, size_t length, int final)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct stm32_hash_state *state = &rctx->state;
	unsigned int count, len32;
	const u32 *buffer = (const u32 *)buf;
	u32 reg;

	if (final) {
		hdev->flags |= HASH_FLAGS_FINAL;

		/* Do not process empty messages if hw is buggy. */
		if (!(hdev->flags & HASH_FLAGS_INIT) && !length &&
		    hdev->pdata->broken_emptymsg) {
			state->flags |= HASH_FLAGS_EMPTY;
			return 0;
		}
	}

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
		__func__, length, final, len32);

	hdev->flags |= HASH_FLAGS_CPU;

	stm32_hash_write_ctrl(hdev);

	if (stm32_hash_wait_busy(hdev))
		return -ETIMEDOUT;

	if ((hdev->flags & HASH_FLAGS_HMAC) &&
	    (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
		hdev->flags |= HASH_FLAGS_HMAC_KEY;
		stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	}

	for (count = 0; count < len32; count++)
		stm32_hash_write(hdev, HASH_DIN, buffer[count]);

	if (final) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;

		stm32_hash_set_nblw(hdev, length);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		if (hdev->flags & HASH_FLAGS_HMAC) {
			if (stm32_hash_wait_busy(hdev))
				return -ETIMEDOUT;
			stm32_hash_write_key(hdev);
		}
		return -EINPROGRESS;
	}

	return 0;
}

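/**
 * hash_swap_reg - get the number of context swap registers for the current
 * algorithm, i.e. how many HASH_CSR registers must be saved and restored
 * around a context switch.
 * @rctx: the request context.
 */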
static int hash_swap_reg(struct stm32_hash_request_ctx *rctx)
{
	struct stm32_hash_state *state = &rctx->state;

	switch ((state->flags & HASH_FLAGS_ALGO_MASK) >>
		HASH_FLAGS_ALGO_SHIFT) {
	case HASH_MD5:
	case HASH_SHA1:
	case HASH_SHA224:
	case HASH_SHA256:
		if (state->flags & HASH_FLAGS_HMAC)
			return HASH_CSR_NB_SHA256_HMAC;
		else
			return HASH_CSR_NB_SHA256;

	case HASH_SHA384:
	case HASH_SHA512:
		if (state->flags & HASH_FLAGS_HMAC)
			return HASH_CSR_NB_SHA512_HMAC;
		else
			return HASH_CSR_NB_SHA512;

	case HASH_SHA3_224:
	case HASH_SHA3_256:
	case HASH_SHA3_384:
	case HASH_SHA3_512:
		if (state->flags & HASH_FLAGS_HMAC)
			return HASH_CSR_NB_SHA3_HMAC;
		else
			return HASH_CSR_NB_SHA3;

	default:
		return -EINVAL;
	}
}

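/**
 * stm32_hash_update_cpu - handle an update request in CPU mode.
 * @hdev: the stm32_hash_dev device.
 *
 * Sends full blocks to the hash processor for as long as enough data is
 * available, buffers the remainder, and runs the final round when
 * HASH_FLAGS_FINAL is set.
 */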
static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct stm32_hash_state *state = &rctx->state;
	int bufcnt, err = 0, final;

	dev_dbg(hdev->dev, "%s flags %x\n", __func__, state->flags);

	final = state->flags & HASH_FLAGS_FINAL;

	while ((rctx->total >= state->blocklen) ||
	       (state->bufcnt + rctx->total >= state->blocklen)) {
		stm32_hash_append_sg(rctx);
		bufcnt = state->bufcnt;
		state->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 0);
		if (err)
			return err;
	}

	stm32_hash_append_sg(rctx);

	if (final) {
		bufcnt = state->bufcnt;
		state->bufcnt = 0;
		return stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 1);
	}

	return err;
}

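/**
 * stm32_hash_xmit_dma - send one scatterlist entry to the hash processor
 * over DMA and wait for the transfer to complete.
 * @hdev: the stm32_hash_dev device.
 * @sg: the scatterlist entry to transfer.
 * @length: number of bytes to send.
 * @mdmat: non-zero to enable the Multiple DMA Transfer (MDMAT) mode.
 *
 * Returns -EINPROGRESS on success, 0 if there is nothing to send, or a
 * negative error code.
 */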
static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
			       struct scatterlist *sg, int length, int mdmat)
{
	struct dma_async_tx_descriptor *in_desc;
	dma_cookie_t cookie;
	u32 reg;
	int err;

	dev_dbg(hdev->dev, "%s mdmat: %x length: %d\n", __func__, mdmat, length);

	/* do not use dma if there is no data to send */
	if (length <= 0)
		return 0;

	in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
					  DMA_CTRL_ACK);
	if (!in_desc) {
		dev_err(hdev->dev, "dmaengine_prep_slave error\n");
		return -ENOMEM;
	}

	reinit_completion(&hdev->dma_completion);
	in_desc->callback = stm32_hash_dma_callback;
	in_desc->callback_param = hdev;

	hdev->flags |= HASH_FLAGS_DMA_ACTIVE;

	reg = stm32_hash_read(hdev, HASH_CR);

	if (hdev->pdata->has_mdmat) {
		if (mdmat)
			reg |= HASH_CR_MDMAT;
		else
			reg &= ~HASH_CR_MDMAT;
	}
	reg |= HASH_CR_DMAE;

	stm32_hash_write(hdev, HASH_CR, reg);

	cookie = dmaengine_submit(in_desc);
	err = dma_submit_error(cookie);
	if (err)
		return -ENOMEM;

	dma_async_issue_pending(hdev->dma_lch);

	if (!wait_for_completion_timeout(&hdev->dma_completion,
					 msecs_to_jiffies(100)))
		err = -ETIMEDOUT;

	if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
				     NULL, NULL) != DMA_COMPLETE)
		err = -ETIMEDOUT;

	if (err) {
		dev_err(hdev->dev, "DMA Error %i\n", err);
		dmaengine_terminate_all(hdev->dma_lch);
		return err;
	}

	return -EINPROGRESS;
}

static void stm32_hash_dma_callback(void *param)
{
	struct stm32_hash_dev *hdev = param;

	complete(&hdev->dma_completion);
}

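/**
 * stm32_hash_hmac_dma_send - push the HMAC key to the hash processor in DMA
 * mode, either through the CPU data input register (short key or multiple-DMA
 * mode) or through a dedicated DMA transfer of the key scatterlist.
 * @hdev: the stm32_hash_dev device.
 */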
static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	int err;

	if (ctx->keylen < rctx->state.blocklen || hdev->dma_mode > 0) {
		err = stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	} else {
		if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
			sg_init_one(&rctx->sg_key, ctx->key,
				    ALIGN(ctx->keylen, sizeof(u32)));

		rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);

		dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
	}

	return err;
}

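/**
 * stm32_hash_dma_init - request and configure the "in" DMA channel that
 * feeds the HASH_DIN register.
 * @hdev: the stm32_hash_dev device.
 */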
static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	struct dma_chan *chan;
	int err;

	memset(&dma_conf, 0, sizeof(dma_conf));

	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_maxburst = HASH_BURST_LEVEL;
	dma_conf.dst_maxburst = HASH_BURST_LEVEL;
	dma_conf.device_fc = false;

	chan = dma_request_chan(hdev->dev, "in");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	hdev->dma_lch = chan;

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dma_release_channel(hdev->dma_lch);
		hdev->dma_lch = NULL;
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		return err;
	}

	init_completion(&hdev->dma_completion);

	return 0;
}

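/**
 * stm32_hash_dma_send - walk the request scatterlist and feed it to the hash
 * processor over DMA, handling the HMAC key phases and the tail bytes that
 * the DMA cannot transfer on its own.
 * @hdev: the stm32_hash_dev device.
 *
 * Returns -EINPROGRESS while the hardware is computing, 0 or a negative
 * error code otherwise.
 */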
static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	u32 *buffer = (void *)rctx->state.buffer;
	struct scatterlist sg[1], *tsg;
	int err = 0, reg, ncp = 0;
	unsigned int i, len = 0, bufcnt = 0;
	bool final = hdev->flags & HASH_FLAGS_FINAL;
	bool is_last = false;
	u32 last_word;

	dev_dbg(hdev->dev, "%s total: %d bufcnt: %d final: %d\n",
		__func__, rctx->total, rctx->state.bufcnt, final);

	if (rctx->nents < 0)
		return -EINVAL;

	stm32_hash_write_ctrl(hdev);

	if (hdev->flags & HASH_FLAGS_HMAC && (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
		hdev->flags |= HASH_FLAGS_HMAC_KEY;
		err = stm32_hash_hmac_dma_send(hdev);
		if (err != -EINPROGRESS)
			return err;
	}

	for_each_sg(rctx->sg, tsg, rctx->nents, i) {
		sg[0] = *tsg;
		len = sg->length;

		if (sg_is_last(sg) || (bufcnt + sg[0].length) >= rctx->total) {
			if (!final) {
				/* Always manually put the last word of a non-final transfer. */
				len -= sizeof(u32);
				sg_pcopy_to_buffer(rctx->sg, rctx->nents, &last_word, 4, len);
				sg->length -= sizeof(u32);
			} else {
				/*
				 * In Multiple DMA mode, DMA must be aborted before the final
				 * transfer.
				 */
				sg->length = rctx->total - bufcnt;
				if (hdev->dma_mode > 0) {
					len = (ALIGN(sg->length, 16) - 16);

					ncp = sg_pcopy_to_buffer(rctx->sg, rctx->nents,
								 rctx->state.buffer,
								 sg->length - len,
								 rctx->total - sg->length + len);

					if (!len)
						break;

					sg->length = len;
				} else {
					is_last = true;
					if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
						len = sg->length;
						sg->length = ALIGN(sg->length,
								   sizeof(u32));
					}
				}
			}
		}

		rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, sg, len, !is_last);

		/* The last word of a non-final transfer is sent manually. */
		if (!final) {
			stm32_hash_write(hdev, HASH_DIN, last_word);
			len += sizeof(u32);
		}

		rctx->total -= len;

		bufcnt += sg[0].length;
		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);

		if (err == -ENOMEM || err == -ETIMEDOUT)
			return err;
		if (is_last)
			break;
	}

	/*
	 * When the second last block transfer of 4 words is performed by the DMA,
	 * the software must set the DMA Abort bit (DMAA) to 1 before completing the
	 * last transfer of 4 words or less.
	 */
	if (final) {
		if (hdev->dma_mode > 0) {
			if (stm32_hash_wait_busy(hdev))
				return -ETIMEDOUT;
			reg = stm32_hash_read(hdev, HASH_CR);
			reg &= ~HASH_CR_DMAE;
			reg |= HASH_CR_DMAA;
			stm32_hash_write(hdev, HASH_CR, reg);

			if (ncp) {
				memset(buffer + ncp, 0, 4 - DIV_ROUND_UP(ncp, sizeof(u32)));
				writesl(hdev->io_base + HASH_DIN, buffer,
					DIV_ROUND_UP(ncp, sizeof(u32)));
			}

			stm32_hash_set_nblw(hdev, ncp);
			reg = stm32_hash_read(hdev, HASH_STR);
			reg |= HASH_STR_DCAL;
			stm32_hash_write(hdev, HASH_STR, reg);
			err = -EINPROGRESS;
		}

		/*
		 * The hash processor needs the key to be loaded a second time in order
		 * to process the HMAC.
		 */
		if (hdev->flags & HASH_FLAGS_HMAC) {
			if (stm32_hash_wait_busy(hdev))
				return -ETIMEDOUT;
			err = stm32_hash_hmac_dma_send(hdev);
		}

		return err;
	}

	if (err != -EINPROGRESS)
		return err;

	return 0;
}

static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
{
	struct stm32_hash_dev *hdev;

	spin_lock_bh(&stm32_hash.lock);
	if (!ctx->hdev)
		ctx->hdev = list_first_entry_or_null(&stm32_hash.dev_list,
						     struct stm32_hash_dev, list);
	hdev = ctx->hdev;
	spin_unlock_bh(&stm32_hash.lock);

	return hdev;
}

static int stm32_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_state *state = &rctx->state;
	bool sha3_mode = ctx->flags & HASH_FLAGS_SHA3_MODE;

	rctx->hdev = hdev;
	state->flags = 0;

	if (!(hdev->dma_lch && hdev->pdata->has_mdmat))
		state->flags |= HASH_FLAGS_CPU;

	if (sha3_mode)
		state->flags |= HASH_FLAGS_SHA3_MODE;

	rctx->digcnt = crypto_ahash_digestsize(tfm);
	switch (rctx->digcnt) {
	case MD5_DIGEST_SIZE:
		state->flags |= HASH_MD5 << HASH_FLAGS_ALGO_SHIFT;
		break;
	case SHA1_DIGEST_SIZE:
		if (hdev->pdata->ux500)
			state->flags |= HASH_SHA1_UX500 << HASH_FLAGS_ALGO_SHIFT;
		else
			state->flags |= HASH_SHA1 << HASH_FLAGS_ALGO_SHIFT;
		break;
	case SHA224_DIGEST_SIZE:
		if (sha3_mode)
			state->flags |= HASH_SHA3_224 << HASH_FLAGS_ALGO_SHIFT;
		else
			state->flags |= HASH_SHA224 << HASH_FLAGS_ALGO_SHIFT;
		break;
	case SHA256_DIGEST_SIZE:
		if (sha3_mode) {
			state->flags |= HASH_SHA3_256 << HASH_FLAGS_ALGO_SHIFT;
		} else {
			if (hdev->pdata->ux500)
				state->flags |= HASH_SHA256_UX500 << HASH_FLAGS_ALGO_SHIFT;
			else
				state->flags |= HASH_SHA256 << HASH_FLAGS_ALGO_SHIFT;
		}
		break;
	case SHA384_DIGEST_SIZE:
		if (sha3_mode)
			state->flags |= HASH_SHA3_384 << HASH_FLAGS_ALGO_SHIFT;
		else
			state->flags |= HASH_SHA384 << HASH_FLAGS_ALGO_SHIFT;
		break;
	case SHA512_DIGEST_SIZE:
		if (sha3_mode)
			state->flags |= HASH_SHA3_512 << HASH_FLAGS_ALGO_SHIFT;
		else
			state->flags |= HASH_SHA512 << HASH_FLAGS_ALGO_SHIFT;
		break;
	default:
		return -EINVAL;
	}

	rctx->state.bufcnt = 0;
	rctx->state.blocklen = crypto_ahash_blocksize(tfm) + sizeof(u32);
	if (rctx->state.blocklen > HASH_BUFLEN) {
		dev_err(hdev->dev, "Error, block too large");
		return -EINVAL;
	}
	rctx->nents = 0;
	rctx->total = 0;
	rctx->offset = 0;
	rctx->data_type = HASH_DATA_8_BITS;

	if (ctx->flags & HASH_FLAGS_HMAC)
		state->flags |= HASH_FLAGS_HMAC;

	dev_dbg(hdev->dev, "%s Flags %x\n", __func__, state->flags);

	return 0;
}

static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct stm32_hash_state *state = &rctx->state;

	dev_dbg(hdev->dev, "update_req: total: %u, digcnt: %zd, final: 0",
		rctx->total, rctx->digcnt);

	if (!(state->flags & HASH_FLAGS_CPU))
		return stm32_hash_dma_send(hdev);

	return stm32_hash_update_cpu(hdev);
}

static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_state *state = &rctx->state;
	int buflen = state->bufcnt;

	if (!(state->flags & HASH_FLAGS_CPU)) {
		hdev->flags |= HASH_FLAGS_FINAL;
		return stm32_hash_dma_send(hdev);
	}

	if (state->flags & HASH_FLAGS_FINUP)
		return stm32_hash_update_req(hdev);

	state->bufcnt = 0;

	return stm32_hash_xmit_cpu(hdev, state->buffer, buflen, 1);
}

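/**
 * stm32_hash_emptymsg_fallback - compute the digest of an empty message with
 * a software shash fallback, for hardware (Ux500) that cannot hash a
 * zero-length input.
 * @req: the ahash request.
 */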
static void stm32_hash_emptymsg_fallback(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = rctx->hdev;
	int ret;

	dev_dbg(hdev->dev, "use fallback message size 0 key size %d\n",
		ctx->keylen);

	if (!ctx->xtfm) {
		dev_err(hdev->dev, "no fallback engine\n");
		return;
	}

	if (ctx->keylen) {
		ret = crypto_shash_setkey(ctx->xtfm, ctx->key, ctx->keylen);
		if (ret) {
			dev_err(hdev->dev, "failed to set key ret=%d\n", ret);
			return;
		}
	}

	ret = crypto_shash_tfm_digest(ctx->xtfm, NULL, 0, rctx->digest);
	if (ret)
		dev_err(hdev->dev, "shash digest error\n");
}

static void stm32_hash_copy_hash(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_state *state = &rctx->state;
	struct stm32_hash_dev *hdev = rctx->hdev;
	__be32 *hash = (void *)rctx->digest;
	unsigned int i, hashsize;

	if (hdev->pdata->broken_emptymsg && (state->flags & HASH_FLAGS_EMPTY))
		return stm32_hash_emptymsg_fallback(req);

	hashsize = crypto_ahash_digestsize(tfm);

	for (i = 0; i < hashsize / sizeof(u32); i++) {
		if (hdev->pdata->ux500)
			hash[i] = cpu_to_be32(stm32_hash_read(hdev,
					      HASH_UX500_HREG(i)));
		else
			hash[i] = cpu_to_be32(stm32_hash_read(hdev,
					      HASH_HREG(i)));
	}
}

static int stm32_hash_finish(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	u32 reg;

	reg = stm32_hash_read(rctx->hdev, HASH_SR);
	reg &= ~HASH_SR_OUTPUT_READY;
	stm32_hash_write(rctx->hdev, HASH_SR, reg);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, rctx->digest, rctx->digcnt);

	return 0;
}

static void stm32_hash_finish_req(struct ahash_request *req, int err)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_state *state = &rctx->state;
	struct stm32_hash_dev *hdev = rctx->hdev;

	if (hdev->flags & HASH_FLAGS_DMA_ACTIVE)
		state->flags |= HASH_FLAGS_DMA_ACTIVE;
	else
		state->flags &= ~HASH_FLAGS_DMA_ACTIVE;

	if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
		stm32_hash_copy_hash(req);
		err = stm32_hash_finish(req);
	}

	/* Finalized request must be unprepared here */
	stm32_hash_unprepare_request(req);

	crypto_finalize_hash_request(hdev->engine, req, err);
}

static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
				   struct ahash_request *req)
{
	return crypto_transfer_hash_request_to_engine(hdev->engine, req);
}

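/**
 * stm32_hash_one_request - crypto engine do_one_request() callback: prepare
 * the request, restore a previously saved hardware context if there is one,
 * then run the update or final step.
 * @engine: the crypto engine.
 * @areq: the async request embedded in the ahash request.
 */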
static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_state *state = &rctx->state;
	int swap_reg;
	int err = 0;

	if (!hdev)
		return -ENODEV;

	dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
		rctx->op, req->nbytes);

	pm_runtime_get_sync(hdev->dev);

	err = stm32_hash_prepare_request(req);
	if (err)
		return err;

	hdev->req = req;
	hdev->flags = 0;
	swap_reg = hash_swap_reg(rctx);

	if (state->flags & HASH_FLAGS_INIT) {
		u32 *preg = rctx->state.hw_context;
		u32 reg;
		int i;

		if (!hdev->pdata->ux500)
			stm32_hash_write(hdev, HASH_IMR, *preg++);
		stm32_hash_write(hdev, HASH_STR, *preg++);
		stm32_hash_write(hdev, HASH_CR, *preg);
		reg = *preg++ | HASH_CR_INIT;
		stm32_hash_write(hdev, HASH_CR, reg);

		for (i = 0; i < swap_reg; i++)
			stm32_hash_write(hdev, HASH_CSR(i), *preg++);

		hdev->flags |= HASH_FLAGS_INIT;

		if (state->flags & HASH_FLAGS_HMAC)
			hdev->flags |= HASH_FLAGS_HMAC |
				       HASH_FLAGS_HMAC_KEY;

		if (state->flags & HASH_FLAGS_CPU)
			hdev->flags |= HASH_FLAGS_CPU;

		if (state->flags & HASH_FLAGS_DMA_ACTIVE)
			hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
	}

	if (rctx->op == HASH_OP_UPDATE)
		err = stm32_hash_update_req(hdev);
	else if (rctx->op == HASH_OP_FINAL)
		err = stm32_hash_final_req(hdev);

	/* If we have an IRQ, wait for that, else poll for completion */
	if (err == -EINPROGRESS && hdev->polled) {
		if (stm32_hash_wait_busy(hdev))
			err = -ETIMEDOUT;
		else {
			hdev->flags |= HASH_FLAGS_OUTPUT_READY;
			err = 0;
		}
	}

	if (err != -EINPROGRESS)
		/* done task will not finish it, so do it here */
		stm32_hash_finish_req(req, err);

	return 0;
}

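/**
 * stm32_hash_copy_sgs - copy unaligned request data into freshly allocated
 * pages so that the DMA only ever sees a single, well-aligned scatterlist
 * entry.
 * @rctx: the request context.
 * @sg: the source scatterlist.
 * @bs: the algorithm block size.
 * @new_len: number of bytes the copied region must hold.
 */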
static int stm32_hash_copy_sgs(struct stm32_hash_request_ctx *rctx,
			       struct scatterlist *sg, int bs,
			       unsigned int new_len)
{
	struct stm32_hash_state *state = &rctx->state;
	int pages;
	void *buf;

	pages = get_order(new_len);

	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
	if (!buf) {
		pr_err("Couldn't allocate pages for unaligned cases.\n");
		return -ENOMEM;
	}

	memcpy(buf, rctx->hdev->xmit_buf, state->bufcnt);

	scatterwalk_map_and_copy(buf + state->bufcnt, sg, rctx->offset,
				 min(new_len, rctx->total) - state->bufcnt, 0);
	sg_init_table(rctx->sgl, 1);
	sg_set_buf(rctx->sgl, buf, new_len);
	rctx->sg = rctx->sgl;
	state->flags |= HASH_FLAGS_SGS_COPIED;
	rctx->nents = 1;
	rctx->offset += new_len - state->bufcnt;
	state->bufcnt = 0;
	rctx->total = new_len;

	return 0;
}

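/**
 * stm32_hash_align_sgs - check whether the request scatterlist satisfies the
 * DMA constraints (32-bit aligned entries, block-multiple lengths) and fall
 * back to stm32_hash_copy_sgs() when it does not.
 * @sg: the source scatterlist.
 * @nbytes: number of bytes to process.
 * @bs: the algorithm block size.
 * @init: true if this is the first transfer of the request.
 * @final: true if this is the last transfer of the request.
 * @rctx: the request context.
 */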
static int stm32_hash_align_sgs(struct scatterlist *sg,
				int nbytes, int bs, bool init, bool final,
				struct stm32_hash_request_ctx *rctx)
{
	struct stm32_hash_state *state = &rctx->state;
	struct stm32_hash_dev *hdev = rctx->hdev;
	struct scatterlist *sg_tmp = sg;
	int offset = rctx->offset;
	int new_len;
	int n = 0;
	int bufcnt = state->bufcnt;
	bool secure_ctx = hdev->pdata->context_secured;
	bool aligned = true;

	if (!sg || !sg->length || !nbytes) {
		if (bufcnt) {
			bufcnt = DIV_ROUND_UP(bufcnt, bs) * bs;
			sg_init_table(rctx->sgl, 1);
			sg_set_buf(rctx->sgl, rctx->hdev->xmit_buf, bufcnt);
			rctx->sg = rctx->sgl;
			rctx->nents = 1;
		}

		return 0;
	}

	new_len = nbytes;

	if (offset)
		aligned = false;

	if (final) {
		new_len = DIV_ROUND_UP(new_len, bs) * bs;
	} else {
		new_len = (new_len - 1) / bs * bs; /* send n - 1 blocks, keep the rest */

		/*
		 * In some versions of the HASH IP, the context save can only be
		 * done when the FIFO is ready to receive a new block. This
		 * implies sending n blocks plus a 32-bit word in the first DMA
		 * transfer.
		 */
		if (init && secure_ctx) {
			new_len += sizeof(u32);
			if (unlikely(new_len > nbytes))
				new_len -= bs;
		}
	}

	if (!new_len)
		return 0;

	if (nbytes != new_len)
		aligned = false;

	while (nbytes > 0 && sg_tmp) {
		n++;

		if (bufcnt) {
			if (!IS_ALIGNED(bufcnt, bs)) {
				aligned = false;
				break;
			}
			nbytes -= bufcnt;
			bufcnt = 0;
			if (!nbytes)
				aligned = false;

			continue;
		}

		if (offset < sg_tmp->length) {
			if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
				aligned = false;
				break;
			}

			if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
				aligned = false;
				break;
			}
		}

		if (offset) {
			offset -= sg_tmp->length;
			if (offset < 0) {
				nbytes += offset;
				offset = 0;
			}
		} else {
			nbytes -= sg_tmp->length;
		}

		sg_tmp = sg_next(sg_tmp);

		if (nbytes < 0) {
			aligned = false;
			break;
		}
	}

	if (!aligned)
		return stm32_hash_copy_sgs(rctx, sg, bs, new_len);

	rctx->total = new_len;
	rctx->offset += new_len;
	rctx->nents = n;
	if (state->bufcnt) {
		sg_init_table(rctx->sgl, 2);
		sg_set_buf(rctx->sgl, rctx->hdev->xmit_buf, state->bufcnt);
		sg_chain(rctx->sgl, 2, sg);
		rctx->sg = rctx->sgl;
	} else {
		rctx->sg = sg;
	}

	return 0;
}

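/**
 * stm32_hash_prepare_request - prepare the scatterlist of a DMA request:
 * absorb a partial leading block into the state buffer, align the remaining
 * data, and keep back any tail bytes that do not fill a whole block.
 * @req: the ahash request.
 */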
static int stm32_hash_prepare_request(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_state *state = &rctx->state;
	unsigned int nbytes;
	int ret, hash_later, bs;
	bool update = rctx->op & HASH_OP_UPDATE;
	bool init = !(state->flags & HASH_FLAGS_INIT);
	bool finup = state->flags & HASH_FLAGS_FINUP;
	bool final = state->flags & HASH_FLAGS_FINAL;

	if (!hdev->dma_lch || state->flags & HASH_FLAGS_CPU)
		return 0;

	bs = crypto_ahash_blocksize(tfm);

	nbytes = state->bufcnt;

	/*
	 * For an update request, nbytes must cover the buffered bytes plus
	 * the part of the request that has not already been copied into the
	 * buffer.
	 */
	if (update || finup)
		nbytes += req->nbytes - rctx->offset;

	dev_dbg(hdev->dev,
		"%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%d\n",
		__func__, nbytes, bs, rctx->total, rctx->offset, state->bufcnt);

	if (!nbytes)
		return 0;

	rctx->total = nbytes;

	if (update && req->nbytes && (!IS_ALIGNED(state->bufcnt, bs))) {
		int len = bs - state->bufcnt % bs;

		if (len > req->nbytes)
			len = req->nbytes;
		scatterwalk_map_and_copy(state->buffer + state->bufcnt, req->src,
					 0, len, 0);
		state->bufcnt += len;
		rctx->offset = len;
	}

	/* copy buffer in a temporary one that is used for sg alignment */
	memcpy(hdev->xmit_buf, state->buffer, state->bufcnt);

	ret = stm32_hash_align_sgs(req->src, nbytes, bs, init, final, rctx);
	if (ret)
		return ret;

	hash_later = nbytes - rctx->total;
	if (hash_later < 0)
		hash_later = 0;

	if (hash_later && hash_later <= state->blocklen) {
		scatterwalk_map_and_copy(state->buffer,
					 req->src,
					 req->nbytes - hash_later,
					 hash_later, 0);

		state->bufcnt = hash_later;
	} else {
		state->bufcnt = 0;
	}

	if (hash_later > state->blocklen) {
		/* FIXME: add support of this case */
		pr_err("Buffer contains more than one block.\n");
		return -ENOMEM;
	}

	rctx->total = min(nbytes, rctx->total);

	return 0;
}

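/**
 * stm32_hash_unprepare_request - release the resources of a DMA request and
 * save the hardware context (IMR, STR, CR and the CSR bank) so that the
 * request can be resumed later.
 * @req: the ahash request.
 */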
static void stm32_hash_unprepare_request(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_state *state = &rctx->state;
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	u32 *preg = state->hw_context;
	int swap_reg, i;

	if (hdev->dma_lch)
		dmaengine_terminate_sync(hdev->dma_lch);

	if (state->flags & HASH_FLAGS_SGS_COPIED)
		free_pages((unsigned long)sg_virt(rctx->sg), get_order(rctx->sg->length));

	rctx->sg = NULL;
	rctx->offset = 0;

	state->flags &= ~(HASH_FLAGS_SGS_COPIED);

	if (!(hdev->flags & HASH_FLAGS_INIT))
		goto pm_runtime;

	state->flags |= HASH_FLAGS_INIT;

	if (stm32_hash_wait_busy(hdev)) {
		dev_warn(hdev->dev, "Wait busy failed.");
		return;
	}

	swap_reg = hash_swap_reg(rctx);

	if (!hdev->pdata->ux500)
		*preg++ = stm32_hash_read(hdev, HASH_IMR);
	*preg++ = stm32_hash_read(hdev, HASH_STR);
	*preg++ = stm32_hash_read(hdev, HASH_CR);
	for (i = 0; i < swap_reg; i++)
		*preg++ = stm32_hash_read(hdev, HASH_CSR(i));

pm_runtime:
	pm_runtime_put_autosuspend(hdev->dev);
}

static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct stm32_hash_dev *hdev = ctx->hdev;

	rctx->op = op;

	return stm32_hash_handle_queue(hdev, req);
}

static int stm32_hash_update(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_state *state = &rctx->state;

	if (!req->nbytes)
		return 0;

	if (state->flags & HASH_FLAGS_CPU) {
		rctx->total = req->nbytes;
		rctx->sg = req->src;
		rctx->offset = 0;

		if ((state->bufcnt + rctx->total < state->blocklen)) {
			stm32_hash_append_sg(rctx);
			return 0;
		}
	} else { /* DMA mode */
		if (state->bufcnt + req->nbytes <= state->blocklen) {
			scatterwalk_map_and_copy(state->buffer + state->bufcnt, req->src,
						 0, req->nbytes, 0);
			state->bufcnt += req->nbytes;
			return 0;
		}
	}

	return stm32_hash_enqueue(req, HASH_OP_UPDATE);
}

static int stm32_hash_final(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_state *state = &rctx->state;

	state->flags |= HASH_FLAGS_FINAL;

	return stm32_hash_enqueue(req, HASH_OP_FINAL);
}

static int stm32_hash_finup(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_state *state = &rctx->state;

	if (!req->nbytes)
		goto out;

	state->flags |= HASH_FLAGS_FINUP;

	if ((state->flags & HASH_FLAGS_CPU)) {
		rctx->total = req->nbytes;
		rctx->sg = req->src;
		rctx->offset = 0;
	}

out:
	return stm32_hash_final(req);
}

static int stm32_hash_digest(struct ahash_request *req)
{
	return stm32_hash_init(req) ?: stm32_hash_finup(req);
}

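/*
 * For reference, an in-kernel consumer drives the entry points above through
 * the generic crypto API rather than calling them directly. A minimal sketch
 * (illustrative only, not part of this driver; error handling omitted):
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */
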
1447 static int stm32_hash_export(struct ahash_request *req, void *out)
1448 {
1449 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1450 
1451 	memcpy(out, &rctx->state, sizeof(rctx->state));
1452 
1453 	return 0;
1454 }
1455 
1456 static int stm32_hash_import(struct ahash_request *req, const void *in)
1457 {
1458 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1459 
1460 	stm32_hash_init(req);
1461 	memcpy(&rctx->state, in, sizeof(rctx->state));
1462 
1463 	return 0;
1464 }
1465 
1466 static int stm32_hash_setkey(struct crypto_ahash *tfm,
1467 			     const u8 *key, unsigned int keylen)
1468 {
1469 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1470 
1471 	if (keylen <= HASH_MAX_KEY_SIZE) {
1472 		memcpy(ctx->key, key, keylen);
1473 		ctx->keylen = keylen;
1474 	} else {
1475 		return -ENOMEM;
1476 	}
1477 
1478 	return 0;
1479 }
1480 
1481 static int stm32_hash_init_fallback(struct crypto_tfm *tfm)
1482 {
1483 	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1484 	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1485 	const char *name = crypto_tfm_alg_name(tfm);
1486 	struct crypto_shash *xtfm;
1487 
1488 	/* The fallback is only needed on Ux500 */
1489 	if (!hdev->pdata->ux500)
1490 		return 0;
1491 
1492 	xtfm = crypto_alloc_shash(name, 0, CRYPTO_ALG_NEED_FALLBACK);
1493 	if (IS_ERR(xtfm)) {
1494 		dev_err(hdev->dev, "failed to allocate %s fallback\n",
1495 			name);
1496 		return PTR_ERR(xtfm);
1497 	}
1498 	dev_info(hdev->dev, "allocated %s fallback\n", name);
1499 	ctx->xtfm = xtfm;
1500 
1501 	return 0;
1502 }
1503 
1504 static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm, u32 algs_flags)
1505 {
1506 	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1507 
1508 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1509 				 sizeof(struct stm32_hash_request_ctx));
1510 
1511 	ctx->keylen = 0;
1512 
1513 	if (algs_flags)
1514 		ctx->flags |= algs_flags;
1515 
1516 	return stm32_hash_init_fallback(tfm);
1517 }
1518 
1519 static int stm32_hash_cra_init(struct crypto_tfm *tfm)
1520 {
1521 	return stm32_hash_cra_init_algs(tfm, 0);
1522 }
1523 
1524 static int stm32_hash_cra_hmac_init(struct crypto_tfm *tfm)
1525 {
1526 	return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_HMAC);
1527 }
1528 
1529 static int stm32_hash_cra_sha3_init(struct crypto_tfm *tfm)
1530 {
1531 	return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_SHA3_MODE);
1532 }
1533 
1534 static int stm32_hash_cra_sha3_hmac_init(struct crypto_tfm *tfm)
1535 {
1536 	return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_SHA3_MODE |
1537 					HASH_FLAGS_HMAC);
1538 }
1539 
1540 static void stm32_hash_cra_exit(struct crypto_tfm *tfm)
1541 {
1542 	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1543 
1544 	if (ctx->xtfm)
1545 		crypto_free_shash(ctx->xtfm);
1546 }
1547 
1548 static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
1549 {
1550 	struct stm32_hash_dev *hdev = dev_id;
1551 
1552 	if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
1553 		hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
1554 		goto finish;
1555 	}
1556 
1557 	return IRQ_HANDLED;
1558 
1559 finish:
1560 	/* Finish current request */
1561 	stm32_hash_finish_req(hdev->req, 0);
1562 
1563 	return IRQ_HANDLED;
1564 }
1565 
1566 static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
1567 {
1568 	struct stm32_hash_dev *hdev = dev_id;
1569 	u32 reg;
1570 
1571 	reg = stm32_hash_read(hdev, HASH_SR);
1572 	if (reg & HASH_SR_OUTPUT_READY) {
1573 		hdev->flags |= HASH_FLAGS_OUTPUT_READY;
1574 		/* Disable IT*/
1575 		stm32_hash_write(hdev, HASH_IMR, 0);
1576 		return IRQ_WAKE_THREAD;
1577 	}
1578 
1579 	return IRQ_NONE;
1580 }
1581 
1582 static struct ahash_engine_alg algs_md5[] = {
1583 	{
1584 		.base.init = stm32_hash_init,
1585 		.base.update = stm32_hash_update,
1586 		.base.final = stm32_hash_final,
1587 		.base.finup = stm32_hash_finup,
1588 		.base.digest = stm32_hash_digest,
1589 		.base.export = stm32_hash_export,
1590 		.base.import = stm32_hash_import,
1591 		.base.halg = {
1592 			.digestsize = MD5_DIGEST_SIZE,
1593 			.statesize = sizeof(struct stm32_hash_state),
1594 			.base = {
1595 				.cra_name = "md5",
1596 				.cra_driver_name = "stm32-md5",
1597 				.cra_priority = 200,
1598 				.cra_flags = CRYPTO_ALG_ASYNC |
1599 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1600 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
1601 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1602 				.cra_init = stm32_hash_cra_init,
1603 				.cra_exit = stm32_hash_cra_exit,
1604 				.cra_module = THIS_MODULE,
1605 			}
1606 		},
1607 		.op = {
1608 			.do_one_request = stm32_hash_one_request,
1609 		},
1610 	},
1611 	{
1612 		.base.init = stm32_hash_init,
1613 		.base.update = stm32_hash_update,
1614 		.base.final = stm32_hash_final,
1615 		.base.finup = stm32_hash_finup,
1616 		.base.digest = stm32_hash_digest,
1617 		.base.export = stm32_hash_export,
1618 		.base.import = stm32_hash_import,
1619 		.base.setkey = stm32_hash_setkey,
1620 		.base.halg = {
1621 			.digestsize = MD5_DIGEST_SIZE,
1622 			.statesize = sizeof(struct stm32_hash_state),
1623 			.base = {
1624 				.cra_name = "hmac(md5)",
1625 				.cra_driver_name = "stm32-hmac-md5",
1626 				.cra_priority = 200,
1627 				.cra_flags = CRYPTO_ALG_ASYNC |
1628 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1629 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
1630 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1631 				.cra_init = stm32_hash_cra_hmac_init,
1632 				.cra_exit = stm32_hash_cra_exit,
1633 				.cra_module = THIS_MODULE,
1634 			}
1635 		},
1636 		.op = {
1637 			.do_one_request = stm32_hash_one_request,
1638 		},
1639 	}
1640 };
1641 
1642 static struct ahash_engine_alg algs_sha1[] = {
1643 	{
1644 		.base.init = stm32_hash_init,
1645 		.base.update = stm32_hash_update,
1646 		.base.final = stm32_hash_final,
1647 		.base.finup = stm32_hash_finup,
1648 		.base.digest = stm32_hash_digest,
1649 		.base.export = stm32_hash_export,
1650 		.base.import = stm32_hash_import,
1651 		.base.halg = {
1652 			.digestsize = SHA1_DIGEST_SIZE,
1653 			.statesize = sizeof(struct stm32_hash_state),
1654 			.base = {
1655 				.cra_name = "sha1",
1656 				.cra_driver_name = "stm32-sha1",
1657 				.cra_priority = 200,
1658 				.cra_flags = CRYPTO_ALG_ASYNC |
1659 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1660 				.cra_blocksize = SHA1_BLOCK_SIZE,
1661 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1662 				.cra_init = stm32_hash_cra_init,
1663 				.cra_exit = stm32_hash_cra_exit,
1664 				.cra_module = THIS_MODULE,
1665 			}
1666 		},
1667 		.op = {
1668 			.do_one_request = stm32_hash_one_request,
1669 		},
1670 	},
1671 	{
1672 		.base.init = stm32_hash_init,
1673 		.base.update = stm32_hash_update,
1674 		.base.final = stm32_hash_final,
1675 		.base.finup = stm32_hash_finup,
1676 		.base.digest = stm32_hash_digest,
1677 		.base.export = stm32_hash_export,
1678 		.base.import = stm32_hash_import,
1679 		.base.setkey = stm32_hash_setkey,
1680 		.base.halg = {
1681 			.digestsize = SHA1_DIGEST_SIZE,
1682 			.statesize = sizeof(struct stm32_hash_state),
1683 			.base = {
1684 				.cra_name = "hmac(sha1)",
1685 				.cra_driver_name = "stm32-hmac-sha1",
1686 				.cra_priority = 200,
1687 				.cra_flags = CRYPTO_ALG_ASYNC |
1688 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1689 				.cra_blocksize = SHA1_BLOCK_SIZE,
1690 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1691 				.cra_init = stm32_hash_cra_hmac_init,
1692 				.cra_exit = stm32_hash_cra_exit,
1693 				.cra_module = THIS_MODULE,
1694 			}
1695 		},
1696 		.op = {
1697 			.do_one_request = stm32_hash_one_request,
1698 		},
1699 	},
1700 };
1701 
1702 static struct ahash_engine_alg algs_sha224[] = {
1703 	{
1704 		.base.init = stm32_hash_init,
1705 		.base.update = stm32_hash_update,
1706 		.base.final = stm32_hash_final,
1707 		.base.finup = stm32_hash_finup,
1708 		.base.digest = stm32_hash_digest,
1709 		.base.export = stm32_hash_export,
1710 		.base.import = stm32_hash_import,
1711 		.base.halg = {
1712 			.digestsize = SHA224_DIGEST_SIZE,
1713 			.statesize = sizeof(struct stm32_hash_state),
1714 			.base = {
1715 				.cra_name = "sha224",
1716 				.cra_driver_name = "stm32-sha224",
1717 				.cra_priority = 200,
1718 				.cra_flags = CRYPTO_ALG_ASYNC |
1719 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1720 				.cra_blocksize = SHA224_BLOCK_SIZE,
1721 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1722 				.cra_init = stm32_hash_cra_init,
1723 				.cra_exit = stm32_hash_cra_exit,
1724 				.cra_module = THIS_MODULE,
1725 			}
1726 		},
1727 		.op = {
1728 			.do_one_request = stm32_hash_one_request,
1729 		},
1730 	},
1731 	{
1732 		.base.init = stm32_hash_init,
1733 		.base.update = stm32_hash_update,
1734 		.base.final = stm32_hash_final,
1735 		.base.finup = stm32_hash_finup,
1736 		.base.digest = stm32_hash_digest,
1737 		.base.setkey = stm32_hash_setkey,
1738 		.base.export = stm32_hash_export,
1739 		.base.import = stm32_hash_import,
1740 		.base.halg = {
1741 			.digestsize = SHA224_DIGEST_SIZE,
1742 			.statesize = sizeof(struct stm32_hash_state),
1743 			.base = {
1744 				.cra_name = "hmac(sha224)",
1745 				.cra_driver_name = "stm32-hmac-sha224",
1746 				.cra_priority = 200,
1747 				.cra_flags = CRYPTO_ALG_ASYNC |
1748 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1749 				.cra_blocksize = SHA224_BLOCK_SIZE,
1750 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1751 				.cra_init = stm32_hash_cra_hmac_init,
1752 				.cra_exit = stm32_hash_cra_exit,
1753 				.cra_module = THIS_MODULE,
1754 			}
1755 		},
1756 		.op = {
1757 			.do_one_request = stm32_hash_one_request,
1758 		},
1759 	},
1760 };
1761 
1762 static struct ahash_engine_alg algs_sha256[] = {
1763 	{
1764 		.base.init = stm32_hash_init,
1765 		.base.update = stm32_hash_update,
1766 		.base.final = stm32_hash_final,
1767 		.base.finup = stm32_hash_finup,
1768 		.base.digest = stm32_hash_digest,
1769 		.base.export = stm32_hash_export,
1770 		.base.import = stm32_hash_import,
1771 		.base.halg = {
1772 			.digestsize = SHA256_DIGEST_SIZE,
1773 			.statesize = sizeof(struct stm32_hash_state),
1774 			.base = {
1775 				.cra_name = "sha256",
1776 				.cra_driver_name = "stm32-sha256",
1777 				.cra_priority = 200,
1778 				.cra_flags = CRYPTO_ALG_ASYNC |
1779 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1780 				.cra_blocksize = SHA256_BLOCK_SIZE,
1781 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1782 				.cra_init = stm32_hash_cra_init,
1783 				.cra_exit = stm32_hash_cra_exit,
1784 				.cra_module = THIS_MODULE,
1785 			}
1786 		},
1787 		.op = {
1788 			.do_one_request = stm32_hash_one_request,
1789 		},
1790 	},
1791 	{
1792 		.base.init = stm32_hash_init,
1793 		.base.update = stm32_hash_update,
1794 		.base.final = stm32_hash_final,
1795 		.base.finup = stm32_hash_finup,
1796 		.base.digest = stm32_hash_digest,
1797 		.base.export = stm32_hash_export,
1798 		.base.import = stm32_hash_import,
1799 		.base.setkey = stm32_hash_setkey,
1800 		.base.halg = {
1801 			.digestsize = SHA256_DIGEST_SIZE,
1802 			.statesize = sizeof(struct stm32_hash_state),
1803 			.base = {
1804 				.cra_name = "hmac(sha256)",
1805 				.cra_driver_name = "stm32-hmac-sha256",
1806 				.cra_priority = 200,
1807 				.cra_flags = CRYPTO_ALG_ASYNC |
1808 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1809 				.cra_blocksize = SHA256_BLOCK_SIZE,
1810 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1811 				.cra_init = stm32_hash_cra_hmac_init,
1812 				.cra_exit = stm32_hash_cra_exit,
1813 				.cra_module = THIS_MODULE,
1814 			}
1815 		},
1816 		.op = {
1817 			.do_one_request = stm32_hash_one_request,
1818 		},
1819 	},
1820 };
1821 
1822 static struct ahash_engine_alg algs_sha384_sha512[] = {
1823 	{
1824 		.base.init = stm32_hash_init,
1825 		.base.update = stm32_hash_update,
1826 		.base.final = stm32_hash_final,
1827 		.base.finup = stm32_hash_finup,
1828 		.base.digest = stm32_hash_digest,
1829 		.base.export = stm32_hash_export,
1830 		.base.import = stm32_hash_import,
1831 		.base.halg = {
1832 			.digestsize = SHA384_DIGEST_SIZE,
1833 			.statesize = sizeof(struct stm32_hash_state),
1834 			.base = {
1835 				.cra_name = "sha384",
1836 				.cra_driver_name = "stm32-sha384",
1837 				.cra_priority = 200,
1838 				.cra_flags = CRYPTO_ALG_ASYNC |
1839 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1840 				.cra_blocksize = SHA384_BLOCK_SIZE,
1841 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1842 				.cra_init = stm32_hash_cra_init,
1843 				.cra_exit = stm32_hash_cra_exit,
1844 				.cra_module = THIS_MODULE,
1845 			}
1846 		},
1847 		.op = {
1848 			.do_one_request = stm32_hash_one_request,
1849 		},
1850 	},
1851 	{
1852 		.base.init = stm32_hash_init,
1853 		.base.update = stm32_hash_update,
1854 		.base.final = stm32_hash_final,
1855 		.base.finup = stm32_hash_finup,
1856 		.base.digest = stm32_hash_digest,
1857 		.base.setkey = stm32_hash_setkey,
1858 		.base.export = stm32_hash_export,
1859 		.base.import = stm32_hash_import,
1860 		.base.halg = {
1861 			.digestsize = SHA384_DIGEST_SIZE,
1862 			.statesize = sizeof(struct stm32_hash_state),
1863 			.base = {
1864 				.cra_name = "hmac(sha384)",
1865 				.cra_driver_name = "stm32-hmac-sha384",
1866 				.cra_priority = 200,
1867 				.cra_flags = CRYPTO_ALG_ASYNC |
1868 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1869 				.cra_blocksize = SHA384_BLOCK_SIZE,
1870 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1871 				.cra_init = stm32_hash_cra_hmac_init,
1872 				.cra_exit = stm32_hash_cra_exit,
1873 				.cra_module = THIS_MODULE,
1874 			}
1875 		},
1876 		.op = {
1877 			.do_one_request = stm32_hash_one_request,
1878 		},
1879 	},
1880 	{
1881 		.base.init = stm32_hash_init,
1882 		.base.update = stm32_hash_update,
1883 		.base.final = stm32_hash_final,
1884 		.base.finup = stm32_hash_finup,
1885 		.base.digest = stm32_hash_digest,
1886 		.base.export = stm32_hash_export,
1887 		.base.import = stm32_hash_import,
1888 		.base.halg = {
1889 			.digestsize = SHA512_DIGEST_SIZE,
1890 			.statesize = sizeof(struct stm32_hash_state),
1891 			.base = {
1892 				.cra_name = "sha512",
1893 				.cra_driver_name = "stm32-sha512",
1894 				.cra_priority = 200,
1895 				.cra_flags = CRYPTO_ALG_ASYNC |
1896 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1897 				.cra_blocksize = SHA512_BLOCK_SIZE,
1898 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1899 				.cra_init = stm32_hash_cra_init,
1900 				.cra_exit = stm32_hash_cra_exit,
1901 				.cra_module = THIS_MODULE,
1902 			}
1903 		},
1904 		.op = {
1905 			.do_one_request = stm32_hash_one_request,
1906 		},
1907 	},
1908 	{
1909 		.base.init = stm32_hash_init,
1910 		.base.update = stm32_hash_update,
1911 		.base.final = stm32_hash_final,
1912 		.base.finup = stm32_hash_finup,
1913 		.base.digest = stm32_hash_digest,
1914 		.base.export = stm32_hash_export,
1915 		.base.import = stm32_hash_import,
1916 		.base.setkey = stm32_hash_setkey,
1917 		.base.halg = {
1918 			.digestsize = SHA512_DIGEST_SIZE,
1919 			.statesize = sizeof(struct stm32_hash_state),
1920 			.base = {
1921 				.cra_name = "hmac(sha512)",
1922 				.cra_driver_name = "stm32-hmac-sha512",
1923 				.cra_priority = 200,
1924 				.cra_flags = CRYPTO_ALG_ASYNC |
1925 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1926 				.cra_blocksize = SHA512_BLOCK_SIZE,
1927 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1928 				.cra_init = stm32_hash_cra_hmac_init,
1929 				.cra_exit = stm32_hash_cra_exit,
1930 				.cra_module = THIS_MODULE,
1931 			}
1932 		},
1933 		.op = {
1934 			.do_one_request = stm32_hash_one_request,
1935 		},
1936 	},
1937 };
1938 
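/*
 * SHA-3 digests and their HMAC variants. Of the per-SoC capability
 * tables below, only the stm32mp13 one links this list in, since
 * earlier IP revisions do not implement SHA-3.
 */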
1939 static struct ahash_engine_alg algs_sha3[] = {
1940 	{
1941 		.base.init = stm32_hash_init,
1942 		.base.update = stm32_hash_update,
1943 		.base.final = stm32_hash_final,
1944 		.base.finup = stm32_hash_finup,
1945 		.base.digest = stm32_hash_digest,
1946 		.base.export = stm32_hash_export,
1947 		.base.import = stm32_hash_import,
1948 		.base.halg = {
1949 			.digestsize = SHA3_224_DIGEST_SIZE,
1950 			.statesize = sizeof(struct stm32_hash_state),
1951 			.base = {
1952 				.cra_name = "sha3-224",
1953 				.cra_driver_name = "stm32-sha3-224",
1954 				.cra_priority = 200,
1955 				.cra_flags = CRYPTO_ALG_ASYNC |
1956 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1957 				.cra_blocksize = SHA3_224_BLOCK_SIZE,
1958 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1959 				.cra_init = stm32_hash_cra_sha3_init,
1960 				.cra_exit = stm32_hash_cra_exit,
1961 				.cra_module = THIS_MODULE,
1962 			}
1963 		},
1964 		.op = {
1965 			.do_one_request = stm32_hash_one_request,
1966 		},
1967 	},
1968 	{
1969 		.base.init = stm32_hash_init,
1970 		.base.update = stm32_hash_update,
1971 		.base.final = stm32_hash_final,
1972 		.base.finup = stm32_hash_finup,
1973 		.base.digest = stm32_hash_digest,
1974 		.base.export = stm32_hash_export,
1975 		.base.import = stm32_hash_import,
1976 		.base.setkey = stm32_hash_setkey,
1977 		.base.halg = {
1978 			.digestsize = SHA3_224_DIGEST_SIZE,
1979 			.statesize = sizeof(struct stm32_hash_state),
1980 			.base = {
1981 				.cra_name = "hmac(sha3-224)",
1982 				.cra_driver_name = "stm32-hmac-sha3-224",
1983 				.cra_priority = 200,
1984 				.cra_flags = CRYPTO_ALG_ASYNC |
1985 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1986 				.cra_blocksize = SHA3_224_BLOCK_SIZE,
1987 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1988 				.cra_init = stm32_hash_cra_sha3_hmac_init,
1989 				.cra_exit = stm32_hash_cra_exit,
1990 				.cra_module = THIS_MODULE,
1991 			}
1992 		},
1993 		.op = {
1994 			.do_one_request = stm32_hash_one_request,
1995 		},
1996 	},
1997 	{
1998 		.base.init = stm32_hash_init,
1999 		.base.update = stm32_hash_update,
2000 		.base.final = stm32_hash_final,
2001 		.base.finup = stm32_hash_finup,
2002 		.base.digest = stm32_hash_digest,
2003 		.base.export = stm32_hash_export,
2004 		.base.import = stm32_hash_import,
2005 		.base.halg = {
2006 			.digestsize = SHA3_256_DIGEST_SIZE,
2007 			.statesize = sizeof(struct stm32_hash_state),
2008 			.base = {
2009 				.cra_name = "sha3-256",
2010 				.cra_driver_name = "stm32-sha3-256",
2011 				.cra_priority = 200,
2012 				.cra_flags = CRYPTO_ALG_ASYNC |
2013 					CRYPTO_ALG_KERN_DRIVER_ONLY,
2014 				.cra_blocksize = SHA3_256_BLOCK_SIZE,
2015 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
2016 				.cra_init = stm32_hash_cra_sha3_init,
2017 				.cra_exit = stm32_hash_cra_exit,
2018 				.cra_module = THIS_MODULE,
2019 			}
2020 		},
2021 		.op = {
2022 			.do_one_request = stm32_hash_one_request,
2023 		},
2024 	},
2025 	{
2026 		.base.init = stm32_hash_init,
2027 		.base.update = stm32_hash_update,
2028 		.base.final = stm32_hash_final,
2029 		.base.finup = stm32_hash_finup,
2030 		.base.digest = stm32_hash_digest,
2031 		.base.export = stm32_hash_export,
2032 		.base.import = stm32_hash_import,
2033 		.base.setkey = stm32_hash_setkey,
2034 		.base.halg = {
2035 			.digestsize = SHA3_256_DIGEST_SIZE,
2036 			.statesize = sizeof(struct stm32_hash_state),
2037 			.base = {
2038 				.cra_name = "hmac(sha3-256)",
2039 				.cra_driver_name = "stm32-hmac-sha3-256",
2040 				.cra_priority = 200,
2041 				.cra_flags = CRYPTO_ALG_ASYNC |
2042 					CRYPTO_ALG_KERN_DRIVER_ONLY,
2043 				.cra_blocksize = SHA3_256_BLOCK_SIZE,
2044 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
2045 				.cra_init = stm32_hash_cra_sha3_hmac_init,
2046 				.cra_exit = stm32_hash_cra_exit,
2047 				.cra_module = THIS_MODULE,
2048 			}
2049 		},
2050 		.op = {
2051 			.do_one_request = stm32_hash_one_request,
2052 		},
2053 	},
2054 	{
2055 		.base.init = stm32_hash_init,
2056 		.base.update = stm32_hash_update,
2057 		.base.final = stm32_hash_final,
2058 		.base.finup = stm32_hash_finup,
2059 		.base.digest = stm32_hash_digest,
2060 		.base.export = stm32_hash_export,
2061 		.base.import = stm32_hash_import,
2062 		.base.halg = {
2063 			.digestsize = SHA3_384_DIGEST_SIZE,
2064 			.statesize = sizeof(struct stm32_hash_state),
2065 			.base = {
2066 				.cra_name = "sha3-384",
2067 				.cra_driver_name = "stm32-sha3-384",
2068 				.cra_priority = 200,
2069 				.cra_flags = CRYPTO_ALG_ASYNC |
2070 					CRYPTO_ALG_KERN_DRIVER_ONLY,
2071 				.cra_blocksize = SHA3_384_BLOCK_SIZE,
2072 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
2073 				.cra_init = stm32_hash_cra_sha3_init,
2074 				.cra_exit = stm32_hash_cra_exit,
2075 				.cra_module = THIS_MODULE,
2076 			}
2077 		},
2078 		.op = {
2079 			.do_one_request = stm32_hash_one_request,
2080 		},
2081 	},
2082 	{
2083 		.base.init = stm32_hash_init,
2084 		.base.update = stm32_hash_update,
2085 		.base.final = stm32_hash_final,
2086 		.base.finup = stm32_hash_finup,
2087 		.base.digest = stm32_hash_digest,
2088 		.base.export = stm32_hash_export,
2089 		.base.import = stm32_hash_import,
2090 		.base.setkey = stm32_hash_setkey,
2091 		.base.halg = {
2092 			.digestsize = SHA3_384_DIGEST_SIZE,
2093 			.statesize = sizeof(struct stm32_hash_state),
2094 			.base = {
2095 				.cra_name = "hmac(sha3-384)",
2096 				.cra_driver_name = "stm32-hmac-sha3-384",
2097 				.cra_priority = 200,
2098 				.cra_flags = CRYPTO_ALG_ASYNC |
2099 					CRYPTO_ALG_KERN_DRIVER_ONLY,
2100 				.cra_blocksize = SHA3_384_BLOCK_SIZE,
2101 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
2102 				.cra_init = stm32_hash_cra_sha3_hmac_init,
2103 				.cra_exit = stm32_hash_cra_exit,
2104 				.cra_module = THIS_MODULE,
2105 			}
2106 		},
2107 		.op = {
2108 			.do_one_request = stm32_hash_one_request,
2109 		},
2110 	},
2111 	{
2112 		.base.init = stm32_hash_init,
2113 		.base.update = stm32_hash_update,
2114 		.base.final = stm32_hash_final,
2115 		.base.finup = stm32_hash_finup,
2116 		.base.digest = stm32_hash_digest,
2117 		.base.export = stm32_hash_export,
2118 		.base.import = stm32_hash_import,
2119 		.base.halg = {
2120 			.digestsize = SHA3_512_DIGEST_SIZE,
2121 			.statesize = sizeof(struct stm32_hash_state),
2122 			.base = {
2123 				.cra_name = "sha3-512",
2124 				.cra_driver_name = "stm32-sha3-512",
2125 				.cra_priority = 200,
2126 				.cra_flags = CRYPTO_ALG_ASYNC |
2127 					CRYPTO_ALG_KERN_DRIVER_ONLY,
2128 				.cra_blocksize = SHA3_512_BLOCK_SIZE,
2129 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
2130 				.cra_init = stm32_hash_cra_sha3_init,
2131 				.cra_exit = stm32_hash_cra_exit,
2132 				.cra_module = THIS_MODULE,
2133 			}
2134 		},
2135 		.op = {
2136 			.do_one_request = stm32_hash_one_request,
2137 		},
2138 	},
2139 	{
2140 		.base.init = stm32_hash_init,
2141 		.base.update = stm32_hash_update,
2142 		.base.final = stm32_hash_final,
2143 		.base.finup = stm32_hash_finup,
2144 		.base.digest = stm32_hash_digest,
2145 		.base.export = stm32_hash_export,
2146 		.base.import = stm32_hash_import,
2147 		.base.setkey = stm32_hash_setkey,
2148 		.base.halg = {
2149 			.digestsize = SHA3_512_DIGEST_SIZE,
2150 			.statesize = sizeof(struct stm32_hash_state),
2151 			.base = {
2152 				.cra_name = "hmac(sha3-512)",
2153 				.cra_driver_name = "stm32-hmac-sha3-512",
2154 				.cra_priority = 200,
2155 				.cra_flags = CRYPTO_ALG_ASYNC |
2156 					CRYPTO_ALG_KERN_DRIVER_ONLY,
2157 				.cra_blocksize = SHA3_512_BLOCK_SIZE,
2158 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
2159 				.cra_init = stm32_hash_cra_sha3_hmac_init,
2160 				.cra_exit = stm32_hash_cra_exit,
2161 				.cra_module = THIS_MODULE,
2162 			}
2163 		},
2164 		.op = {
2165 			.do_one_request = stm32_hash_one_request,
2166 		},
2167 	}
2168 };
2169 
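/*
 * Register every transform listed in the per-SoC capability table
 * with the crypto engine. On failure the error path below unwinds in
 * reverse: first the partially registered row i (entries 0..j-1),
 * then all fully registered rows before it.
 */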
2170 static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
2171 {
2172 	unsigned int i, j;
2173 	int err;
2174 
2175 	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
2176 		for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
2177 			err = crypto_engine_register_ahash(
2178 				&hdev->pdata->algs_info[i].algs_list[j]);
2179 			if (err)
2180 				goto err_algs;
2181 		}
2182 	}
2183 
2184 	return 0;
2185 err_algs:
2186 	dev_err(hdev->dev, "Algo %u : %u failed\n", i, j);
2187 	do {
2188 		while (j--)
2189 			crypto_engine_unregister_ahash(
2190 				&hdev->pdata->algs_info[i].algs_list[j]);
2191 	} while (i-- && (j = hdev->pdata->algs_info[i].size));
2192 
2193 	return err;
2194 }
2195 
2196 static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
2197 {
2198 	unsigned int i, j;
2199 
2200 	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
2201 		for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
2202 			crypto_engine_unregister_ahash(
2203 				&hdev->pdata->algs_info[i].algs_list[j]);
2204 	}
2205 
2206 	return 0;
2207 }
2208 
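/*
 * Per-SoC capability tables: each variant advertises only the digests
 * its HASH IP revision implements, plus IP quirks such as the bit
 * position of the ALGO field in HASH_CR (alg_shift), the presence of
 * the HASH_SR status register (has_sr), and Ux500's inability to hash
 * a zero-length message (broken_emptymsg), which is worked around in
 * software earlier in this file.
 */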
2209 static struct stm32_hash_algs_info stm32_hash_algs_info_ux500[] = {
2210 	{
2211 		.algs_list	= algs_sha1,
2212 		.size		= ARRAY_SIZE(algs_sha1),
2213 	},
2214 	{
2215 		.algs_list	= algs_sha256,
2216 		.size		= ARRAY_SIZE(algs_sha256),
2217 	},
2218 };
2219 
2220 static const struct stm32_hash_pdata stm32_hash_pdata_ux500 = {
2221 	.alg_shift	= 7,
2222 	.algs_info	= stm32_hash_algs_info_ux500,
2223 	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_ux500),
2224 	.broken_emptymsg = true,
2225 	.ux500		= true,
2226 };
2227 
2228 static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
2229 	{
2230 		.algs_list	= algs_md5,
2231 		.size		= ARRAY_SIZE(algs_md5),
2232 	},
2233 	{
2234 		.algs_list	= algs_sha1,
2235 		.size		= ARRAY_SIZE(algs_sha1),
2236 	},
2237 };
2238 
2239 static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
2240 	.alg_shift	= 7,
2241 	.algs_info	= stm32_hash_algs_info_stm32f4,
2242 	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
2243 	.has_sr		= true,
2244 	.has_mdmat	= true,
2245 };
2246 
2247 static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
2248 	{
2249 		.algs_list	= algs_md5,
2250 		.size		= ARRAY_SIZE(algs_md5),
2251 	},
2252 	{
2253 		.algs_list	= algs_sha1,
2254 		.size		= ARRAY_SIZE(algs_sha1),
2255 	},
2256 	{
2257 		.algs_list	= algs_sha224,
2258 		.size		= ARRAY_SIZE(algs_sha224),
2259 	},
2260 	{
2261 		.algs_list	= algs_sha256,
2262 		.size		= ARRAY_SIZE(algs_sha256),
2263 	},
2264 };
2265 
2266 static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
2267 	.alg_shift	= 7,
2268 	.algs_info	= stm32_hash_algs_info_stm32f7,
2269 	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
2270 	.has_sr		= true,
2271 	.has_mdmat	= true,
2272 };
2273 
2274 static struct stm32_hash_algs_info stm32_hash_algs_info_stm32mp13[] = {
2275 	{
2276 		.algs_list	= algs_sha1,
2277 		.size		= ARRAY_SIZE(algs_sha1),
2278 	},
2279 	{
2280 		.algs_list	= algs_sha224,
2281 		.size		= ARRAY_SIZE(algs_sha224),
2282 	},
2283 	{
2284 		.algs_list	= algs_sha256,
2285 		.size		= ARRAY_SIZE(algs_sha256),
2286 	},
2287 	{
2288 		.algs_list	= algs_sha384_sha512,
2289 		.size		= ARRAY_SIZE(algs_sha384_sha512),
2290 	},
2291 	{
2292 		.algs_list	= algs_sha3,
2293 		.size		= ARRAY_SIZE(algs_sha3),
2294 	},
2295 };
2296 
2297 static const struct stm32_hash_pdata stm32_hash_pdata_stm32mp13 = {
2298 	.alg_shift	= 17,
2299 	.algs_info	= stm32_hash_algs_info_stm32mp13,
2300 	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32mp13),
2301 	.has_sr		= true,
2302 	.has_mdmat	= true,
2303 	.context_secured = true,
2304 };
2305 
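/*
 * Illustrative devicetree node matched by the table below; the
 * register address, clock and reset phandles are made-up placeholders,
 * the authoritative binding is
 * Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml:
 *
 *	hash: hash@54003000 {
 *		compatible = "st,stm32mp13-hash";
 *		reg = <0x54003000 0x400>;
 *		clocks = <&rcc HASH1>;
 *		resets = <&rcc HASH1_R>;
 *		interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
 *	};
 *
 * The interrupt is optional: without one, the driver polls (see
 * stm32_hash_probe() below).
 */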
2306 static const struct of_device_id stm32_hash_of_match[] = {
2307 	{ .compatible = "stericsson,ux500-hash", .data = &stm32_hash_pdata_ux500 },
2308 	{ .compatible = "st,stm32f456-hash", .data = &stm32_hash_pdata_stm32f4 },
2309 	{ .compatible = "st,stm32f756-hash", .data = &stm32_hash_pdata_stm32f7 },
2310 	{ .compatible = "st,stm32mp13-hash", .data = &stm32_hash_pdata_stm32mp13 },
2311 	{},
2312 };
2313 
2314 MODULE_DEVICE_TABLE(of, stm32_hash_of_match);
2315 
2316 static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
2317 				   struct device *dev)
2318 {
2319 	hdev->pdata = of_device_get_match_data(dev);
2320 	if (!hdev->pdata) {
2321 		dev_err(dev, "no compatible OF match\n");
2322 		return -EINVAL;
2323 	}
2324 
2325 	return 0;
2326 }
2327 
2328 static int stm32_hash_probe(struct platform_device *pdev)
2329 {
2330 	struct stm32_hash_dev *hdev;
2331 	struct device *dev = &pdev->dev;
2332 	struct resource *res;
2333 	int ret, irq;
2334 
2335 	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
2336 	if (!hdev)
2337 		return -ENOMEM;
2338 
2339 	hdev->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
2340 	if (IS_ERR(hdev->io_base))
2341 		return PTR_ERR(hdev->io_base);
2342 
2343 	hdev->phys_base = res->start;
2344 
2345 	ret = stm32_hash_get_of_match(hdev, dev);
2346 	if (ret)
2347 		return ret;
2348 
2349 	irq = platform_get_irq_optional(pdev, 0);
2350 	if (irq < 0 && irq != -ENXIO)
2351 		return irq;
2352 
2353 	if (irq > 0) {
2354 		ret = devm_request_threaded_irq(dev, irq,
2355 						stm32_hash_irq_handler,
2356 						stm32_hash_irq_thread,
2357 						IRQF_ONESHOT,
2358 						dev_name(dev), hdev);
2359 		if (ret) {
2360 			dev_err(dev, "Cannot request IRQ\n");
2361 			return ret;
2362 		}
2363 	} else {
2364 		dev_info(dev, "No IRQ, falling back to polling mode\n");
2365 		hdev->polled = true;
2366 	}
2367 
2368 	hdev->clk = devm_clk_get(dev, NULL);
2369 	if (IS_ERR(hdev->clk))
2370 		return dev_err_probe(dev, PTR_ERR(hdev->clk),
2371 				     "failed to get clock for hash\n");
2372 
2373 	ret = clk_prepare_enable(hdev->clk);
2374 	if (ret) {
2375 		dev_err(dev, "failed to enable hash clock (%d)\n", ret);
2376 		return ret;
2377 	}
2378 
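	/*
	 * Keep the device powered for the rest of probe: get_noresume +
	 * set_active + enable marks it active without triggering a
	 * resume, balanced by the pm_runtime_put_sync() on success.
	 */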
2379 	pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY);
2380 	pm_runtime_use_autosuspend(dev);
2381 
2382 	pm_runtime_get_noresume(dev);
2383 	pm_runtime_set_active(dev);
2384 	pm_runtime_enable(dev);
2385 
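	/*
	 * The reset line is optional: only probe deferral is propagated
	 * as an error; when a reset is present, pulse it so the IP
	 * starts from a known state.
	 */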
2386 	hdev->rst = devm_reset_control_get(dev, NULL);
2387 	if (IS_ERR(hdev->rst)) {
2388 		if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) {
2389 			ret = -EPROBE_DEFER;
2390 			goto err_reset;
2391 		}
2392 	} else {
2393 		reset_control_assert(hdev->rst);
2394 		udelay(2);
2395 		reset_control_deassert(hdev->rst);
2396 	}
2397 
2398 	hdev->dev = dev;
2399 
2400 	platform_set_drvdata(pdev, hdev);
2401 
2402 	ret = stm32_hash_dma_init(hdev);
2403 	switch (ret) {
2404 	case 0:
2405 		break;
2406 	case -ENOENT:
2407 	case -ENODEV:
2408 		dev_info(dev, "DMA mode not available\n");
2409 		break;
2410 	default:
2411 		dev_err(dev, "DMA init error %d\n", ret);
2412 		goto err_dma;
2413 	}
2414 
2415 	spin_lock(&stm32_hash.lock);
2416 	list_add_tail(&hdev->list, &stm32_hash.dev_list);
2417 	spin_unlock(&stm32_hash.lock);
2418 
2419 	/* Initialize crypto engine */
2420 	hdev->engine = crypto_engine_alloc_init(dev, 1);
2421 	if (!hdev->engine) {
2422 		ret = -ENOMEM;
2423 		goto err_engine;
2424 	}
2425 
2426 	ret = crypto_engine_start(hdev->engine);
2427 	if (ret)
2428 		goto err_engine_start;
2429 
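	/*
	 * The low bits of HASH_HWCFGR (HASH_HWCFG_DMA_MASK) describe the
	 * DMA capability of this IP revision; Ux500 has no HWCFGR, hence
	 * the forced CPU-only mode.
	 */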
2430 	if (hdev->pdata->ux500)
2431 		/* FIXME: implement DMA mode for Ux500 */
2432 		hdev->dma_mode = 0;
2433 	else
2434 		hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR) & HASH_HWCFG_DMA_MASK;
2435 
2436 	/* Register algos */
2437 	ret = stm32_hash_register_algs(hdev);
2438 	if (ret)
2439 		goto err_algs;
2440 
2441 	dev_info(dev, "Init HASH done: HW version %x, DMA mode %u\n",
2442 		 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);
2443 
2444 	pm_runtime_put_sync(dev);
2445 
2446 	return 0;
2447 
2448 err_algs:
2449 err_engine_start:
2450 	crypto_engine_exit(hdev->engine);
2451 err_engine:
2452 	spin_lock(&stm32_hash.lock);
2453 	list_del(&hdev->list);
2454 	spin_unlock(&stm32_hash.lock);
2455 err_dma:
2456 	if (hdev->dma_lch)
2457 		dma_release_channel(hdev->dma_lch);
2458 err_reset:
2459 	pm_runtime_disable(dev);
2460 	pm_runtime_put_noidle(dev);
2461 
2462 	clk_disable_unprepare(hdev->clk);
2463 
2464 	return ret;
2465 }
2466 
2467 static void stm32_hash_remove(struct platform_device *pdev)
2468 {
2469 	struct stm32_hash_dev *hdev = platform_get_drvdata(pdev);
2470 	int ret;
2471 
2472 	ret = pm_runtime_get_sync(hdev->dev);
2473 
2474 	stm32_hash_unregister_algs(hdev);
2475 
2476 	crypto_engine_exit(hdev->engine);
2477 
2478 	spin_lock(&stm32_hash.lock);
2479 	list_del(&hdev->list);
2480 	spin_unlock(&stm32_hash.lock);
2481 
2482 	if (hdev->dma_lch)
2483 		dma_release_channel(hdev->dma_lch);
2484 
2485 	pm_runtime_disable(hdev->dev);
2486 	pm_runtime_put_noidle(hdev->dev);
2487 
2488 	if (ret >= 0)
2489 		clk_disable_unprepare(hdev->clk);
2490 }
2491 
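/*
 * Runtime PM only needs to gate the HASH clock: the algorithm context
 * is saved to and restored from the HASH_CSR context-swap registers in
 * the request path, so no extra state is preserved here.
 */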
2492 #ifdef CONFIG_PM
2493 static int stm32_hash_runtime_suspend(struct device *dev)
2494 {
2495 	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
2496 
2497 	clk_disable_unprepare(hdev->clk);
2498 
2499 	return 0;
2500 }
2501 
2502 static int stm32_hash_runtime_resume(struct device *dev)
2503 {
2504 	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
2505 	int ret;
2506 
2507 	ret = clk_prepare_enable(hdev->clk);
2508 	if (ret) {
2509 		dev_err(hdev->dev, "Failed to prepare and enable clock\n");
2510 		return ret;
2511 	}
2512 
2513 	return 0;
2514 }
2515 #endif
2516 
2517 static const struct dev_pm_ops stm32_hash_pm_ops = {
2518 	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
2519 				pm_runtime_force_resume)
2520 	SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
2521 			   stm32_hash_runtime_resume, NULL)
2522 };
2523 
2524 static struct platform_driver stm32_hash_driver = {
2525 	.probe		= stm32_hash_probe,
2526 	.remove		= stm32_hash_remove,
2527 	.driver		= {
2528 		.name	= "stm32-hash",
2529 		.pm = &stm32_hash_pm_ops,
2530 		.of_match_table	= stm32_hash_of_match,
2531 	}
2532 };
2533 
2534 module_platform_driver(stm32_hash_driver);
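/*
 * Usage sketch (illustrative, not part of this driver): consumers reach
 * these transforms through the generic ahash API, and the crypto core
 * selects this implementation whenever its cra_priority wins:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_ahash(tfm);
 */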
2535 
2536 MODULE_DESCRIPTION("STM32 SHA1/SHA2/SHA3 & MD5 (HMAC) hw accelerator driver");
2537 MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
2538 MODULE_LICENSE("GPL v2");
2539